From 02c0595ac97f46105468f9da0a37f41db2b624fa Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Thu, 11 Jul 2024 13:00:21 +0700
Subject: [PATCH 01/69] fix(docs): Fix up docs issues

---
 utilities/local-cluster/Readme.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/utilities/local-cluster/Readme.md b/utilities/local-cluster/Readme.md
index 7029822ff15..43032c69757 100644
--- a/utilities/local-cluster/Readme.md
+++ b/utilities/local-cluster/Readme.md
@@ -54,10 +54,10 @@ For testing purposes, the ScyllaDB is accessible on the Cluster IP Address: `192

 ## Deploying the Cluster

-### Setup hosts on windows
+### Set up hosts on Windows

-On Windows you need to setup the hosts before starting the cluster
-From Windows terminal open the hosts file:
+On Windows, you need to set up the hosts before starting the cluster.
+From the Windows terminal, open the hosts file:

 ```sh
 notepad %SystemRoot%\System32\drivers\etc\hosts
 ```

@@ -75,7 +75,7 @@ From the root of the repo:

 just start-cluster
 ```

-#### MacOS
+#### macOS

 From the root of the repo:

@@ -176,11 +176,11 @@ vagrant ssh agent99
 ```

 Found (and tested) description how to connect using only open-source via DBeaver:
-1. Download dbeaver (Community Edition)
-2. Download cassandra jdbc jar files:
+1. Download DBeaver (Community Edition)
+2. Download Cassandra JDBC jar files:
 (Downloading and Testing the Driver Binaries section have links to binary and source)
-3. extract cassandra jdbc zip
-4. run dbeaver
+3. extract Cassandra JDBC zip
+4. run DBeaver
 5. go to Database > Driver Manager
 6. click New
 7. Fill in details as follows:
@@ -192,7 +192,7 @@ Found (and tested) description how to connect using only open-source via DBeaver
 * Embedded: `no`
 * Category:
 * Description: `Cassandra` (or whatever you want it to say)
-8. click Add File and add all the jars in the cassandra jdbc zip file.
+8. click Add File and add all the jars in the Cassandra JDBC zip file.
 9. click Find Class to make sure the Class Name is found okay
10. click OK
11.
Create New Connection, selecting the database driver you just added From b3ba2f4222bf45f3826bd9736dd6c789e33fc5be Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 12 Jul 2024 00:44:04 +0700 Subject: [PATCH 02/69] fix(backend): Huge refactor to prep for scylladb config management --- .config/dictionaries/project.dic | 7 +- catalyst-gateway/Cargo.toml | 4 +- catalyst-gateway/bin/Cargo.toml | 2 + catalyst-gateway/bin/src/cardano/mod.rs | 160 ++++---- catalyst-gateway/bin/src/cli.rs | 35 +- .../src/event_db/cardano/chain_state/mod.rs | 94 +++-- .../cardano/cip36_registration/mod.rs | 46 ++- .../bin/src/event_db/cardano/config/mod.rs | 4 +- .../bin/src/event_db/cardano/utxo/mod.rs | 50 +-- .../event_db/legacy/queries/event/ballot.rs | 73 ++-- .../src/event_db/legacy/queries/event/mod.rs | 10 +- .../legacy/queries/event/objective.rs | 16 +- .../event_db/legacy/queries/event/proposal.rs | 20 +- .../event_db/legacy/queries/event/review.rs | 29 +- .../event_db/legacy/queries/registration.rs | 64 ++- .../bin/src/event_db/legacy/queries/search.rs | 60 ++- .../event_db/legacy/queries/vit_ss/fund.rs | 126 +++--- catalyst-gateway/bin/src/event_db/mod.rs | 156 ++++---- .../bin/src/event_db/schema_check/mod.rs | 4 +- catalyst-gateway/bin/src/logger.rs | 5 +- catalyst-gateway/bin/src/main.rs | 3 +- .../cardano/date_time_to_slot_number_get.rs | 41 +- .../bin/src/service/api/cardano/mod.rs | 21 +- .../service/api/cardano/registration_get.rs | 26 +- .../src/service/api/cardano/staked_ada_get.rs | 25 +- .../src/service/api/cardano/sync_state_get.rs | 23 +- .../src/service/api/health/inspection_get.rs | 26 +- .../bin/src/service/api/health/mod.rs | 13 +- .../bin/src/service/api/health/ready_get.rs | 12 +- .../service/api/legacy/registration/mod.rs | 24 +- .../bin/src/service/api/legacy/v0/mod.rs | 15 +- .../src/service/api/legacy/v0/plans_get.rs | 11 +- .../api/legacy/v1/account_votes_get.rs | 17 +- .../bin/src/service/api/legacy/v1/mod.rs | 26 +- catalyst-gateway/bin/src/service/api/mod.rs | 39 +- .../service/common/objects/server_error.rs | 4 +- catalyst-gateway/bin/src/service/mod.rs | 9 +- .../bin/src/service/poem_service.rs | 35 +- .../utilities/middleware/schema_validation.rs | 16 +- .../utilities/middleware/tracing_mw.rs | 40 +- .../bin/src/service/utilities/mod.rs | 1 + .../bin/src/service/utilities/net.rs | 42 ++ catalyst-gateway/bin/src/settings.rs | 367 +++++++++++++----- catalyst-gateway/bin/src/state/mod.rs | 66 ---- 44 files changed, 935 insertions(+), 932 deletions(-) create mode 100644 catalyst-gateway/bin/src/service/utilities/net.rs delete mode 100644 catalyst-gateway/bin/src/state/mod.rs diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index c120bab63e7..ec8b05694af 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -6,9 +6,9 @@ afinet androidx anypolicy appspot -Arissara Arbritrary ARGB +Arissara asmjs asyncio asyncpg @@ -28,8 +28,8 @@ cardano Catalyst CBOR cborg -CEST cdylib +CEST cfbundle Chotivichit chromedriver @@ -188,6 +188,7 @@ seckey sendfile slotno sqlfluff +sslmode Stefano stevenj stringzilla @@ -239,4 +240,4 @@ xcodeproj xctest xctestrun xcworkspace -yoroi \ No newline at end of file +yoroi diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index cbe8cfed01c..cd1684f1495 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -42,7 +42,7 @@ tokio-postgres = "0.7.10" tokio = "1" dotenvy = "0.15" local-ip-address = "0.6.1" -gethostname = "0.4.3" +gethostname = 
"0.5.0" hex = "0.4.3" handlebars = "5.1.2" anyhow = "1.0.71" @@ -51,6 +51,8 @@ ciborium = "0.2" pallas = { git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "709acb19c52c6b789279ecc4bc8793b5d8b5abe9", version = "0.25.0" } cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", version="0.0.1" } stringzilla = "3.8.4" +duration-string = "0.4.0" +once_cell = "1.19.0" [workspace.lints.rust] warnings = "deny" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 7d702c6e017..3da1061ec76 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -69,3 +69,5 @@ cddl = { workspace = true } ciborium = { workspace = true } ed25519-dalek = "2.1.1" stringzilla = { workspace = true } +duration-string.workspace = true +once_cell.workspace = true diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index b7a084d0ea7..35523e30191 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -1,5 +1,5 @@ //! Logic for orchestrating followers -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{path::PathBuf, time::Duration}; /// Handler for follower tasks, allows for control over spawned follower threads pub type ManageTasks = JoinHandle<()>; @@ -12,15 +12,18 @@ use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock, MultiEra use tokio::{sync::mpsc, task::JoinHandle, time}; use tracing::{error, info}; -use crate::event_db::{ - cardano::{ - chain_state::{IndexedFollowerDataParams, MachineId}, - cip36_registration::IndexedVoterRegistrationParams, - config::FollowerConfig, - utxo::{IndexedTxnInputParams, IndexedTxnOutputParams, IndexedTxnParams}, +use crate::{ + event_db::{ + cardano::{ + chain_state::{IndexedFollowerDataParams, MachineId}, + cip36_registration::IndexedVoterRegistrationParams, + config::FollowerConfig, + utxo::{IndexedTxnInputParams, IndexedTxnOutputParams, IndexedTxnParams}, + }, + error::NotFoundError, + EventDB, }, - error::NotFoundError, - EventDB, + settings::Settings, }; pub(crate) mod cip36_registration; @@ -30,15 +33,13 @@ pub(crate) mod util; const MAX_BLOCKS_BATCH_LEN: usize = 1024; /// Returns a follower configs, waits until they present inside the db -async fn get_follower_config( - check_config_tick: u64, db: Arc, -) -> anyhow::Result> { +async fn get_follower_config(check_config_tick: u64) -> anyhow::Result> { let mut interval = time::interval(time::Duration::from_secs(check_config_tick)); loop { // tick until config exists interval.tick().await; - match db.get_follower_config().await { + match EventDB::get_follower_config().await { Ok(configs) => break Ok(configs), Err(err) if err.is::() => { error!("No follower config found"); @@ -51,22 +52,21 @@ async fn get_follower_config( /// Start followers as per defined in the config pub(crate) async fn start_followers( - db: Arc, check_config_tick: u64, data_refresh_tick: u64, machine_id: String, + check_config_tick: u64, data_refresh_tick: u64, ) -> anyhow::Result<()> { - let mut current_config = get_follower_config(check_config_tick, db.clone()).await?; + let mut current_config = get_follower_config(check_config_tick).await?; loop { // spawn followers and obtain thread handlers for control and future cancellation let follower_tasks = spawn_followers( current_config.clone(), - db.clone(), data_refresh_tick, - machine_id.clone(), + Settings::service_id().to_string(), ) .await?; // Followers should continue indexing until 
config has changed current_config = loop { - let new_config = get_follower_config(check_config_tick, db.clone()).await?; + let new_config = get_follower_config(check_config_tick).await?; if new_config != current_config { info!("Config has changed! restarting"); break new_config; @@ -83,7 +83,7 @@ pub(crate) async fn start_followers( /// Spawn follower threads and return associated handlers async fn spawn_followers( - configs: Vec, db: Arc, _data_refresh_tick: u64, machine_id: String, + configs: Vec, _data_refresh_tick: u64, machine_id: String, ) -> anyhow::Result> { let mut follower_tasks = Vec::new(); @@ -91,7 +91,6 @@ async fn spawn_followers( let follower_handler = spawn_follower( config.network, &config.relay, - db.clone(), machine_id.clone(), &config.mithril_snapshot.path, ) @@ -106,12 +105,12 @@ async fn spawn_followers( /// Initiate single follower and returns associated task handler /// which facilitates future control over spawned threads. async fn spawn_follower( - network: Network, relay: &str, db: Arc, machine_id: MachineId, snapshot: &str, + network: Network, relay: &str, machine_id: MachineId, snapshot: &str, ) -> anyhow::Result { // Establish point at which the last follower stopped updating in order to pick up // where it left off. If there was no previous follower, start indexing from // genesis point. - let start_from = match db.last_updated_state(network).await { + let start_from = match EventDB::last_updated_state(network).await { Ok((slot_no, block_hash, _)) => Point::new(slot_no.try_into()?, block_hash), Err(err) if err.is::() => Point::Origin, Err(err) => return Err(err), @@ -125,7 +124,7 @@ async fn spawn_follower( .ok_or(anyhow::anyhow!("Obtaining genesis values failed"))?; let task = tokio::spawn(async move { - process_blocks(&mut follower, db, network, machine_id, &genesis_values).await; + process_blocks(&mut follower, network, machine_id, &genesis_values).await; }); Ok(task) @@ -133,7 +132,7 @@ async fn spawn_follower( /// Process next block from the follower async fn process_blocks( - follower: &mut Follower, db: Arc, network: Network, machine_id: MachineId, + follower: &mut Follower, network: Network, machine_id: MachineId, genesis_values: &GenesisValues, ) { info!("Follower started processing blocks"); @@ -157,7 +156,7 @@ async fn process_blocks( blocks_buffer.push(block_data); if blocks_buffer.len() >= MAX_BLOCKS_BATCH_LEN { - index_block_buffer(db.clone(), &genesis_values, network, &machine_id, std::mem::take(&mut blocks_buffer)).await; + index_block_buffer(&genesis_values, network, &machine_id, std::mem::take(&mut blocks_buffer)).await; // Reset batch ticker since we just indexed the blocks buffer ticker.reset(); @@ -184,7 +183,7 @@ async fn process_blocks( } let current_buffer = std::mem::take(&mut blocks_buffer); - index_block_buffer(db.clone(), &genesis_values, network, &machine_id, current_buffer).await; + index_block_buffer(&genesis_values, network, &machine_id, current_buffer).await; // Reset the ticker so it counts the interval as starting after we wrote everything // to the database. 
@@ -197,31 +196,29 @@ async fn process_blocks( loop { match follower.next().await { - Ok(chain_update) => { - match chain_update { - ChainUpdate::Block(data) => { - if blocks_tx.send(data).await.is_err() { - error!("Block indexing task not running"); - break; - }; - }, - ChainUpdate::Rollback(data) => { - let block = match data.decode() { - Ok(block) => block, - Err(err) => { - error!("Unable to decode {network:?} block {err} - skip.."); - continue; - }, - }; - - info!( - "Rollback block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - } + Ok(chain_update) => match chain_update { + ChainUpdate::Block(data) => { + if blocks_tx.send(data).await.is_err() { + error!("Block indexing task not running"); + break; + }; + }, + ChainUpdate::Rollback(data) => { + let block = match data.decode() { + Ok(block) => block, + Err(err) => { + error!("Unable to decode {network:?} block {err} - skip.."); + continue; + }, + }; + + info!( + "Rollback block NUMBER={} SLOT={} HASH={}", + block.number(), + block.slot(), + hex::encode(block.hash()), + ); + }, }, Err(err) => { error!( @@ -235,7 +232,7 @@ async fn process_blocks( /// Consumes a block buffer and indexes its data. async fn index_block_buffer( - db: Arc, genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, + genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, buffer: Vec, ) { info!("Starting data batch indexing"); @@ -251,7 +248,7 @@ async fn index_block_buffer( } } - match index_many_blocks(db.clone(), genesis_values, network, machine_id, &blocks).await { + match index_many_blocks(genesis_values, network, machine_id, &blocks).await { Ok(()) => { info!("Finished indexing data batch"); }, @@ -263,7 +260,7 @@ async fn index_block_buffer( /// Index a slice of blocks. async fn index_many_blocks( - db: Arc, genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, + genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, blocks: &[MultiEraBlock<'_>], ) -> anyhow::Result<()> { let Some(last_block) = blocks.last() else { @@ -272,19 +269,18 @@ async fn index_many_blocks( let network_str = network.to_string(); - index_blocks(&db, genesis_values, &network_str, blocks).await?; - index_transactions(&db, blocks, &network_str).await?; - index_voter_registrations(&db, blocks, network).await?; - - match db - .refresh_last_updated( - chrono::offset::Utc::now(), - last_block.slot().try_into()?, - last_block.hash().to_vec(), - network, - machine_id, - ) - .await + index_blocks(genesis_values, &network_str, blocks).await?; + index_transactions(blocks, &network_str).await?; + index_voter_registrations(blocks, network).await?; + + match EventDB::refresh_last_updated( + chrono::offset::Utc::now(), + last_block.slot().try_into()?, + last_block.hash().to_vec(), + network, + machine_id, + ) + .await { Ok(()) => {}, Err(err) => { @@ -297,7 +293,7 @@ async fn index_many_blocks( /// Index the data from the given blocks. 
async fn index_blocks( - db: &EventDB, genesis_values: &GenesisValues, network_str: &str, blocks: &[MultiEraBlock<'_>], + genesis_values: &GenesisValues, network_str: &str, blocks: &[MultiEraBlock<'_>], ) -> anyhow::Result { let values: Vec<_> = blocks .iter() @@ -306,7 +302,7 @@ async fn index_blocks( }) .collect(); - db.index_many_follower_data(&values) + EventDB::index_many_follower_data(&values) .await .context("Indexing block data")?; @@ -314,24 +310,22 @@ async fn index_blocks( } /// Index transactions (and its inputs and outputs) from a slice of blocks. -async fn index_transactions( - db: &EventDB, blocks: &[MultiEraBlock<'_>], network_str: &str, -) -> anyhow::Result<()> { +async fn index_transactions(blocks: &[MultiEraBlock<'_>], network_str: &str) -> anyhow::Result<()> { let blocks_txs: Vec<_> = blocks .iter() .flat_map(|b| b.txs().into_iter().map(|tx| (b.slot(), tx))) .collect(); - index_transactions_data(db, network_str, &blocks_txs).await?; - index_transaction_outputs_data(db, &blocks_txs).await?; - index_transaction_inputs_data(db, &blocks_txs).await?; + index_transactions_data(network_str, &blocks_txs).await?; + index_transaction_outputs_data(&blocks_txs).await?; + index_transaction_inputs_data(&blocks_txs).await?; Ok(()) } /// Index transactions data. async fn index_transactions_data( - db: &EventDB, network_str: &str, blocks_txs: &[(u64, MultiEraTx<'_>)], + network_str: &str, blocks_txs: &[(u64, MultiEraTx<'_>)], ) -> anyhow::Result { let values: Vec<_> = blocks_txs .iter() @@ -344,7 +338,7 @@ async fn index_transactions_data( }) .collect::>>()?; - db.index_many_txn_data(&values) + EventDB::index_many_txn_data(&values) .await .context("Indexing transaction data")?; @@ -353,14 +347,14 @@ async fn index_transactions_data( /// Index transaction outputs data. async fn index_transaction_outputs_data( - db: &EventDB, blocks_txs: &[(u64, MultiEraTx<'_>)], + blocks_txs: &[(u64, MultiEraTx<'_>)], ) -> anyhow::Result { let values: Vec<_> = blocks_txs .iter() .flat_map(|(_, tx)| IndexedTxnOutputParams::from_txn_data(tx)) .collect(); - db.index_many_txn_output_data(&values) + EventDB::index_many_txn_output_data(&values) .await .context("Indexing transaction outputs")?; @@ -369,14 +363,14 @@ async fn index_transaction_outputs_data( /// Index transaction inputs data. async fn index_transaction_inputs_data( - db: &EventDB, blocks_txs: &[(u64, MultiEraTx<'_>)], + blocks_txs: &[(u64, MultiEraTx<'_>)], ) -> anyhow::Result { let values: Vec<_> = blocks_txs .iter() .flat_map(|(_, tx)| IndexedTxnInputParams::from_txn_data(tx)) .collect(); - db.index_many_txn_input_data(&values) + EventDB::index_many_txn_input_data(&values) .await .context("Indexing transaction inputs")?; @@ -385,7 +379,7 @@ async fn index_transaction_inputs_data( /// Index voter registrations from a slice of blocks. async fn index_voter_registrations( - db: &EventDB, blocks: &[MultiEraBlock<'_>], network: Network, + blocks: &[MultiEraBlock<'_>], network: Network, ) -> anyhow::Result { let values: Vec<_> = blocks .iter() @@ -393,7 +387,7 @@ async fn index_voter_registrations( .flatten() .collect(); - db.index_many_voter_registration_data(&values) + EventDB::index_many_voter_registration_data(&values) .await .context("Indexing voter registration")?; diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index 502b626c0f8..bb31ea5ac16 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -1,15 +1,12 @@ //! 
CLI interpreter for the service -use std::{io::Write, sync::Arc}; +use std::io::Write; use clap::Parser; use tracing::{error, info}; use crate::{ - cardano::start_followers, - logger, service::{self, started}, - settings::{DocsSettings, ServiceSettings}, - state::State, + settings::{DocsSettings, ServiceSettings, Settings}, }; #[derive(Parser)] @@ -35,22 +32,13 @@ impl Cli { /// - Failed to initialize the logger with the specified log level. /// - Failed to create a new `State` with the provided database URL. /// - Failed to run the service on the specified address. - pub(crate) async fn exec(self) -> anyhow::Result<()> { + pub(crate) fn exec(self) -> anyhow::Result<()> { match self { Self::Run(settings) => { - let logger_handle = logger::init(settings.log_level); - - // Unique machine id - let machine_id = settings.follower_settings.machine_uid; - - let state = Arc::new(State::new(Some(settings.database_url), logger_handle).await?); - let event_db = state.event_db(); - event_db - .modify_deep_query(settings.deep_query_inspection.into()) - .await; + Settings::init(settings)?; tokio::spawn(async move { - match service::run(&settings.docs_settings, state.clone()).await { + match service::run().await { Ok(()) => info!("Endpoints started ok"), Err(err) => { error!("Error starting endpoints {err}"); @@ -58,19 +46,19 @@ impl Cli { } }); + /* + let followers_fut = start_followers( event_db.clone(), settings.follower_settings.check_config_tick, settings.follower_settings.data_refresh_tick, machine_id, - ); + );*/ started(); - followers_fut.await?; - - Ok(()) + /*followers_fut.await?;*/ }, Self::Docs(settings) => { - let docs = service::get_app_docs(&settings); + let docs = service::get_app_docs(); match settings.output { Some(path) => { let mut docs_file = std::fs::File::create(path)?; @@ -78,8 +66,9 @@ impl Cli { }, None => println!("{docs}"), } - Ok(()) }, } + + Ok(()) } } diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs b/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs index a7c8dc8a2ac..eb74590a64d 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs +++ b/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs @@ -6,7 +6,7 @@ use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock}; use tokio_postgres::{binary_copy::BinaryCopyInWriter, types::Type}; use tracing::error; -use crate::event_db::{error::NotFoundError, EventDB}; +use crate::event_db::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}; /// Block time pub type DateTime = chrono::DateTime; @@ -58,23 +58,17 @@ impl SlotInfoQueryType { /// Get SQL query fn get_sql_query(&self) -> anyhow::Result { let tmpl_fields = match self { - SlotInfoQueryType::Previous => { - SlotInfoQueryTmplFields { - sign: "<", - ordering: Some("DESC"), - } + SlotInfoQueryType::Previous => SlotInfoQueryTmplFields { + sign: "<", + ordering: Some("DESC"), }, - SlotInfoQueryType::Current => { - SlotInfoQueryTmplFields { - sign: "=", - ordering: None, - } + SlotInfoQueryType::Current => SlotInfoQueryTmplFields { + sign: "=", + ordering: None, }, - SlotInfoQueryType::Next => { - SlotInfoQueryTmplFields { - sign: ">", - ordering: None, - } + SlotInfoQueryType::Next => SlotInfoQueryTmplFields { + sign: ">", + ordering: None, }, }; @@ -142,13 +136,14 @@ impl<'a> IndexedFollowerDataParams<'a> { impl EventDB { /// Batch writes follower data. 
pub(crate) async fn index_many_follower_data( - &self, values: &[IndexedFollowerDataParams<'_>], + values: &[IndexedFollowerDataParams<'_>], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -161,13 +156,16 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_slot_index (slot_no, network, epoch_no, block_time, block_hash) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new(sink, &[ - Type::INT8, - Type::TEXT, - Type::INT8, - Type::TIMESTAMPTZ, - Type::BYTEA, - ]); + let writer = BinaryCopyInWriter::new( + sink, + &[ + Type::INT8, + Type::TEXT, + Type::INT8, + Type::TIMESTAMPTZ, + Type::BYTEA, + ], + ); tokio::pin!(writer); for params in values { @@ -195,14 +193,13 @@ impl EventDB { /// Get slot info for the provided date-time and network and query type pub(crate) async fn get_slot_info( - &self, date_time: DateTime, network: Network, query_type: SlotInfoQueryType, + date_time: DateTime, network: Network, query_type: SlotInfoQueryType, ) -> anyhow::Result<(SlotNumber, BlockHash, DateTime)> { - let rows = self - .query(&query_type.get_sql_query()?, &[ - &network.to_string(), - &date_time, - ]) - .await?; + let rows = Self::query( + &query_type.get_sql_query()?, + &[&network.to_string(), &date_time], + ) + .await?; let row = rows.first().ok_or(NotFoundError)?; @@ -214,11 +211,9 @@ impl EventDB { /// Check when last update chain state occurred. pub(crate) async fn last_updated_state( - &self, network: Network, + network: Network, ) -> anyhow::Result<(SlotNumber, BlockHash, DateTime)> { - let rows = self - .query(SELECT_UPDATE_STATE_SQL, &[&network.to_string()]) - .await?; + let rows = Self::query(SELECT_UPDATE_STATE_SQL, &[&network.to_string()]).await?; let row = rows.first().ok_or(NotFoundError)?; @@ -232,8 +227,8 @@ impl EventDB { /// Mark point in time where the last follower finished indexing in order for future /// followers to pick up from this point pub(crate) async fn refresh_last_updated( - &self, last_updated: DateTime, slot_no: SlotNumber, block_hash: BlockHash, - network: Network, machine_id: &MachineId, + last_updated: DateTime, slot_no: SlotNumber, block_hash: BlockHash, network: Network, + machine_id: &MachineId, ) -> anyhow::Result<()> { // Rollback or update let update = true; @@ -242,16 +237,19 @@ impl EventDB { // An insert only happens once when there is no update metadata available // All future additions are just updates on ended, slot_no and block_hash - self.modify(INSERT_UPDATE_STATE_SQL, &[ - &i64::try_from(network_id)?, - &last_updated, - &last_updated, - &machine_id, - &slot_no, - &network.to_string(), - &block_hash, - &update, - ]) + Self::modify( + INSERT_UPDATE_STATE_SQL, + &[ + &i64::try_from(network_id)?, + &last_updated, + &last_updated, + &machine_id, + &slot_no, + &network.to_string(), + &block_hash, + &update, + ], + ) .await?; Ok(()) diff --git a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs index c267790bdc9..4f6602f8044 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs @@ -9,7 +9,9 @@ use crate::{ cip36_registration::{Cip36Metadata, VotingInfo}, util::valid_era, }, - event_db::{cardano::chain_state::SlotNumber, 
error::NotFoundError, EventDB}, + event_db::{ + cardano::chain_state::SlotNumber, error::NotFoundError, Error, EventDB, EVENT_DB_POOL, + }, }; /// Transaction id @@ -135,13 +137,14 @@ impl IndexedVoterRegistrationParams { impl EventDB { /// Batch writes voter registration data. pub(crate) async fn index_many_voter_registration_data( - &self, values: &[IndexedVoterRegistrationParams], + values: &[IndexedVoterRegistrationParams], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -154,16 +157,19 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_voter_registration (tx_id, stake_credential, public_voting_key, payment_address, nonce, metadata_cip36, stats, valid) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new(sink, &[ - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::INT8, - Type::BYTEA, - Type::JSONB, - Type::BOOL, - ]); + let writer = BinaryCopyInWriter::new( + sink, + &[ + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::INT8, + Type::BYTEA, + Type::JSONB, + Type::BOOL, + ], + ); tokio::pin!(writer); for params in values { @@ -197,15 +203,13 @@ impl EventDB { /// Get registration info pub(crate) async fn get_registration_info( - &self, stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, + stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, ) -> anyhow::Result<(TxId, PaymentAddress, PublicVotingInfo, Nonce)> { - let rows = self - .query(SELECT_VOTER_REGISTRATION_SQL, &[ - &stake_credential, - &network.to_string(), - &slot_num, - ]) - .await?; + let rows = Self::query( + SELECT_VOTER_REGISTRATION_SQL, + &[&stake_credential, &network.to_string(), &slot_num], + ) + .await?; let row = rows.first().ok_or(NotFoundError)?; diff --git a/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs b/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs index bbf7b42ff78..12590601478 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs +++ b/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs @@ -54,11 +54,11 @@ const SELECT_CONFIG_SQL: &str = include_str!("select_config.sql"); impl EventDB { /// Config query - pub(crate) async fn get_follower_config(&self) -> anyhow::Result> { + pub(crate) async fn get_follower_config() -> anyhow::Result> { let id = "cardano"; let id2 = "follower"; - let rows = self.query(SELECT_CONFIG_SQL, &[&id, &id2]).await?; + let rows = Self::query(SELECT_CONFIG_SQL, &[&id, &id2]).await?; let mut follower_configs = Vec::new(); for row in rows { diff --git a/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs b/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs index 17ab9c80a55..69cc5b214ab 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs +++ b/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs @@ -8,7 +8,7 @@ use tracing::error; use super::{chain_state::SlotNumber, cip36_registration::StakeCredential}; use crate::{ cardano::util::parse_policy_assets, - event_db::{error::NotFoundError, EventDB}, + event_db::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}, }; /// Stake amount. @@ -125,13 +125,14 @@ impl IndexedTxnInputParams { impl EventDB { /// Batch writes transaction output indexing data. 
pub(crate) async fn index_many_txn_output_data( - &self, values: &[IndexedTxnOutputParams], + values: &[IndexedTxnOutputParams], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -144,13 +145,16 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_utxo (tx_id, index, asset, stake_credential, value) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new(sink, &[ - Type::BYTEA, - Type::INT4, - Type::JSONB, - Type::BYTEA, - Type::INT8, - ]); + let writer = BinaryCopyInWriter::new( + sink, + &[ + Type::BYTEA, + Type::INT4, + Type::JSONB, + Type::BYTEA, + Type::INT8, + ], + ); tokio::pin!(writer); for params in values { @@ -178,13 +182,14 @@ impl EventDB { /// Batch writes transaction input indexing data. pub(crate) async fn index_many_txn_input_data( - &self, values: &[IndexedTxnInputParams], + values: &[IndexedTxnInputParams], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -224,14 +229,13 @@ impl EventDB { } /// Batch writes transaction indexing data. - pub(crate) async fn index_many_txn_data( - &self, values: &[IndexedTxnParams<'_>], - ) -> anyhow::Result<()> { + pub(crate) async fn index_many_txn_data(values: &[IndexedTxnParams<'_>]) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -270,15 +274,13 @@ impl EventDB { /// Get total utxo amount pub(crate) async fn total_utxo_amount( - &self, stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, + stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, ) -> anyhow::Result<(StakeAmount, SlotNumber)> { - let row = self - .query_one(SELECT_TOTAL_UTXO_AMOUNT_SQL, &[ - &stake_credential, - &network.to_string(), - &slot_num, - ]) - .await?; + let row = Self::query_one( + SELECT_TOTAL_UTXO_AMOUNT_SQL, + &[&stake_credential, &network.to_string(), &slot_num], + ) + .await?; // Aggregate functions as SUM and MAX return NULL if there are no rows, so we need to // check for it. 
diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs index 87b0a5ed04f..6284ba08028 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs @@ -51,25 +51,21 @@ impl EventDB { /// Get ballot query #[allow(dead_code)] pub(crate) async fn get_ballot( - &self, event: EventId, objective: ObjectiveId, proposal: ProposalId, + event: EventId, objective: ObjectiveId, proposal: ProposalId, ) -> anyhow::Result { - let rows = self - .query(Self::BALLOT_VOTE_OPTIONS_QUERY, &[ - &event.0, - &objective.0, - &proposal.0, - ]) - .await?; + let rows = Self::query( + Self::BALLOT_VOTE_OPTIONS_QUERY, + &[&event.0, &objective.0, &proposal.0], + ) + .await?; let row = rows.first().ok_or(NotFoundError)?; let choices = row.try_get("objective")?; - let rows = self - .query(Self::BALLOT_VOTE_PLANS_QUERY, &[ - &event.0, - &objective.0, - &proposal.0, - ]) - .await?; + let rows = Self::query( + Self::BALLOT_VOTE_PLANS_QUERY, + &[&event.0, &objective.0, &proposal.0], + ) + .await?; let mut voteplans = Vec::new(); for row in rows { voteplans.push(VotePlan { @@ -94,25 +90,22 @@ impl EventDB { pub(crate) async fn get_objective_ballots( &self, event: EventId, objective: ObjectiveId, ) -> anyhow::Result> { - let rows = self - .query(Self::BALLOTS_VOTE_OPTIONS_PER_OBJECTIVE_QUERY, &[ - &event.0, - &objective.0, - ]) - .await?; + let rows = Self::query( + Self::BALLOTS_VOTE_OPTIONS_PER_OBJECTIVE_QUERY, + &[&event.0, &objective.0], + ) + .await?; let mut ballots = Vec::new(); for row in rows { let choices = row.try_get("objective")?; let proposal_id = ProposalId(row.try_get("proposal_id")?); - let rows = self - .query(Self::BALLOT_VOTE_PLANS_QUERY, &[ - &event.0, - &objective.0, - &proposal_id.0, - ]) - .await?; + let rows = Self::query( + Self::BALLOT_VOTE_PLANS_QUERY, + &[&event.0, &objective.0, &proposal_id.0], + ) + .await?; let mut voteplans = Vec::new(); for row in rows { voteplans.push(VotePlan { @@ -142,22 +135,18 @@ impl EventDB { pub(crate) async fn get_event_ballots( &self, event: EventId, ) -> anyhow::Result> { - let rows = self - .query(Self::BALLOTS_VOTE_OPTIONS_PER_EVENT_QUERY, &[&event.0]) - .await?; + let rows = Self::query(Self::BALLOTS_VOTE_OPTIONS_PER_EVENT_QUERY, &[&event.0]).await?; let mut ballots = HashMap::>::new(); for row in rows { let choices = row.try_get("objective")?; let proposal_id = ProposalId(row.try_get("proposal_id")?); let objective_id = ObjectiveId(row.try_get("objective_id")?); - let rows = self - .query(Self::BALLOT_VOTE_PLANS_QUERY, &[ - &event.0, - &objective_id.0, - &proposal_id.0, - ]) - .await?; + let rows = Self::query( + Self::BALLOT_VOTE_PLANS_QUERY, + &[&event.0, &objective_id.0, &proposal_id.0], + ) + .await?; let mut voteplans = Vec::new(); for row in rows { voteplans.push(VotePlan { @@ -185,11 +174,9 @@ impl EventDB { Ok(ballots .into_iter() - .map(|(objective_id, ballots)| { - ObjectiveBallots { - objective_id, - ballots, - } + .map(|(objective_id, ballots)| ObjectiveBallots { + objective_id, + ballots, }) .collect()) } diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs index 329426a1518..03bdd4a2775 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs @@ -45,9 +45,7 @@ impl EventDB { pub(crate) async fn get_events( 
&self, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::EVENTS_QUERY, &[&limit, &offset.unwrap_or(0)]) - .await?; + let rows = Self::query(Self::EVENTS_QUERY, &[&limit, &offset.unwrap_or(0)]).await?; let mut events = Vec::new(); for row in rows { @@ -74,8 +72,8 @@ impl EventDB { /// Get event query #[allow(dead_code)] - pub(crate) async fn get_event(&self, event: EventId) -> anyhow::Result { - let rows = self.query(Self::EVENT_QUERY, &[&event.0]).await?; + pub(crate) async fn get_event(event: EventId) -> anyhow::Result { + let rows = Self::query(Self::EVENT_QUERY, &[&event.0]).await?; let row = rows.first().ok_or(NotFoundError)?; let ends = row @@ -129,7 +127,7 @@ impl EventDB { .map(|val| val.and_local_timezone(Utc).unwrap()), }; - let rows = self.query(Self::EVENT_GOALS_QUERY, &[&event.0]).await?; + let rows = Self::query(Self::EVENT_GOALS_QUERY, &[&event.0]).await?; let mut goals = Vec::new(); for row in rows { goals.push(EventGoal { diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs index d702d2efb8e..ca1adf0ead0 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs @@ -31,15 +31,13 @@ impl EventDB { /// Get objectives query #[allow(dead_code)] pub(crate) async fn get_objectives( - &self, event: EventId, limit: Option, offset: Option, + event: EventId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::OBJECTIVES_QUERY, &[ - &event.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query( + Self::OBJECTIVES_QUERY, + &[&event.0, &limit, &offset.unwrap_or(0)], + ) + .await?; let mut objectives = Vec::new(); for row in rows { @@ -62,7 +60,7 @@ impl EventDB { }; let mut groups = Vec::new(); - let rows = self.query(Self::VOTING_GROUPS_QUERY, &[&row_id]).await?; + let rows = Self::query(Self::VOTING_GROUPS_QUERY, &[&row_id]).await?; for row in rows { let group = row.try_get::<_, Option>("group")?.map(VoterGroupId); let voting_token: Option<_> = row.try_get("voting_token")?; diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs index f4465a4e7a4..5212f1e7505 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs @@ -31,11 +31,10 @@ impl EventDB { /// Get proposal query #[allow(dead_code)] pub(crate) async fn get_proposal( - &self, event: EventId, objective: ObjectiveId, proposal: ProposalId, + event: EventId, objective: ObjectiveId, proposal: ProposalId, ) -> anyhow::Result { - let rows = self - .query(Self::PROPOSAL_QUERY, &[&event.0, &objective.0, &proposal.0]) - .await?; + let rows = + Self::query(Self::PROPOSAL_QUERY, &[&event.0, &objective.0, &proposal.0]).await?; let row = rows.first().ok_or(NotFoundError)?; let proposer = vec![ProposerDetails { @@ -68,14 +67,11 @@ impl EventDB { pub(crate) async fn get_proposals( &self, event: EventId, objective: ObjectiveId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::PROPOSALS_QUERY, &[ - &event.0, - &objective.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query( + Self::PROPOSALS_QUERY, + &[&event.0, &objective.0, &limit, &offset.unwrap_or(0)], + ) + .await?; let mut proposals = Vec::new(); for row in 
rows { diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs index 7faa54c32d4..49bd2ba0b88 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs @@ -38,18 +38,20 @@ impl EventDB { /// Get reviews query #[allow(dead_code)] pub(crate) async fn get_reviews( - &self, event: EventId, objective: ObjectiveId, proposal: ProposalId, limit: Option, + event: EventId, objective: ObjectiveId, proposal: ProposalId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::REVIEWS_QUERY, &[ + let rows = Self::query( + Self::REVIEWS_QUERY, + &[ &event.0, &objective.0, &proposal.0, &limit, &offset.unwrap_or(0), - ]) - .await?; + ], + ) + .await?; let mut reviews = Vec::new(); for row in rows { @@ -57,9 +59,7 @@ impl EventDB { let review_id: i32 = row.try_get("row_id")?; let mut ratings = Vec::new(); - let rows = self - .query(Self::RATINGS_PER_REVIEW_QUERY, &[&review_id]) - .await?; + let rows = Self::query(Self::RATINGS_PER_REVIEW_QUERY, &[&review_id]).await?; for row in rows { ratings.push(Rating { review_type: row.try_get("metric")?, @@ -79,14 +79,11 @@ impl EventDB { pub(crate) async fn get_review_types( &self, event: EventId, objective: ObjectiveId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::REVIEW_TYPES_QUERY, &[ - &event.0, - &objective.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query( + Self::REVIEW_TYPES_QUERY, + &[&event.0, &objective.0, &limit, &offset.unwrap_or(0)], + ) + .await?; let mut review_types = Vec::new(); for row in rows { let map = row diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs index 805fe73222d..30dc237aaea 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs @@ -74,14 +74,12 @@ impl EventDB { /// Get voter query #[allow(dead_code)] pub(crate) async fn get_voter( - &self, event: &Option, voting_key: String, with_delegations: bool, + event: &Option, voting_key: String, with_delegations: bool, ) -> anyhow::Result { let rows = if let Some(event) = event { - self.query(Self::VOTER_BY_EVENT_QUERY, &[&voting_key, &event.0]) - .await? + Self::query(Self::VOTER_BY_EVENT_QUERY, &[&voting_key, &event.0]).await? } else { - self.query(Self::VOTER_BY_LAST_EVENT_QUERY, &[&voting_key]) - .await? + Self::query(Self::VOTER_BY_LAST_EVENT_QUERY, &[&voting_key]).await? }; let voter = rows.first().ok_or(NotFoundError)?; @@ -89,14 +87,13 @@ impl EventDB { let voting_power = voter.try_get("voting_power")?; let rows = if let Some(event) = event { - self.query(Self::TOTAL_BY_EVENT_VOTING_QUERY, &[ - &voting_group.0, - &event.0, - ]) + Self::query( + Self::TOTAL_BY_EVENT_VOTING_QUERY, + &[&voting_group.0, &event.0], + ) .await? } else { - self.query(Self::TOTAL_BY_LAST_EVENT_VOTING_QUERY, &[&voting_group.0]) - .await? + Self::query(Self::TOTAL_BY_LAST_EVENT_VOTING_QUERY, &[&voting_group.0]).await? }; let total_voting_power_per_group: i64 = rows @@ -118,13 +115,12 @@ impl EventDB { let delegator_addresses = if with_delegations { let rows = if let Some(event) = event { - self.query(Self::VOTER_DELEGATORS_LIST_QUERY, &[&voting_key, &event.0]) - .await? + Self::query(Self::VOTER_DELEGATORS_LIST_QUERY, &[&voting_key, &event.0]).await? 
} else { - self.query(Self::VOTER_DELEGATORS_LIST_QUERY, &[ - &voting_key, - &voter.try_get::<_, i32>("event")?, - ]) + Self::query( + Self::VOTER_DELEGATORS_LIST_QUERY, + &[&voting_key, &voter.try_get::<_, i32>("event")?], + ) .await? }; @@ -161,28 +157,29 @@ impl EventDB { /// Get delegator query #[allow(dead_code)] pub(crate) async fn get_delegator( - &self, event: &Option, stake_public_key: String, + event: &Option, stake_public_key: String, ) -> anyhow::Result { let rows = if let Some(event) = event { - self.query(Self::DELEGATOR_SNAPSHOT_INFO_BY_EVENT_QUERY, &[&event.0]) - .await? + Self::query(Self::DELEGATOR_SNAPSHOT_INFO_BY_EVENT_QUERY, &[&event.0]).await? } else { - self.query(Self::DELEGATOR_SNAPSHOT_INFO_BY_LAST_EVENT_QUERY, &[]) - .await? + Self::query(Self::DELEGATOR_SNAPSHOT_INFO_BY_LAST_EVENT_QUERY, &[]).await? }; let delegator_snapshot_info = rows.first().ok_or(NotFoundError)?; let delegation_rows = if let Some(event) = event { - self.query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ - &stake_public_key, - &event.0, - ]) + Self::query( + Self::DELEGATIONS_BY_EVENT_QUERY, + &[&stake_public_key, &event.0], + ) .await? } else { - self.query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ - &stake_public_key, - &delegator_snapshot_info.try_get::<_, i32>("event")?, - ]) + Self::query( + Self::DELEGATIONS_BY_EVENT_QUERY, + &[ + &stake_public_key, + &delegator_snapshot_info.try_get::<_, i32>("event")?, + ], + ) .await? }; if delegation_rows.is_empty() { @@ -200,11 +197,9 @@ impl EventDB { } let rows = if let Some(version) = event { - self.query(Self::TOTAL_POWER_BY_EVENT_QUERY, &[&version.0]) - .await? + Self::query(Self::TOTAL_POWER_BY_EVENT_QUERY, &[&version.0]).await? } else { - self.query(Self::TOTAL_POWER_BY_LAST_EVENT_QUERY, &[]) - .await? + Self::query(Self::TOTAL_POWER_BY_LAST_EVENT_QUERY, &[]).await? }; let total_power: i64 = rows .first() @@ -214,6 +209,7 @@ impl EventDB { #[allow(clippy::indexing_slicing)] // delegation_rows already checked to be not empty. let reward_address = RewardAddress::new(delegation_rows[0].try_get("reward_address")?); + Ok(Delegator { raw_power: delegations.iter().map(|delegation| delegation.value).sum(), reward_address, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs index 4ea8d650be4..7a388c35c76 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs @@ -105,15 +105,14 @@ impl EventDB { /// Search for a total. 
async fn search_total( - &self, search_query: SearchQuery, limit: Option, offset: Option, + search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_count_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query( + &Self::construct_count_query(&search_query), + &[&limit, &offset.unwrap_or(0)], + ) + .await + .map_err(|_| NotFoundError)?; let row = rows.first().ok_or(NotFoundError)?; Ok(SearchResult { @@ -124,15 +123,14 @@ impl EventDB { /// Search for events async fn search_events( - &self, search_query: SearchQuery, limit: Option, offset: Option, + search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query( + &Self::construct_query(&search_query), + &[&limit, &offset.unwrap_or(0)], + ) + .await + .map_err(|_| NotFoundError)?; let mut events = Vec::new(); for row in rows { @@ -166,13 +164,12 @@ impl EventDB { async fn search_objectives( &self, search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query( + &Self::construct_query(&search_query), + &[&limit, &offset.unwrap_or(0)], + ) + .await + .map_err(|_| NotFoundError)?; let mut objectives = Vec::new(); for row in rows { @@ -201,13 +198,12 @@ impl EventDB { async fn search_proposals( &self, search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query( + &Self::construct_query(&search_query), + &[&limit, &offset.unwrap_or(0)], + ) + .await + .map_err(|_| NotFoundError)?; let mut proposals = Vec::new(); for row in rows { @@ -237,10 +233,10 @@ impl EventDB { &self, search_query: SearchQuery, total: bool, limit: Option, offset: Option, ) -> anyhow::Result { if total { - self.search_total(search_query, limit, offset).await + Self::search_total(search_query, limit, offset).await } else { match search_query.table { - SearchTable::Events => self.search_events(search_query, limit, offset).await, + SearchTable::Events => Self::search_events(search_query, limit, offset).await, SearchTable::Objectives => { self.search_objectives(search_query, limit, offset).await }, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs b/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs index 844846ae266..ce10e1ab6c7 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs +++ b/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs @@ -108,8 +108,8 @@ impl EventDB { /// Get fund query // TODO(stevenj): https://github.com/input-output-hk/catalyst-voices/issues/68 #[allow(dead_code, clippy::too_many_lines)] - pub(crate) async fn get_fund(&self) -> anyhow::Result { - let rows = self.query(Self::FUND_QUERY, &[]).await?; + pub(crate) async fn get_fund() -> anyhow::Result { + let rows = Self::query(Self::FUND_QUERY, &[]).await?; let row = rows.first().ok_or(NotFoundError)?; let fund_id = row.try_get("id")?; @@ -130,7 +130,7 @@ impl EventDB { 
.and_local_timezone(Utc) .unwrap(); - let rows = self.query(Self::FUND_VOTE_PLANS_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_VOTE_PLANS_QUERY, &[&fund_id]).await?; let mut chain_vote_plans = Vec::new(); for row in rows { chain_vote_plans.push(Voteplan { @@ -150,7 +150,7 @@ impl EventDB { }); } - let rows = self.query(Self::FUND_CHALLENGES_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_CHALLENGES_QUERY, &[&fund_id]).await?; let mut challenges = Vec::new(); for row in rows { challenges.push(Challenge { @@ -175,7 +175,7 @@ impl EventDB { }); } - let rows = self.query(Self::FUND_GOALS_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_GOALS_QUERY, &[&fund_id]).await?; let mut goals = Vec::new(); for row in rows { goals.push(Goal { @@ -185,7 +185,7 @@ impl EventDB { }); } - let rows = self.query(Self::FUND_GROUPS_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_GROUPS_QUERY, &[&fund_id]).await?; let mut groups = Vec::new(); for row in rows { groups.push(Group { @@ -276,64 +276,62 @@ impl EventDB { }; let next = match row.try_get::<_, Option>("next_id")? { - Some(id) => { - Some(FundNextInfo { - id, - fund_name: row.try_get("next_fund_name")?, - stage_dates: FundStageDates { - insight_sharing_start: row - .try_get::<_, Option>("next_insight_sharing_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - proposal_submission_start: row - .try_get::<_, Option>("next_proposal_submission_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - refine_proposals_start: row - .try_get::<_, Option>("next_refine_proposals_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - finalize_proposals_start: row - .try_get::<_, Option>("next_finalize_proposals_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - proposal_assessment_start: row - .try_get::<_, Option>("next_proposal_assessment_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - assessment_qa_start: row - .try_get::<_, Option>("next_assessment_qa_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - snapshot_start: row - .try_get::<_, Option>("next_snapshot_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - voting_start: row - .try_get::<_, Option>("next_voting_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - voting_end: row - .try_get::<_, Option>("next_voting_end")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - tallying_end: row - .try_get::<_, Option>("next_tallying_end")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - }, - }) - }, + Some(id) => Some(FundNextInfo { + id, + fund_name: row.try_get("next_fund_name")?, + stage_dates: FundStageDates { + insight_sharing_start: row + .try_get::<_, Option>("next_insight_sharing_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + proposal_submission_start: row + .try_get::<_, Option>("next_proposal_submission_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + refine_proposals_start: row + .try_get::<_, Option>("next_refine_proposals_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + finalize_proposals_start: row + .try_get::<_, Option>("next_finalize_proposals_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + proposal_assessment_start: row + .try_get::<_, Option>("next_proposal_assessment_start")? 
+ .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + assessment_qa_start: row + .try_get::<_, Option>("next_assessment_qa_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + snapshot_start: row + .try_get::<_, Option>("next_snapshot_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + voting_start: row + .try_get::<_, Option>("next_voting_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + voting_end: row + .try_get::<_, Option>("next_voting_end")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + tallying_end: row + .try_get::<_, Option>("next_tallying_end")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + }, + }), None => None, }; diff --git a/catalyst-gateway/bin/src/event_db/mod.rs b/catalyst-gateway/bin/src/event_db/mod.rs index 9dcb83b9aa3..8b57b23834c 100644 --- a/catalyst-gateway/bin/src/event_db/mod.rs +++ b/catalyst-gateway/bin/src/event_db/mod.rs @@ -1,13 +1,19 @@ //! Catalyst Election Database crate -use std::{str::FromStr, sync::Arc}; +use std::{ + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, OnceLock, + }, +}; use bb8::Pool; use bb8_postgres::PostgresConnectionManager; -use dotenvy::dotenv; use stringzilla::StringZilla; -use tokio::sync::RwLock; use tokio_postgres::{types::ToSql, NoTls, Row}; -use tracing::{debug, debug_span, Instrument}; +use tracing::{debug, debug_span, error, Instrument}; + +use crate::settings::Settings; pub(crate) mod cardano; pub(crate) mod error; @@ -22,36 +28,17 @@ const DATABASE_URL_ENVVAR: &str = "EVENT_DB_URL"; /// Must equal the last Migrations Version Number. pub(crate) const DATABASE_SCHEMA_VERSION: i32 = 9; -#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] -/// Settings for deep query inspection -pub(crate) enum DeepQueryInspectionFlag { - /// Enable deep query inspection - Enabled, - /// Disable deep query inspection - #[default] - Disabled, -} +/// Postgres Connection Manager DB Pool +type SqlDbPool = Arc>>; -impl From for DeepQueryInspectionFlag { - fn from(b: bool) -> Self { - if b { - Self::Enabled - } else { - Self::Disabled - } - } -} +/// Postgres Connection Manager DB Pool Instance +static EVENT_DB_POOL: OnceLock = OnceLock::new(); -#[allow(unused)] -/// Connection to the Election Database -pub(crate) struct EventDB { - /// Internal database connection. DO NOT MAKE PUBLIC. - /// All database operations (queries, inserts, etc) should be constrained - /// to this crate and should be exported with a clean data access api. - pool: Pool>, - /// Deep query inspection flag. - deep_query_inspection_flag: Arc>, -} +/// Is Deep Query Analysis enabled or not? +static DEEP_QUERY_INSPECT: AtomicBool = AtomicBool::new(false); + +/// The Catalyst Event SQL Database +pub(crate) struct EventDB {} /// `EventDB` Errors #[derive(thiserror::Error, Debug, PartialEq, Eq)] @@ -65,12 +52,15 @@ pub(crate) enum Error { /// No DB URL was provided #[error("DB URL is undefined")] NoDatabaseUrl, + /// Failed to get a DB Pool + #[error("DB Pool uninitialized")] + DbPoolUninitialized, } impl EventDB { /// Determine if deep query inspection is enabled. - pub(crate) async fn is_deep_query_enabled(&self) -> bool { - *self.deep_query_inspection_flag.read().await == DeepQueryInspectionFlag::Enabled + pub(crate) fn is_deep_query_enabled() -> bool { + DEEP_QUERY_INSPECT.load(Ordering::SeqCst) } /// Modify the deep query inspection setting. @@ -78,11 +68,8 @@ impl EventDB { /// # Arguments /// /// * `deep_query` - `DeepQueryInspection` setting. 
- pub(crate) async fn modify_deep_query( - &self, deep_query_inspection_flag: DeepQueryInspectionFlag, - ) { - let mut flag = self.deep_query_inspection_flag.write().await; - *flag = deep_query_inspection_flag; + pub(crate) fn modify_deep_query(enable: bool) { + DEEP_QUERY_INSPECT.store(enable, Ordering::SeqCst); } /// Query the database. @@ -100,18 +87,14 @@ impl EventDB { /// `Result, anyhow::Error>` #[must_use = "ONLY use this function for SELECT type operations which return row data, otherwise use `modify()`"] pub(crate) async fn query( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], + stmt: &str, params: &[&(dyn ToSql + Sync)], ) -> Result, anyhow::Error> { - if self.is_deep_query_enabled().await { - // Check if this is a query statement - // if is_query_stmt(stmt) { - // self.explain_analyze_rollback(stmt, params).await?; - // } else { - // return Err(Error::InvalidQueryStatement.into()); - // } - self.explain_analyze_rollback(stmt, params).await?; + if Self::is_deep_query_enabled() { + Self::explain_analyze_rollback(stmt, params).await?; } - let rows = self.pool.get().await?.query(stmt, params).await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let conn = pool.get().await?; + let rows = conn.query(stmt, params).await?; Ok(rows) } @@ -127,18 +110,14 @@ impl EventDB { /// `Result` #[must_use = "ONLY use this function for SELECT type operations which return row data, otherwise use `modify()`"] pub(crate) async fn query_one( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], + stmt: &str, params: &[&(dyn ToSql + Sync)], ) -> Result { - if self.is_deep_query_enabled().await { - // Check if this is a query statement - // if is_query_stmt(stmt) { - // self.explain_analyze_rollback(stmt, params).await?; - // } else { - // return Err(Error::InvalidQueryStatement.into()); - // } - self.explain_analyze_rollback(stmt, params).await?; + if Self::is_deep_query_enabled() { + Self::explain_analyze_rollback(stmt, params).await?; } - let row = self.pool.get().await?.query_one(stmt, params).await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let conn = pool.get().await?; + let row = conn.query_one(stmt, params).await?; Ok(row) } @@ -155,33 +134,29 @@ impl EventDB { /// # Returns /// /// `anyhow::Result<()>` - pub(crate) async fn modify( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], - ) -> anyhow::Result<()> { - if self.is_deep_query_enabled().await { - // Check if this is a query statement - // if is_query_stmt(stmt) { - // return Err(Error::InvalidModifyStatement.into()); - // } - self.explain_analyze_commit(stmt, params).await?; + pub(crate) async fn modify(stmt: &str, params: &[&(dyn ToSql + Sync)]) -> anyhow::Result<()> { + if Self::is_deep_query_enabled() { + Self::explain_analyze_commit(stmt, params).await?; } else { - self.pool.get().await?.query(stmt, params).await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let conn = pool.get().await?; + conn.query(stmt, params).await?; } Ok(()) } /// Prepend `EXPLAIN ANALYZE` to the query, and rollback the transaction. async fn explain_analyze_rollback( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], + stmt: &str, params: &[&(dyn ToSql + Sync)], ) -> anyhow::Result<()> { - self.explain_analyze(stmt, params, true).await + Self::explain_analyze(stmt, params, true).await } /// Prepend `EXPLAIN ANALYZE` to the query, and commit the transaction. 
     async fn explain_analyze_commit(
-        &self, stmt: &str, params: &[&(dyn ToSql + Sync)],
+        stmt: &str, params: &[&(dyn ToSql + Sync)],
     ) -> anyhow::Result<()> {
-        self.explain_analyze(stmt, params, false).await
+        Self::explain_analyze(stmt, params, false).await
     }
 
     /// Prepend `EXPLAIN ANALYZE` to the query.
@@ -194,7 +169,7 @@ impl EventDB {
     /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters.
     /// * `rollback` - `bool` whether to roll back the transaction or not.
     async fn explain_analyze(
-        &self, stmt: &str, params: &[&(dyn ToSql + Sync)], rollback: bool,
+        stmt: &str, params: &[&(dyn ToSql + Sync)], rollback: bool,
     ) -> anyhow::Result<()> {
         let span = debug_span!(
             "query_plan",
         );
 
         async move {
-            let mut conn = self.pool.get().await?;
+            let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?;
+            let mut conn = pool.get().await?;
             let transaction = conn.transaction().await?;
             let explain_stmt = transaction
                 .prepare(format!("EXPLAIN ANALYZE {stmt}").as_str())
                 .await?;
@@ -248,26 +224,26 @@ impl EventDB {
 ///
 /// The env var "`DATABASE_URL`" can be set directly as an env var, or in a
 /// `.env` file.
-pub(crate) async fn establish_connection(url: Option<String>) -> anyhow::Result<EventDB> {
-    // Support env vars in a `.env` file, doesn't need to exist.
-    dotenv().ok();
+pub(crate) fn establish_connection() -> anyhow::Result<()> {
+    let (url, user, pass) = Settings::event_db_settings();
 
-    let database_url = match url {
-        Some(url) => url,
-        // If the Database connection URL is not supplied, try and get from the env var.
-        None => std::env::var(DATABASE_URL_ENVVAR).map_err(|_| Error::NoDatabaseUrl)?,
-    };
-
-    let config = tokio_postgres::config::Config::from_str(&database_url)?;
+    let mut config = tokio_postgres::config::Config::from_str(url)?;
+    if let Some(user) = user {
+        config.user(user);
+    }
+    if let Some(pass) = pass {
+        config.password(pass);
+    }
 
     let pg_mgr = PostgresConnectionManager::new(config, tokio_postgres::NoTls);
 
-    let pool = Pool::builder().build(pg_mgr).await?;
+    let pool = Pool::builder().build_unchecked(pg_mgr);
+
+    if EVENT_DB_POOL.set(Arc::new(pool)).is_err() {
+        error!("Failed to set event db pool. Called Twice?");
+    }
 
-    Ok(EventDB {
-        pool,
-        deep_query_inspection_flag: Arc::default(),
-    })
+    Ok(())
 }
 
 /// Determine if the statement is a query statement.
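The hunks above replace the `RwLock`-guarded `DeepQueryInspectionFlag` held behind `&self` with a process-wide `AtomicBool`, so any code path can read or flip deep-query inspection without a lock, an `.await`, or a handle to `EventDB`. A minimal, self-contained sketch of that pattern (only `std`; names are illustrative, not the crate's API):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Process-wide flag, mirroring the DEEP_QUERY_INSPECT static above.
static DEEP_QUERY_INSPECT: AtomicBool = AtomicBool::new(false);

fn main() {
    // Toggle it on, as `EventDB::modify_deep_query(true)` now does.
    DEEP_QUERY_INSPECT.store(true, Ordering::SeqCst);
    // Any query path can then branch on it with a plain load.
    assert!(DEEP_QUERY_INSPECT.load(Ordering::SeqCst));
}
```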
diff --git a/catalyst-gateway/bin/src/event_db/schema_check/mod.rs b/catalyst-gateway/bin/src/event_db/schema_check/mod.rs
index 2fe81bbef9c..34029a5ba4a 100644
--- a/catalyst-gateway/bin/src/event_db/schema_check/mod.rs
+++ b/catalyst-gateway/bin/src/event_db/schema_check/mod.rs
@@ -19,8 +19,8 @@ impl EventDB {
     /// Check the schema version.
     /// return the current schema version if it's current.
     /// Otherwise return an error.
-    pub(crate) async fn schema_version_check(&self) -> anyhow::Result<i32> {
-        let schema_check = self.query_one(SELECT_MAX_VERSION_SQL, &[]).await?;
+    pub(crate) async fn schema_version_check() -> anyhow::Result<i32> {
+        let schema_check = Self::query_one(SELECT_MAX_VERSION_SQL, &[]).await?;
 
         let current_ver = schema_check.try_get("max")?;
 
diff --git a/catalyst-gateway/bin/src/logger.rs b/catalyst-gateway/bin/src/logger.rs
index 4fc0abcb6b0..f746a71b16c 100644
--- a/catalyst-gateway/bin/src/logger.rs
+++ b/catalyst-gateway/bin/src/logger.rs
@@ -47,8 +47,11 @@ impl From<LogLevel> for tracing::log::LevelFilter {
     }
 }
 
+/// Handle to our Logger
+pub(crate) type LoggerHandle = Handle<LevelFilter, Registry>;
+
 /// Initialize the tracing subscriber
-pub(crate) fn init(log_level: LogLevel) -> Handle<LevelFilter, Registry> {
+pub(crate) fn init(log_level: LogLevel) -> LoggerHandle {
     // Create the formatting layer
     let layer = fmt::layer()
         .json()
diff --git a/catalyst-gateway/bin/src/main.rs b/catalyst-gateway/bin/src/main.rs
index 05b436d63d8..b86169cb706 100644
--- a/catalyst-gateway/bin/src/main.rs
+++ b/catalyst-gateway/bin/src/main.rs
@@ -8,10 +8,9 @@ mod event_db;
 mod logger;
 mod service;
 mod settings;
-mod state;
 
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
-    cli::Cli::parse().exec().await?;
+    cli::Cli::parse().exec()?;
     Ok(())
 }
diff --git a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs
index bc9738e8f02..a1a5b031233 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs
@@ -6,6 +6,7 @@ use crate::{
     event_db::{
         cardano::chain_state::{BlockHash, DateTime, SlotInfoQueryType, SlotNumber},
         error::NotFoundError,
+        EventDB,
     },
     service::common::{
         objects::cardano::{
@@ -14,7 +15,6 @@ use crate::{
         },
         responses::WithErrorResponses,
     },
-    state::State,
 };
 
 /// Endpoint responses.
@@ -31,41 +31,40 @@ pub(crate) type AllResponses = WithErrorResponses; /// # GET `/date_time_to_slot_number` #[allow(clippy::unused_async)] pub(crate) async fn endpoint( - state: &State, date_time: Option, network: Option, + date_time: Option, network: Option, ) -> AllResponses { - let event_db = state.event_db(); - let date_time = date_time.unwrap_or_else(chrono::Utc::now); let network = network.unwrap_or(Network::Mainnet); let (previous, current, next) = tokio::join!( - event_db.get_slot_info( + EventDB::get_slot_info( date_time, network.clone().into(), SlotInfoQueryType::Previous ), - event_db.get_slot_info( + EventDB::get_slot_info( date_time, network.clone().into(), SlotInfoQueryType::Current ), - event_db.get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) + EventDB::get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) ); - let process_slot_info_result = - |slot_info_result: anyhow::Result<(SlotNumber, BlockHash, DateTime)>| { - match slot_info_result { - Ok((slot_number, block_hash, block_time)) => { - Ok(Some(Slot { - slot_number, - block_hash: From::from(block_hash), - block_time, - })) - }, - Err(err) if err.is::() => Ok(None), - Err(err) => Err(err), - } - }; + let process_slot_info_result = |slot_info_result: anyhow::Result<( + SlotNumber, + BlockHash, + DateTime, + )>| { + match slot_info_result { + Ok((slot_number, block_hash, block_time)) => Ok(Some(Slot { + slot_number, + block_hash: From::from(block_hash), + block_time, + })), + Err(err) if err.is::() => Ok(None), + Err(err) => Err(err), + } + }; let current = match process_slot_info_result(current) { Ok(current) => current, diff --git a/catalyst-gateway/bin/src/service/api/cardano/mod.rs b/catalyst-gateway/bin/src/service/api/cardano/mod.rs index 727d2b1bed9..23b9de2b45d 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/mod.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/mod.rs @@ -1,8 +1,4 @@ //! Cardano API endpoints - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{ param::{Path, Query}, OpenApi, @@ -17,7 +13,6 @@ use crate::{ }, utilities::middleware::schema_validation::schema_version_validation, }, - state::State, }; mod date_time_to_slot_number_get; @@ -41,7 +36,7 @@ impl CardanoApi { /// This endpoint returns the total Cardano's staked ada amount to the corresponded /// user's stake address. async fn staked_ada_get( - &self, data: Data<&Arc>, + &self, /// The stake address of the user. /// Should a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses. stake_address: Path, @@ -59,7 +54,7 @@ impl CardanoApi { #[oai(validator(minimum(value = "0"), maximum(value = "9223372036854775807")))] slot_number: Query>, ) -> staked_ada_get::AllResponses { - staked_ada_get::endpoint(&data, stake_address.0, network.0, slot_number.0).await + staked_ada_get::endpoint(stake_address.0, network.0, slot_number.0).await } #[oai( @@ -73,7 +68,7 @@ impl CardanoApi { /// This endpoint returns the registration info followed by the [CIP-36](https://cips.cardano.org/cip/CIP-36/) to the /// corresponded user's stake address. async fn registration_get( - &self, data: Data<&Arc>, + &self, /// The stake address of the user. /// Should a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses. 
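The endpoint above issues its three slot-info lookups concurrently with `tokio::join!`, so the total latency is roughly that of the slowest query rather than the sum of all three. A standalone sketch of the shape (stub async fns stand in for the `EventDB` calls; names are illustrative):

```rust
// Stub lookup standing in for EventDB::get_slot_info().
async fn lookup(kind: &str) -> Result<usize, String> {
    Ok(kind.len())
}

#[tokio::main]
async fn main() {
    // All three futures are polled concurrently on the same task.
    let (previous, current, next) =
        tokio::join!(lookup("previous"), lookup("current"), lookup("next"));
    println!("{previous:?} {current:?} {next:?}");
}
```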
diff --git a/catalyst-gateway/bin/src/service/api/cardano/mod.rs b/catalyst-gateway/bin/src/service/api/cardano/mod.rs
index 727d2b1bed9..23b9de2b45d 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/mod.rs
@@ -1,8 +1,4 @@
 //! Cardano API endpoints
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{
     param::{Path, Query},
     OpenApi,
@@ -17,7 +13,6 @@ use crate::{
         },
         utilities::middleware::schema_validation::schema_version_validation,
     },
-    state::State,
 };
 
 mod date_time_to_slot_number_get;
@@ -41,7 +36,7 @@ impl CardanoApi {
     /// This endpoint returns the total Cardano's staked ada amount to the corresponded
     /// user's stake address.
     async fn staked_ada_get(
-        &self, data: Data<&Arc<State>>,
+        &self,
         /// The stake address of the user.
         /// Should be a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses.
         stake_address: Path<StakeAddress>,
         /// Cardano network type.
         /// If omitted `mainnet` network type is defined.
         /// As `preprod` and `preview` network types in the stake address encoded as a
         /// `testnet`, to specify `preprod` or `preview` network type use this
         /// query parameter.
         network: Query<Option<Network>>,
         /// Slot number at which the staked ada amount should be calculated.
         /// If omitted latest slot number is used.
         #[oai(validator(minimum(value = "0"), maximum(value = "9223372036854775807")))]
         slot_number: Query<Option<SlotNumber>>,
     ) -> staked_ada_get::AllResponses {
-        staked_ada_get::endpoint(&data, stake_address.0, network.0, slot_number.0).await
+        staked_ada_get::endpoint(stake_address.0, network.0, slot_number.0).await
     }
 
     #[oai(
@@ -73,7 +68,7 @@ impl CardanoApi {
     /// This endpoint returns the registration info followed by the [CIP-36](https://cips.cardano.org/cip/CIP-36/) to the
     /// corresponded user's stake address.
     async fn registration_get(
-        &self, data: Data<&Arc<State>>,
+        &self,
         /// The stake address of the user.
         /// Should be a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses.
         stake_address: Path<StakeAddress>,
@@ -91,7 +86,7 @@ impl CardanoApi {
         #[oai(validator(minimum(value = "0"), maximum(value = "9223372036854775807")))]
         slot_number: Query<Option<SlotNumber>>,
     ) -> registration_get::AllResponses {
-        registration_get::endpoint(&data, stake_address.0, network.0, slot_number.0).await
+        registration_get::endpoint(stake_address.0, network.0, slot_number.0).await
     }
 
     #[oai(
@@ -104,7 +99,7 @@ impl CardanoApi {
     ///
     /// This endpoint returns the current cardano follower's sync state info.
     async fn sync_state_get(
-        &self, data: Data<&Arc<State>>,
+        &self,
         /// Cardano network type.
         /// If omitted `mainnet` network type is defined.
         /// As `preprod` and `preview` network types in the stake address encoded as a
         /// `testnet`, to specify `preprod` or `preview` network type use this
         /// query parameter.
         network: Query<Option<Network>>,
     ) -> sync_state_get::AllResponses {
-        sync_state_get::endpoint(&data, network.0).await
+        sync_state_get::endpoint(network.0).await
     }
 
     #[oai(
@@ -126,7 +121,7 @@ impl CardanoApi {
     /// This endpoint returns the closest cardano slot info to the provided
     /// date-time.
     async fn date_time_to_slot_number_get(
-        &self, data: Data<&Arc<State>>,
+        &self,
         /// The date-time for which the slot number should be calculated.
         /// If omitted current date time is used.
         date_time: Query<Option<DateTime>>,
         /// Cardano network type.
         /// If omitted `mainnet` network type is defined.
         /// As `preprod` and `preview` network types in the stake address encoded as a
         /// `testnet`, to specify `preprod` or `preview` network type use this
         /// query parameter.
         network: Query<Option<Network>>,
     ) -> date_time_to_slot_number_get::AllResponses {
-        date_time_to_slot_number_get::endpoint(&data, date_time.0, network.0).await
+        date_time_to_slot_number_get::endpoint(date_time.0, network.0).await
     }
 }
diff --git a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs
index 86f284dfbf5..96472b75a1f 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs
@@ -3,7 +3,7 @@
 use poem_openapi::{payload::Json, ApiResponse};
 
 use crate::{
-    event_db::{cardano::chain_state::SlotNumber, error::NotFoundError},
+    event_db::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB},
     service::{
         common::{
             objects::cardano::{
@@ -13,7 +13,6 @@ use crate::{
         },
         utilities::check_network,
     },
-    state::State,
 };
 
 /// Endpoint responses
@@ -33,11 +32,8 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// # GET `/registration`
 pub(crate) async fn endpoint(
-    state: &State, stake_address: StakeAddress, provided_network: Option<Network>,
-    slot_num: Option<SlotNumber>,
+    stake_address: StakeAddress, provided_network: Option<Network>, slot_num: Option<SlotNumber>,
 ) -> AllResponses {
-    let event_db = state.event_db();
-
     let date_time = slot_num.unwrap_or(SlotNumber::MAX);
     let stake_credential = stake_address.payload().as_hash().to_vec();
     let network = match check_network(stake_address.network(), provided_network) {
@@ -46,19 +42,11 @@ pub(crate) async fn endpoint(
     };
 
     // get the total utxo amount from the database
-    match event_db
-        .get_registration_info(stake_credential, network.into(), date_time)
-        .await
-    {
-        Ok((tx_id, payment_address, voting_info, nonce)) => {
-            Responses::Ok(Json(RegistrationInfo::new(
-                tx_id,
-                &payment_address,
-                voting_info,
-                nonce,
-            )))
-            .into()
-        },
+    match EventDB::get_registration_info(stake_credential, network.into(), date_time).await {
+        Ok((tx_id, payment_address, voting_info, nonce)) => Responses::Ok(Json(
+            RegistrationInfo::new(tx_id, &payment_address, voting_info, nonce),
+        ))
+        .into(),
         Err(err) if err.is::<NotFoundError>() => Responses::NotFound.into(),
         Err(err) => AllResponses::handle_error(&err),
     }
diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
index 7e039c3f5a6..40d60c15d46 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
@@ -3,7 +3,7 @@
 use poem_openapi::{payload::Json, ApiResponse};
 
 use crate::{
-    event_db::{cardano::chain_state::SlotNumber, error::NotFoundError},
+    event_db::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB},
     service::{
         common::{
             objects::cardano::{
@@ -13,7 +13,6 @@ use crate::{
         },
         utilities::check_network,
     },
-    state::State,
 };
 
 /// Endpoint responses.
@@ -32,11 +31,8 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// # GET `/staked_ada`
 pub(crate) async fn endpoint(
-    state: &State, stake_address: StakeAddress, provided_network: Option<Network>,
-    slot_num: Option<SlotNumber>,
+    stake_address: StakeAddress, provided_network: Option<Network>, slot_num: Option<SlotNumber>,
 ) -> AllResponses {
-    let event_db = state.event_db();
-
     let date_time = slot_num.unwrap_or(SlotNumber::MAX);
     let stake_credential = stake_address.payload().as_hash().to_vec();
 
     let network = match check_network(stake_address.network(), provided_network) {
@@ -46,17 +42,12 @@ pub(crate) async fn endpoint(
     };
 
     // get the total utxo amount from the database
-    match event_db
-        .total_utxo_amount(stake_credential, network.into(), date_time)
-        .await
-    {
-        Ok((amount, slot_number)) => {
-            Responses::Ok(Json(StakeInfo {
-                amount,
-                slot_number,
-            }))
-            .into()
-        },
+    match EventDB::total_utxo_amount(stake_credential, network.into(), date_time).await {
+        Ok((amount, slot_number)) => Responses::Ok(Json(StakeInfo {
+            amount,
+            slot_number,
+        }))
+        .into(),
         Err(err) if err.is::<NotFoundError>() => Responses::NotFound.into(),
         Err(err) => AllResponses::handle_error(&err),
     }
diff --git a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs
index e80e2a3b60d..677a8145d90 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs
@@ -3,12 +3,11 @@
 use poem_openapi::{payload::Json, ApiResponse};
 
 use crate::{
-    event_db::error::NotFoundError,
+    event_db::{error::NotFoundError, EventDB},
     service::common::{
         objects::cardano::{network::Network, sync_state::SyncState},
         responses::WithErrorResponses,
     },
-    state::State,
 };
 
 /// Endpoint responses.
@@ -27,20 +26,16 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// # GET `/sync_state`
 #[allow(clippy::unused_async)]
-pub(crate) async fn endpoint(state: &State, network: Option<Network>) -> AllResponses {
-    let event_db = state.event_db();
-
+pub(crate) async fn endpoint(network: Option<Network>) -> AllResponses {
     let network = network.unwrap_or(Network::Mainnet);
 
-    match event_db.last_updated_state(network.into()).await {
-        Ok((slot_number, block_hash, last_updated)) => {
-            Responses::Ok(Json(SyncState {
-                slot_number,
-                block_hash: block_hash.into(),
-                last_updated,
-            }))
-            .into()
-        },
+    match EventDB::last_updated_state(network.into()).await {
+        Ok((slot_number, block_hash, last_updated)) => Responses::Ok(Json(SyncState {
+            slot_number,
+            block_hash: block_hash.into(),
+            last_updated,
+        }))
+        .into(),
         Err(err) if err.is::<NotFoundError>() => Responses::NotFound.into(),
         Err(err) => AllResponses::handle_error(&err),
     }
diff --git a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs
index 77031ff07af..f1b11295eb6 100644
--- a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs
+++ b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs
@@ -1,12 +1,10 @@
 //! Implementation of the GET /health/inspection endpoint
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{ApiResponse, Enum};
 use tracing::debug;
 
-use crate::{event_db, logger, service::common::responses::WithErrorResponses, state::State};
+use crate::{
+    event_db::EventDB, logger, service::common::responses::WithErrorResponses, settings::Settings,
+};
 
 /// `LogLevel` Open API definition.
 #[derive(Debug, Clone, Copy, Enum)]
@@ -43,11 +41,11 @@ pub(crate) enum DeepQueryInspectionFlag {
     Disabled,
 }
 
-impl From<DeepQueryInspectionFlag> for event_db::DeepQueryInspectionFlag {
+impl From<DeepQueryInspectionFlag> for bool {
     fn from(val: DeepQueryInspectionFlag) -> Self {
         match val {
-            DeepQueryInspectionFlag::Enabled => event_db::DeepQueryInspectionFlag::Enabled,
-            DeepQueryInspectionFlag::Disabled => event_db::DeepQueryInspectionFlag::Disabled,
+            DeepQueryInspectionFlag::Enabled => true,
+            DeepQueryInspectionFlag::Disabled => false,
         }
     }
 }
@@ -66,20 +64,16 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// # GET /health/inspection
 ///
 /// Inspection settings endpoint.
+#[allow(clippy::unused_async)]
 pub(crate) async fn endpoint(
-    state: Data<&Arc<State>>, log_level: Option<LogLevel>,
-    query_inspection: Option<DeepQueryInspectionFlag>,
+    log_level: Option<LogLevel>, query_inspection: Option<DeepQueryInspectionFlag>,
 ) -> AllResponses {
     if let Some(level) = log_level {
-        match state.modify_logger_level(level.into()) {
-            Ok(()) => debug!("successfully set log level to: {:?}", level),
-            Err(err) => return AllResponses::handle_error(&err),
-        }
+        Settings::modify_logger_level(level.into());
     }
 
     if let Some(inspection_mode) = query_inspection {
-        let event_db = state.event_db();
-        event_db.modify_deep_query(inspection_mode.into()).await;
+        EventDB::modify_deep_query(inspection_mode.into());
         debug!(
             "successfully set deep query inspection mode to: {:?}",
             inspection_mode
diff --git a/catalyst-gateway/bin/src/service/api/health/mod.rs b/catalyst-gateway/bin/src/service/api/health/mod.rs
index c5b0b12a726..037d1019351 100644
--- a/catalyst-gateway/bin/src/service/api/health/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/health/mod.rs
@@ -1,10 +1,7 @@
 //! Health Endpoints
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{param::Query, OpenApi};
 
-use crate::{service::common::tags::ApiTags, state::State};
+use crate::service::common::tags::ApiTags;
 
 mod inspection_get;
 mod live_get;
@@ -42,8 +39,8 @@ impl HealthApi {
     ///
     /// *This endpoint is for internal use of the service deployment infrastructure.
     /// It may not be exposed publicly.*
-    async fn ready_get(&self, state: Data<&Arc<State>>) -> ready_get::AllResponses {
-        ready_get::endpoint(state).await
+    async fn ready_get(&self) -> ready_get::AllResponses {
+        ready_get::endpoint().await
     }
 
     #[oai(path = "/live", method = "get", operation_id = "healthLive")]
@@ -66,9 +63,9 @@ impl HealthApi {
     )]
     /// Options for service inspection.
     async fn inspection(
-        &self, state: Data<&Arc<State>>, log_level: Query<Option<LogLevel>>,
+        &self, log_level: Query<Option<LogLevel>>,
         query_inspection: Query<Option<DeepQueryInspectionFlag>>,
     ) -> inspection_get::AllResponses {
-        inspection_get::endpoint(state, log_level.0, query_inspection.0).await
+        inspection_get::endpoint(log_level.0, query_inspection.0).await
     }
 }
diff --git a/catalyst-gateway/bin/src/service/api/health/ready_get.rs b/catalyst-gateway/bin/src/service/api/health/ready_get.rs
index cdf1d2ebb23..50caa8fd9dd 100644
--- a/catalyst-gateway/bin/src/service/api/health/ready_get.rs
+++ b/catalyst-gateway/bin/src/service/api/health/ready_get.rs
@@ -1,13 +1,9 @@
 //! Implementation of the GET /health/ready endpoint
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::ApiResponse;
 
 use crate::{
-    event_db::schema_check::MismatchedSchemaError, service::common::responses::WithErrorResponses,
-    state::State,
+    event_db::{schema_check::MismatchedSchemaError, EventDB},
+    service::common::responses::WithErrorResponses,
 };
 
 /// Endpoint responses.
@@ -41,8 +37,8 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// and is not able to properly service requests while it is occurring.
 /// This would let the load balancer shift traffic to other instances of this
 /// service that are ready.
-pub(crate) async fn endpoint(state: Data<&Arc<State>>) -> AllResponses {
-    match state.event_db().schema_version_check().await {
+pub(crate) async fn endpoint() -> AllResponses {
+    match EventDB::schema_version_check().await {
         Ok(_) => {
             tracing::debug!("DB schema version status ok");
             Responses::NoContent.into()
diff --git a/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs b/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs
index 398c68c1c46..239fa942e16 100644
--- a/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs
@@ -1,7 +1,4 @@
 //! Registration Endpoints
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{
     param::{Path, Query},
     payload::Json,
     ApiResponse, OpenApi,
 };
 
-use crate::{
-    service::{
-        common::{
-            objects::legacy::{
-                event_id::EventId, voter_registration::VoterRegistration,
-                voting_public_key::VotingPublicKey,
-            },
-            responses::WithErrorResponses,
-            tags::ApiTags,
+use crate::service::{
+    common::{
+        objects::legacy::{
+            event_id::EventId, voter_registration::VoterRegistration,
+            voting_public_key::VotingPublicKey,
         },
-        utilities::middleware::schema_validation::schema_version_validation,
+        responses::WithErrorResponses,
+        tags::ApiTags,
     },
-    state::State,
+    utilities::middleware::schema_validation::schema_version_validation,
 };
 
 /// Registration API Endpoints
@@ -58,7 +52,7 @@ impl RegistrationApi {
     #[allow(clippy::unused_async)]
     #[allow(unused_variables)]
     async fn get_voter_info(
-        &self, pool: Data<&Arc<State>>,
+        &self,
         /// A Voters Public ED25519 Key (as registered in their most recent valid
         /// [CIP-15](https://cips.cardano.org/cips/cip15) or [CIP-36](https://cips.cardano.org/cips/cip36) registration).
         #[oai(validator(max_length = 66, min_length = 66, pattern = "0x[0-9a-f]{64}"))]
diff --git a/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs b/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs
index 060b7b81042..1f7cadcfe11 100644
--- a/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs
@@ -1,15 +1,8 @@
 //! `v0` Endpoints
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{payload::Binary, OpenApi};
 
-use crate::{
-    service::{
-        common::tags::ApiTags, utilities::middleware::schema_validation::schema_version_validation,
-    },
-    state::State,
+use crate::service::{
+    common::tags::ApiTags, utilities::middleware::schema_validation::schema_version_validation,
 };
 
 mod message_post;
@@ -43,7 +36,7 @@ impl V0Api {
         transform = "schema_version_validation",
         deprecated = true
     )]
-    async fn plans_get(&self, state: Data<&Arc<State>>) -> plans_get::AllResponses {
-        plans_get::endpoint(state).await
+    async fn plans_get(&self) -> plans_get::AllResponses {
+        plans_get::endpoint().await
     }
 }
diff --git a/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs b/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs
index 8f7c600d883..b5b5065ea46 100644
--- a/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs
+++ b/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs
@@ -1,14 +1,7 @@
 //! Implementation of the GET /vote/active/plans endpoint
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{payload::Json, ApiResponse};
 
-use crate::{
-    service::common::{objects::legacy::vote_plan::VotePlan, responses::WithErrorResponses},
-    state::State,
-};
+use crate::service::common::{objects::legacy::vote_plan::VotePlan, responses::WithErrorResponses};
 
 /// Endpoint responses
 #[derive(ApiResponse)]
@@ -25,6 +18,6 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 ///
 /// Get all active vote plans endpoint.
 #[allow(clippy::unused_async)]
-pub(crate) async fn endpoint(_state: Data<&Arc<State>>) -> AllResponses {
+pub(crate) async fn endpoint() -> AllResponses {
     Responses::Ok(Json(Vec::new())).into()
 }
diff --git a/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs b/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs
index 70dea8d4d24..855e84c6c06 100644
--- a/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs
+++ b/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs
@@ -1,16 +1,9 @@
 //! Implementation of the `GET /v1/votes/plan/account-votes/:account_id` endpoint
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{param::Path, payload::Json, ApiResponse};
 
-use crate::{
-    service::common::{
-        objects::legacy::account_votes::{AccountId, AccountVote},
-        responses::WithErrorResponses,
-    },
-    state::State,
+use crate::service::common::{
+    objects::legacy::account_votes::{AccountId, AccountVote},
+    responses::WithErrorResponses,
 };
 
 /// Endpoint responses
@@ -31,8 +24,6 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// For each active vote plan, this endpoint returns an array
 /// with the proposal index number that the account voted for.
 #[allow(clippy::unused_async)]
-pub(crate) async fn endpoint(
-    _state: Data<&Arc<State>>, _account_id: Path<AccountId>,
-) -> AllResponses {
+pub(crate) async fn endpoint(_account_id: Path<AccountId>) -> AllResponses {
     Responses::Ok(Json(Vec::new())).into()
 }
diff --git a/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs b/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs
index 692ec585362..2fa186e035d 100644
--- a/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs
@@ -1,26 +1,19 @@
 //! `v1` Endpoints
-
-use std::sync::Arc;
-
-use poem::web::Data;
 use poem_openapi::{
     param::{Path, Query},
     payload::Json,
     OpenApi,
 };
 
-use crate::{
-    service::{
-        common::{
-            objects::legacy::{
-                account_votes::AccountId, fragments_batch::FragmentsBatch,
-                fragments_processing_summary::FragmentId,
-            },
-            tags::ApiTags,
+use crate::service::{
+    common::{
+        objects::legacy::{
+            account_votes::AccountId, fragments_batch::FragmentsBatch,
+            fragments_processing_summary::FragmentId,
         },
-        utilities::middleware::schema_validation::schema_version_validation,
+        tags::ApiTags,
     },
-    state::State,
+    utilities::middleware::schema_validation::schema_version_validation,
 };
 
 mod account_votes_get;
@@ -45,11 +38,10 @@ impl V1Api {
     /// Get from all active vote plans, the index of the voted proposals
     /// by the given account ID.
     async fn get_account_votes(
-        &self, state: Data<&Arc<State>>,
-        /// A account ID to get the votes for.
+        &self,
+        /// An account ID to get the votes for.
         account_id: Path<AccountId>,
     ) -> account_votes_get::AllResponses {
-        account_votes_get::endpoint(state, account_id).await
+        account_votes_get::endpoint(account_id).await
     }
 
     /// Process fragments
diff --git a/catalyst-gateway/bin/src/service/api/mod.rs b/catalyst-gateway/bin/src/service/api/mod.rs
index 0af519e0481..a3ce6555162 100644
--- a/catalyst-gateway/bin/src/service/api/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/mod.rs
@@ -11,7 +11,7 @@ use local_ip_address::list_afinet_netifas;
 use poem_openapi::{ContactObject, LicenseObject, OpenApiService, ServerObject};
 
 use self::cardano::CardanoApi;
-use crate::settings::{DocsSettings, API_URL_PREFIX};
+use crate::settings::Settings;
 
 mod cardano;
 mod health;
@@ -58,9 +58,7 @@ const TERMS_OF_SERVICE: &str =
     "https://github.com/input-output-hk/catalyst-voices/blob/main/CODE_OF_CONDUCT.md";
 
 /// Create the `OpenAPI` definition
-pub(crate) fn mk_api(
-    hosts: Vec<String>, settings: &DocsSettings,
-) -> OpenApiService<(HealthApi, CardanoApi, LegacyApi), ()> {
+pub(crate) fn mk_api() -> OpenApiService<(HealthApi, CardanoApi, LegacyApi), ()> {
     let mut service = OpenApiService::new(
         (
             HealthApi,
@@ -74,25 +72,24 @@ pub(crate) fn mk_api(
     .description(API_DESCRIPTION)
     .license(get_api_license())
     .terms_of_service(TERMS_OF_SERVICE)
-    .url_prefix(API_URL_PREFIX.as_str());
+    .url_prefix(Settings::api_url_prefix());
 
-    // Retrieve the port from the socket address
-    let port = settings.address.port().to_string();
-
-    let server_name = &settings.server_name;
+    let hosts = Settings::api_host_names();
     for host in hosts {
         service = service.server(ServerObject::new(host).description("API Host"));
     }
 
     // Add server name if it is set
-    if let Some(name) = server_name {
+    if let Some(name) = Settings::server_name() {
         service = service.server(ServerObject::new(name).description("Server at server name"));
     }
 
+    let port = Settings::bound_address().port();
+
     // Get localhost name
     if let Ok(hostname) = gethostname().into_string() {
-        let hostname_address = format!("http://{hostname}:{port}");
+        let hostname_address = format!("http://{hostname}:{port}",);
         service = service
             .server(ServerObject::new(hostname_address).description("Server at localhost name"));
     }
@@ -102,18 +99,14 @@ pub(crate) fn mk_api(
         for (name, ip) in &network_interfaces {
             if *name == "en0" {
                 let (address, desc) = match ip {
-                    IpAddr::V4(_) => {
-                        (
-                            format!("http://{ip}:{port}"),
-                            "Server at local IPv4 address",
-                        )
-                    },
-                    IpAddr::V6(_) => {
-                        (
-                            format!("http://[{ip}]:{port}"),
-                            "Server at local IPv6 address",
-                        )
-                    },
+                    IpAddr::V4(_) => (
+                        format!("http://{ip}:{port}"),
+                        "Server at local IPv4 address",
+                    ),
+                    IpAddr::V6(_) => (
+                        format!("http://[{ip}]:{port}"),
+                        "Server at local IPv6 address",
+                    ),
                 };
                 service = service.server(ServerObject::new(address).description(desc));
             }
diff --git a/catalyst-gateway/bin/src/service/common/objects/server_error.rs b/catalyst-gateway/bin/src/service/common/objects/server_error.rs
index 980f4329c62..b8499ea30c0 100644
--- a/catalyst-gateway/bin/src/service/common/objects/server_error.rs
+++ b/catalyst-gateway/bin/src/service/common/objects/server_error.rs
@@ -6,7 +6,7 @@ use uuid::Uuid;
 
 /// While using macro-vis lib, you will get the `uncommon_codepoints` warning, so you will
 /// probably want to place this in your crate root
-use crate::settings::generate_github_issue_url;
+use crate::settings::Settings;
 
 #[derive(Debug, Object)]
 #[oai(example, skip_serializing_if_is_none)]
@@ -33,7 +33,7 @@ impl ServerError {
         );
         let id = Uuid::new_v4();
         let issue_title = format!("Internal Server Error - {id}");
Server Error - {id}"); - let issue = generate_github_issue_url(&issue_title); + let issue = Settings::generate_github_issue_url(&issue_title); Self { id, msg, issue } } diff --git a/catalyst-gateway/bin/src/service/mod.rs b/catalyst-gateway/bin/src/service/mod.rs index 30aa724ffca..99f1fd4efd6 100644 --- a/catalyst-gateway/bin/src/service/mod.rs +++ b/catalyst-gateway/bin/src/service/mod.rs @@ -1,7 +1,4 @@ //! Main entrypoint to the service -use std::sync::Arc; - -use crate::{settings::DocsSettings, state::State}; // These Modules contain endpoints mod api; @@ -9,7 +6,7 @@ mod docs; // These modules are utility or common types/functions mod common; mod poem_service; -mod utilities; +pub(crate) mod utilities; pub(crate) use api::started; pub(crate) use poem_service::get_app_docs; @@ -28,6 +25,6 @@ pub(crate) use poem_service::get_app_docs; /// `Error::CannotRunService` - cannot run the service /// `Error::EventDbError` - cannot connect to the event db /// `Error::IoError` - An IO error has occurred. -pub(crate) async fn run(settings: &DocsSettings, state: Arc) -> anyhow::Result<()> { - poem_service::run(settings, state).await +pub(crate) async fn run() -> anyhow::Result<()> { + poem_service::run().await } diff --git a/catalyst-gateway/bin/src/service/poem_service.rs b/catalyst-gateway/bin/src/service/poem_service.rs index 41067f31aa6..662bead8e17 100644 --- a/catalyst-gateway/bin/src/service/poem_service.rs +++ b/catalyst-gateway/bin/src/service/poem_service.rs @@ -2,8 +2,6 @@ //! //! This provides only the primary entrypoint to the service. -use std::sync::Arc; - use poem::{ endpoint::PrometheusExporter, listener::TcpListener, @@ -21,27 +19,24 @@ use crate::{ middleware::tracing_mw::{init_prometheus, Tracing}, }, }, - settings::{get_api_host_names, DocsSettings, API_URL_PREFIX}, - state::State, + settings::Settings, }; /// This exists to allow us to add extra routes to the service for testing purposes. -fn mk_app( - hosts: Vec, base_route: Option, state: &Arc, settings: &DocsSettings, -) -> impl Endpoint { +fn mk_app(base_route: Option) -> impl Endpoint { // Get the base route if defined, or a new route if not. let base_route = match base_route { Some(route) => route, None => Route::new(), }; - let api_service = mk_api(hosts, settings); + let api_service = mk_api(); let docs = docs(&api_service); let prometheus_registry = init_prometheus(); base_route - .nest(API_URL_PREFIX.as_str(), api_service) + .nest(Settings::api_url_prefix(), api_service) .nest("/docs", docs) .nest("/metrics", PrometheusExporter::new(prometheus_registry)) .nest("/favicon.ico", favicon()) @@ -49,12 +44,11 @@ fn mk_app( .with(Compression::new().with_quality(CompressionLevel::Fastest)) .with(CatchPanic::new().with_handler(ServicePanicHandler)) .with(Tracing) - .data(state.clone()) } /// Get the API docs as a string in the JSON format. -pub(crate) fn get_app_docs(setting: &DocsSettings) -> String { - let api_service = mk_api(vec![], setting); +pub(crate) fn get_app_docs() -> String { + let api_service = mk_api(); api_service.spec() } @@ -71,11 +65,10 @@ pub(crate) fn get_app_docs(setting: &DocsSettings) -> String { /// * `Error::CannotRunService` - cannot run the service /// * `Error::EventDbError` - cannot connect to the event db /// * `Error::IoError` - An IO error has occurred. 
-pub(crate) async fn run(settings: &DocsSettings, state: Arc<State>) -> anyhow::Result<()> {
+pub(crate) async fn run() -> anyhow::Result<()> {
     // The address to listen on
-    let addr = settings.address;
-    tracing::info!("Starting Poem Service ...");
-    tracing::info!("Listening on {addr}");
+    tracing::info!("Starting Cat-Gateway Service ...");
+    tracing::info!("Listening on {}", Settings::bound_address());
 
     // Set a custom panic hook, so we can catch panics and not crash the service.
     // And also get data from the panic so we can log it.
     // help find them in the logs if they happen in production.
     set_panic_hook();
 
-    let hosts = get_api_host_names(&addr);
-
-    let app = mk_app(hosts, None, &state, settings);
+    let app = mk_app(None);
 
-    Ok(poem::Server::new(TcpListener::bind(addr)).run(app).await?)
+    Ok(
+        poem::Server::new(TcpListener::bind(Settings::bound_address()))
+            .run(app)
+            .await?,
+    )
 }
 
 #[cfg(test)]
diff --git a/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs b/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs
index 8279731ef1f..8a55738f95f 100644
--- a/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs
+++ b/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs
@@ -7,11 +7,9 @@
 //! This middleware checks the `State.schema_version_status` value, if it is Ok,
 //! the wrapped endpoint is called and its response is returned.
 
-use std::sync::Arc;
+use poem::{http::StatusCode, Endpoint, EndpointExt, Middleware, Request, Result};
 
-use poem::{http::StatusCode, web::Data, Endpoint, EndpointExt, Middleware, Request, Result};
-
-use crate::state::State;
+use crate::event_db::EventDB;
 
 /// A middleware that raises an error with `ServiceUnavailable` and 503 status code
 /// if a DB schema version mismatch is found the existing `State`.
@@ -35,12 +33,10 @@ impl<E: Endpoint> Endpoint for SchemaVersionValidationImpl<E> {
     type Output = E::Output;
 
     async fn call(&self, req: Request) -> Result<Self::Output> {
-        if let Some(state) = req.data::<Arc<State>>() {
-            // Check if the inner schema version status is set to `Mismatch`,
-            // if so, return the `StatusCode::SERVICE_UNAVAILABLE` code.
-            if state.event_db().schema_version_check().await.is_err() {
-                return Err(StatusCode::SERVICE_UNAVAILABLE.into());
-            }
+        // Check if the inner schema version status is set to `Mismatch`,
+        // if so, return the `StatusCode::SERVICE_UNAVAILABLE` code.
+        if EventDB::schema_version_check().await.is_err() {
+            return Err(StatusCode::SERVICE_UNAVAILABLE.into());
         }
         // Calls the endpoint with the request, and returns the response.
         self.ep.call(req).await
diff --git a/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs b/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs
index b31d140ddd0..5710de17904 100644
--- a/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs
+++ b/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs
@@ -18,7 +18,7 @@ use tracing::{error, field, Instrument, Level, Span};
 use ulid::Ulid;
 use uuid::Uuid;
 
-use crate::settings::CLIENT_ID_KEY;
+use crate::settings::Settings;
 
 /// Labels for the metrics
 const METRIC_LABELS: [&str; 3] = ["endpoint", "method", "status_code"];
@@ -117,6 +117,24 @@ pub(crate) struct TracingEndpoint<E> {
     inner: E,
 }
 
+/// Given a Clients IP Address, return the anonymized version of it.
+fn anonymize_ip_address(remote_addr: &str) -> String {
+    // We are going to represent it as a UUID.
+    let mut b2b = Blake2b::new_keyed(16, Settings::client_id_key().as_bytes());
+    let mut out = [0; 16];
+
+    b2b.input_str(Settings::client_id_key());
+    b2b.input_str(remote_addr);
+    b2b.result(&mut out);
+
+    uuid::Builder::from_bytes(out)
+        .with_version(uuid::Version::Random)
+        .with_variant(uuid::Variant::RFC4122)
+        .into_uuid()
+        .hyphenated()
+        .to_string()
+}
+
 /// Get an anonymized client ID from the request.
 ///
 /// This simply takes the clients IP address,
@@ -125,31 +143,13 @@ pub(crate) struct TracingEndpoint<E> {
 /// The Hash is unique per client IP, but not able to
 /// be reversed or analyzed without both the client IP and the key.
 async fn anonymous_client_id(req: &Request) -> String {
-    let mut b2b = Blake2b::new(16); // We are going to represent it as a UUID.
-    let mut out = [0; 16];
-
     let remote_addr = RealIp::from_request_without_body(req)
         .await
         .ok()
        .and_then(|real_ip| real_ip.0)
         .map_or_else(|| req.remote_addr().to_string(), |addr| addr.to_string());
 
-    b2b.input_str(CLIENT_ID_KEY.as_str());
-    b2b.input_str(&remote_addr);
-    b2b.result(&mut out);
-
-    // Note: This will only panic if the `out` is not 16 bytes long.
-    // Which it is.
-    // Therefore the `unwrap()` is safe and will not cause a panic here under any
-    // circumstances.
-    #[allow(clippy::unwrap_used)]
-    uuid::Builder::from_slice(&out)
-        .unwrap()
-        .with_version(uuid::Version::Random)
-        .with_variant(uuid::Variant::RFC4122)
-        .into_uuid()
-        .hyphenated()
-        .to_string()
+    anonymize_ip_address(&remote_addr)
 }
 
 /// Data we collected about the response
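The keyed Blake2b hash above gives each client IP a stable, per-deployment pseudonym: the same key and address always map to the same UUID, while the address cannot be recovered or correlated across deployments without the key. A standalone sketch of that property (the `client_id` helper is hypothetical; the `cryptoxide` and `uuid` calls mirror the hunk above):

```rust
use cryptoxide::{blake2b::Blake2b, digest::Digest};

/// Hypothetical helper mirroring `anonymize_ip_address` above.
fn client_id(key: &str, remote_addr: &str) -> String {
    // Keyed 16-byte Blake2b digest of (key, address).
    let mut b2b = Blake2b::new_keyed(16, key.as_bytes());
    let mut out = [0; 16];
    b2b.input_str(key);
    b2b.input_str(remote_addr);
    b2b.result(&mut out);
    // Present the digest as an RFC 4122 UUID for readable log fields.
    uuid::Builder::from_bytes(out)
        .with_version(uuid::Version::Random)
        .with_variant(uuid::Variant::RFC4122)
        .into_uuid()
        .hyphenated()
        .to_string()
}

fn main() {
    // Deterministic per (key, address); rotating the key unlinks old logs.
    assert_eq!(client_id("k1", "203.0.113.7"), client_id("k1", "203.0.113.7"));
    assert_ne!(client_id("k1", "203.0.113.7"), client_id("k2", "203.0.113.7"));
}
```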
diff --git a/catalyst-gateway/bin/src/service/utilities/mod.rs b/catalyst-gateway/bin/src/service/utilities/mod.rs
index 7765b0064b9..a4d415d7791 100644
--- a/catalyst-gateway/bin/src/service/utilities/mod.rs
+++ b/catalyst-gateway/bin/src/service/utilities/mod.rs
@@ -1,6 +1,7 @@
 //! `API` Utility operations
 pub(crate) mod catch_panic;
 pub(crate) mod middleware;
+pub(crate) mod net;
 
 use pallas::ledger::addresses::Network as PallasNetwork;
 use poem_openapi::types::ToJSON;
diff --git a/catalyst-gateway/bin/src/service/utilities/net.rs b/catalyst-gateway/bin/src/service/utilities/net.rs
new file mode 100644
index 00000000000..51ace89d367
--- /dev/null
+++ b/catalyst-gateway/bin/src/service/utilities/net.rs
@@ -0,0 +1,42 @@
+//! Networking utility functions.
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket};
+
+use tracing::error;
+
+/// Get the public IPv4 Address of the Service.
+///
+/// In the unlikely event this fails, the address will be 0.0.0.0
+pub(crate) fn get_public_ipv4() -> IpAddr {
+    if let Ok(socket) = UdpSocket::bind("0.0.0.0:0") {
+        // Note: UDP is connection-less, we don't actually connect to google here.
+        if let Err(error) = socket.connect("8.8.8.8:53") {
+            error!("Failed to connect IPv4 to Google DNS : {}", error);
+        } else if let Ok(local_addr) = socket.local_addr() {
+            return local_addr.ip();
+        } else {
+            error!("Failed to get local address");
+        }
+    } else {
+        error!("Failed to bind IPv4 Address");
+    }
+    IpAddr::V4(Ipv4Addr::from([0, 0, 0, 0]))
+}
+
+/// Get the public IPv6 Address of the Service.
+///
+/// In the unlikely event this fails, the address will be `::`
+pub(crate) fn get_public_ipv6() -> IpAddr {
+    if let Ok(socket) = UdpSocket::bind("[::]:0") {
+        // Note: UDP is connection-less, we don't actually connect to google here.
+        if let Err(error) = socket.connect("[2001:4860:4860::8888]:53") {
+            error!("Failed to connect IPv6 to Google DNS : {}", error);
+        } else if let Ok(local_addr) = socket.local_addr() {
+            return local_addr.ip();
+        } else {
+            error!("Failed to get local IPv6 address");
+        }
+    } else {
+        error!("Failed to bind IPv6 Address");
+    }
+    IpAddr::V6(Ipv6Addr::from(0))
+}
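`net.rs` uses a classic trick: "connecting" a UDP socket sends no packets, it only asks the OS to choose the route, and therefore the local interface address, it would use to reach the target. Strictly speaking this yields the outbound interface address, which behind NAT is not the true public IP. A standalone sketch:

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Bind to any local IPv4 address on an ephemeral port.
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    // UDP "connect" only records the default destination and picks a route;
    // nothing is actually sent to 8.8.8.8.
    socket.connect("8.8.8.8:53")?;
    // The OS has now assigned the outbound interface address to the socket.
    println!("outbound IPv4 address: {}", socket.local_addr()?.ip());
    Ok(())
}
```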
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 0d566bada8e..48a6576c7cf 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -1,17 +1,26 @@
 //! Command line and environment variable settings for the service
 use std::{
     env,
-    net::{IpAddr, SocketAddr},
+    net::{IpAddr, Ipv4Addr, SocketAddr},
     path::PathBuf,
+    sync::OnceLock,
+    time::Duration,
 };
 
 use clap::Args;
+use cryptoxide::{blake2b::Blake2b, mac::Mac};
 use dotenvy::dotenv;
-use lazy_static::lazy_static;
-use tracing::log::error;
+use duration_string::DurationString;
+use once_cell::sync::Lazy;
+use tracing::{level_filters::LevelFilter, log::error};
 use url::Url;
 
-use crate::logger::{LogLevel, LOG_LEVEL_DEFAULT};
+use crate::{
+    event_db,
+    logger::{self, LogLevel, LoggerHandle, LOG_LEVEL_DEFAULT},
+};
+
+use crate::service::utilities::net::{get_public_ipv4, get_public_ipv6};
 
 /// Default address to start service on.
 const ADDRESS_DEFAULT: &str = "0.0.0.0:3030";
@@ -36,14 +45,37 @@ const API_HOST_NAMES_DEFAULT: &str = "https://api.prod.projectcatalyst.io";
 const API_URL_PREFIX_DEFAULT: &str = "/api";
 
 /// Default `CHECK_CONFIG_TICK` used in development.
-const CHECK_CONFIG_TICK_DEFAULT: &str = "5";
+const CHECK_CONFIG_TICK_DEFAULT: &str = "5s";
 
 /// Default `DATA_REFRESH_TICK` used in development
-const DATA_REFRESH_TICK_DEFAULT: &str = "5";
+const DATA_REFRESH_TICK_DEFAULT: &str = "5s";
 
 /// Default `MACHINE_UID` used in development
 const MACHINE_UID_DEFAULT: &str = "UID";
 
+/// Default Event DB URL.
+const EVENT_DB_URL_DEFAULT: &str =
+    "postgresql://postgres:postgres@localhost/catalyst_events?sslmode=disable";
+
+/// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 UUID.
+fn calculate_service_uuid() -> String {
+    let mut hasher = Blake2b::new_keyed(16, "Catalyst-Gateway-Machine-UID".as_bytes());
+
+    let ipv4 = get_public_ipv4().to_canonical().to_string();
+    let ipv6 = get_public_ipv6().to_canonical().to_string();
+
+    hasher.input(ipv4.as_bytes());
+    hasher.input(ipv6.as_bytes());
+
+    let mut hash = [0u8; 16];
+
+    hasher.raw_result(&mut hash);
+    uuid::Builder::from_custom_bytes(hash)
+        .into_uuid()
+        .hyphenated()
+        .to_string()
+}
+
 /// Settings for the application.
 ///
 /// This struct represents the configuration settings for the application.
@@ -132,11 +164,16 @@ impl StringEnvVar {
     ///  assert_eq!(var.as_str(), "default");
     /// ```
     fn new(var_name: &str, default_value: &str) -> Self {
-        dotenv().ok();
         let value = env::var(var_name).unwrap_or_else(|_| default_value.to_owned());
         Self(value)
     }
 
+    /// New Env Var that is optional.
+    fn new_optional(var_name: &str) -> Option<Self> {
+        let value = env::var(var_name).ok()?;
+        Some(Self(value))
+    }
+
     /// Get the read env var as a str.
     ///
     /// # Returns
@@ -145,36 +182,240 @@ impl StringEnvVar {
     pub(crate) fn as_str(&self) -> &str {
         &self.0
     }
+
+    /// Get the read env var as a String.
+    ///
+    /// # Returns
+    ///
+    /// * String - the value
+    pub(crate) fn as_string(&self) -> String {
+        self.0.clone()
+    }
 }
 
-// Lazy initialization of all env vars which are not command line parameters.
-// All env vars used by the application should be listed here and all should have a
-// default. The default for all NON Secret values should be suitable for Production, and
-// NOT development. Secrets however should only be used with the default value in
-// development.
-lazy_static! {
+/// All the `EnvVars` used by the service.
+struct EnvVars {
     /// The github repo owner
-    pub(crate) static ref GITHUB_REPO_OWNER: StringEnvVar = StringEnvVar::new("GITHUB_REPO_OWNER", GITHUB_REPO_OWNER_DEFAULT);
+    github_repo_owner: StringEnvVar,
 
     /// The github repo name
-    pub(crate) static ref GITHUB_REPO_NAME: StringEnvVar = StringEnvVar::new("GITHUB_REPO_NAME", GITHUB_REPO_NAME_DEFAULT);
+    github_repo_name: StringEnvVar,
 
     /// The github issue template to use
-    pub(crate) static ref GITHUB_ISSUE_TEMPLATE: StringEnvVar = StringEnvVar::new("GITHUB_ISSUE_TEMPLATE", GITHUB_ISSUE_TEMPLATE_DEFAULT);
+    github_issue_template: StringEnvVar,
+
+    /// The Service ID used to anonymize client connections.
+    service_id: StringEnvVar,
 
     /// The client id key used to anonymize client connections.
-    pub(crate) static ref CLIENT_ID_KEY: StringEnvVar = StringEnvVar::new("CLIENT_ID_KEY", CLIENT_ID_KEY_DEFAULT);
+    client_id_key: StringEnvVar,
 
-    /// A List of servers to provideThe client id key used to anonymize client connections.
-    pub(crate) static ref API_HOST_NAMES: StringEnvVar = StringEnvVar::new("API_HOST_NAMES", API_HOST_NAMES_DEFAULT);
+    /// A List of host names to serve the API on.
+    api_host_names: StringEnvVar,
 
     /// The base path the API is served at.
-    pub(crate) static ref API_URL_PREFIX: StringEnvVar = StringEnvVar::new("API_URL_PREFIX", API_URL_PREFIX_DEFAULT);
+    api_url_prefix: StringEnvVar,
+
+    /// The Address of the Event DB.
+    event_db_url: StringEnvVar,
+
+    /// The UserName to use for the Event DB.
+    event_db_username: Option<StringEnvVar>,
+
+    /// The Password to use for the Event DB.
+    event_db_password: Option<StringEnvVar>,
 
     /// Tick every N seconds until config exists in db
-    pub(crate) static ref CHECK_CONFIG_TICK: StringEnvVar = StringEnvVar::new("CHECK_CONFIG_TICK", CHECK_CONFIG_TICK_DEFAULT);
+    check_config_tick: Duration,
+}
+
+// Lazy initialization of all env vars which are not command line parameters.
+// All env vars used by the application should be listed here and all should have a
+// default. The default for all NON Secret values should be suitable for Production, and
+// NOT development. Secrets however should only be used with the default value in
+// development
+
+/// All the Env Vars used by the service, lazily initialized.
+static ENV_VARS: Lazy<EnvVars> = Lazy::new(|| {
+    // Support env vars in a `.env` file, doesn't need to exist.
+    dotenv().ok();
+
+    let check_interval = StringEnvVar::new("CHECK_CONFIG_TICK", CHECK_CONFIG_TICK_DEFAULT);
+    let check_config_tick = match DurationString::try_from(check_interval.as_string()) {
+        Ok(duration) => duration.into(),
+        Err(error) => {
+            error!(
+                "Invalid Check Config Tick Duration: {} : {}. Defaulting to 5 seconds.",
+                check_interval.as_str(),
+                error
+            );
+            Duration::from_secs(5)
+        },
+    };
+
+    EnvVars {
+        github_repo_owner: StringEnvVar::new("GITHUB_REPO_OWNER", GITHUB_REPO_OWNER_DEFAULT),
+        github_repo_name: StringEnvVar::new("GITHUB_REPO_NAME", GITHUB_REPO_NAME_DEFAULT),
+        github_issue_template: StringEnvVar::new(
+            "GITHUB_ISSUE_TEMPLATE",
+            GITHUB_ISSUE_TEMPLATE_DEFAULT,
+        ),
+        service_id: StringEnvVar::new("SERVICE_ID", &calculate_service_uuid()),
+        client_id_key: StringEnvVar::new("CLIENT_ID_KEY", CLIENT_ID_KEY_DEFAULT),
+        api_host_names: StringEnvVar::new("API_HOST_NAMES", API_HOST_NAMES_DEFAULT),
+        api_url_prefix: StringEnvVar::new("API_URL_PREFIX", API_URL_PREFIX_DEFAULT),
+        event_db_url: StringEnvVar::new("EVENT_DB_URL", EVENT_DB_URL_DEFAULT),
+        event_db_username: StringEnvVar::new_optional("EVENT_DB_USERNAME"),
+        event_db_password: StringEnvVar::new_optional("EVENT_DB_PASSWORD"),
+        check_config_tick,
+    }
+});
+
+/// All Settings/Options for the Service.
+static SERVICE_SETTINGS: OnceLock<ServiceSettings> = OnceLock::new();
+
+/// Logger Handle for the Service.
+static LOGGER_HANDLE: OnceLock<LoggerHandle> = OnceLock::new();
+
+/// Our Global Settings for this running service.
+pub(crate) struct Settings();
+
+impl Settings {
+    /// Initialize the settings data.
+    pub(crate) fn init(settings: ServiceSettings) -> anyhow::Result<()> {
+        if LOGGER_HANDLE.set(logger::init(settings.log_level)).is_err() {
+            error!("Failed to initialize logger handle. Called multiple times?");
+        }
+        if SERVICE_SETTINGS.set(settings).is_err() {
+            error!("Failed to initialize service settings. Called multiple times?");
+        }
+        event_db::establish_connection()
+    }
+
+    /// Get the current Event DB settings for this service.
+    pub(crate) fn event_db_settings() -> (&'static str, Option<&'static str>, Option<&'static str>)
+    {
+        let url = ENV_VARS.event_db_url.as_str();
+        let user = ENV_VARS
+            .event_db_username
+            .as_ref()
+            .map(StringEnvVar::as_str);
+        let pass = ENV_VARS
+            .event_db_password
+            .as_ref()
+            .map(StringEnvVar::as_str);
+
+        (url, user, pass)
+    }
+
+    /// The API Url prefix
+    pub(crate) fn api_url_prefix() -> &'static str {
+        ENV_VARS.api_url_prefix.as_str()
+    }
+
+    /// The Key used to anonymize client connections in the logs.
+    pub(crate) fn client_id_key() -> &'static str {
+        ENV_VARS.client_id_key.as_str()
+    }
+
+    /// The Service UUID
+    pub(crate) fn service_id() -> &'static str {
+        ENV_VARS.service_id.as_str()
+    }
+
+    /// Get a list of all host names to serve the API on.
+    ///
+    /// Used by the `OpenAPI` Documentation to point to the correct backend.
+    /// Takes a list of [scheme://] + host names from the env var and turns it into
+    /// a list of strings.
+    ///
+    /// Host names are taken from the `API_HOST_NAMES` environment variable.
+    /// If that is not set, `addr` is used.
+    pub(crate) fn api_host_names() -> Vec<String> {
+        if let Some(settings) = SERVICE_SETTINGS.get() {
+            let addr = settings.docs_settings.address;
+            string_to_api_host_names(&addr, ENV_VARS.api_host_names.as_str())
+        } else {
+            Vec::new()
+        }
+    }
+
+    /// The socket address we are bound to.
+    pub(crate) fn bound_address() -> SocketAddr {
+        if let Some(settings) = SERVICE_SETTINGS.get() {
+            settings.docs_settings.address
+        } else {
+            // This should never happen, needed to satisfy the compiler.
+            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080)
+        }
+    }
+
+    /// Get the server name to be used in the `Server` object of the `OpenAPI` Document.
+    pub(crate) fn server_name() -> Option<String> {
+        if let Some(settings) = SERVICE_SETTINGS.get() {
+            settings.docs_settings.server_name.clone()
+        } else {
+            None
+        }
+    }
+
+    /// Modify the logger level setting.
+    /// This will reload the logger.
+    pub(crate) fn modify_logger_level(level: LogLevel) {
+        if let Some(logger_handle) = LOGGER_HANDLE.get() {
+            if let Err(error) = logger_handle.modify(|f| *f = LevelFilter::from_level(level.into()))
+            {
+                error!("Failed to modify log level to {:?} : {}", level, error);
+            }
+        } else {
+            // This should never happen.
+            error!(
+                "Failed to modify log level to {:?} : Logger handle not available.",
+                level
+            );
+        }
+    }
+
+    /// Generate a github issue url with a given title
+    ///
+    /// ## Arguments
+    ///
+    /// * `title`: &str - the title to give the issue
+    ///
+    /// ## Returns
+    ///
+    /// * String - the url
+    ///
+    /// ## Example
+    ///
+    /// ```rust,no_run
+    /// # use cat_data_service::settings::generate_github_issue_url;
+    /// assert_eq!(
+    ///     generate_github_issue_url("Hello, World! How are you?"),
+    ///     "https://github.com/input-output-hk/catalyst-voices/issues/new?template=bug_report.yml&title=Hello%2C%20World%21%20How%20are%20you%3F"
+    /// );
+    /// ```
+    pub(crate) fn generate_github_issue_url(title: &str) -> Option<Url> {
+        let path = format!(
+            "https://github.com/{}/{}/issues/new",
+            ENV_VARS.github_repo_owner.as_str(),
+            ENV_VARS.github_repo_name.as_str()
+        );
+
+        match Url::parse_with_params(
+            &path,
+            &[
+                ("template", ENV_VARS.github_issue_template.as_str()),
+                ("title", title),
+            ],
+        ) {
+            Ok(url) => Some(url),
+            Err(e) => {
+                error!("Failed to generate github issue url {:?}", e.to_string());
+                None
+            },
+        }
+    }
 }
 
 /// Transform a string list of host names into a vec of host names.
@@ -242,80 +483,26 @@ fn string_to_api_host_names(addr: &SocketAddr, hosts: &str) -> Vec<String> {
     }
 }
 
-/// Get a list of all host names to serve the API on.
-///
-/// Used by the `OpenAPI` Documentation to point to the correct backend.
-/// Take a list of [scheme://] + host names from the env var and turns it into
-/// a lits of strings.
-///
-/// Host names are taken from the `API_HOST_NAMES` environment variable.
-/// If that is not set, `addr` is used.
-pub(crate) fn get_api_host_names(addr: &SocketAddr) -> Vec<String> {
-    string_to_api_host_names(addr, API_HOST_NAMES.as_str())
-}
-
-/// Generate a github issue url with a given title
-///
-/// ## Arguments
-///
-/// * `title`: &str - the title to give the issue
-///
-/// ## Returns
-///
-/// * String - the url
-///
-/// ## Example
-///
-/// ```rust,no_run
-/// # use cat_data_service::settings::generate_github_issue_url;
-/// assert_eq!(
-///     generate_github_issue_url("Hello, World! How are you?"),
-///     "https://github.com/input-output-hk/catalyst-voices/issues/new?template=bug_report.yml&title=Hello%2C%20World%21%20How%20are%20you%3F"
-/// );
-/// ```
-pub(crate) fn generate_github_issue_url(title: &str) -> Option<Url> {
-    let path = format!(
-        "https://github.com/{}/{}/issues/new",
-        GITHUB_REPO_OWNER.as_str(),
-        GITHUB_REPO_NAME.as_str()
-    );
-
-    match Url::parse_with_params(&path, &[
-        ("template", GITHUB_ISSUE_TEMPLATE.as_str()),
-        ("title", title),
-    ]) {
-        Ok(url) => Some(url),
-        Err(e) => {
-            error!("Failed to generate github issue url {:?}", e.to_string());
-            None
-        },
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
 
-    #[test]
-    fn github_repo_name_default() {
-        assert_eq!(GITHUB_REPO_NAME.as_str(), GITHUB_REPO_NAME_DEFAULT);
-    }
-
     #[test]
     fn generate_github_issue_url_test() {
How are you?"; assert_eq!( - generate_github_issue_url(title).expect("Failed to generate url").as_str(), + Settings::generate_github_issue_url(title).expect("Failed to generate url").as_str(), "https://github.com/input-output-hk/catalyst-voices/issues/new?template=bug_report.yml&title=Hello%2C+World%21+How+are+you%3F" ); } #[test] fn configured_hosts_default() { - let configured_hosts = get_api_host_names(&SocketAddr::from(([127, 0, 0, 1], 8080))); - assert_eq!(configured_hosts, vec![ - "https://api.prod.projectcatalyst.io" - ]); + let configured_hosts = Settings::api_host_names(); + assert_eq!( + configured_hosts, + vec!["https://api.prod.projectcatalyst.io"] + ); } #[test] @@ -324,10 +511,13 @@ mod tests { &SocketAddr::from(([127, 0, 0, 1], 8080)), "http://api.prod.projectcatalyst.io , https://api.dev.projectcatalyst.io:1234", ); - assert_eq!(configured_hosts, vec![ - "http://api.prod.projectcatalyst.io", - "https://api.dev.projectcatalyst.io:1234" - ]); + assert_eq!( + configured_hosts, + vec![ + "http://api.prod.projectcatalyst.io", + "https://api.dev.projectcatalyst.io:1234" + ] + ); } #[test] @@ -336,9 +526,10 @@ mod tests { &SocketAddr::from(([127, 0, 0, 1], 8080)), "not a hostname , https://api.dev.projectcatalyst.io:1234", ); - assert_eq!(configured_hosts, vec![ - "https://api.dev.projectcatalyst.io:1234" - ]); + assert_eq!( + configured_hosts, + vec!["https://api.dev.projectcatalyst.io:1234"] + ); } #[test] diff --git a/catalyst-gateway/bin/src/state/mod.rs b/catalyst-gateway/bin/src/state/mod.rs deleted file mode 100644 index 31a78c3560a..00000000000 --- a/catalyst-gateway/bin/src/state/mod.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! Shared state used by all endpoints. -use std::sync::Arc; - -use tracing::level_filters::LevelFilter; -use tracing_subscriber::{reload::Handle, Registry}; - -use crate::{ - event_db::{establish_connection, EventDB}, - logger::LogLevel, -}; - -/// Settings for logger level -pub(crate) struct LoggerSettings { - /// Logger handle for formatting layer. - logger_handle: Handle, -} - -/// Global State of the service -pub(crate) struct State { - /// This can be None, or a handle to the DB. - /// If the DB fails, it can be set to None. - /// If its None, an attempt to get it will try and connect to the DB. - /// This is Private, it needs to be accessed with a function. - // event_db_handle: Arc>>, - // Private need to get it with a function. - event_db: Arc, /* This needs to be obsoleted, we want the DB - * to be able to be down. */ - /// Logger settings - logger_settings: Arc, -} - -impl State { - /// Create a new global [`State`] - pub(crate) async fn new( - database_url: Option, logger_handle: Handle, - ) -> anyhow::Result { - // Get a configured pool to the Database, runs schema version check internally. - let event_db = Arc::new(establish_connection(database_url).await?); - let logger_settings = Arc::new(LoggerSettings { logger_handle }); - - let state = Self { - event_db, - logger_settings, - }; - - // We don't care if this succeeds or not. - // We just try our best to connect to the event DB. - // let _ = state.event_db().await; - - Ok(state) - } - - /// Get the reference to the database connection pool for `EventDB`. - pub(crate) fn event_db(&self) -> Arc { - self.event_db.clone() - } - - /// Modify the logger level setting. - /// This will reload the logger. 
-    pub(crate) fn modify_logger_level(&self, level: LogLevel) -> anyhow::Result<()> {
-        self.logger_settings
-            .logger_handle
-            .modify(|f| *f = LevelFilter::from_level(level.into()))?;
-        Ok(())
-    }
-}

From 94a36899e2d730ec9f81ea2aaa14c4e3355ff51e Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Fri, 12 Jul 2024 15:30:20 +0700
Subject: [PATCH 03/69] fix(backend): Clean up logging a little, and add build
 info logs as required for production.

---
 catalyst-gateway/Cargo.toml                   |   3 +
 catalyst-gateway/bin/Cargo.toml               |   8 +-
 catalyst-gateway/bin/build.rs                 |   4 +
 catalyst-gateway/bin/src/build_info.rs        | 111 ++++++++++++++++++
 catalyst-gateway/bin/src/cardano/mod.rs       |   1 +
 catalyst-gateway/bin/src/cli.rs               |  18 ++-
 catalyst-gateway/bin/src/logger.rs            |  62 +++++++++-
 catalyst-gateway/bin/src/main.rs              |   3 +-
 .../src/service/api/health/inspection_get.rs  |   6 +-
 .../bin/src/service/poem_service.rs           |   6 +-
 .../bin/src/service/utilities/net.rs          |   4 +-
 catalyst-gateway/bin/src/settings.rs          |  63 +++-------
 12 files changed, 227 insertions(+), 62 deletions(-)
 create mode 100644 catalyst-gateway/bin/build.rs
 create mode 100644 catalyst-gateway/bin/src/build_info.rs

diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml
index cd1684f1495..7e2c130e84c 100644
--- a/catalyst-gateway/Cargo.toml
+++ b/catalyst-gateway/Cargo.toml
@@ -53,6 +53,9 @@ cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git"
 stringzilla = "3.8.4"
 duration-string = "0.4.0"
 once_cell = "1.19.0"
+build-info = "0.0.37"
+build-info-build = "0.0.37"
+ed25519-dalek = "2.1.1"
 
 [workspace.lints.rust]
 warnings = "deny"
diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml
index 3da1061ec76..da18472914b 100644
--- a/catalyst-gateway/bin/Cargo.toml
+++ b/catalyst-gateway/bin/Cargo.toml
@@ -3,7 +3,7 @@ name = "cat-gateway"
 description = "The Catalyst Data Gateway"
 keywords = ["cardano", "catalyst", "gateway"]
 categories = ["command-line-utilities"]
-version.workspace = true
+version = "0.1.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -15,6 +15,7 @@ repository.workspace = true
 workspace = true
 
 [dependencies]
+build-info.workspace = true
 bb8 = { workspace = true }
 bb8-postgres = { workspace = true }
 tokio-postgres = { workspace = true, features = [
@@ -67,7 +68,10 @@ anyhow = { workspace = true }
 handlebars = { workspace = true }
 cddl = { workspace = true }
 ciborium = { workspace = true }
-ed25519-dalek = "2.1.1"
+ed25519-dalek.workspace = true
 stringzilla = { workspace = true }
 duration-string.workspace = true
 once_cell.workspace = true
+
+[build-dependencies]
+build-info-build = { workspace = true }
diff --git a/catalyst-gateway/bin/build.rs b/catalyst-gateway/bin/build.rs
new file mode 100644
index 00000000000..76a7acdda99
--- /dev/null
+++ b/catalyst-gateway/bin/build.rs
@@ -0,0 +1,4 @@
+//! Build
+fn main() {
+    build_info_build::build_script();
+}
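For context on the two new dependencies: `build-info-build` runs inside `build.rs` (as in the new file above) and captures compiler, target, and git metadata at compile time, while the `build_info` runtime crate exposes that data through its macros. A minimal consumer sketch of the same two entry points used by the `build_info.rs` module added below (illustrative only):

```rust
// Generates `fn build_info() -> &'static build_info::BuildInfo` from the
// data collected by `build_info_build::build_script()` in build.rs.
build_info::build_info!(fn build_info);

fn main() {
    let info = build_info();
    println!("{} v{}", info.crate_info.name, info.crate_info.version);
}
```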
Catalyst Gateway binary build info
+
+use build_info::{self as build_info_crate};
+use local_ip_address::list_afinet_netifas;
+use tracing::info;
+
+use crate::service::utilities;
+
+/// Formatted Catalyst Gateway binary build info
+pub(crate) const BUILD_INFO: &str = build_info_crate::format!("
+version: {},
+git info: {{{}}}
+compiler: {}
+build time: {}
+",
+    $.crate_info.version,
+    $.version_control,
+    $.compiler,
+    $.timestamp
+);
+
+build_info_crate::build_info!(fn build_info);
+
+/// Log Build Info to our logs.
+pub(crate) fn log_build_info() {
+    let info = build_info();
+    let timestamp = info.timestamp.to_rfc3339();
+    let profile = info.profile.clone();
+    let optimization_level = info.optimization_level.to_string();
+
+    let name = info.crate_info.name.clone();
+    let version = info.crate_info.version.to_string();
+    let features = info.crate_info.enabled_features.join(",");
+
+    let triple = info.target.triple.clone();
+    let family = info.target.family.clone();
+    let os = info.target.os.clone();
+    let cpu_arch = info.target.cpu.arch.clone();
+    let cpu_features = info.target.cpu.features.join(",");
+
+    let compiler_channel = info.compiler.channel.to_string();
+    let compiler_version = info.compiler.version.to_string();
+
+    let mut commit_id = "Unknown".to_string();
+    let mut commit_timestamp = "Unknown".to_string();
+    let mut branch = "Unknown".to_string();
+    let mut tags = "Unknown".to_string();
+
+    if let Some(ref vc) = info.version_control {
+        if let Some(git) = vc.git() {
+            commit_id = git.commit_short_id.clone();
+            commit_timestamp = git.commit_timestamp.to_rfc3339();
+            if let Some(git_branch) = git.branch.clone() {
+                branch = git_branch;
+            }
+            tags = git.tags.join(",");
+        }
+    }
+
+    let ipv4 = utilities::net::get_public_ipv4().to_string();
+    let ipv6 = utilities::net::get_public_ipv6().to_string();
+
+    let mut interfaces: String = "Unknown".to_string();
+
+    // Get local IP address v4 and v6
+    if let Ok(network_interfaces) = list_afinet_netifas() {
+        if !network_interfaces.is_empty() {
+            interfaces.clear();
+            for iface in network_interfaces {
+                if !interfaces.is_empty() {
+                    interfaces.push(',');
+                }
+                interfaces.push_str(&format!("{}:{}", iface.0, iface.1));
+            }
+        }
+    }
+
+    info!(
+        BuildTime = timestamp,
+        Profile = profile,
+        OptimizationLevel = optimization_level,
+        Name = name,
+        Version = version,
+        Features = features,
+        TargetTriple = triple,
+        TargetFamily = family,
+        TargetOs = os,
+        CPUArch = cpu_arch,
+        CPUFeatures = cpu_features,
+        RustChannel = compiler_channel,
+        RustVersion = compiler_version,
+        GitCommitId = commit_id,
+        GitCommitTimestamp = commit_timestamp,
+        GitBranch = branch,
+        GitTags = tags,
+        PublicIPv4 = ipv4,
+        PublicIPv6 = ipv6,
+        NetworkInterfaces = interfaces,
+        "Catalyst Gateway"
+    );
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn build_info_test() {
+        println!("{BUILD_INFO}");
+    }
+}
diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs
index 35523e30191..f264daac42a 100644
--- a/catalyst-gateway/bin/src/cardano/mod.rs
+++ b/catalyst-gateway/bin/src/cardano/mod.rs
@@ -51,6 +51,7 @@ async fn get_follower_config(check_config_tick: u64) -> anyhow::Result anyhow::Result<()> {
diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs
index bb31ea5ac16..b25b63e308c 100644
--- a/catalyst-gateway/bin/src/cli.rs
+++ b/catalyst-gateway/bin/src/cli.rs
@@ -32,12 +32,16 @@ impl Cli {
     /// - Failed to initialize the logger with the specified log level.
/// - Failed to create a new `State` with the provided database URL.
     /// - Failed to run the service on the specified address.
-    pub(crate) fn exec(self) -> anyhow::Result<()> {
+    pub(crate) async fn exec(self) -> anyhow::Result<()> {
         match self {
             Self::Run(settings) => {
                 Settings::init(settings)?;
 
-                tokio::spawn(async move {
+                let mut tasks = Vec::new();
+
+                info!("Catalyst Gateway - Starting");
+
+                let handle = tokio::spawn(async move {
                     match service::run().await {
                         Ok(()) => info!("Endpoints started ok"),
                         Err(err) => {
@@ -45,6 +49,15 @@ impl Cli {
                         },
                     }
                 });
+                tasks.push(handle);
+
+                started();
+
+                for task in tasks {
+                    task.await?;
+                }
+
+                info!("Catalyst Gateway - Shut Down");
 
                 /*
@@ -54,7 +67,6 @@ impl Cli {
                     settings.follower_settings.data_refresh_tick,
                     machine_id,
                 );*/
-                started();
 
                 /*followers_fut.await?;*/
             },
             Self::Docs(settings) => {
diff --git a/catalyst-gateway/bin/src/logger.rs b/catalyst-gateway/bin/src/logger.rs
index f746a71b16c..2ff9e6ca33a 100644
--- a/catalyst-gateway/bin/src/logger.rs
+++ b/catalyst-gateway/bin/src/logger.rs
@@ -1,7 +1,10 @@
 //! Setup for logging for the service.
+use std::sync::OnceLock;
+
 use clap::ValueEnum;
 use tracing::level_filters::LevelFilter;
+use tracing::log::error;
 use tracing_subscriber::{
     fmt::{self, format::FmtSpan, time},
     prelude::*,
@@ -9,6 +12,7 @@ use tracing_subscriber::{
     Registry,
 };
 
+use crate::settings::Settings;
 /// Default log level
 pub(crate) const LOG_LEVEL_DEFAULT: &str = "info";
@@ -47,24 +51,54 @@ impl From<LogLevel> for tracing::log::LevelFilter {
     }
 }
 
+/// Logger Handle for the Service.
+static LOGGER_HANDLE: OnceLock<LoggerHandle> = OnceLock::new();
+
+/// Default Span for the Service.
+static GLOBAL_SPAN: OnceLock<tracing::Span> = OnceLock::new();
+
+/// Default Span Guard for the Service.
+static SPAN_GUARD: OnceLock = OnceLock::new();
+
 /// Handle to our Logger
 pub(crate) type LoggerHandle = Handle<LevelFilter, Registry>;
 
+/// Set the default fields in a log, using a global span.
+fn set_default_span() {
+    let server_id = Settings::service_id();
+    // This is a hacky way to add fields to every log line.
+    // Add Fields here, as required.
+    let global_span = tracing::info_span!("Global", ServerID = server_id);
+    if GLOBAL_SPAN.set(global_span).is_err() {
+        error!("Failed to set default span. Is it already set?");
+    }
+
+    // It MUST be Some because of the above.
+    if let Some(global_span) = GLOBAL_SPAN.get() {
+        let span_guard = global_span.enter();
+        if SPAN_GUARD.set(span_guard).is_err() {
+            error!("Failed to set default span. Is it already set?");
+        }
+    }
+}
+
 /// Initialize the tracing subscriber
-pub(crate) fn init(log_level: LogLevel) -> LoggerHandle {
+pub(crate) fn init(log_level: LogLevel) {
     // Create the formatting layer
     let layer = fmt::layer()
         .json()
         .with_timer(time::UtcTime::rfc_3339())
         .with_span_events(FmtSpan::CLOSE)
+        .with_current_span(true)
+        .with_span_list(true)
         .with_target(true)
         .with_file(true)
         .with_line_number(true)
         .with_level(true)
         .with_thread_names(true)
         .with_thread_ids(true)
-        .with_current_span(true)
-        .with_span_list(true);
+        .flatten_event(true);
+
     // Create a reloadable layer with the specified log_level
     let filter = LevelFilter::from_level(log_level.into());
     let (filter, logger_handle) = reload::Layer::new(filter);
@@ -76,5 +110,25 @@
     // Logging is globally disabled by default, so globally enable it to the required level.
     tracing::log::set_max_level(log_level.into());
 
-    logger_handle
+    if LOGGER_HANDLE.set(logger_handle).is_err() {
+        error!("Failed to initialize logger handle. 
Called multiple times?"); + } + + set_default_span(); +} + +/// Modify the logger level setting. +/// This will reload the logger. +pub(crate) fn modify_logger_level(level: LogLevel) { + if let Some(logger_handle) = LOGGER_HANDLE.get() { + if let Err(error) = logger_handle.modify(|f| *f = LevelFilter::from_level(level.into())) { + error!("Failed to modify log level to {:?} : {}", level, error); + } + } else { + // This should never happen. + error!( + "Failed to modify log level to {:?} : Logger handle not available.", + level + ); + } } diff --git a/catalyst-gateway/bin/src/main.rs b/catalyst-gateway/bin/src/main.rs index b86169cb706..67b3be8a93b 100644 --- a/catalyst-gateway/bin/src/main.rs +++ b/catalyst-gateway/bin/src/main.rs @@ -1,6 +1,7 @@ //! Catalyst Data Gateway use clap::Parser; +mod build_info; mod cardano; mod cli; #[allow(dead_code)] @@ -11,6 +12,6 @@ mod settings; #[tokio::main] async fn main() -> anyhow::Result<()> { - cli::Cli::parse().exec()?; + cli::Cli::parse().exec().await?; Ok(()) } diff --git a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs index f1b11295eb6..9dc09fef538 100644 --- a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs @@ -2,9 +2,7 @@ use poem_openapi::{ApiResponse, Enum}; use tracing::debug; -use crate::{ - event_db::EventDB, logger, service::common::responses::WithErrorResponses, settings::Settings, -}; +use crate::{event_db::EventDB, logger, service::common::responses::WithErrorResponses}; /// `LogLevel` Open API definition. #[derive(Debug, Clone, Copy, Enum)] @@ -69,7 +67,7 @@ pub(crate) async fn endpoint( log_level: Option, query_inspection: Option, ) -> AllResponses { if let Some(level) = log_level { - Settings::modify_logger_level(level.into()); + logger::modify_logger_level(level.into()); } if let Some(inspection_mode) = query_inspection { diff --git a/catalyst-gateway/bin/src/service/poem_service.rs b/catalyst-gateway/bin/src/service/poem_service.rs index 662bead8e17..ebc795f3917 100644 --- a/catalyst-gateway/bin/src/service/poem_service.rs +++ b/catalyst-gateway/bin/src/service/poem_service.rs @@ -67,8 +67,10 @@ pub(crate) fn get_app_docs() -> String { /// * `Error::IoError` - An IO error has occurred. pub(crate) async fn run() -> anyhow::Result<()> { // The address to listen on - tracing::info!("Starting Cat-Gateway Service ..."); - tracing::info!("Listening on {}", Settings::bound_address()); + tracing::info!( + ServiceAddr = Settings::bound_address().to_string(), + "Starting Cat-Gateway API Service ..." + ); // Set a custom panic hook, so we can catch panics and not crash the service. // And also get data from the panic so we can log it. 
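The poem_service.rs hunk above ends at the comment introducing a custom panic hook, but the hook itself sits outside this hunk. As a rough sketch of the technique only (the helper name and log fields below are hypothetical, not this service's actual hook), `std::panic::set_hook` can capture a panic's payload and location and report them through `tracing` rather than the default stderr print:

```rust
use std::panic;

/// Install a process-wide hook that reports panics through tracing.
/// Hypothetical example; not the service's actual hook.
fn set_panic_hook() {
    panic::set_hook(Box::new(|info| {
        // Panic payloads are almost always `&str` or `String`.
        let msg = if let Some(s) = info.payload().downcast_ref::<&str>() {
            (*s).to_string()
        } else if let Some(s) = info.payload().downcast_ref::<String>() {
            s.clone()
        } else {
            "<non-string panic payload>".to_string()
        };
        let location = info
            .location()
            .map_or_else(|| "<unknown>".to_string(), ToString::to_string);
        tracing::error!(panic = %msg, location = %location, "Panic caught");
    }));
}
```

Installing the hook once at startup is enough; it applies to every thread, and panicking tasks still unwind as usual, so only the reporting changes.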
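The net.rs diff that follows canonicalises the addresses these helpers return. For readers unfamiliar with the trick they rely on: calling `connect` on a UDP socket transmits nothing, but it forces the OS to pick a route, after which `local_addr` reports the address of the chosen outbound interface. A self-contained sketch of the IPv4 case, with error handling simplified relative to the real helpers:

```rust
use std::net::{IpAddr, UdpSocket};

/// Discover the local address used for outbound IPv4 traffic.
fn outbound_ipv4() -> std::io::Result<IpAddr> {
    // Bind to an ephemeral port on all interfaces.
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    // No datagram is sent; this only selects a route and a local address.
    socket.connect("8.8.8.8:53")?;
    Ok(socket.local_addr()?.ip())
}
```

The `to_canonical()` call added below additionally folds IPv4-mapped IPv6 forms (such as `::ffff:192.0.2.1`) back to their plain IPv4 representation.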
diff --git a/catalyst-gateway/bin/src/service/utilities/net.rs b/catalyst-gateway/bin/src/service/utilities/net.rs
index 51ace89d367..e047e610057 100644
--- a/catalyst-gateway/bin/src/service/utilities/net.rs
+++ b/catalyst-gateway/bin/src/service/utilities/net.rs
@@ -12,7 +12,7 @@ pub(crate) fn get_public_ipv4() -> IpAddr {
     if let Err(error) = socket.connect("8.8.8.8:53") {
         error!("Failed to connect IPv4 to Google DNS : {}", error);
     } else if let Ok(local_addr) = socket.local_addr() {
-        return local_addr.ip();
+        return local_addr.ip().to_canonical();
     } else {
         error!("Failed to get local address");
     }
@@ -31,7 +31,7 @@ pub(crate) fn get_public_ipv6() -> IpAddr {
     if let Err(error) = socket.connect("[2001:4860:4860::8888]:53") {
         error!("Failed to connect IPv6 to Google DNS : {}", error);
     } else if let Ok(local_addr) = socket.local_addr() {
-        return local_addr.ip();
+        return local_addr.ip().to_canonical();
     } else {
         error!("Failed to get local IPv6 address");
     }
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 48a6576c7cf..0f003ddf666 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -12,12 +12,13 @@ use cryptoxide::{blake2b::Blake2b, mac::Mac};
 use dotenvy::dotenv;
 use duration_string::DurationString;
 use once_cell::sync::Lazy;
-use tracing::{level_filters::LevelFilter, log::error};
+use tracing::log::error;
 use url::Url;
 
 use crate::{
+    build_info::{log_build_info, BUILD_INFO},
     event_db,
-    logger::{self, LogLevel, LoggerHandle, LOG_LEVEL_DEFAULT},
+    logger::{self, LogLevel, LOG_LEVEL_DEFAULT},
 };
 
 use crate::service::utilities::net::{get_public_ipv4, get_public_ipv6};
@@ -47,9 +48,6 @@ const API_URL_PREFIX_DEFAULT: &str = "/api";
 /// Default `CHECK_CONFIG_TICK` used in development.
 const CHECK_CONFIG_TICK_DEFAULT: &str = "5s";
 
-/// Default `DATA_REFRESH_TICK` used in development
-const DATA_REFRESH_TICK_DEFAULT: &str = "5s";
-
 /// Default `MACHINE_UID` used in development
 const MACHINE_UID_DEFAULT: &str = "UID";
 
@@ -61,8 +59,8 @@ const EVENT_DB_URL_DEFAULT: &str =
 fn calculate_service_uuid() -> String {
     let mut hasher = Blake2b::new_keyed(16, "Catalyst-Gateway-Machine-UID".as_bytes());
 
-    let ipv4 = get_public_ipv4().to_canonical().to_string();
-    let ipv6 = get_public_ipv6().to_canonical().to_string();
+    let ipv4 = get_public_ipv4().to_string();
+    let ipv6 = get_public_ipv6().to_string();
 
     hasher.input(ipv4.as_bytes());
     hasher.input(ipv6.as_bytes());
@@ -83,10 +81,11 @@ fn calculate_service_uuid() -> String {
 /// the URL to the `PostgreSQL` event database,
 /// and the logging level.
 #[derive(Args, Clone)]
+#[clap(version = BUILD_INFO)]
 pub(crate) struct ServiceSettings {
     /// URL to the Postgres event DB
     #[clap(long, env)]
-    pub(crate) database_url: String,
+    pub(crate) event_db_url: Option<String>,
 
     /// Logging level
     #[clap(long, default_value = LOG_LEVEL_DEFAULT)]
@@ -99,10 +98,6 @@ pub(crate) struct ServiceSettings {
     /// Follower settings.
     #[clap(flatten)]
     pub(crate) follower_settings: FollowerSettings,
-
-    /// Enable deep query inspection.
-    #[clap(long, action = clap::ArgAction::SetTrue)]
-    pub(crate) deep_query_inspection: bool,
 }
 
 /// Settings specifies `OpenAPI` docs generation.
@@ -123,14 +118,6 @@ pub(crate) struct DocsSettings {
 
 /// Settings for follower mechanics.
#[derive(Args, Clone)] pub(crate) struct FollowerSettings { - /// Check config tick - #[clap(long, default_value = CHECK_CONFIG_TICK_DEFAULT, env = "CHECK_CONFIG_TICK")] - pub(crate) check_config_tick: u64, - - /// Data Refresh tick - #[clap(long, default_value = DATA_REFRESH_TICK_DEFAULT, env = "DATA_REFRESH_TICK")] - pub(crate) data_refresh_tick: u64, - /// Machine UID #[clap(long, default_value = MACHINE_UID_DEFAULT, env = "MACHINE_UID")] pub(crate) machine_uid: String, @@ -226,6 +213,7 @@ struct EnvVars { event_db_password: Option, /// Tick every N seconds until config exists in db + #[allow(unused)] check_config_tick: Duration, } @@ -274,21 +262,24 @@ static ENV_VARS: Lazy = Lazy::new(|| { /// All Settings/Options for the Service. static SERVICE_SETTINGS: OnceLock = OnceLock::new(); -/// Logger Handle for the Service. -static LOGGER_HANDLE: OnceLock = OnceLock::new(); - /// Our Global Settings for this running service. pub(crate) struct Settings(); impl Settings { /// Initialize the settings data. pub(crate) fn init(settings: ServiceSettings) -> anyhow::Result<()> { - if LOGGER_HANDLE.set(logger::init(settings.log_level)).is_err() { - error!("Failed to initialize logger handle. Called multiple times?"); - } + let log_level = settings.log_level; + if SERVICE_SETTINGS.set(settings).is_err() { - error!("Failed to initialize service settings. Called multiple times?"); + // We use println here, because logger not yet configured. + println!("Failed to initialize service settings. Called multiple times?"); } + + // Init the logger. + logger::init(log_level); + + log_build_info(); + event_db::establish_connection() } @@ -319,6 +310,7 @@ impl Settings { } /// The Service UUID + #[allow(unused)] pub(crate) fn service_id() -> &'static str { ENV_VARS.service_id.as_str() } @@ -359,23 +351,6 @@ impl Settings { } } - /// Modify the logger level setting. - /// This will reload the logger. - pub(crate) fn modify_logger_level(level: LogLevel) { - if let Some(logger_handle) = LOGGER_HANDLE.get() { - if let Err(error) = logger_handle.modify(|f| *f = LevelFilter::from_level(level.into())) - { - error!("Failed to modify log level to {:?} : {}", level, error); - } - } else { - // This should never happen. 
- error!( - "Failed to modify log level to {:?} : Logger handle not available.", - level - ); - } - } - /// Generate a github issue url with a given title /// /// ## Arguments From 8308f340beb69ae3bc288188823d7dbd390e502c Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Tue, 16 Jul 2024 00:13:12 +0700 Subject: [PATCH 04/69] Refactor and setup cassandra config/session --- .config/dictionaries/project.dic | 3 + catalyst-gateway/Cargo.toml | 4 + catalyst-gateway/bin/Cargo.toml | 4 + catalyst-gateway/bin/src/cardano/mod.rs | 2 +- catalyst-gateway/bin/src/cli.rs | 2 +- .../chain_state/insert_update_state.sql | 0 .../event}/cardano/chain_state/mod.rs | 2 +- .../select_slot_info_by_datetime.sql.hbs | 0 .../chain_state/select_update_state.sql | 0 .../event}/cardano/cip36_registration/mod.rs | 2 +- .../select_cip36_registration.sql | 0 .../event}/cardano/config/mod.rs | 2 +- .../event}/cardano/config/select_config.sql | 0 .../src/{event_db => db/event}/cardano/mod.rs | 0 .../event}/cardano/utxo/mod.rs | 2 +- .../cardano/utxo/select_total_utxo_amount.sql | 0 .../bin/src/{event_db => db/event}/error.rs | 0 .../src/{event_db => db/event}/legacy/mod.rs | 0 .../event}/legacy/queries/event/ballot.rs | 2 +- .../event}/legacy/queries/event/mod.rs | 2 +- .../event}/legacy/queries/event/objective.rs | 2 +- .../event}/legacy/queries/event/proposal.rs | 2 +- .../event}/legacy/queries/event/review.rs | 2 +- .../event}/legacy/queries/mod.rs | 0 .../event}/legacy/queries/registration.rs | 3 +- .../event}/legacy/queries/search.rs | 2 +- .../event}/legacy/queries/vit_ss/fund.rs | 2 +- .../event}/legacy/queries/vit_ss/mod.rs | 0 .../event}/legacy/types/ballot.rs | 2 +- .../event}/legacy/types/event.rs | 0 .../event}/legacy/types/mod.rs | 0 .../event}/legacy/types/objective.rs | 2 +- .../event}/legacy/types/proposal.rs | 0 .../event}/legacy/types/registration.rs | 0 .../event}/legacy/types/review.rs | 0 .../event}/legacy/types/search.rs | 0 .../event}/legacy/types/vit_ss/challenge.rs | 0 .../event}/legacy/types/vit_ss/fund.rs | 0 .../event}/legacy/types/vit_ss/goal.rs | 0 .../event}/legacy/types/vit_ss/group.rs | 0 .../event}/legacy/types/vit_ss/mod.rs | 0 .../event}/legacy/types/vit_ss/vote_plan.rs | 0 .../event}/legacy/types/voting_status.rs | 0 .../bin/src/{event_db => db/event}/mod.rs | 49 --------- .../event}/schema_check/mod.rs | 2 +- .../schema_check/select_max_version.sql | 0 catalyst-gateway/bin/src/db/index/mod.rs | 4 + catalyst-gateway/bin/src/db/index/schema.rs | 13 +++ catalyst-gateway/bin/src/db/index/session.rs | 96 +++++++++++++++++ catalyst-gateway/bin/src/db/mod.rs | 4 + catalyst-gateway/bin/src/main.rs | 3 +- .../cardano/date_time_to_slot_number_get.rs | 2 +- .../bin/src/service/api/cardano/mod.rs | 2 +- .../service/api/cardano/registration_get.rs | 2 +- .../src/service/api/cardano/staked_ada_get.rs | 2 +- .../src/service/api/cardano/sync_state_get.rs | 2 +- .../src/service/api/health/inspection_get.rs | 2 +- .../bin/src/service/api/health/ready_get.rs | 2 +- .../objects/cardano/registration_info.rs | 32 +++--- .../common/objects/cardano/slot_info.rs | 2 +- .../common/objects/cardano/stake_info.rs | 2 +- .../common/objects/cardano/sync_state.rs | 2 +- .../service/common/objects/legacy/event_id.rs | 4 +- .../common/objects/legacy/voter_group_id.rs | 4 +- .../common/objects/legacy/voter_info.rs | 4 +- .../objects/legacy/voter_registration.rs | 4 +- .../utilities/middleware/schema_validation.rs | 2 +- catalyst-gateway/bin/src/settings.rs | 100 +++++++++++++++++- 68 files changed, 275 insertions(+), 
110 deletions(-) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/chain_state/insert_update_state.sql (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/chain_state/mod.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/chain_state/select_slot_info_by_datetime.sql.hbs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/chain_state/select_update_state.sql (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/cip36_registration/mod.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/cip36_registration/select_cip36_registration.sql (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/config/mod.rs (98%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/config/select_config.sql (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/utxo/mod.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/cardano/utxo/select_total_utxo_amount.sql (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/error.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/ballot.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/mod.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/objective.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/proposal.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/review.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/registration.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/search.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/vit_ss/fund.rs (99%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/vit_ss/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/ballot.rs (95%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/event.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/objective.rs (96%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/proposal.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/registration.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/review.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/search.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/challenge.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/fund.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/goal.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/group.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/vote_plan.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/voting_status.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/mod.rs (82%) rename 
catalyst-gateway/bin/src/{event_db => db/event}/schema_check/mod.rs (95%) rename catalyst-gateway/bin/src/{event_db => db/event}/schema_check/select_max_version.sql (100%) create mode 100644 catalyst-gateway/bin/src/db/index/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/schema.rs create mode 100644 catalyst-gateway/bin/src/db/index/session.rs create mode 100644 catalyst-gateway/bin/src/db/mod.rs diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index ec8b05694af..1b0465b5037 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -28,7 +28,9 @@ cardano Catalyst CBOR cborg +cdrs cdylib +certdir CEST cfbundle Chotivichit @@ -102,6 +104,7 @@ junitreport junitxml Keyhash keyserver +keyspace KUBECONFIG kubernetescrd kubetail diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index 7e2c130e84c..ed42bb2e351 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -56,6 +56,10 @@ once_cell = "1.19.0" build-info = "0.0.37" build-info-build = "0.0.37" ed25519-dalek = "2.1.1" +scylla = { version = "0.13.1", features = ["ssl", "full-serialization"]} +strum = { version = "0.26.3", features = ["derive"] } +strum_macros = "0.26.4" +openssl = { version = "0.10.64", features = ["vendored"] } [workspace.lints.rust] warnings = "deny" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index da18472914b..aeb9ea408c0 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -72,6 +72,10 @@ ed25519-dalek.workspace = true stringzilla = { workspace = true } duration-string.workspace = true once_cell.workspace = true +scylla.workspace = true +strum.workspace = true +strum_macros.workspace = true +openssl.workspace = true [build-dependencies] build-info-build = { workspace = true } diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index f264daac42a..92ab07de652 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -13,7 +13,7 @@ use tokio::{sync::mpsc, task::JoinHandle, time}; use tracing::{error, info}; use crate::{ - event_db::{ + db::event::{ cardano::{ chain_state::{IndexedFollowerDataParams, MachineId}, cip36_registration::IndexedVoterRegistrationParams, diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index b25b63e308c..af96228751a 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -35,7 +35,7 @@ impl Cli { pub(crate) async fn exec(self) -> anyhow::Result<()> { match self { Self::Run(settings) => { - Settings::init(settings)?; + Settings::init(settings).await?; let mut tasks = Vec::new(); diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/insert_update_state.sql b/catalyst-gateway/bin/src/db/event/cardano/chain_state/insert_update_state.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/insert_update_state.sql rename to catalyst-gateway/bin/src/db/event/cardano/chain_state/insert_update_state.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs index eb74590a64d..2b5f10bc29a 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs +++ 
b/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs @@ -6,7 +6,7 @@ use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock}; use tokio_postgres::{binary_copy::BinaryCopyInWriter, types::Type}; use tracing::error; -use crate::event_db::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}; +use crate::db::event::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}; /// Block time pub type DateTime = chrono::DateTime; diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/select_slot_info_by_datetime.sql.hbs b/catalyst-gateway/bin/src/db/event/cardano/chain_state/select_slot_info_by_datetime.sql.hbs similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/select_slot_info_by_datetime.sql.hbs rename to catalyst-gateway/bin/src/db/event/cardano/chain_state/select_slot_info_by_datetime.sql.hbs diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/select_update_state.sql b/catalyst-gateway/bin/src/db/event/cardano/chain_state/select_update_state.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/select_update_state.sql rename to catalyst-gateway/bin/src/db/event/cardano/chain_state/select_update_state.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs index 4f6602f8044..d2f56d8f79b 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs @@ -9,7 +9,7 @@ use crate::{ cip36_registration::{Cip36Metadata, VotingInfo}, util::valid_era, }, - event_db::{ + db::event::{ cardano::chain_state::SlotNumber, error::NotFoundError, Error, EventDB, EVENT_DB_POOL, }, }; diff --git a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/select_cip36_registration.sql b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/select_cip36_registration.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/cip36_registration/select_cip36_registration.sql rename to catalyst-gateway/bin/src/db/event/cardano/cip36_registration/select_cip36_registration.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs similarity index 98% rename from catalyst-gateway/bin/src/event_db/cardano/config/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano/config/mod.rs index 12590601478..e10b3d834fe 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs @@ -4,7 +4,7 @@ use std::str::FromStr; use cardano_chain_follower::Network; use serde::{Deserialize, Serialize}; -use crate::event_db::{error::NotFoundError, EventDB}; +use crate::db::event::{error::NotFoundError, EventDB}; /// Representation of the `config` table id fields `id`, `id2`, `id3` enum ConfigId { diff --git a/catalyst-gateway/bin/src/event_db/cardano/config/select_config.sql b/catalyst-gateway/bin/src/db/event/cardano/config/select_config.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/config/select_config.sql rename to catalyst-gateway/bin/src/db/event/cardano/config/select_config.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/mod.rs 
b/catalyst-gateway/bin/src/db/event/cardano/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs index 69cc5b214ab..79cf50fe76b 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs @@ -8,7 +8,7 @@ use tracing::error; use super::{chain_state::SlotNumber, cip36_registration::StakeCredential}; use crate::{ cardano::util::parse_policy_assets, - event_db::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}, + db::event::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}, }; /// Stake amount. diff --git a/catalyst-gateway/bin/src/event_db/cardano/utxo/select_total_utxo_amount.sql b/catalyst-gateway/bin/src/db/event/cardano/utxo/select_total_utxo_amount.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/utxo/select_total_utxo_amount.sql rename to catalyst-gateway/bin/src/db/event/cardano/utxo/select_total_utxo_amount.sql diff --git a/catalyst-gateway/bin/src/event_db/error.rs b/catalyst-gateway/bin/src/db/event/error.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/error.rs rename to catalyst-gateway/bin/src/db/event/error.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs index 6284ba08028..a2687d4a320 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs @@ -1,7 +1,7 @@ //! Ballot Queries use std::collections::HashMap; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ ballot::{ diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/mod.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/mod.rs index 03bdd4a2775..1acf7ddcb1c 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/mod.rs @@ -1,7 +1,7 @@ //! 
Event Queries use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::event::{ Event, EventDetails, EventGoal, EventId, EventRegistration, EventSchedule, EventSummary, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs index ca1adf0ead0..a9d3959b51b 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs @@ -1,5 +1,5 @@ //! Objective Queries -use crate::event_db::{ +use crate::db::event::{ legacy::types::{ event::EventId, objective::{ diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs index 5212f1e7505..52cf189aa65 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs @@ -1,5 +1,5 @@ //! Proposal Queries -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ event::EventId, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs index 49bd2ba0b88..701eec78060 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs @@ -1,5 +1,5 @@ //! Review Queries -use crate::event_db::{ +use crate::db::event::{ legacy::types::{ event::EventId, objective::ObjectiveId, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/queries/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs index 30dc237aaea..16167eb571e 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs @@ -1,7 +1,7 @@ //! Registration Queries use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ event::EventId, @@ -209,7 +209,6 @@ impl EventDB { #[allow(clippy::indexing_slicing)] // delegation_rows already checked to be not empty. 
let reward_address = RewardAddress::new(delegation_rows[0].try_get("reward_address")?); - Ok(Delegator { raw_power: delegations.iter().map(|delegation| delegation.value).sum(), reward_address, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/search.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/search.rs index 7a388c35c76..22f66852d77 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs @@ -1,7 +1,7 @@ //! Search Queries use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ event::{EventId, EventSummary}, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs similarity index 99% rename from catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs index ce10e1ab6c7..718c978fdd2 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs @@ -1,6 +1,6 @@ use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::vit_ss::{ challenge::{Challenge, ChallengeHighlights}, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/ballot.rs b/catalyst-gateway/bin/src/db/event/legacy/types/ballot.rs similarity index 95% rename from catalyst-gateway/bin/src/event_db/legacy/types/ballot.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/ballot.rs index 46d1d2f56af..7527756c20f 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/types/ballot.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/types/ballot.rs @@ -1,6 +1,6 @@ //! 
Ballot types use super::{objective::ObjectiveId, proposal::ProposalId}; -use crate::event_db::legacy::types::registration::VoterGroupId; +use crate::db::event::legacy::types::registration::VoterGroupId; #[derive(Debug, Clone, PartialEq, Eq)] /// Objective Choices diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/event.rs b/catalyst-gateway/bin/src/db/event/legacy/types/event.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/event.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/event.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/types/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/objective.rs b/catalyst-gateway/bin/src/db/event/legacy/types/objective.rs similarity index 96% rename from catalyst-gateway/bin/src/event_db/legacy/types/objective.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/objective.rs index 3ccd708bcc5..920cccba2a6 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/types/objective.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/types/objective.rs @@ -1,7 +1,7 @@ //! Objective Types use serde_json::Value; -use crate::event_db::legacy::types::registration::VoterGroupId; +use crate::db::event::legacy::types::registration::VoterGroupId; #[allow(clippy::module_name_repetitions)] #[derive(Debug, Clone, PartialEq, Eq, Hash)] diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/proposal.rs b/catalyst-gateway/bin/src/db/event/legacy/types/proposal.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/proposal.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/proposal.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/registration.rs b/catalyst-gateway/bin/src/db/event/legacy/types/registration.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/registration.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/registration.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/review.rs b/catalyst-gateway/bin/src/db/event/legacy/types/review.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/review.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/review.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/search.rs b/catalyst-gateway/bin/src/db/event/legacy/types/search.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/search.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/search.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/challenge.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/challenge.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/challenge.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/challenge.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/fund.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/fund.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/fund.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/fund.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/goal.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/goal.rs similarity index 100% rename from 
catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/goal.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/goal.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/group.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/group.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/group.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/group.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/vote_plan.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/vote_plan.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/vote_plan.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/vote_plan.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/voting_status.rs b/catalyst-gateway/bin/src/db/event/legacy/types/voting_status.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/voting_status.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/voting_status.rs diff --git a/catalyst-gateway/bin/src/event_db/mod.rs b/catalyst-gateway/bin/src/db/event/mod.rs similarity index 82% rename from catalyst-gateway/bin/src/event_db/mod.rs rename to catalyst-gateway/bin/src/db/event/mod.rs index 8b57b23834c..8df16bd83a4 100644 --- a/catalyst-gateway/bin/src/event_db/mod.rs +++ b/catalyst-gateway/bin/src/db/event/mod.rs @@ -9,7 +9,6 @@ use std::{ use bb8::Pool; use bb8_postgres::PostgresConnectionManager; -use stringzilla::StringZilla; use tokio_postgres::{types::ToSql, NoTls, Row}; use tracing::{debug, debug_span, error, Instrument}; @@ -20,10 +19,6 @@ pub(crate) mod error; pub(crate) mod legacy; pub(crate) mod schema_check; -/// Database URL Environment Variable name. -/// eg: "`postgres://catalyst-dev:CHANGE_ME@localhost/CatalystDev`" -const DATABASE_URL_ENVVAR: &str = "EVENT_DB_URL"; - /// Database version this crate matches. /// Must equal the last Migrations Version Number. pub(crate) const DATABASE_SCHEMA_VERSION: i32 = 9; @@ -43,15 +38,6 @@ pub(crate) struct EventDB {} /// `EventDB` Errors #[derive(thiserror::Error, Debug, PartialEq, Eq)] pub(crate) enum Error { - /// Database statement is not a valid modify statement - #[error("Invalid Modify Statement")] - InvalidModifyStatement, - /// Database statement is not a valid query statement - #[error("Invalid Query Statement")] - InvalidQueryStatement, - /// No DB URL was provided - #[error("DB URL is undefined")] - NoDatabaseUrl, /// Failed to get a DB Pool #[error("DB Pool uninitialized")] DbPoolUninitialized, @@ -245,38 +231,3 @@ pub(crate) fn establish_connection() -> anyhow::Result<()> { Ok(()) } - -/// Determine if the statement is a query statement. -/// -/// Returns true f the query statement starts with `SELECT` or contains `RETURNING`. 
-fn is_query_stmt(stmt: &str) -> bool { - // First, determine if the statement is a `SELECT` operation - if let Some(stmt) = &stmt.get(..6) { - if *stmt == "SELECT" { - return true; - } - } - // Otherwise, determine if the statement contains `RETURNING` - stmt.sz_rfind("RETURNING").is_some() -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_is_query_statement() { - let stmt = "SELECT * FROM dummy"; - assert!(is_query_stmt(stmt)); - let stmt = "UPDATE dummy SET foo = $1 WHERE bar = $2 RETURNING *"; - assert!(is_query_stmt(stmt)); - } - - #[test] - fn test_is_not_query_statement() { - let stmt = "UPDATE dummy SET foo_count = foo_count + 1 WHERE bar = (SELECT bar_id FROM foo WHERE name = 'FooBar')"; - assert!(!is_query_stmt(stmt)); - let stmt = "UPDATE dummy SET foo = $1 WHERE bar = $2"; - assert!(!is_query_stmt(stmt)); - } -} diff --git a/catalyst-gateway/bin/src/event_db/schema_check/mod.rs b/catalyst-gateway/bin/src/db/event/schema_check/mod.rs similarity index 95% rename from catalyst-gateway/bin/src/event_db/schema_check/mod.rs rename to catalyst-gateway/bin/src/db/event/schema_check/mod.rs index 34029a5ba4a..f5a0ce379f5 100644 --- a/catalyst-gateway/bin/src/event_db/schema_check/mod.rs +++ b/catalyst-gateway/bin/src/db/event/schema_check/mod.rs @@ -1,6 +1,6 @@ //! Check if the schema is up-to-date. -use crate::event_db::{EventDB, DATABASE_SCHEMA_VERSION}; +use crate::db::event::{EventDB, DATABASE_SCHEMA_VERSION}; /// Schema in database does not match schema supported by the Crate. #[derive(thiserror::Error, Debug, PartialEq, Eq)] diff --git a/catalyst-gateway/bin/src/event_db/schema_check/select_max_version.sql b/catalyst-gateway/bin/src/db/event/schema_check/select_max_version.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/schema_check/select_max_version.sql rename to catalyst-gateway/bin/src/db/event/schema_check/select_max_version.sql diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs new file mode 100644 index 00000000000..8d74bb486ef --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/mod.rs @@ -0,0 +1,4 @@ +//! Blockchain Index Database + +mod schema; +pub(crate) mod session; diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs new file mode 100644 index 00000000000..10ccfed29dc --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema.rs @@ -0,0 +1,13 @@ +//! Index Schema + +use std::time::Duration; + +use super::session::CassandraSession; + +/// The version of the Schema we are using. +pub(crate) const SCHEMA_VERSION: u64 = 1; + +/// Create the Schema on the connected Cassandra DB +pub(crate) async fn create_schema(_session: CassandraSession) { + tokio::time::sleep(Duration::from_secs(2)).await; +} diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs new file mode 100644 index 00000000000..e045da6be92 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/session.rs @@ -0,0 +1,96 @@ +//! 
Session creation and storage
+
+use crate::settings::{CassandraEnvVars, Settings};
+use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode};
+use scylla::{frame::Compression, Session, SessionBuilder};
+use std::{path::PathBuf, sync::Arc};
+use tokio::fs;
+
+use super::schema::{create_schema, SCHEMA_VERSION};
+
+/// Configuration Choices for compression.
+#[derive(Clone, strum::EnumString)]
+#[strum(ascii_case_insensitive)]
+pub(crate) enum CompressionChoice {
+    /// LZ4 link data compression.
+    Lz4,
+    /// Snappy link data compression.
+    Snappy,
+    /// No compression.
+    None,
+}
+
+/// Configuration Choices for TLS.
+#[derive(Clone, strum::EnumString, PartialEq)]
+#[strum(ascii_case_insensitive)]
+pub(crate) enum TlsChoice {
+    /// Disable TLS.
+    Disabled,
+    /// Verifies that the peer's certificate is trusted.
+    Verified,
+    /// Disables verification of the peer's certificate.
+    Unverified,
+}
+
+/// A Session on the Cassandra database.
+pub(crate) type CassandraSession = Arc<Session>;
+
+/// Construct a session based on the given configuration.
+async fn make_session(cfg: CassandraEnvVars) -> anyhow::Result<CassandraSession> {
+    let cluster_urls: Vec<&str> = cfg.url.as_str().split(',').collect();
+
+    let mut sb = SessionBuilder::new().known_nodes(cluster_urls);
+
+    sb = match cfg.compression {
+        CompressionChoice::Lz4 => sb.compression(Some(Compression::Lz4)),
+        CompressionChoice::Snappy => sb.compression(Some(Compression::Snappy)),
+        CompressionChoice::None => sb.compression(None),
+    };
+
+    if cfg.tls != TlsChoice::Disabled {
+        let mut context_builder = SslContextBuilder::new(SslMethod::tls())?;
+
+        if let Some(cert_name) = &cfg.tls_cert {
+            let certdir = fs::canonicalize(PathBuf::from(cert_name.as_str())).await?;
+            context_builder.set_certificate_file(certdir.as_path(), SslFiletype::PEM)?;
+        }
+
+        if cfg.tls == TlsChoice::Verified {
+            context_builder.set_verify(SslVerifyMode::PEER);
+        } else {
+            context_builder.set_verify(SslVerifyMode::NONE);
+        }
+
+        let ssl_context = context_builder.build();
+
+        sb = sb.ssl_context(Some(ssl_context));
+    }
+
+    // Build and set the Keyspace to use.
+    let keyspace = format!("{}_V{}", cfg.namespace.as_str(), SCHEMA_VERSION);
+    sb = sb.use_keyspace(keyspace, false);
+
+    // Set the username and password, if required.
+    if let Some(username) = cfg.username {
+        if let Some(password) = cfg.password {
+            sb = sb.user(username.as_str(), password.as_str());
+        }
+    }
+
+    let session = Box::pin(sb.build()).await?;
+
+    Ok(Arc::new(session))
+}
+
+/// Initialise the Cassandra Cluster Connections.
+pub(crate) async fn init() -> anyhow::Result<()> {
+    let (persistent, volatile) = Settings::cassandra_db_cfg();
+
+    let persistent_session = make_session(persistent).await?;
+    let volatile_session = make_session(volatile).await?;
+
+    create_schema(persistent_session.clone()).await;
+    create_schema(volatile_session.clone()).await;
+
+    Ok(())
+}
diff --git a/catalyst-gateway/bin/src/db/mod.rs b/catalyst-gateway/bin/src/db/mod.rs
new file mode 100644
index 00000000000..6fcc83ba975
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/mod.rs
@@ -0,0 +1,4 @@
+//! 
Database Interfaces + +pub(crate) mod event; +pub(crate) mod index; diff --git a/catalyst-gateway/bin/src/main.rs b/catalyst-gateway/bin/src/main.rs index 67b3be8a93b..7331e373d34 100644 --- a/catalyst-gateway/bin/src/main.rs +++ b/catalyst-gateway/bin/src/main.rs @@ -4,8 +4,7 @@ use clap::Parser; mod build_info; mod cardano; mod cli; -#[allow(dead_code)] -mod event_db; +mod db; mod logger; mod service; mod settings; diff --git a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs index a1a5b031233..342f31dc375 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs @@ -3,7 +3,7 @@ use poem_openapi::{payload::Json, ApiResponse}; use crate::{ - event_db::{ + db::event::{ cardano::chain_state::{BlockHash, DateTime, SlotInfoQueryType, SlotNumber}, error::NotFoundError, EventDB, diff --git a/catalyst-gateway/bin/src/service/api/cardano/mod.rs b/catalyst-gateway/bin/src/service/api/cardano/mod.rs index 23b9de2b45d..3038a15d939 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/mod.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/mod.rs @@ -5,7 +5,7 @@ use poem_openapi::{ }; use crate::{ - event_db::cardano::chain_state::{DateTime, SlotNumber}, + db::event::cardano::chain_state::{DateTime, SlotNumber}, service::{ common::{ objects::cardano::{network::Network, stake_address::StakeAddress}, diff --git a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs index 96472b75a1f..1f6462cdd4d 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs @@ -3,7 +3,7 @@ use poem_openapi::{payload::Json, ApiResponse}; use crate::{ - event_db::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, + db::event::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, service::{ common::{ objects::cardano::{ diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs index 40d60c15d46..422534c1dc7 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs @@ -3,7 +3,7 @@ use poem_openapi::{payload::Json, ApiResponse}; use crate::{ - event_db::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, + db::event::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, service::{ common::{ objects::cardano::{ diff --git a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs index 677a8145d90..df897c5a8fe 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs @@ -3,7 +3,7 @@ use poem_openapi::{payload::Json, ApiResponse}; use crate::{ - event_db::{error::NotFoundError, EventDB}, + db::event::{error::NotFoundError, EventDB}, service::common::{ objects::cardano::{network::Network, sync_state::SyncState}, responses::WithErrorResponses, diff --git a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs index 9dc09fef538..56850e491f0 100644 --- 
a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs @@ -2,7 +2,7 @@ use poem_openapi::{ApiResponse, Enum}; use tracing::debug; -use crate::{event_db::EventDB, logger, service::common::responses::WithErrorResponses}; +use crate::{db::event::EventDB, logger, service::common::responses::WithErrorResponses}; /// `LogLevel` Open API definition. #[derive(Debug, Clone, Copy, Enum)] diff --git a/catalyst-gateway/bin/src/service/api/health/ready_get.rs b/catalyst-gateway/bin/src/service/api/health/ready_get.rs index 50caa8fd9dd..df5db8db55b 100644 --- a/catalyst-gateway/bin/src/service/api/health/ready_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/ready_get.rs @@ -2,7 +2,7 @@ use poem_openapi::ApiResponse; use crate::{ - event_db::{schema_check::MismatchedSchemaError, EventDB}, + db::event::{schema_check::MismatchedSchemaError, EventDB}, service::common::responses::WithErrorResponses, }; diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs index 581b0fe7506..e9a2dd6ac3d 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs @@ -3,7 +3,7 @@ use poem_openapi::{types::Example, Object, Union}; use crate::{ - event_db::cardano::cip36_registration::{Nonce, PaymentAddress, PublicVotingInfo, TxId}, + db::event::cardano::cip36_registration::{Nonce, PaymentAddress, PublicVotingInfo, TxId}, service::{common::objects::cardano::hash::Hash, utilities::to_hex_with_prefix}, }; @@ -73,24 +73,18 @@ impl RegistrationInfo { nonce: Nonce, ) -> Self { let voting_info = match voting_info { - PublicVotingInfo::Direct(voting_key) => { - VotingInfo::Direct(DirectVoter { - voting_key: to_hex_with_prefix(voting_key.bytes()), - }) - }, - PublicVotingInfo::Delegated(delegations) => { - VotingInfo::Delegated(Delegations { - delegations: delegations - .into_iter() - .map(|(voting_key, power)| { - Delegation { - voting_key: to_hex_with_prefix(voting_key.bytes()), - power, - } - }) - .collect(), - }) - }, + PublicVotingInfo::Direct(voting_key) => VotingInfo::Direct(DirectVoter { + voting_key: to_hex_with_prefix(voting_key.bytes()), + }), + PublicVotingInfo::Delegated(delegations) => VotingInfo::Delegated(Delegations { + delegations: delegations + .into_iter() + .map(|(voting_key, power)| Delegation { + voting_key: to_hex_with_prefix(voting_key.bytes()), + power, + }) + .collect(), + }), }; Self { tx_hash: tx_hash.into(), diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs index f4d22157265..f4ea1662211 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs @@ -3,7 +3,7 @@ use poem_openapi::{types::Example, Object}; use crate::{ - event_db::cardano::chain_state::{DateTime, SlotNumber}, + db::event::cardano::chain_state::{DateTime, SlotNumber}, service::common::objects::cardano::hash::Hash, }; diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs index bff2f734896..fda6ee0f7c9 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs +++ 
b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs @@ -2,7 +2,7 @@ use poem_openapi::{types::Example, Object}; -use crate::event_db::cardano::{chain_state::SlotNumber, utxo::StakeAmount}; +use crate::db::event::cardano::{chain_state::SlotNumber, utxo::StakeAmount}; /// User's cardano stake info. #[derive(Object)] diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs index d736775dc2a..c4b7dde539a 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs @@ -3,7 +3,7 @@ use poem_openapi::{types::Example, Object}; use crate::{ - event_db::cardano::chain_state::{DateTime, SlotNumber}, + db::event::cardano::chain_state::{DateTime, SlotNumber}, service::common::objects::cardano::hash::Hash, }; diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs index 5f47b9f51a7..ba393ac3fad 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs @@ -13,8 +13,8 @@ impl Example for EventId { } } -impl From for crate::event_db::legacy::types::event::EventId { +impl From for crate::db::event::legacy::types::event::EventId { fn from(event_id: EventId) -> Self { - crate::event_db::legacy::types::event::EventId(event_id.0) + crate::db::event::legacy::types::event::EventId(event_id.0) } } diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs index 00b9516d522..1a7d1a4b908 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs @@ -19,11 +19,11 @@ impl Example for VoterGroupId { } } -impl TryFrom for VoterGroupId { +impl TryFrom for VoterGroupId { type Error = String; fn try_from( - value: crate::event_db::legacy::types::registration::VoterGroupId, + value: crate::db::event::legacy::types::registration::VoterGroupId, ) -> Result { match value.0.as_str() { "rep" => Ok(Self::Rep), diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs index 1a18140e050..ff6afdc24ac 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs @@ -54,11 +54,11 @@ impl Example for VoterInfo { } } -impl TryFrom for VoterInfo { +impl TryFrom for VoterInfo { type Error = String; fn try_from( - value: crate::event_db::legacy::types::registration::VoterInfo, + value: crate::db::event::legacy::types::registration::VoterInfo, ) -> Result { Ok(Self { voting_power: value.voting_power, diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs index 197436dfd8a..0a60bad3621 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs @@ -34,11 +34,11 @@ impl Example for VoterRegistration { } } -impl TryFrom for VoterRegistration { +impl TryFrom for VoterRegistration { type Error = String; fn try_from( - value: 
crate::event_db::legacy::types::registration::Voter,
+        value: crate::db::event::legacy::types::registration::Voter,
     ) -> Result<Self, Self::Error> {
         Ok(Self {
             voter_info: value.info.try_into()?,
diff --git a/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs b/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs
index 8a55738f95f..9f4d70fc0b2 100644
--- a/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs
+++ b/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs
@@ -9,7 +9,7 @@
 use poem::{http::StatusCode, Endpoint, EndpointExt, Middleware, Request, Result};
 
-use crate::event_db::EventDB;
+use crate::db::event::EventDB;
 
 /// A middleware that raises an error with `ServiceUnavailable` and 503 status code
 /// if a DB schema version mismatch is found in the existing `State`.
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 0f003ddf666..76b292327d8 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -3,6 +3,7 @@ use std::{
     env,
     net::{IpAddr, Ipv4Addr, SocketAddr},
     path::PathBuf,
+    str::FromStr,
     sync::OnceLock,
     time::Duration,
 };
@@ -17,7 +18,10 @@ use url::Url;
 
 use crate::{
     build_info::{log_build_info, BUILD_INFO},
-    event_db,
+    db::{
+        self,
+        index::session::{CompressionChoice, TlsChoice},
+    },
     logger::{self, LogLevel, LOG_LEVEL_DEFAULT},
 };
 
@@ -55,6 +59,18 @@
 const MACHINE_UID_DEFAULT: &str = "UID";
 
 const EVENT_DB_URL_DEFAULT: &str =
     "postgresql://postgres:postgres@localhost/catalyst_events?sslmode=disable";
 
+/// Default Cassandra DB URL for the Persistent DB.
+const CASSANDRA_PERSISTENT_DB_URL_DEFAULT: &str = "127.0.0.1:9042";
+
+/// Default Cassandra DB namespace for the Persistent DB.
+const CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT: &str = "immutable";
+
+/// Default Cassandra DB URL for the Volatile DB.
+const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042";
+
+/// Default Cassandra DB namespace for the Volatile DB.
+const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile";
+
 /// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 UUID.
 fn calculate_service_uuid() -> String {
     let mut hasher = Blake2b::new_keyed(16, "Catalyst-Gateway-Machine-UID".as_bytes());
@@ -124,6 +140,7 @@ pub(crate) struct FollowerSettings {
 }
 
 /// An environment variable read as a string.
+#[derive(Clone)]
 pub(crate) struct StringEnvVar(String);
 
 /// An environment variable read as a string.
@@ -180,6 +197,31 @@ impl StringEnvVar {
     }
 }
 
+/// Configuration for an individual cassandra cluster.
+#[derive(Clone)]
+pub(crate) struct CassandraEnvVars {
+    /// The Address/s of the DB.
+    pub(crate) url: StringEnvVar,
+
+    /// The Namespace of Cassandra DB.
+    pub(crate) namespace: StringEnvVar,
+
+    /// The UserName to use for the Cassandra DB.
+    pub(crate) username: Option<StringEnvVar>,
+
+    /// The Password to use for the Cassandra DB.
+    pub(crate) password: Option<StringEnvVar>,
+
+    /// Use TLS for the connection?
+    pub(crate) tls: TlsChoice,
+
+    /// TLS Certificate to use for the connection, if any.
+    pub(crate) tls_cert: Option<StringEnvVar>,
+
+    /// Compression to use.
+    pub(crate) compression: CompressionChoice,
+}
+
 /// All the `EnvVars` used by the service.
 struct EnvVars {
     /// The github repo owner
@@ -212,6 +254,12 @@ struct EnvVars {
     /// The Password of the Event DB.
     event_db_password: Option<StringEnvVar>,
 
+    /// The Config of the Persistent Cassandra DB.
+    cassandra_persistent_db: CassandraEnvVars,
+
+    /// The Config of the Volatile Cassandra DB.
+ cassandra_volatile_db: CassandraEnvVars, + /// Tick every N seconds until config exists in db #[allow(unused)] check_config_tick: Duration, @@ -223,6 +271,34 @@ struct EnvVars { // NOT development. Secrets however should only be used with the default value in // development +/// Create a config for a cassandra cluster, identified by a default namespace. +fn cassandra_cfg(url: &str, namespace: &str) -> CassandraEnvVars { + let name = namespace.to_uppercase(); + + // We can actually change the namespace, but can't change the name used for env vars. + let namespace = StringEnvVar::new(&format!("CASSANDRA_{name}_NAMESPACE"), namespace); + + let tls = TlsChoice::from_str( + StringEnvVar::new(&format!("CASSANDRA_{name}_TLS"), "Verified").as_str(), + ) + .unwrap_or(TlsChoice::Verified); + + let compression = CompressionChoice::from_str( + StringEnvVar::new(&format!("CASSANDRA_{name}_COMPRESSION"), "Lz4").as_str(), + ) + .unwrap_or(CompressionChoice::Lz4); + + CassandraEnvVars { + url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url), + namespace, + username: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_USERNAME")), + password: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_PASSWORD")), + tls, + tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT")), + compression, + } +} + /// Handle to the mithril sync thread. One for each Network ONLY. static ENV_VARS: Lazy = Lazy::new(|| { // Support env vars in a `.env` file, doesn't need to exist. @@ -255,6 +331,14 @@ static ENV_VARS: Lazy = Lazy::new(|| { event_db_url: StringEnvVar::new("EVENT_DB_URL", EVENT_DB_URL_DEFAULT), event_db_username: StringEnvVar::new_optional("EVENT_DB_USERNAME"), event_db_password: StringEnvVar::new_optional("EVENT_DB_PASSWORD"), + cassandra_persistent_db: cassandra_cfg( + CASSANDRA_PERSISTENT_DB_URL_DEFAULT, + CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT, + ), + cassandra_volatile_db: cassandra_cfg( + CASSANDRA_VOLATILE_DB_URL_DEFAULT, + CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT, + ), check_config_tick, } }); @@ -267,7 +351,7 @@ pub(crate) struct Settings(); impl Settings { /// Initialize the settings data. - pub(crate) fn init(settings: ServiceSettings) -> anyhow::Result<()> { + pub(crate) async fn init(settings: ServiceSettings) -> anyhow::Result<()> { let log_level = settings.log_level; if SERVICE_SETTINGS.set(settings).is_err() { @@ -280,7 +364,9 @@ impl Settings { log_build_info(); - event_db::establish_connection() + Box::pin(db::index::session::init()).await?; + + db::event::establish_connection() } /// Get the current Event DB settings for this service. @@ -299,6 +385,14 @@ impl Settings { (url, user, pass) } + /// Get the Persistent & Volatile Cassandra DB config for this service. 
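+    ///
+    /// Note: this hands out clones of the lazily-initialized env var config;
+    /// `CassandraEnvVars` derives `Clone` above precisely so callers can hold a
+    /// copy across `.await` points without borrowing the global state.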
+ pub(crate) fn cassandra_db_cfg() -> (CassandraEnvVars, CassandraEnvVars) { + ( + ENV_VARS.cassandra_persistent_db.clone(), + ENV_VARS.cassandra_volatile_db.clone(), + ) + } + /// The API Url prefix pub(crate) fn api_url_prefix() -> &'static str { ENV_VARS.api_url_prefix.as_str() From 98176bd40d34e7ffa5303983bb849f5d166d7667 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 17 Jul 2024 23:50:43 +0700 Subject: [PATCH 05/69] feat(backend): Index DB schema setup seems to work --- .config/dictionaries/project.dic | 2 + catalyst-gateway/bin/src/cli.rs | 2 +- catalyst-gateway/bin/src/db/index/mod.rs | 2 +- catalyst-gateway/bin/src/db/index/schema.rs | 108 +++++- .../bin/src/db/index/schema/namespace.cql | 4 + .../db/index/schema/txi_by_stake_table.cql | 19 + .../schema/txo_assets_by_stake_table.cql | 24 ++ .../db/index/schema/txo_by_stake_table.cql | 23 ++ catalyst-gateway/bin/src/db/index/session.rs | 131 +++++-- .../bin/src/service/api/health/live_get.rs | 4 +- catalyst-gateway/bin/src/settings.rs | 330 +++++++++++++++--- utilities/local-cluster/justfile | 1 + 12 files changed, 568 insertions(+), 82 deletions(-) create mode 100644 catalyst-gateway/bin/src/db/index/schema/namespace.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 1b0465b5037..e0a73d9e79c 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -217,10 +217,12 @@ TXNZD Typer unawaited unmanaged +Unstaked utxo UTXO utxos Utxos +varint vite vitss vkey diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index af96228751a..b25b63e308c 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -35,7 +35,7 @@ impl Cli { pub(crate) async fn exec(self) -> anyhow::Result<()> { match self { Self::Run(settings) => { - Settings::init(settings).await?; + Settings::init(settings)?; let mut tasks = Vec::new(); diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs index 8d74bb486ef..5f3ad5020a6 100644 --- a/catalyst-gateway/bin/src/db/index/mod.rs +++ b/catalyst-gateway/bin/src/db/index/mod.rs @@ -1,4 +1,4 @@ //! Blockchain Index Database -mod schema; +pub(crate) mod schema; pub(crate) mod session; diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs index 10ccfed29dc..387ffe07580 100644 --- a/catalyst-gateway/bin/src/db/index/schema.rs +++ b/catalyst-gateway/bin/src/db/index/schema.rs @@ -1,13 +1,115 @@ //! 
Index Schema
-use std::time::Duration;
+use anyhow::Context;
+use handlebars::Handlebars;
+use serde_json::json;
+use tracing::error;
+
+use crate::settings::CassandraEnvVars;
 
 use super::session::CassandraSession;
 
+/// Keyspace Create (Templated)
+const CREATE_NAMESPACE_CQL: &str = include_str!("./schema/namespace.cql");
+
+/// TXO by Stake Address Table Schema
+const CREATE_TABLE_TXO_BY_STAKE_ADDRESS_CQL: &str = include_str!("./schema/txo_by_stake_table.cql");
+/// TXO Assets by Stake Address Table Schema
+const CREATE_TABLE_TXO_ASSETS_BY_STAKE_ADDRESS_CQL: &str =
+    include_str!("./schema/txo_assets_by_stake_table.cql");
+/// TXI by Stake Address Table Schema
+const CREATE_TABLE_TXI_BY_STAKE_ADDRESS_CQL: &str = include_str!("./schema/txi_by_stake_table.cql");
+
 /// The version of the Schema we are using.
+/// Must be incremented if there is a breaking change in any schema tables below.
 pub(crate) const SCHEMA_VERSION: u64 = 1;
 
+/// Get the namespace for a particular db configuration
+pub(crate) fn namespace(cfg: &CassandraEnvVars) -> String {
+    // Build and set the Keyspace to use.
+    format!("{}_V{}", cfg.namespace.as_str(), SCHEMA_VERSION)
+}
+
+/// Create the namespace we will use for this session
+/// Ok to run this if the namespace already exists.
+async fn create_namespace(
+    session: &mut CassandraSession, cfg: &CassandraEnvVars,
+) -> anyhow::Result<()> {
+    let keyspace = namespace(cfg);
+
+    let mut reg = Handlebars::new();
+    // disable default `html_escape` function
+    // which transforms `<`, `>` symbols to `&lt;`, `&gt;`
+    reg.register_escape_fn(|s| s.into());
+    let query = reg.render_template(CREATE_NAMESPACE_CQL, &json!({"keyspace": keyspace}))?;
+
+    // Create the Keyspace if it doesn't exist already.
+    let stmt = session.prepare(query).await?;
+    session.execute(&stmt, ()).await?;
+
+    // Wait for the Schema to be ready.
+    session.await_schema_agreement().await?;
+
+    // Set the Keyspace to use for this session.
+    if let Err(error) = session.use_keyspace(keyspace.clone(), false).await {
+        error!(keyspace = keyspace, error = %error, "Failed to set keyspace");
+    }
+
+    Ok(())
+}
+
+/// Create tables for holding TXO data.
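+///
+/// Idempotent by construction: every statement in the included CQL files is
+/// `CREATE TABLE IF NOT EXISTS`, so re-running this against an existing keyspace
+/// is safe.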
+async fn create_txo_tables(session: &mut CassandraSession) -> anyhow::Result<()> { + let stmt = session + .prepare(CREATE_TABLE_TXO_BY_STAKE_ADDRESS_CQL) + .await + .context("Create Table TXO By Stake Address: Prepared")?; + session + .execute(&stmt, ()) + .await + .context("Create Table TXO By Stake Address: Executed")?; + + let stmt = session + .prepare(CREATE_TABLE_TXO_ASSETS_BY_STAKE_ADDRESS_CQL) + .await + .context("Create Table TXO Assets By Stake Address: Prepared")?; + session + .execute(&stmt, ()) + .await + .context("Create Table TXO Assets By Stake Address: Executed")?; + + Ok(()) +} + +/// Create tables for holding volatile TXI data +async fn create_txi_tables(session: &mut CassandraSession) -> anyhow::Result<()> { + let stmt = session + .prepare(CREATE_TABLE_TXI_BY_STAKE_ADDRESS_CQL) + .await + .context("Create Table TXI By Stake Address: Prepared")?; + + session + .execute(&stmt, ()) + .await + .context("Create Table TXI By Stake Address: Executed")?; + + Ok(()) +} + /// Create the Schema on the connected Cassandra DB -pub(crate) async fn create_schema(_session: CassandraSession) { - tokio::time::sleep(Duration::from_secs(2)).await; +pub(crate) async fn create_schema( + session: &mut CassandraSession, cfg: &CassandraEnvVars, persistent: bool, +) -> anyhow::Result<()> { + create_namespace(session, cfg).await?; + + create_txo_tables(session).await?; + + if !persistent { + create_txi_tables(session).await?; + } + + // Wait for the Schema to be ready. + session.await_schema_agreement().await?; + + Ok(()) } diff --git a/catalyst-gateway/bin/src/db/index/schema/namespace.cql b/catalyst-gateway/bin/src/db/index/schema/namespace.cql new file mode 100644 index 00000000000..29654d114e1 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/namespace.cql @@ -0,0 +1,4 @@ +-- Create the namespace in the DB. +-- Template. +CREATE KEYSPACE IF NOT EXISTS {{keyspace}} + With replication = {'class': 'NetworkTopologyStrategy','replication_factor': 1}; \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql new file mode 100644 index 00000000000..46bb2609659 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql @@ -0,0 +1,19 @@ +-- This table IS NOT used for persistent data. +-- IF data is immutably spent, the required fields are added to the TXO table. +-- This could be ADA or a native asset being spent. +-- This can represent a spend on either immutable data or volatile data, +-- and represents a spend which is potentially subject to rollback. +CREATE TABLE IF NOT EXISTS txi_by_stake ( + stake_address ascii, -- stake address (CIP19 Formatted Text) + slot_no bigint, -- slot number the txi was spent in. + txn smallint, -- transaction number the txi is in. + ofs smallint, -- offset in the transaction the txi is in. + + txo_slot_no bigint, -- slot number the TXO was spent from. + txo_txn bigint, -- Transaction in the slot the txo was spent from. + txo_ofs smallint, -- TXO index in the transaction being spent. + + chain ascii, -- chain the txo was created in (stake_address should be unique, this is for disambiguation only). 
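+
+    -- Illustrative read path (an example, not part of the schema): with the primary
+    -- key below, all spends for one stake address from a given slot onwards can be
+    -- fetched with a single-partition query such as:
+    --   SELECT * FROM txi_by_stake WHERE stake_address = ? AND slot_no >= ?;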
+
+    PRIMARY KEY (stake_address, slot_no, txn, ofs)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
new file mode 100644
index 00000000000..ceef4f7fb5c
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -0,0 +1,24 @@
+-- Transaction Outputs (Native Assets) per stake address.
+-- Unstaked ADA address is an empty string.
+CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
+    -- Primary Key Fields
+    stake_address ascii, -- stake address (CIP19 Formatted Text)
+    slot_no bigint, -- slot number the txo was created in.
+    txn smallint, -- Which Transaction in the Slot is the TXO.
+    ofs smallint, -- offset in the txo list of the transaction the txo is in.
+    policy_hash ascii, -- asset policy hash (Hex Encoded Hash)
+    policy_id ascii, -- id of the policy
+
+    policy_name text, -- name of the policy (UTF8)
+    value varint, -- Value of the asset (u64)
+
+    -- stake_address should be unique, this is for the unlikely case we need disambiguation only.
+    chain ascii, -- chain the txo was created in.
+
+    -- Transaction Spend details. Only ever present in the persistent data.
+    spend_slot bigint, -- slot number the txo was spent in.
+    spend_txn smallint, -- transaction number the txo was spent in.
+    spend_ofs smallint, -- offset in the transaction inputs the txo was spent in.
+
+    PRIMARY KEY (stake_address, slot_no, txn, ofs, policy_hash, policy_id)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
new file mode 100644
index 00000000000..25a02890f4a
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
@@ -0,0 +1,23 @@
+-- Transaction Outputs (ADA) per stake address.
+-- Unstaked ADA address is an empty string.
+CREATE TABLE IF NOT EXISTS txo_by_stake (
+    -- Primary Key Fields
+    stake_address ascii, -- stake address (CIP19 Formatted Text)
+    slot_no bigint, -- slot number the txo was created in.
+    txn smallint, -- Which Transaction in the Slot is the TXO.
+    ofs smallint, -- offset in the txo list of the transaction the txo is in.
+
+    -- stake_address should be unique, this is for the unlikely case we need disambiguation only.
+    chain ascii, -- chain the txo was created in.
+
+    -- Transaction Output Data
+    address ascii, -- TXO address (CIP19 Formatted Text).
+    value varint, -- Lovelace value of the TXO (u64).
+
+    -- Transaction Spend details. Only ever present in the persistent data.
+    spend_slot bigint, -- slot number the txo was spent in.
+    spend_txn smallint, -- transaction number the txo was spent in.
+    spend_ofs smallint, -- offset in the transaction inputs the txo was spent in.
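+
+    -- Illustrative read path (an example, not part of the schema): stake_address is
+    -- the partition key and slot_no/txn/ofs are clustering columns, so a balance
+    -- scan such as:
+    --   SELECT value, spend_slot FROM txo_by_stake WHERE stake_address = ? AND slot_no <= ?;
+    -- reads one partition in clustering order.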
+ + PRIMARY KEY (stake_address, slot_no, txn, ofs) +); diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs index e045da6be92..1d195928337 100644 --- a/catalyst-gateway/bin/src/db/index/session.rs +++ b/catalyst-gateway/bin/src/db/index/session.rs @@ -2,14 +2,19 @@ use crate::settings::{CassandraEnvVars, Settings}; use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode}; -use scylla::{frame::Compression, Session, SessionBuilder}; -use std::{path::PathBuf, sync::Arc}; +use scylla::{frame::Compression, ExecutionProfile, Session, SessionBuilder}; +use std::{ + path::PathBuf, + sync::{Arc, OnceLock}, + time::Duration, +}; use tokio::fs; +use tracing::{error, info}; -use super::schema::{create_schema, SCHEMA_VERSION}; +use super::schema::create_schema; /// Configuration Choices for compression -#[derive(Clone, strum::EnumString)] +#[derive(Clone, strum::EnumString, strum::Display, strum::VariantNames)] #[strum(ascii_case_insensitive)] pub(crate) enum CompressionChoice { /// LZ4 link data compression. @@ -21,7 +26,7 @@ pub(crate) enum CompressionChoice { } /// Configuration Choices for TLS. -#[derive(Clone, strum::EnumString, PartialEq)] +#[derive(Clone, strum::EnumString, strum::Display, strum::VariantNames, PartialEq)] #[strum(ascii_case_insensitive)] pub(crate) enum TlsChoice { /// Disable TLS. @@ -35,11 +40,36 @@ pub(crate) enum TlsChoice { /// A Session on the cassandra database pub(crate) type CassandraSession = Arc; +/// Create a new execution profile based on the given configuration. +fn make_execution_profile(_cfg: &CassandraEnvVars) -> ExecutionProfile { + ExecutionProfile::builder() + .consistency(scylla::statement::Consistency::LocalQuorum) + .serial_consistency(Some(scylla::statement::SerialConsistency::LocalSerial)) + .retry_policy(Box::new(scylla::retry_policy::DefaultRetryPolicy::new())) + .load_balancing_policy( + scylla::load_balancing::DefaultPolicyBuilder::new() + .permit_dc_failover(true) + .build(), + ) + .speculative_execution_policy(Some(Arc::new( + scylla::speculative_execution::SimpleSpeculativeExecutionPolicy { + max_retry_count: 3, + retry_interval: Duration::from_millis(100), + }, + ))) + .build() +} + /// Construct a session based on the given configuration. -async fn make_session(cfg: CassandraEnvVars) -> anyhow::Result { +async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result { let cluster_urls: Vec<&str> = cfg.url.as_str().split(',').collect(); - let mut sb = SessionBuilder::new().known_nodes(cluster_urls); + let mut sb = SessionBuilder::new() + .known_nodes(cluster_urls) + .auto_await_schema_agreement(false); + + let profile_handle = make_execution_profile(cfg).into_handle(); + sb = sb.default_execution_profile_handle(profile_handle); sb = match cfg.compression { CompressionChoice::Lz4 => sb.compression(Some(Compression::Lz4)), @@ -66,13 +96,9 @@ async fn make_session(cfg: CassandraEnvVars) -> anyhow::Result sb = sb.ssl_context(Some(ssl_context)); } - // Build and set the Keyspace to use. - let keyspace = format!("{}_V{}", cfg.namespace.as_str(), SCHEMA_VERSION); - sb = sb.use_keyspace(keyspace, false); - // Set the username and password, if required. 
-    if let Some(username) = cfg.username {
-        if let Some(password) = cfg.password {
+    if let Some(username) = &cfg.username {
+        if let Some(password) = &cfg.password {
             sb = sb.user(username.as_str(), password.as_str());
         }
     }
@@ -82,15 +108,80 @@ async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result<CassandraSession>
     Ok(Arc::new(session))
 }
 
+/// Persistent DB Session.
+static PERSISTENT_SESSION: OnceLock<CassandraSession> = OnceLock::new();
+
+/// Volatile DB Session.
+static VOLATILE_SESSION: OnceLock<CassandraSession> = OnceLock::new();
+
+/// Continuously try to init the DB; if it fails, back off.
+///
+/// Display reasonable logs to help diagnose DB connection issues.
+async fn retry_init(cfg: CassandraEnvVars, persistent: bool) {
+    let mut retry_delay = Duration::from_secs(0);
+    let db_type = if persistent { "Persistent" } else { "Volatile" };
+
+    info!(db_type = db_type, "Index DB Session Creation: Started.");
+
+    cfg.log(persistent);
+
+    loop {
+        tokio::time::sleep(retry_delay).await;
+        retry_delay = Duration::from_secs(30); // 30 seconds if we ever try again.
+
+        info!(
+            db_type = db_type,
+            "Attempting to connect to Cassandra DB..."
+        );
+
+        // Create a Session to the Cassandra DB.
+        let session = match make_session(&cfg).await {
+            Ok(session) => session,
+            Err(error) => {
+                let error = format!("{error:?}");
+                error!(
+                    db_type = db_type,
+                    error = error,
+                    "Failed to Create Cassandra DB Session"
+                );
+                continue;
+            },
+        };
+
+        // Set up the Schema for it.
+        if let Err(error) = create_schema(&mut session.clone(), &cfg, persistent).await {
+            let error = format!("{error:?}");
+            error!(
+                db_type = db_type,
+                error = error,
+                "Failed to Create Cassandra DB Schema"
+            );
+        }
+
+        // Save the session so we can execute queries on the DB
+        if persistent {
+            if PERSISTENT_SESSION.set(session).is_err() {
+                error!("Persistent Session already set.  This should not happen.");
+            };
+        } else if VOLATILE_SESSION.set(session).is_err() {
+            error!("Volatile Session already set.  This should not happen.");
+        };
+
+        // IF we get here, then everything seems to have worked, so finish init.
+        break;
+    }
+
+    info!(db_type = db_type, "Index DB Session Creation: OK.");
+}
+
 /// Initialise the Cassandra Cluster Connections.
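+/// This never blocks and never fails: connection attempts run on background tasks
+/// (see `retry_init` above), and readiness is surfaced separately via `is_ready()`.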
-pub(crate) async fn init() -> anyhow::Result<()> {
+pub(crate) fn init() {
     let (persistent, volatile) = Settings::cassandra_db_cfg();
 
-    let persistent_session = make_session(persistent).await?;
-    let volatile_session = make_session(volatile).await?;
-
-    create_schema(persistent_session.clone()).await;
-    create_schema(volatile_session.clone()).await;
+    let _join_handle = tokio::task::spawn(async move { retry_init(persistent, true).await });
+    let _join_handle = tokio::task::spawn(async move { retry_init(volatile, false).await });
+}
 
-    Ok(())
+/// Check to see if the Cassandra Indexing DB is ready for use
+pub(crate) fn is_ready() -> bool {
+    PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some()
 }
diff --git a/catalyst-gateway/bin/src/service/api/health/live_get.rs b/catalyst-gateway/bin/src/service/api/health/live_get.rs
index 3acf18bf278..29f43538646 100644
--- a/catalyst-gateway/bin/src/service/api/health/live_get.rs
+++ b/catalyst-gateway/bin/src/service/api/health/live_get.rs
@@ -4,7 +4,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
 
 use poem_openapi::ApiResponse;
 
-use crate::service::common::responses::WithErrorResponses;
+use crate::{db, service::common::responses::WithErrorResponses};
 
 /// Flag to determine if the service has started
 static IS_LIVE: AtomicBool = AtomicBool::new(true);
@@ -17,7 +17,7 @@ pub(crate) fn set_live(flag: bool) {
 /// Get the started flag
 #[allow(dead_code)]
 fn is_live() -> bool {
-    IS_LIVE.load(Ordering::Acquire)
+    IS_LIVE.load(Ordering::Acquire) && db::index::session::is_ready()
 }
 
 /// Endpoint responses.
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 76b292327d8..4663382e228 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -1,6 +1,7 @@
 //! Command line and environment variable settings for the service
 use std::{
-    env,
+    env::{self, VarError},
+    fmt::{self, Display},
     net::{IpAddr, Ipv4Addr, SocketAddr},
     path::PathBuf,
     str::FromStr,
@@ -13,7 +14,8 @@ use cryptoxide::{blake2b::Blake2b, mac::Mac};
 use dotenvy::dotenv;
 use duration_string::DurationString;
 use once_cell::sync::Lazy;
-use tracing::log::error;
+use strum::VariantNames;
+use tracing::{error, info};
 use url::Url;
 
 use crate::{
@@ -63,7 +65,7 @@ const EVENT_DB_URL_DEFAULT: &str =
 const CASSANDRA_PERSISTENT_DB_URL_DEFAULT: &str = "127.0.0.1:9042";
 
 /// Default Cassandra DB namespace for the Persistent DB.
-const CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT: &str = "immutable";
+const CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT: &str = "persistent";
 
 /// Default Cassandra DB URL for the Volatile DB.
 const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042";
@@ -141,7 +143,52 @@ pub(crate) struct FollowerSettings {
 
 /// An environment variable read as a string.
 #[derive(Clone)]
-pub(crate) struct StringEnvVar(String);
+pub(crate) struct StringEnvVar {
+    /// Value of the env var.
+    value: String,
+    /// Whether the env var is displayed redacted or not.
+    redacted: bool,
+}
+
+/// Ergonomic way of specifying if an env var needs to be redacted or not.
+enum StringEnvVarParams {
+    /// The env var is plain and should not be redacted.
+    Plain(String, Option<String>),
+    /// The env var is sensitive and should be redacted.
+    Redacted(String, Option<String>),
+}
+
+impl From<&str> for StringEnvVarParams {
+    fn from(s: &str) -> Self {
+        StringEnvVarParams::Plain(String::from(s), None)
+    }
+}
+
+impl From<String> for StringEnvVarParams {
+    fn from(s: String) -> Self {
+        StringEnvVarParams::Plain(s, None)
+    }
+}
+
+impl From<(&str, bool)> for StringEnvVarParams {
+    fn from((s, r): (&str, bool)) -> Self {
+        if r {
+            StringEnvVarParams::Redacted(String::from(s), None)
+        } else {
+            StringEnvVarParams::Plain(String::from(s), None)
+        }
+    }
+}
+
+impl From<(&str, bool, &str)> for StringEnvVarParams {
+    fn from((s, r, c): (&str, bool, &str)) -> Self {
+        if r {
+            StringEnvVarParams::Redacted(String::from(s), Some(String::from(c)))
+        } else {
+            StringEnvVarParams::Plain(String::from(s), Some(String::from(c)))
+        }
+    }
+}
 
 /// An environment variable read as a string.
 impl StringEnvVar {
@@ -167,15 +214,127 @@ impl StringEnvVar {
     ///     let var = StringEnvVar::new("MY_VAR", "default");
     ///     assert_eq!(var.as_str(), "default");
     /// ```
-    fn new(var_name: &str, default_value: &str) -> Self {
-        let value = env::var(var_name).unwrap_or_else(|_| default_value.to_owned());
-        Self(value)
+    fn new(var_name: &str, param: StringEnvVarParams) -> Self {
+        let (default_value, redacted, choices) = match param {
+            StringEnvVarParams::Plain(s, c) => (s, false, c),
+            StringEnvVarParams::Redacted(s, c) => (s, true, c),
+        };
+
+        match env::var(var_name) {
+            Ok(value) => {
+                if redacted {
+                    info!(env = var_name, value = "Redacted", "Env Var Defined");
+                } else {
+                    info!(env = var_name, value = value, "Env Var Defined");
+                }
+                Self { value, redacted }
+            },
+            Err(VarError::NotPresent) => {
+                if let Some(choices) = choices {
+                    if redacted {
+                        info!(
+                            env = var_name,
+                            default = "Default Redacted",
+                            choices = choices,
+                            "Env Var Defaulted"
+                        );
+                    } else {
+                        info!(
+                            env = var_name,
+                            default = default_value,
+                            choices = choices,
+                            "Env Var Defaulted"
+                        );
+                    };
+                } else if redacted {
+                    info!(
+                        env = var_name,
+                        default = "Default Redacted",
+                        "Env Var Defaulted"
+                    );
+                } else {
+                    info!(env = var_name, default = default_value, "Env Var Defaulted");
+                }
+
+                Self {
+                    value: default_value,
+                    redacted,
+                }
+            },
+            Err(error) => {
+                error!(
+                    env = var_name,
+                    default = default_value,
+                    error = ?error,
+                    "Env Var Error"
+                );
+                Self {
+                    value: default_value,
+                    redacted,
+                }
+            },
+        }
    }
 
     /// New Env Var that is optional.
-    fn new_optional(var_name: &str) -> Option<Self> {
-        let value = env::var(var_name).ok()?;
-        Some(Self(value))
+    fn new_optional(var_name: &str, redacted: bool) -> Option<Self> {
+        match env::var(var_name) {
+            Ok(value) => {
+                if redacted {
+                    info!(env = var_name, value = "Redacted", "Env Var Defined");
+                } else {
+                    info!(env = var_name, value = value, "Env Var Defined");
+                }
+                Some(Self { value, redacted })
+            },
+            Err(VarError::NotPresent) => {
+                info!(env = var_name, "Env Var Not Set");
+                None
+            },
+            Err(error) => {
+                error!(
+                    env = var_name,
+                    error = ?error,
+                    "Env Var Error"
+                );
+                None
+            },
+        }
+    }
+
+    /// Convert an env var into the required Enum Type.
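+    ///
+    /// Falls back to (and logs) `default` when the env var is unset or does not
+    /// parse as one of `T::VARIANTS`; the list of valid choices is included in the
+    /// log line to aid diagnosis.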
+ fn new_as_enum( + var_name: &str, default: T, redacted: bool, + ) -> T + where + ::Err: std::fmt::Display, + { + let mut choices = String::new(); + for name in T::VARIANTS { + if choices.is_empty() { + choices.push('['); + } else { + choices.push(','); + } + choices.push_str(name); + } + choices.push(']'); + + let tls = match T::from_str( + StringEnvVar::new( + var_name, + (default.to_string().as_str(), redacted, choices.as_str()).into(), + ) + .as_str(), + ) { + Ok(var) => var, + Err(error) => { + error!(error=%error, default=%default, choices=choices, "Invalid choice. Using Default."); + default + }, + }; + + tls } /// Get the read env var as a str. @@ -184,7 +343,7 @@ impl StringEnvVar { /// /// * &str - the value pub(crate) fn as_str(&self) -> &str { - &self.0 + &self.value } /// Get the read env var as a str. @@ -193,7 +352,25 @@ impl StringEnvVar { /// /// * &str - the value pub(crate) fn as_string(&self) -> String { - self.0.clone() + self.value.clone() + } +} + +impl fmt::Display for StringEnvVar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.redacted { + return write!(f, "REDACTED"); + } + write!(f, "{}", self.value) + } +} + +impl fmt::Debug for StringEnvVar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.redacted { + return write!(f, "REDACTED"); + } + write!(f, "env: {}", self.value) } } @@ -222,6 +399,77 @@ pub(crate) struct CassandraEnvVars { pub(crate) compression: CompressionChoice, } +impl CassandraEnvVars { + /// Create a config for a cassandra cluster, identified by a default namespace. + fn new(url: &str, namespace: &str) -> Self { + let name = namespace.to_uppercase(); + + // We can actually change the namespace, but can't change the name used for env vars. + let namespace = StringEnvVar::new(&format!("CASSANDRA_{name}_NAMESPACE"), namespace.into()); + + let tls = + StringEnvVar::new_as_enum(&format!("CASSANDRA_{name}_TLS"), TlsChoice::Disabled, false); + let compression = StringEnvVar::new_as_enum( + &format!("CASSANDRA_{name}_COMPRESSION"), + CompressionChoice::Lz4, + false, + ); + + /* + let tls = match TlsChoice::from_str( + StringEnvVar::new(&format!("CASSANDRA_{name}_TLS"), "Disabled".into()).as_str(), + ) { + Ok(tls) => tls, + Err(error) => { + error!(error=%error, default=%TlsChoice::Disabled, "Invalid TLS choice. 
Using Default."); + TlsChoice::Disabled + }, + };*/ + + /* + let compression = CompressionChoice::from_str( + StringEnvVar::new(&format!("CASSANDRA_{name}_COMPRESSION"), "Lz4".into()).as_str(), + ) + .unwrap_or(CompressionChoice::Lz4); + */ + + Self { + url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url.into()), + namespace, + username: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_USERNAME"), false), + password: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_PASSWORD"), true), + tls, + tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT"), false), + compression, + } + } + + /// Log the configuration of this Cassandra DB + pub(crate) fn log(&self, persistent: bool) { + let db_type = if persistent { "Persistent" } else { "Volatile" }; + + let auth = match (&self.username, &self.password) { + (Some(u), Some(_)) => format!("Username: {} Password: REDACTED", u.as_str()), + _ => "No Authentication".to_string(), + }; + + let tls_cert = match &self.tls_cert { + None => "No TLS Certificate Defined".to_string(), + Some(cert) => cert.as_string(), + }; + + info!( + url = self.url.as_str(), + namespace = db::index::schema::namespace(self), + auth = auth, + tls = self.tls.to_string(), + cert = tls_cert, + compression = self.compression.to_string(), + "Cassandra {db_type} DB Configuration" + ); + } +} + /// All the `EnvVars` used by the service. struct EnvVars { /// The github repo owner @@ -271,40 +519,12 @@ struct EnvVars { // NOT development. Secrets however should only be used with the default value in // development -/// Create a config for a cassandra cluster, identified by a default namespace. -fn cassandra_cfg(url: &str, namespace: &str) -> CassandraEnvVars { - let name = namespace.to_uppercase(); - - // We can actually change the namespace, but can't change the name used for env vars. - let namespace = StringEnvVar::new(&format!("CASSANDRA_{name}_NAMESPACE"), namespace); - - let tls = TlsChoice::from_str( - StringEnvVar::new(&format!("CASSANDRA_{name}_TLS"), "Verified").as_str(), - ) - .unwrap_or(TlsChoice::Verified); - - let compression = CompressionChoice::from_str( - StringEnvVar::new(&format!("CASSANDRA_{name}_COMPRESSION"), "Lz4").as_str(), - ) - .unwrap_or(CompressionChoice::Lz4); - - CassandraEnvVars { - url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url), - namespace, - username: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_USERNAME")), - password: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_PASSWORD")), - tls, - tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT")), - compression, - } -} - /// Handle to the mithril sync thread. One for each Network ONLY. static ENV_VARS: Lazy = Lazy::new(|| { // Support env vars in a `.env` file, doesn't need to exist. 
dotenv().ok(); - let check_interval = StringEnvVar::new("CHECK_CONFIG_TICK", CHECK_CONFIG_TICK_DEFAULT); + let check_interval = StringEnvVar::new("CHECK_CONFIG_TICK", CHECK_CONFIG_TICK_DEFAULT.into()); let check_config_tick = match DurationString::try_from(check_interval.as_string()) { Ok(duration) => duration.into(), Err(error) => { @@ -318,24 +538,24 @@ static ENV_VARS: Lazy = Lazy::new(|| { }; EnvVars { - github_repo_owner: StringEnvVar::new("GITHUB_REPO_OWNER", GITHUB_REPO_OWNER_DEFAULT), - github_repo_name: StringEnvVar::new("GITHUB_REPO_NAME", GITHUB_REPO_NAME_DEFAULT), + github_repo_owner: StringEnvVar::new("GITHUB_REPO_OWNER", GITHUB_REPO_OWNER_DEFAULT.into()), + github_repo_name: StringEnvVar::new("GITHUB_REPO_NAME", GITHUB_REPO_NAME_DEFAULT.into()), github_issue_template: StringEnvVar::new( "GITHUB_ISSUE_TEMPLATE", - GITHUB_ISSUE_TEMPLATE_DEFAULT, + GITHUB_ISSUE_TEMPLATE_DEFAULT.into(), ), - service_id: StringEnvVar::new("SERVICE_ID", &calculate_service_uuid()), - client_id_key: StringEnvVar::new("CLIENT_ID_KEY", CLIENT_ID_KEY_DEFAULT), - api_host_names: StringEnvVar::new("API_HOST_NAMES", API_HOST_NAMES_DEFAULT), - api_url_prefix: StringEnvVar::new("API_URL_PREFIX", API_URL_PREFIX_DEFAULT), - event_db_url: StringEnvVar::new("EVENT_DB_URL", EVENT_DB_URL_DEFAULT), - event_db_username: StringEnvVar::new_optional("EVENT_DB_USERNAME"), - event_db_password: StringEnvVar::new_optional("EVENT_DB_PASSWORD"), - cassandra_persistent_db: cassandra_cfg( + service_id: StringEnvVar::new("SERVICE_ID", calculate_service_uuid().into()), + client_id_key: StringEnvVar::new("CLIENT_ID_KEY", CLIENT_ID_KEY_DEFAULT.into()), + api_host_names: StringEnvVar::new("API_HOST_NAMES", API_HOST_NAMES_DEFAULT.into()), + api_url_prefix: StringEnvVar::new("API_URL_PREFIX", API_URL_PREFIX_DEFAULT.into()), + event_db_url: StringEnvVar::new("EVENT_DB_URL", EVENT_DB_URL_DEFAULT.into()), + event_db_username: StringEnvVar::new_optional("EVENT_DB_USERNAME", false), + event_db_password: StringEnvVar::new_optional("EVENT_DB_PASSWORD", true), + cassandra_persistent_db: CassandraEnvVars::new( CASSANDRA_PERSISTENT_DB_URL_DEFAULT, CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT, ), - cassandra_volatile_db: cassandra_cfg( + cassandra_volatile_db: CassandraEnvVars::new( CASSANDRA_VOLATILE_DB_URL_DEFAULT, CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT, ), @@ -351,7 +571,7 @@ pub(crate) struct Settings(); impl Settings { /// Initialize the settings data. 
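+    ///
+    /// Expected to be called exactly once at startup: the settings are stored in a
+    /// write-once cell, so later calls cannot replace them.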
- pub(crate) async fn init(settings: ServiceSettings) -> anyhow::Result<()> { + pub(crate) fn init(settings: ServiceSettings) -> anyhow::Result<()> { let log_level = settings.log_level; if SERVICE_SETTINGS.set(settings).is_err() { @@ -364,7 +584,7 @@ impl Settings { log_build_info(); - Box::pin(db::index::session::init()).await?; + db::index::session::init(); db::event::establish_connection() } diff --git a/utilities/local-cluster/justfile b/utilities/local-cluster/justfile index 22d44ce6b54..5b0c4fdb0b8 100644 --- a/utilities/local-cluster/justfile +++ b/utilities/local-cluster/justfile @@ -60,6 +60,7 @@ get-all-logs: temp-scylla-dev-db: mkdir -p /var/lib/scylla/data /var/lib/scylla/commitlog /var/lib/scylla/hints /var/lib/scylla/view_hints docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla --developer-mode=0 --smp 24 + docker logs scylla-dev -f stop-temp-scylla-dev-db: docker stop scylla-dev From 042f5ed5cf148affc2157fb4232e290e29fd4524 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Sun, 21 Jul 2024 18:07:26 +0700 Subject: [PATCH 06/69] WIP --- .vscode/extensions.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.vscode/extensions.json b/.vscode/extensions.json index ad69134d3c4..4355c526b54 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -14,11 +14,11 @@ "tamasfe.even-better-toml", "rust-lang.rust-analyzer", "JScearcy.rust-doc-viewer", - "serayuzgur.crates", "anweiss.cddl-languageserver", "tintinweb.graphviz-interactive-preview", "terrastruct.d2", "bbenoist.vagrant", - "ms-kubernetes-tools.vscode-kubernetes-tools" + "ms-kubernetes-tools.vscode-kubernetes-tools", + "fill-labs.dependi" ] } \ No newline at end of file From b69c30f35338f0797014f40b432872f1e14b8d83 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 24 Jul 2024 15:38:27 +0700 Subject: [PATCH 07/69] fix(rust): Format fixes --- .../c509-certificate/Earthfile | 2 +- catalyst-gateway/Earthfile | 2 +- catalyst-gateway/bin/src/cardano/mod.rs | 48 ++++---- catalyst-gateway/bin/src/cli.rs | 16 ++- .../src/db/event/cardano/chain_state/mod.rs | 72 +++++------ .../event/cardano/cip36_registration/mod.rs | 32 +++-- .../bin/src/db/event/cardano/utxo/mod.rs | 26 ++-- .../db/event/legacy/queries/event/ballot.rs | 52 ++++---- .../event/legacy/queries/event/objective.rs | 9 +- .../db/event/legacy/queries/event/proposal.rs | 10 +- .../db/event/legacy/queries/event/review.rs | 27 ++--- .../db/event/legacy/queries/registration.rs | 35 +++--- .../bin/src/db/event/legacy/queries/search.rs | 37 +++--- .../db/event/legacy/queries/vit_ss/fund.rs | 114 +++++++++--------- catalyst-gateway/bin/src/db/index/schema.rs | 3 +- catalyst-gateway/bin/src/db/index/session.rs | 7 +- catalyst-gateway/bin/src/logger.rs | 3 +- .../cardano/date_time_to_slot_number_get.rs | 29 +++-- .../service/api/cardano/registration_get.rs | 13 +- .../src/service/api/cardano/staked_ada_get.rs | 12 +- .../src/service/api/cardano/sync_state_get.rs | 14 ++- catalyst-gateway/bin/src/service/api/mod.rs | 20 +-- .../objects/cardano/registration_info.rs | 30 +++-- catalyst-gateway/bin/src/settings.rs | 77 +++++------- 24 files changed, 349 insertions(+), 341 deletions(-) diff --git a/catalyst-gateway-crates/c509-certificate/Earthfile b/catalyst-gateway-crates/c509-certificate/Earthfile index 57d036136ce..7a232a662ec 100644 --- a/catalyst-gateway-crates/c509-certificate/Earthfile +++ b/catalyst-gateway-crates/c509-certificate/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.15 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # builder : Set up our target toolchains, and copy our files. builder: diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index a67645339d1..beaedb41df4 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.15 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci IMPORT github.com/input-output-hk/catalyst-ci/earthly/mithril_snapshot:v3.1.15 AS mithril-snapshot-ci #cspell: words rustfmt toolsets USERARCH diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index 92ab07de652..dc7978d075e 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -197,29 +197,31 @@ async fn process_blocks( loop { match follower.next().await { - Ok(chain_update) => match chain_update { - ChainUpdate::Block(data) => { - if blocks_tx.send(data).await.is_err() { - error!("Block indexing task not running"); - break; - }; - }, - ChainUpdate::Rollback(data) => { - let block = match data.decode() { - Ok(block) => block, - Err(err) => { - error!("Unable to decode {network:?} block {err} - skip.."); - continue; - }, - }; - - info!( - "Rollback block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, + Ok(chain_update) => { + match chain_update { + ChainUpdate::Block(data) => { + if blocks_tx.send(data).await.is_err() { + error!("Block indexing task not running"); + break; + }; + }, + ChainUpdate::Rollback(data) => { + let block = match data.decode() { + Ok(block) => block, + Err(err) => { + error!("Unable to decode {network:?} block {err} - skip.."); + continue; + }, + }; + + info!( + "Rollback block NUMBER={} SLOT={} HASH={}", + block.number(), + block.slot(), + hex::encode(block.hash()), + ); + }, + } }, Err(err) => { error!( diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index b25b63e308c..9069974cc17 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -59,15 +59,13 @@ impl Cli { info!("Catalyst Gateway - Shut Down"); - /* - - let followers_fut = start_followers( - event_db.clone(), - settings.follower_settings.check_config_tick, - settings.follower_settings.data_refresh_tick, - machine_id, - );*/ - /*followers_fut.await?;*/ + // let followers_fut = start_followers( + // event_db.clone(), + // settings.follower_settings.check_config_tick, + // settings.follower_settings.data_refresh_tick, + // machine_id, + // ); + // followers_fut.await?; }, Self::Docs(settings) => { let docs = service::get_app_docs(); diff --git a/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs index 2b5f10bc29a..ddc39f76a5a 100644 --- a/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs @@ -58,17 +58,23 @@ impl SlotInfoQueryType { /// Get SQL query fn get_sql_query(&self) -> anyhow::Result { let tmpl_fields = match self { - SlotInfoQueryType::Previous => SlotInfoQueryTmplFields { - sign: "<", - ordering: Some("DESC"), + SlotInfoQueryType::Previous => { + SlotInfoQueryTmplFields { + sign: "<", + ordering: Some("DESC"), + } }, - 
SlotInfoQueryType::Current => SlotInfoQueryTmplFields { - sign: "=", - ordering: None, + SlotInfoQueryType::Current => { + SlotInfoQueryTmplFields { + sign: "=", + ordering: None, + } }, - SlotInfoQueryType::Next => SlotInfoQueryTmplFields { - sign: ">", - ordering: None, + SlotInfoQueryType::Next => { + SlotInfoQueryTmplFields { + sign: ">", + ordering: None, + } }, }; @@ -156,16 +162,13 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_slot_index (slot_no, network, epoch_no, block_time, block_hash) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new( - sink, - &[ - Type::INT8, - Type::TEXT, - Type::INT8, - Type::TIMESTAMPTZ, - Type::BYTEA, - ], - ); + let writer = BinaryCopyInWriter::new(sink, &[ + Type::INT8, + Type::TEXT, + Type::INT8, + Type::TIMESTAMPTZ, + Type::BYTEA, + ]); tokio::pin!(writer); for params in values { @@ -195,10 +198,10 @@ impl EventDB { pub(crate) async fn get_slot_info( date_time: DateTime, network: Network, query_type: SlotInfoQueryType, ) -> anyhow::Result<(SlotNumber, BlockHash, DateTime)> { - let rows = Self::query( - &query_type.get_sql_query()?, - &[&network.to_string(), &date_time], - ) + let rows = Self::query(&query_type.get_sql_query()?, &[ + &network.to_string(), + &date_time, + ]) .await?; let row = rows.first().ok_or(NotFoundError)?; @@ -237,19 +240,16 @@ impl EventDB { // An insert only happens once when there is no update metadata available // All future additions are just updates on ended, slot_no and block_hash - Self::modify( - INSERT_UPDATE_STATE_SQL, - &[ - &i64::try_from(network_id)?, - &last_updated, - &last_updated, - &machine_id, - &slot_no, - &network.to_string(), - &block_hash, - &update, - ], - ) + Self::modify(INSERT_UPDATE_STATE_SQL, &[ + &i64::try_from(network_id)?, + &last_updated, + &last_updated, + &machine_id, + &slot_no, + &network.to_string(), + &block_hash, + &update, + ]) .await?; Ok(()) diff --git a/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs index d2f56d8f79b..669a55654f5 100644 --- a/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs @@ -157,19 +157,16 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_voter_registration (tx_id, stake_credential, public_voting_key, payment_address, nonce, metadata_cip36, stats, valid) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new( - sink, - &[ - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::INT8, - Type::BYTEA, - Type::JSONB, - Type::BOOL, - ], - ); + let writer = BinaryCopyInWriter::new(sink, &[ + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::INT8, + Type::BYTEA, + Type::JSONB, + Type::BOOL, + ]); tokio::pin!(writer); for params in values { @@ -205,10 +202,11 @@ impl EventDB { pub(crate) async fn get_registration_info( stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, ) -> anyhow::Result<(TxId, PaymentAddress, PublicVotingInfo, Nonce)> { - let rows = Self::query( - SELECT_VOTER_REGISTRATION_SQL, - &[&stake_credential, &network.to_string(), &slot_num], - ) + let rows = Self::query(SELECT_VOTER_REGISTRATION_SQL, &[ + &stake_credential, + &network.to_string(), + &slot_num, + ]) .await?; let row = rows.first().ok_or(NotFoundError)?; diff --git a/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs index 79cf50fe76b..4ff39e5d65a 100644 --- 
a/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs @@ -145,16 +145,13 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_utxo (tx_id, index, asset, stake_credential, value) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new( - sink, - &[ - Type::BYTEA, - Type::INT4, - Type::JSONB, - Type::BYTEA, - Type::INT8, - ], - ); + let writer = BinaryCopyInWriter::new(sink, &[ + Type::BYTEA, + Type::INT4, + Type::JSONB, + Type::BYTEA, + Type::INT8, + ]); tokio::pin!(writer); for params in values { @@ -276,10 +273,11 @@ impl EventDB { pub(crate) async fn total_utxo_amount( stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, ) -> anyhow::Result<(StakeAmount, SlotNumber)> { - let row = Self::query_one( - SELECT_TOTAL_UTXO_AMOUNT_SQL, - &[&stake_credential, &network.to_string(), &slot_num], - ) + let row = Self::query_one(SELECT_TOTAL_UTXO_AMOUNT_SQL, &[ + &stake_credential, + &network.to_string(), + &slot_num, + ]) .await?; // Aggregate functions as SUM and MAX return NULL if there are no rows, so we need to diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs index a2687d4a320..f88799c8f68 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs @@ -53,18 +53,20 @@ impl EventDB { pub(crate) async fn get_ballot( event: EventId, objective: ObjectiveId, proposal: ProposalId, ) -> anyhow::Result { - let rows = Self::query( - Self::BALLOT_VOTE_OPTIONS_QUERY, - &[&event.0, &objective.0, &proposal.0], - ) + let rows = Self::query(Self::BALLOT_VOTE_OPTIONS_QUERY, &[ + &event.0, + &objective.0, + &proposal.0, + ]) .await?; let row = rows.first().ok_or(NotFoundError)?; let choices = row.try_get("objective")?; - let rows = Self::query( - Self::BALLOT_VOTE_PLANS_QUERY, - &[&event.0, &objective.0, &proposal.0], - ) + let rows = Self::query(Self::BALLOT_VOTE_PLANS_QUERY, &[ + &event.0, + &objective.0, + &proposal.0, + ]) .await?; let mut voteplans = Vec::new(); for row in rows { @@ -90,10 +92,10 @@ impl EventDB { pub(crate) async fn get_objective_ballots( &self, event: EventId, objective: ObjectiveId, ) -> anyhow::Result> { - let rows = Self::query( - Self::BALLOTS_VOTE_OPTIONS_PER_OBJECTIVE_QUERY, - &[&event.0, &objective.0], - ) + let rows = Self::query(Self::BALLOTS_VOTE_OPTIONS_PER_OBJECTIVE_QUERY, &[ + &event.0, + &objective.0, + ]) .await?; let mut ballots = Vec::new(); @@ -101,10 +103,11 @@ impl EventDB { let choices = row.try_get("objective")?; let proposal_id = ProposalId(row.try_get("proposal_id")?); - let rows = Self::query( - Self::BALLOT_VOTE_PLANS_QUERY, - &[&event.0, &objective.0, &proposal_id.0], - ) + let rows = Self::query(Self::BALLOT_VOTE_PLANS_QUERY, &[ + &event.0, + &objective.0, + &proposal_id.0, + ]) .await?; let mut voteplans = Vec::new(); for row in rows { @@ -142,10 +145,11 @@ impl EventDB { let proposal_id = ProposalId(row.try_get("proposal_id")?); let objective_id = ObjectiveId(row.try_get("objective_id")?); - let rows = Self::query( - Self::BALLOT_VOTE_PLANS_QUERY, - &[&event.0, &objective_id.0, &proposal_id.0], - ) + let rows = Self::query(Self::BALLOT_VOTE_PLANS_QUERY, &[ + &event.0, + &objective_id.0, + &proposal_id.0, + ]) .await?; let mut voteplans = Vec::new(); for row in rows { @@ -174,9 +178,11 @@ impl EventDB { Ok(ballots .into_iter() - .map(|(objective_id, ballots)| ObjectiveBallots { 
- objective_id, - ballots, + .map(|(objective_id, ballots)| { + ObjectiveBallots { + objective_id, + ballots, + } }) .collect()) } diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs index a9d3959b51b..78c1391559a 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs @@ -33,10 +33,11 @@ impl EventDB { pub(crate) async fn get_objectives( event: EventId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = Self::query( - Self::OBJECTIVES_QUERY, - &[&event.0, &limit, &offset.unwrap_or(0)], - ) + let rows = Self::query(Self::OBJECTIVES_QUERY, &[ + &event.0, + &limit, + &offset.unwrap_or(0), + ]) .await?; let mut objectives = Vec::new(); diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs index 52cf189aa65..ec45313ee1a 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs @@ -67,10 +67,12 @@ impl EventDB { pub(crate) async fn get_proposals( &self, event: EventId, objective: ObjectiveId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = Self::query( - Self::PROPOSALS_QUERY, - &[&event.0, &objective.0, &limit, &offset.unwrap_or(0)], - ) + let rows = Self::query(Self::PROPOSALS_QUERY, &[ + &event.0, + &objective.0, + &limit, + &offset.unwrap_or(0), + ]) .await?; let mut proposals = Vec::new(); diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs index 701eec78060..e53a4cbd6eb 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs @@ -41,16 +41,13 @@ impl EventDB { event: EventId, objective: ObjectiveId, proposal: ProposalId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = Self::query( - Self::REVIEWS_QUERY, - &[ - &event.0, - &objective.0, - &proposal.0, - &limit, - &offset.unwrap_or(0), - ], - ) + let rows = Self::query(Self::REVIEWS_QUERY, &[ + &event.0, + &objective.0, + &proposal.0, + &limit, + &offset.unwrap_or(0), + ]) .await?; let mut reviews = Vec::new(); @@ -79,10 +76,12 @@ impl EventDB { pub(crate) async fn get_review_types( &self, event: EventId, objective: ObjectiveId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = Self::query( - Self::REVIEW_TYPES_QUERY, - &[&event.0, &objective.0, &limit, &offset.unwrap_or(0)], - ) + let rows = Self::query(Self::REVIEW_TYPES_QUERY, &[ + &event.0, + &objective.0, + &limit, + &offset.unwrap_or(0), + ]) .await?; let mut review_types = Vec::new(); for row in rows { diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs index 16167eb571e..cf7ae11a4dc 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs @@ -87,10 +87,10 @@ impl EventDB { let voting_power = voter.try_get("voting_power")?; let rows = if let Some(event) = event { - Self::query( - Self::TOTAL_BY_EVENT_VOTING_QUERY, - &[&voting_group.0, &event.0], - ) + Self::query(Self::TOTAL_BY_EVENT_VOTING_QUERY, &[ + &voting_group.0, + &event.0, + ]) .await? 
} else { Self::query(Self::TOTAL_BY_LAST_EVENT_VOTING_QUERY, &[&voting_group.0]).await? @@ -117,10 +117,10 @@ impl EventDB { let rows = if let Some(event) = event { Self::query(Self::VOTER_DELEGATORS_LIST_QUERY, &[&voting_key, &event.0]).await? } else { - Self::query( - Self::VOTER_DELEGATORS_LIST_QUERY, - &[&voting_key, &voter.try_get::<_, i32>("event")?], - ) + Self::query(Self::VOTER_DELEGATORS_LIST_QUERY, &[ + &voting_key, + &voter.try_get::<_, i32>("event")?, + ]) .await? }; @@ -167,19 +167,16 @@ impl EventDB { let delegator_snapshot_info = rows.first().ok_or(NotFoundError)?; let delegation_rows = if let Some(event) = event { - Self::query( - Self::DELEGATIONS_BY_EVENT_QUERY, - &[&stake_public_key, &event.0], - ) + Self::query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ + &stake_public_key, + &event.0, + ]) .await? } else { - Self::query( - Self::DELEGATIONS_BY_EVENT_QUERY, - &[ - &stake_public_key, - &delegator_snapshot_info.try_get::<_, i32>("event")?, - ], - ) + Self::query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ + &stake_public_key, + &delegator_snapshot_info.try_get::<_, i32>("event")?, + ]) .await? }; if delegation_rows.is_empty() { diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs index 22f66852d77..b26c96961e1 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs @@ -107,12 +107,13 @@ impl EventDB { async fn search_total( search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = Self::query( - &Self::construct_count_query(&search_query), - &[&limit, &offset.unwrap_or(0)], - ) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = + Self::query(&Self::construct_count_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) + .await + .map_err(|_| NotFoundError)?; let row = rows.first().ok_or(NotFoundError)?; Ok(SearchResult { @@ -125,10 +126,10 @@ impl EventDB { async fn search_events( search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = Self::query( - &Self::construct_query(&search_query), - &[&limit, &offset.unwrap_or(0)], - ) + let rows: Vec = Self::query(&Self::construct_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) .await .map_err(|_| NotFoundError)?; @@ -164,10 +165,10 @@ impl EventDB { async fn search_objectives( &self, search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = Self::query( - &Self::construct_query(&search_query), - &[&limit, &offset.unwrap_or(0)], - ) + let rows: Vec = Self::query(&Self::construct_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) .await .map_err(|_| NotFoundError)?; @@ -198,10 +199,10 @@ impl EventDB { async fn search_proposals( &self, search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = Self::query( - &Self::construct_query(&search_query), - &[&limit, &offset.unwrap_or(0)], - ) + let rows: Vec = Self::query(&Self::construct_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) .await .map_err(|_| NotFoundError)?; diff --git a/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs index 718c978fdd2..1fac627dc53 100644 --- a/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs @@ -276,62 +276,64 @@ impl EventDB { 
}; let next = match row.try_get::<_, Option>("next_id")? { - Some(id) => Some(FundNextInfo { - id, - fund_name: row.try_get("next_fund_name")?, - stage_dates: FundStageDates { - insight_sharing_start: row - .try_get::<_, Option>("next_insight_sharing_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - proposal_submission_start: row - .try_get::<_, Option>("next_proposal_submission_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - refine_proposals_start: row - .try_get::<_, Option>("next_refine_proposals_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - finalize_proposals_start: row - .try_get::<_, Option>("next_finalize_proposals_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - proposal_assessment_start: row - .try_get::<_, Option>("next_proposal_assessment_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - assessment_qa_start: row - .try_get::<_, Option>("next_assessment_qa_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - snapshot_start: row - .try_get::<_, Option>("next_snapshot_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - voting_start: row - .try_get::<_, Option>("next_voting_start")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - voting_end: row - .try_get::<_, Option>("next_voting_end")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - tallying_end: row - .try_get::<_, Option>("next_tallying_end")? - .unwrap_or_default() - .and_local_timezone(Utc) - .unwrap(), - }, - }), + Some(id) => { + Some(FundNextInfo { + id, + fund_name: row.try_get("next_fund_name")?, + stage_dates: FundStageDates { + insight_sharing_start: row + .try_get::<_, Option>("next_insight_sharing_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + proposal_submission_start: row + .try_get::<_, Option>("next_proposal_submission_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + refine_proposals_start: row + .try_get::<_, Option>("next_refine_proposals_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + finalize_proposals_start: row + .try_get::<_, Option>("next_finalize_proposals_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + proposal_assessment_start: row + .try_get::<_, Option>("next_proposal_assessment_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + assessment_qa_start: row + .try_get::<_, Option>("next_assessment_qa_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + snapshot_start: row + .try_get::<_, Option>("next_snapshot_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + voting_start: row + .try_get::<_, Option>("next_voting_start")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + voting_end: row + .try_get::<_, Option>("next_voting_end")? + .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + tallying_end: row + .try_get::<_, Option>("next_tallying_end")? 
+ .unwrap_or_default() + .and_local_timezone(Utc) + .unwrap(), + }, + }) + }, None => None, }; diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs index 387ffe07580..5497c0e666c 100644 --- a/catalyst-gateway/bin/src/db/index/schema.rs +++ b/catalyst-gateway/bin/src/db/index/schema.rs @@ -5,9 +5,8 @@ use handlebars::Handlebars; use serde_json::json; use tracing::error; -use crate::settings::CassandraEnvVars; - use super::session::CassandraSession; +use crate::settings::CassandraEnvVars; /// Keyspace Create (Templated) const CREATE_NAMESPACE_CQL: &str = include_str!("./schema/namespace.cql"); diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs index 1d195928337..0c4d879e215 100644 --- a/catalyst-gateway/bin/src/db/index/session.rs +++ b/catalyst-gateway/bin/src/db/index/session.rs @@ -1,17 +1,18 @@ //! Session creation and storage -use crate::settings::{CassandraEnvVars, Settings}; -use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode}; -use scylla::{frame::Compression, ExecutionProfile, Session, SessionBuilder}; use std::{ path::PathBuf, sync::{Arc, OnceLock}, time::Duration, }; + +use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode}; +use scylla::{frame::Compression, ExecutionProfile, Session, SessionBuilder}; use tokio::fs; use tracing::{error, info}; use super::schema::create_schema; +use crate::settings::{CassandraEnvVars, Settings}; /// Configuration Choices for compression #[derive(Clone, strum::EnumString, strum::Display, strum::VariantNames)] diff --git a/catalyst-gateway/bin/src/logger.rs b/catalyst-gateway/bin/src/logger.rs index 2ff9e6ca33a..b02dbe1b15f 100644 --- a/catalyst-gateway/bin/src/logger.rs +++ b/catalyst-gateway/bin/src/logger.rs @@ -3,8 +3,7 @@ use std::sync::OnceLock; use clap::ValueEnum; -use tracing::level_filters::LevelFilter; -use tracing::log::error; +use tracing::{level_filters::LevelFilter, log::error}; use tracing_subscriber::{ fmt::{self, format::FmtSpan, time}, prelude::*, diff --git a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs index 342f31dc375..7f7142026f0 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs @@ -50,21 +50,20 @@ pub(crate) async fn endpoint( EventDB::get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) ); - let process_slot_info_result = |slot_info_result: anyhow::Result<( - SlotNumber, - BlockHash, - DateTime, - )>| { - match slot_info_result { - Ok((slot_number, block_hash, block_time)) => Ok(Some(Slot { - slot_number, - block_hash: From::from(block_hash), - block_time, - })), - Err(err) if err.is::() => Ok(None), - Err(err) => Err(err), - } - }; + let process_slot_info_result = + |slot_info_result: anyhow::Result<(SlotNumber, BlockHash, DateTime)>| { + match slot_info_result { + Ok((slot_number, block_hash, block_time)) => { + Ok(Some(Slot { + slot_number, + block_hash: From::from(block_hash), + block_time, + })) + }, + Err(err) if err.is::() => Ok(None), + Err(err) => Err(err), + } + }; let current = match process_slot_info_result(current) { Ok(current) => current, diff --git a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs index 1f6462cdd4d..5b9f89cfed0 100644 
--- a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs @@ -43,10 +43,15 @@ pub(crate) async fn endpoint( // get the total utxo amount from the database match EventDB::get_registration_info(stake_credential, network.into(), date_time).await { - Ok((tx_id, payment_address, voting_info, nonce)) => Responses::Ok(Json( - RegistrationInfo::new(tx_id, &payment_address, voting_info, nonce), - )) - .into(), + Ok((tx_id, payment_address, voting_info, nonce)) => { + Responses::Ok(Json(RegistrationInfo::new( + tx_id, + &payment_address, + voting_info, + nonce, + ))) + .into() + }, Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs index 422534c1dc7..61b32381ba3 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs @@ -43,11 +43,13 @@ pub(crate) async fn endpoint( // get the total utxo amount from the database match EventDB::total_utxo_amount(stake_credential, network.into(), date_time).await { - Ok((amount, slot_number)) => Responses::Ok(Json(StakeInfo { - amount, - slot_number, - })) - .into(), + Ok((amount, slot_number)) => { + Responses::Ok(Json(StakeInfo { + amount, + slot_number, + })) + .into() + }, Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } diff --git a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs index df897c5a8fe..db71beb44f1 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs @@ -30,12 +30,14 @@ pub(crate) async fn endpoint(network: Option) -> AllResponses { let network = network.unwrap_or(Network::Mainnet); match EventDB::last_updated_state(network.into()).await { - Ok((slot_number, block_hash, last_updated)) => Responses::Ok(Json(SyncState { - slot_number, - block_hash: block_hash.into(), - last_updated, - })) - .into(), + Ok((slot_number, block_hash, last_updated)) => { + Responses::Ok(Json(SyncState { + slot_number, + block_hash: block_hash.into(), + last_updated, + })) + .into() + }, Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } diff --git a/catalyst-gateway/bin/src/service/api/mod.rs b/catalyst-gateway/bin/src/service/api/mod.rs index a3ce6555162..6f680854d13 100644 --- a/catalyst-gateway/bin/src/service/api/mod.rs +++ b/catalyst-gateway/bin/src/service/api/mod.rs @@ -99,14 +99,18 @@ pub(crate) fn mk_api() -> OpenApiService<(HealthApi, CardanoApi, LegacyApi), ()> for (name, ip) in &network_interfaces { if *name == "en0" { let (address, desc) = match ip { - IpAddr::V4(_) => ( - format!("http://{ip}:{port}"), - "Server at local IPv4 address", - ), - IpAddr::V6(_) => ( - format!("http://[{ip}]:{port}"), - "Server at local IPv6 address", - ), + IpAddr::V4(_) => { + ( + format!("http://{ip}:{port}"), + "Server at local IPv4 address", + ) + }, + IpAddr::V6(_) => { + ( + format!("http://[{ip}]:{port}"), + "Server at local IPv6 address", + ) + }, }; service = service.server(ServerObject::new(address).description(desc)); } diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs 
b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs index e9a2dd6ac3d..45dda1cf752 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs @@ -73,18 +73,24 @@ impl RegistrationInfo { nonce: Nonce, ) -> Self { let voting_info = match voting_info { - PublicVotingInfo::Direct(voting_key) => VotingInfo::Direct(DirectVoter { - voting_key: to_hex_with_prefix(voting_key.bytes()), - }), - PublicVotingInfo::Delegated(delegations) => VotingInfo::Delegated(Delegations { - delegations: delegations - .into_iter() - .map(|(voting_key, power)| Delegation { - voting_key: to_hex_with_prefix(voting_key.bytes()), - power, - }) - .collect(), - }), + PublicVotingInfo::Direct(voting_key) => { + VotingInfo::Direct(DirectVoter { + voting_key: to_hex_with_prefix(voting_key.bytes()), + }) + }, + PublicVotingInfo::Delegated(delegations) => { + VotingInfo::Delegated(Delegations { + delegations: delegations + .into_iter() + .map(|(voting_key, power)| { + Delegation { + voting_key: to_hex_with_prefix(voting_key.bytes()), + power, + } + }) + .collect(), + }) + }, }; Self { tx_hash: tx_hash.into(), diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs index 4663382e228..86947bd6729 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings.rs @@ -25,10 +25,9 @@ use crate::{ index::session::{CompressionChoice, TlsChoice}, }, logger::{self, LogLevel, LOG_LEVEL_DEFAULT}, + service::utilities::net::{get_public_ipv4, get_public_ipv6}, }; -use crate::service::utilities::net::{get_public_ipv4, get_public_ipv6}; - /// Default address to start service on. const ADDRESS_DEFAULT: &str = "0.0.0.0:3030"; @@ -73,7 +72,8 @@ const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; /// Default Cassandra DB URL for the Persistent DB. const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile"; -/// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 UUID. +/// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 +/// UUID. fn calculate_service_uuid() -> String { let mut hasher = Blake2b::new_keyed(16, "Catalyst-Gateway-Machine-UID".as_bytes()); @@ -306,9 +306,7 @@ impl StringEnvVar { fn new_as_enum( var_name: &str, default: T, redacted: bool, ) -> T - where - ::Err: std::fmt::Display, - { + where ::Err: std::fmt::Display { let mut choices = String::new(); for name in T::VARIANTS { if choices.is_empty() { @@ -415,23 +413,20 @@ impl CassandraEnvVars { false, ); - /* - let tls = match TlsChoice::from_str( - StringEnvVar::new(&format!("CASSANDRA_{name}_TLS"), "Disabled".into()).as_str(), - ) { - Ok(tls) => tls, - Err(error) => { - error!(error=%error, default=%TlsChoice::Disabled, "Invalid TLS choice. Using Default."); - TlsChoice::Disabled - }, - };*/ - - /* - let compression = CompressionChoice::from_str( - StringEnvVar::new(&format!("CASSANDRA_{name}_COMPRESSION"), "Lz4".into()).as_str(), - ) - .unwrap_or(CompressionChoice::Lz4); - */ + // let tls = match TlsChoice::from_str( + // StringEnvVar::new(&format!("CASSANDRA_{name}_TLS"), "Disabled".into()).as_str(), + // ) { + // Ok(tls) => tls, + // Err(error) => { + // error!(error=%error, default=%TlsChoice::Disabled, "Invalid TLS choice. 
Using + // Default."); TlsChoice::Disabled + // }, + // }; + + // let compression = CompressionChoice::from_str( + // StringEnvVar::new(&format!("CASSANDRA_{name}_COMPRESSION"), "Lz4".into()).as_str(), + // ) + // .unwrap_or(CompressionChoice::Lz4); Self { url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url.into()), @@ -691,13 +686,10 @@ impl Settings { ENV_VARS.github_repo_name.as_str() ); - match Url::parse_with_params( - &path, - &[ - ("template", ENV_VARS.github_issue_template.as_str()), - ("title", title), - ], - ) { + match Url::parse_with_params(&path, &[ + ("template", ENV_VARS.github_issue_template.as_str()), + ("title", title), + ]) { Ok(url) => Some(url), Err(e) => { error!("Failed to generate github issue url {:?}", e.to_string()); @@ -788,10 +780,9 @@ mod tests { #[test] fn configured_hosts_default() { let configured_hosts = Settings::api_host_names(); - assert_eq!( - configured_hosts, - vec!["https://api.prod.projectcatalyst.io"] - ); + assert_eq!(configured_hosts, vec![ + "https://api.prod.projectcatalyst.io" + ]); } #[test] @@ -800,13 +791,10 @@ mod tests { &SocketAddr::from(([127, 0, 0, 1], 8080)), "http://api.prod.projectcatalyst.io , https://api.dev.projectcatalyst.io:1234", ); - assert_eq!( - configured_hosts, - vec![ - "http://api.prod.projectcatalyst.io", - "https://api.dev.projectcatalyst.io:1234" - ] - ); + assert_eq!(configured_hosts, vec![ + "http://api.prod.projectcatalyst.io", + "https://api.dev.projectcatalyst.io:1234" + ]); } #[test] @@ -815,10 +803,9 @@ mod tests { &SocketAddr::from(([127, 0, 0, 1], 8080)), "not a hostname , https://api.dev.projectcatalyst.io:1234", ); - assert_eq!( - configured_hosts, - vec!["https://api.dev.projectcatalyst.io:1234"] - ); + assert_eq!(configured_hosts, vec![ + "https://api.dev.projectcatalyst.io:1234" + ]); } #[test] From 209716d75a5f1f77aa0c98db6ee3c9cd90906f18 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 24 Jul 2024 16:08:14 +0700 Subject: [PATCH 08/69] fix(rust): Build fixes --- .gitignore | 3 +++ .secret.template | 1 + .../c509-certificate/rust-toolchain.toml | 2 +- catalyst-gateway/bin/src/build_info.rs | 2 +- catalyst-gateway/bin/src/settings.rs | 8 +++----- 5 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 .secret.template diff --git a/.gitignore b/.gitignore index 9da52559b92..70e9baa196f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +## Secrets +**/.secret + ### Linux ### *~ diff --git a/.secret.template b/.secret.template new file mode 100644 index 00000000000..072b7c4f22c --- /dev/null +++ b/.secret.template @@ -0,0 +1 @@ +GITHUB_TOKEN=Make One at https://github.com/settings/tokens only need public repo, read packages permissions diff --git a/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml b/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml index 08feed89d60..ea7ce47679b 100644 --- a/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml +++ b/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78" +channel = "1.79" profile = "default" \ No newline at end of file diff --git a/catalyst-gateway/bin/src/build_info.rs b/catalyst-gateway/bin/src/build_info.rs index 51c8c87c391..5cfd1ba2e81 100644 --- a/catalyst-gateway/bin/src/build_info.rs +++ b/catalyst-gateway/bin/src/build_info.rs @@ -48,7 +48,7 @@ pub(crate) fn log_build_info() { if let Some(ref vc) = info.version_control { if let Some(git) = vc.git() { - commit_id = git.commit_short_id.clone(); + 
commit_id.clone_from(&git.commit_short_id); commit_timestamp = git.commit_timestamp.to_rfc3339(); if let Some(git_branch) = git.branch.clone() { branch = git_branch; diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs index 86947bd6729..82f9a7fd225 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings.rs @@ -381,7 +381,7 @@ pub(crate) struct CassandraEnvVars { /// The Namespace of Cassandra DB. pub(crate) namespace: StringEnvVar, - /// The UserName to use for the Cassandra DB. + /// The `UserName` to use for the Cassandra DB. pub(crate) username: Option, /// The Password to use for the Cassandra DB.. @@ -491,7 +491,7 @@ struct EnvVars { /// The Address of the Event DB. event_db_url: StringEnvVar, - /// The UserName to use for the Event DB. + /// The `UserName` to use for the Event DB. event_db_username: Option, /// The Address of the Event DB. @@ -780,9 +780,7 @@ mod tests { #[test] fn configured_hosts_default() { let configured_hosts = Settings::api_host_names(); - assert_eq!(configured_hosts, vec![ - "https://api.prod.projectcatalyst.io" - ]); + assert_eq!(configured_hosts, Vec::::new()); } #[test] From c1d09ff7453f6e7fd652b40f2bfc4ac16aaaac54 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 24 Jul 2024 19:15:03 +0700 Subject: [PATCH 09/69] fix(rust): Adjust index DB so we can index without querying, and can optimize on first detected spend. --- .vscode/extensions.json | 3 ++- catalyst-gateway/bin/src/db/index/schema.rs | 11 ++++------- .../db/index/schema/txi_by_stake_table.cql | 19 ------------------- .../db/index/schema/txi_by_txn_hash_table.cql | 9 +++++++++ .../schema/txo_assets_by_stake_table.cql | 14 +++++++------- .../db/index/schema/txo_by_stake_table.cql | 16 ++++++++-------- catalyst-gateway/bin/src/db/index/session.rs | 2 +- justfile | 16 ++++++++++++++++ utilities/local-cluster/justfile | 2 +- 9 files changed, 48 insertions(+), 44 deletions(-) delete mode 100644 catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql create mode 100644 justfile diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 4355c526b54..7b455feb129 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -19,6 +19,7 @@ "terrastruct.d2", "bbenoist.vagrant", "ms-kubernetes-tools.vscode-kubernetes-tools", - "fill-labs.dependi" + "fill-labs.dependi", + "lawrencegrant.cql" ] } \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs index 5497c0e666c..bccf768f5e9 100644 --- a/catalyst-gateway/bin/src/db/index/schema.rs +++ b/catalyst-gateway/bin/src/db/index/schema.rs @@ -17,7 +17,7 @@ const CREATE_TABLE_TXO_BY_STAKE_ADDRESS_CQL: &str = include_str!("./schema/txo_b const CREATE_TABLE_TXO_ASSETS_BY_STAKE_ADDRESS_CQL: &str = include_str!("./schema/txo_assets_by_stake_table.cql"); /// TXI by Stake Address Table Schema -const CREATE_TABLE_TXI_BY_STAKE_ADDRESS_CQL: &str = include_str!("./schema/txi_by_stake_table.cql"); +const CREATE_TABLE_TXI_BY_TXN_HASH_CQL: &str = include_str!("./schema/txi_by_txn_hash_table.cql"); /// The version of the Schema we are using. /// Must be incremented if there is a breaking change in any schema tables below. 
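Each schema CQL file above is embedded into the binary at compile time with `include_str!` and applied as its own prepared statement. A minimal sketch of that pattern, assuming a plain `scylla::Session`; the helper name and error-context strings here are illustrative, not the gateway's actual code:

```rust
use anyhow::Context;
use scylla::Session;

/// CQL embedded at build time, mirroring the constants above.
const CREATE_TABLE_TXI_BY_TXN_HASH_CQL: &str = include_str!("./schema/txi_by_txn_hash_table.cql");

/// Prepare and execute one embedded schema statement.
async fn apply_txi_schema(session: &Session) -> anyhow::Result<()> {
    let stmt = session
        .prepare(CREATE_TABLE_TXI_BY_TXN_HASH_CQL)
        .await
        .context("Create Table TXI By Txn Hash: Prepare")?;
    // DDL takes no bind parameters, so the values tuple is empty.
    session
        .execute(&stmt, ())
        .await
        .context("Create Table TXI By Txn Hash: Execute")?;
    Ok(())
}
```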
@@ -83,7 +83,7 @@ async fn create_txo_tables(session: &mut CassandraSession) -> anyhow::Result<()>
 /// Create tables for holding volatile TXI data
 async fn create_txi_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
     let stmt = session
-        .prepare(CREATE_TABLE_TXI_BY_STAKE_ADDRESS_CQL)
+        .prepare(CREATE_TABLE_TXI_BY_TXN_HASH_CQL)
         .await
         .context("Create Table TXI By Stake Address: Prepared")?;
@@ -97,16 +97,13 @@ async fn create_txi_tables(session: &mut CassandraSession) -> anyhow::Result<()>
 /// Create the Schema on the connected Cassandra DB
 pub(crate) async fn create_schema(
-    session: &mut CassandraSession, cfg: &CassandraEnvVars, persistent: bool,
+    session: &mut CassandraSession, cfg: &CassandraEnvVars,
 ) -> anyhow::Result<()> {
     create_namespace(session, cfg).await?;
 
     create_txo_tables(session).await?;
 
-    if !persistent {
-        create_txi_tables(session).await?;
-    }
-
+    create_txi_tables(session).await?;
 
     // Wait for the Schema to be ready.
     session.await_schema_agreement().await?;
diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql
deleted file mode 100644
index 46bb2609659..00000000000
--- a/catalyst-gateway/bin/src/db/index/schema/txi_by_stake_table.cql
+++ /dev/null
@@ -1,19 +0,0 @@
--- This table IS NOT used for persistent data.
--- IF data is immutably spent, the required fields are added to the TXO table.
--- This could be ADA or a native asset being spent.
--- This can represent a spend on either immutable data or volatile data,
--- and represents a spend which is potentially subject to rollback.
-CREATE TABLE IF NOT EXISTS txi_by_stake (
-    stake_address ascii, -- stake address (CIP19 Formatted Text)
-    slot_no bigint, -- slot number the txi was spent in.
-    txn smallint, -- transaction number the txi is in.
-    ofs smallint, -- offset in the transaction the txi is in.
-
-    txo_slot_no bigint, -- slot number the TXO was spent from.
-    txo_txn bigint, -- Transaction in the slot the txo was spent from.
-    txo_ofs smallint, -- TXO index in the transaction being spent.
-
-    chain ascii, -- chain the txo was created in (stake_address should be unique, this is for disambiguation only).
-
-    PRIMARY KEY (stake_address, slot_no, txn, ofs)
-);
diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
new file mode 100644
index 00000000000..e7314c0325f
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
@@ -0,0 +1,9 @@
+-- This could be ADA or a native asset being spent.
+-- This can represent a spend on either immutable data or volatile data.
+CREATE TABLE IF NOT EXISTS txi_by_txn_hash (
+    txn_hash blob, -- 32 Bytes Transaction Hash that was spent.
+    txo_index int, -- Index of the TXO which was spent
+    slot_no varint, -- slot number when the spend occurred.
+
+    PRIMARY KEY (txn_hash, txo_index, slot_no)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
index ceef4f7fb5c..daee794b6a9 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -5,20 +5,20 @@ CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     stake_address ascii, -- stake address (CIP19 Formatted Text)
     slot_no bigint, -- slot number the txo was created in.
    txn smallint, -- Which Transaction in the Slot is the TXO.
-    ofs smallint, -- offset in the txo list of the transaction the txo is in.
+    txo_index smallint, -- offset in the txo list of the transaction the txo is in.
 
     policy_hash ascii, -- asset policy hash (Hex Encoded Hash)
     policy_id ascii, -- id of the policy
     policy_name text, -- name of the policy (UTF8)
     value varint, -- Value of the asset (u64)
 
-    -- stake_address should be unique, this is for the unlikely case we need disambiguation only.
-    chain ascii, -- chain the txo was created in (stake_address should be unique, this is for disambiguation only).
+    -- Data needed to correlate a spent TXO.
+    txn_hash blob, -- 32 byte hash of this transaction.
 
-    -- Transaction Spend details. Only ever present in the persistent data.
-    spend_slot bigint, -- slot number the txo was spent in.
-    spend_txn smallint, -- transaction number the txo was spent in.
-    spend_ofs smallint, -- offset in the transaction inputs the txo was spent in.
+    spent_slot varint, -- Slot this TXO was spent in.
+    -- This is ONLY calculated/stored
+    -- when first detected in a query lookup.
+    -- It serves as an optimization on subsequent queries.
 
     PRIMARY KEY (stake_address, slot_no, txn, ofs, policy_hash, policy_id)
 );
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
index 25a02890f4a..05f5e304470 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
@@ -5,19 +5,19 @@ CREATE TABLE IF NOT EXISTS txo_by_stake (
     stake_address ascii, -- stake address (CIP19 Formatted Text)
     slot_no bigint, -- slot number the txo was created in.
     txn smallint, -- Which Transaction in the Slot is the TXO.
-    ofs smallint, -- offset in the txo list of the transaction the txo is in.
-
-    -- stake_address should be unique, this is for the unlikely case we need disambiguation only.
-    chain ascii, -- chain the txo was created in.
+    txo_index smallint, -- offset in the txo list of the transaction the txo is in.
 
     -- Transaction Output Data
     address ascii, -- TXO address (CIP19 Formatted Text).
     value varint, -- Lovelace value of the TXO (u64).
 
-    -- Transaction Spend details. Only ever present in the persistent data.
-    spend_slot bigint, -- slot number the txo was spent in.
-    spend_txn smallint, -- transaction number the txo was spent in.
-    spend_ofs smallint, -- offset in the transaction inputs the txo was spent in.
+    -- Data needed to correlate a spent TXO.
+    txn_hash blob, -- 32 byte hash of this transaction.
+
+    spent_slot varint, -- Slot this TXO was spent in.
+    -- This is ONLY calculated/stored
+    -- when first detected in a query lookup.
+    -- It serves as an optimization on subsequent queries.
 
     PRIMARY KEY (stake_address, slot_no, txn, ofs)
 );
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index 0c4d879e215..eb030ef1302 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -149,7 +149,7 @@ async fn retry_init(cfg: CassandraEnvVars, persistent: bool) {
     };
 
     // Set up the Schema for it.
-    if let Err(error) = create_schema(&mut session.clone(), &cfg, persistent).await {
+    if let Err(error) = create_schema(&mut session.clone(), &cfg).await {
         let error = format!("{error:?}");
         error!(
             db_type = db_type,
diff --git a/justfile b/justfile
new file mode 100644
index 00000000000..6bfc8f19ff6
--- /dev/null
+++ b/justfile
@@ -0,0 +1,16 @@
+# use with https://github.com/casey/just
+#
+
+# cspell: words prereqs, commitlog
+
+default:
+    @just --list --unsorted
+
+# Format the rust code
+code_format:
+    cd catalyst-gateway && cargo +nightly fmtfix
+
+# Build and run cat-gateway - linux/windows x86_64
+run-cat-gateway: code_format
+    cd catalyst-gateway && cargo build -r
+    ./catalyst-gateway/target/release/cat-gateway run
diff --git a/utilities/local-cluster/justfile b/utilities/local-cluster/justfile
index 5b0c4fdb0b8..1debd8763f2 100644
--- a/utilities/local-cluster/justfile
+++ b/utilities/local-cluster/justfile
@@ -59,7 +59,7 @@ get-all-logs:
 # TODO: Get the cluster scylla DB exposed on port 9042 of the cluster.
 temp-scylla-dev-db:
     mkdir -p /var/lib/scylla/data /var/lib/scylla/commitlog /var/lib/scylla/hints /var/lib/scylla/view_hints
-    docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla --developer-mode=0 --smp 24
+    docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla --developer-mode=1 --smp 8
     docker logs scylla-dev -f
 
 stop-temp-scylla-dev-db:

From bd4d73be1ad9887c688c2d3421ec327560e9663b Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Thu, 25 Jul 2024 09:22:05 +0700
Subject: [PATCH 10/69] fix(rust): add more docs

---
 .../bin/src/db/index/schema/txo_assets_by_stake_table.cql | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
index daee794b6a9..9e8f17a9601 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -19,6 +19,8 @@ CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     -- This is ONLY calculated/stored
     -- when first detected in a query lookup.
     -- It serves as an optimization on subsequent queries.
+    -- It is also only updated when the reference is the same type,
+    -- i.e., an immutable txo can only record an immutable spend.
PRIMARY KEY (stake_address, slot_no, txn, ofs, policy_hash, policy_id) ); From c53b198ec6f828f1063bca0c774d2ab0504b5aa6 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 25 Jul 2024 13:28:17 +0700 Subject: [PATCH 11/69] fix(rust): basic new follower integration --- catalyst-gateway/Cargo.toml | 8 +- .../bin/src/cardano/cip36_registration/mod.rs | 11 +- catalyst-gateway/bin/src/cardano/mod.rs | 116 +++++++-------- catalyst-gateway/bin/src/cli.rs | 17 ++- .../src/db/event/cardano/chain_state/mod.rs | 3 + .../event/cardano/cip36_registration/mod.rs | 2 + .../bin/src/db/event/cardano/config/mod.rs | 1 + .../bin/src/db/event/cardano/utxo/mod.rs | 5 + catalyst-gateway/bin/src/db/event/mod.rs | 13 +- .../service/common/objects/cardano/network.rs | 4 - .../bin/src/service/utilities/mod.rs | 7 +- catalyst-gateway/bin/src/settings.rs | 140 +++++++++++++----- 12 files changed, 195 insertions(+), 132 deletions(-) diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index ed42bb2e351..5e14c278676 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -44,12 +44,12 @@ dotenvy = "0.15" local-ip-address = "0.6.1" gethostname = "0.5.0" hex = "0.4.3" -handlebars = "5.1.2" +handlebars = "6.0.0" anyhow = "1.0.71" cddl = "0.9.2" ciborium = "0.2" -pallas = { git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "709acb19c52c6b789279ecc4bc8793b5d8b5abe9", version = "0.25.0" } -cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", version="0.0.1" } +pallas = "0.29.0" +cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", branch = "feat/auto-sync-mithril", version="0.1.0" } stringzilla = "3.8.4" duration-string = "0.4.0" once_cell = "1.19.0" @@ -59,7 +59,7 @@ ed25519-dalek = "2.1.1" scylla = { version = "0.13.1", features = ["ssl", "full-serialization"]} strum = { version = "0.26.3", features = ["derive"] } strum_macros = "0.26.4" -openssl = { version = "0.10.64", features = ["vendored"] } +openssl = { version = "0.10.66", features = ["vendored"] } [workspace.lints.rust] warnings = "deny" diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs index 1c3082b4398..29d7a07b781 100644 --- a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs @@ -226,12 +226,11 @@ fn is_valid_rewards_address(rewards_address_prefix: u8, network: Network) -> boo return false; } }, - Network::Testnet => { + _ => { if addr_net != 0 { return false; } }, - _ => (), } // Valid addrs: 0x0?, 0x1?, 0x2?, 0x3?, 0x4?, 0x5?, 0x6?, 0x7?, 0xE?, 0xF?. 
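The rewards-address validation above follows the CIP-19 header byte layout: the low nibble carries the network tag (0 for test networks such as Preprod and Preview, 1 for mainnet) and the high nibble the address type. A self-contained sketch of just the network-tag rule; the function name and shape are illustrative, not the gateway's exact code:

```rust
/// True when a CIP-19 header byte's network tag matches the expected network.
/// Mainnet uses tag 1; all Cardano test networks use tag 0.
fn network_tag_matches(header: u8, mainnet: bool) -> bool {
    let tag = header & 0x0F;
    if mainnet {
        tag == 1
    } else {
        tag == 0
    }
}

#[test]
fn header_byte_network_tags() {
    assert!(network_tag_matches(0x61, true)); // type 6 (enterprise), mainnet
    assert!(network_tag_matches(0xE0, false)); // type 14 (reward), testnet
    assert!(!network_tag_matches(0xE0, true));
}
```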
@@ -477,11 +476,11 @@ fn test_rewards_addr_permutations() { for addr_type in valid_addr_types { let test_addr = addr_type << 4; - assert!(is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(is_valid_rewards_address(test_addr, Network::Preprod)); assert!(!is_valid_rewards_address(test_addr, Network::Mainnet)); let test_addr = addr_type << 4 | 1; - assert!(!is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(!is_valid_rewards_address(test_addr, Network::Preprod)); assert!(is_valid_rewards_address(test_addr, Network::Mainnet)); } @@ -489,11 +488,11 @@ fn test_rewards_addr_permutations() { for addr_type in invalid_addr_types { let test_addr = addr_type << 4; - assert!(!is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(!is_valid_rewards_address(test_addr, Network::Preprod)); assert!(!is_valid_rewards_address(test_addr, Network::Mainnet)); let test_addr = addr_type << 4 | 1; - assert!(!is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(!is_valid_rewards_address(test_addr, Network::Preprod)); assert!(!is_valid_rewards_address(test_addr, Network::Mainnet)); } } diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index dc7978d075e..dc98e3ab756 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -1,87 +1,69 @@ //! Logic for orchestrating followers -use std::{path::PathBuf, time::Duration}; - -/// Handler for follower tasks, allows for control over spawned follower threads -pub type ManageTasks = JoinHandle<()>; - -use anyhow::Context; -use cardano_chain_follower::{ - network_genesis_values, ChainUpdate, Follower, FollowerConfigBuilder, Network, Point, -}; -use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock, MultiEraTx}; -use tokio::{sync::mpsc, task::JoinHandle, time}; -use tracing::{error, info}; - -use crate::{ - db::event::{ - cardano::{ - chain_state::{IndexedFollowerDataParams, MachineId}, - cip36_registration::IndexedVoterRegistrationParams, - config::FollowerConfig, - utxo::{IndexedTxnInputParams, IndexedTxnOutputParams, IndexedTxnParams}, - }, - error::NotFoundError, - EventDB, - }, - settings::Settings, -}; + +use cardano_chain_follower::{ChainFollower, ChainSyncConfig, Network, ORIGIN_POINT, TIP_POINT}; +use tracing::{error, info, warn}; + +use crate::settings::Settings; pub(crate) mod cip36_registration; pub(crate) mod util; /// Blocks batch length that will trigger the blocks buffer to be written to the database. 
+#[allow(dead_code)] const MAX_BLOCKS_BATCH_LEN: usize = 1024; -/// Returns a follower configs, waits until they present inside the db -async fn get_follower_config(check_config_tick: u64) -> anyhow::Result> { - let mut interval = time::interval(time::Duration::from_secs(check_config_tick)); - loop { - // tick until config exists - interval.tick().await; +/// Start syncing a particular network +async fn start_sync_for(chain: Network) -> anyhow::Result<()> { + let cfg = ChainSyncConfig::default_for(chain); + info!(chain = cfg.chain.to_string(), "Starting Sync"); - match EventDB::get_follower_config().await { - Ok(configs) => break Ok(configs), - Err(err) if err.is::() => { - error!("No follower config found"); - continue; - }, - Err(err) => break Err(err), - } + if let Err(error) = cfg.run().await { + error!(chain=%chain, error=%error, "Failed to start chain sync task"); + Err(error)?; } + + Ok(()) } /// Start followers as per defined in the config #[allow(unused)] -pub(crate) async fn start_followers( - check_config_tick: u64, data_refresh_tick: u64, -) -> anyhow::Result<()> { - let mut current_config = get_follower_config(check_config_tick).await?; - loop { - // spawn followers and obtain thread handlers for control and future cancellation - let follower_tasks = spawn_followers( - current_config.clone(), - data_refresh_tick, - Settings::service_id().to_string(), - ) - .await?; - - // Followers should continue indexing until config has changed - current_config = loop { - let new_config = get_follower_config(check_config_tick).await?; - if new_config != current_config { - info!("Config has changed! restarting"); - break new_config; +pub(crate) async fn start_followers() -> anyhow::Result<()> { + let cfg = Settings::follower_cfg(); + + cfg.log(); + + start_sync_for(cfg.chain).await?; + + tokio::spawn(async move { + // We can't sync until the local chain data is synced. + // This call will wait until we sync. + + // Initially simple pure follower. + // TODO, break the initial sync follower into multiple followers syncing the chain + // to the index DB in parallel. + info!(chain = %cfg.chain, "Following"); + let mut follower = ChainFollower::new(cfg.chain, ORIGIN_POINT, TIP_POINT).await; + + while let Some(chain_update) = follower.next().await { + match chain_update.kind { + cardano_chain_follower::Kind::ImmutableBlockRollForward => { + warn!("TODO: Immutable Chain roll forward"); + }, + cardano_chain_follower::Kind::Block => { + let block = chain_update.block_data().decode(); + }, + cardano_chain_follower::Kind::Rollback => { + warn!("TODO: Immutable Chain rollback"); + }, } - }; - - // Config has changed, terminate all followers and restart with new config. 
- info!("Terminating followers"); - for task in follower_tasks { - task.abort(); } - } + }); + + Ok(()) } +const _UNUSED_CODE: &str = r#" + /// Spawn follower threads and return associated handlers async fn spawn_followers( configs: Vec, _data_refresh_tick: u64, machine_id: String, @@ -421,3 +403,5 @@ async fn instantiate_follower( Ok(follower) } + +"#; diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index 9069974cc17..24fbbe823d3 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -5,6 +5,8 @@ use clap::Parser; use tracing::{error, info}; use crate::{ + cardano::start_followers, + db, service::{self, started}, settings::{DocsSettings, ServiceSettings, Settings}, }; @@ -41,6 +43,13 @@ impl Cli { info!("Catalyst Gateway - Starting"); + // Start the DB's + db::index::session::init(); + db::event::establish_connection(); + + // Start the chain indexing follower. + start_followers().await?; + let handle = tokio::spawn(async move { match service::run().await { Ok(()) => info!("Endpoints started ok"), @@ -58,14 +67,6 @@ impl Cli { } info!("Catalyst Gateway - Shut Down"); - - // let followers_fut = start_followers( - // event_db.clone(), - // settings.follower_settings.check_config_tick, - // settings.follower_settings.data_refresh_tick, - // machine_id, - // ); - // followers_fut.await?; }, Self::Docs(settings) => { let docs = service::get_app_docs(); diff --git a/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs index ddc39f76a5a..ff4294df355 100644 --- a/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs @@ -102,6 +102,7 @@ pub(crate) struct IndexedFollowerDataParams<'a> { impl<'a> IndexedFollowerDataParams<'a> { /// Creates a [`IndexedFollowerDataParams`] from block data. + #[allow(dead_code)] pub(crate) fn from_block_data( genesis_values: &GenesisValues, network: &'a str, block: &MultiEraBlock<'a>, ) -> Option { @@ -141,6 +142,7 @@ impl<'a> IndexedFollowerDataParams<'a> { impl EventDB { /// Batch writes follower data. + #[allow(dead_code)] pub(crate) async fn index_many_follower_data( values: &[IndexedFollowerDataParams<'_>], ) -> anyhow::Result<()> { @@ -229,6 +231,7 @@ impl EventDB { /// Mark point in time where the last follower finished indexing in order for future /// followers to pick up from this point + #[allow(dead_code)] pub(crate) async fn refresh_last_updated( last_updated: DateTime, slot_no: SlotNumber, block_hash: BlockHash, network: Network, machine_id: &MachineId, diff --git a/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs index 669a55654f5..1c487f31cf2 100644 --- a/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs @@ -63,6 +63,7 @@ pub(crate) struct IndexedVoterRegistrationParams { impl IndexedVoterRegistrationParams { /// Creates voter registration indexing data from block data. + #[allow(dead_code)] pub(crate) fn from_block_data( block: &MultiEraBlock, network: Network, ) -> Option> { @@ -136,6 +137,7 @@ impl IndexedVoterRegistrationParams { impl EventDB { /// Batch writes voter registration data. 
+ #[allow(dead_code)] pub(crate) async fn index_many_voter_registration_data( values: &[IndexedVoterRegistrationParams], ) -> anyhow::Result<()> { diff --git a/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs index e10b3d834fe..b511989fbfe 100644 --- a/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs @@ -54,6 +54,7 @@ const SELECT_CONFIG_SQL: &str = include_str!("select_config.sql"); impl EventDB { /// Config query + #[allow(dead_code)] pub(crate) async fn get_follower_config() -> anyhow::Result> { let id = "cardano"; let id2 = "follower"; diff --git a/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs b/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs index 4ff39e5d65a..bf3a9fa5e17 100644 --- a/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs @@ -43,6 +43,7 @@ pub(crate) struct IndexedTxnOutputParams { impl IndexedTxnOutputParams { /// Creates transaction indexing data from transaction data. + #[allow(dead_code)] pub(crate) fn from_txn_data(tx: &MultiEraTx) -> Vec { tx.outputs() .into_iter() @@ -100,6 +101,7 @@ pub(crate) struct IndexedTxnInputParams { impl IndexedTxnInputParams { /// Creates transaction indexing data from transaction data. + #[allow(dead_code)] pub(crate) fn from_txn_data(tx: &MultiEraTx) -> Vec { tx.inputs() .into_iter() @@ -124,6 +126,7 @@ impl IndexedTxnInputParams { impl EventDB { /// Batch writes transaction output indexing data. + #[allow(dead_code)] pub(crate) async fn index_many_txn_output_data( values: &[IndexedTxnOutputParams], ) -> anyhow::Result<()> { @@ -178,6 +181,7 @@ impl EventDB { } /// Batch writes transaction input indexing data. + #[allow(dead_code)] pub(crate) async fn index_many_txn_input_data( values: &[IndexedTxnInputParams], ) -> anyhow::Result<()> { @@ -226,6 +230,7 @@ impl EventDB { } /// Batch writes transaction indexing data. + #[allow(dead_code)] pub(crate) async fn index_many_txn_data(values: &[IndexedTxnParams<'_>]) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); diff --git a/catalyst-gateway/bin/src/db/event/mod.rs b/catalyst-gateway/bin/src/db/event/mod.rs index 8df16bd83a4..87256ad8706 100644 --- a/catalyst-gateway/bin/src/db/event/mod.rs +++ b/catalyst-gateway/bin/src/db/event/mod.rs @@ -120,6 +120,7 @@ impl EventDB { /// # Returns /// /// `anyhow::Result<()>` + #[allow(dead_code)] pub(crate) async fn modify(stmt: &str, params: &[&(dyn ToSql + Sync)]) -> anyhow::Result<()> { if Self::is_deep_query_enabled() { Self::explain_analyze_commit(stmt, params).await?; @@ -139,6 +140,7 @@ impl EventDB { } /// Prepend `EXPLAIN ANALYZE` to the query, and commit the transaction. + #[allow(dead_code)] async fn explain_analyze_commit( stmt: &str, params: &[&(dyn ToSql + Sync)], ) -> anyhow::Result<()> { @@ -210,10 +212,15 @@ impl EventDB { /// /// The env var "`DATABASE_URL`" can be set directly as an anv var, or in a /// `.env` file. -pub(crate) fn establish_connection() -> anyhow::Result<()> { +pub(crate) fn establish_connection() { let (url, user, pass) = Settings::event_db_settings(); - let mut config = tokio_postgres::config::Config::from_str(url)?; + // This was pre-validated and can't fail, but provide default in the impossible case it + // does. 
+ let mut config = tokio_postgres::config::Config::from_str(url).unwrap_or_else(|_| { + error!(url = url, "Postgres URL Pre Validation has failed."); + tokio_postgres::config::Config::default() + }); if let Some(user) = user { config.user(user); } @@ -228,6 +235,4 @@ pub(crate) fn establish_connection() -> anyhow::Result<()> { if EVENT_DB_POOL.set(Arc::new(pool)).is_err() { error!("Failed to set event db pool. Called Twice?"); } - - Ok(()) } diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs index 7ce7cd464fc..c40e4e36deb 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs @@ -8,9 +8,6 @@ pub(crate) enum Network { /// Cardano mainnet. #[oai(rename = "mainnet")] Mainnet, - /// Cardano testnet. - #[oai(rename = "testnet")] - Testnet, /// Cardano preprod. #[oai(rename = "preprod")] Preprod, @@ -23,7 +20,6 @@ impl From for cardano_chain_follower::Network { fn from(value: Network) -> Self { match value { Network::Mainnet => Self::Mainnet, - Network::Testnet => Self::Testnet, Network::Preprod => Self::Preprod, Network::Preview => Self::Preview, } diff --git a/catalyst-gateway/bin/src/service/utilities/mod.rs b/catalyst-gateway/bin/src/service/utilities/mod.rs index a4d415d7791..c19d7daccfc 100644 --- a/catalyst-gateway/bin/src/service/utilities/mod.rs +++ b/catalyst-gateway/bin/src/service/utilities/mod.rs @@ -47,10 +47,7 @@ pub(crate) fn check_network( // one, and if not - we return an error. // if the `provided_network` omitted - we return the `testnet` network type if let Some(network) = provided_network { - if !matches!( - network, - Network::Testnet | Network::Preprod | Network::Preview - ) { + if !matches!(network, Network::Preprod | Network::Preview) { return Err(NetworkValidationError::NetworkMismatch( network.to_json_string(), "Testnet".to_string(), @@ -59,7 +56,7 @@ pub(crate) fn check_network( } Ok(network) } else { - Ok(Network::Testnet) + Ok(Network::Preprod) } }, PallasNetwork::Other(x) => Err(NetworkValidationError::UnknownNetwork(x).into()), diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs index 82f9a7fd225..7f0e88440fe 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings.rs @@ -9,6 +9,8 @@ use std::{ time::Duration, }; +use anyhow::anyhow; +use cardano_chain_follower::Network; use clap::Args; use cryptoxide::{blake2b::Blake2b, mac::Mac}; use dotenvy::dotenv; @@ -53,9 +55,6 @@ const API_URL_PREFIX_DEFAULT: &str = "/api"; /// Default `CHECK_CONFIG_TICK` used in development. const CHECK_CONFIG_TICK_DEFAULT: &str = "5s"; -/// Default `MACHINE_UID` used in development -const MACHINE_UID_DEFAULT: &str = "UID"; - /// Default Event DB URL. const EVENT_DB_URL_DEFAULT: &str = "postgresql://postgres:postgres@localhost/catalyst_events?sslmode=disable"; @@ -72,6 +71,12 @@ const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; /// Default Cassandra DB URL for the Persistent DB. const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile"; +/// Default chain to follow. +const CHAIN_FOLLOWER_DEFAULT: Network = Network::Preprod; + +/// Default number of sync tasks (must be in the range 1 to 255 inclusive.) +const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: i64 = 16; + /// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 /// UUID. 
fn calculate_service_uuid() -> String {
@@ -112,10 +117,6 @@ pub(crate) struct ServiceSettings {
     /// Docs settings.
     #[clap(flatten)]
     pub(crate) docs_settings: DocsSettings,
-
-    /// Follower settings.
-    #[clap(flatten)]
-    pub(crate) follower_settings: FollowerSettings,
 }
 
 /// Settings specifies `OpenAPI` docs generation.
@@ -133,14 +134,6 @@ pub(crate) struct DocsSettings {
     pub(crate) server_name: Option<String>,
 }
 
-/// Settings for follower mechanics.
-#[derive(Args, Clone)]
-pub(crate) struct FollowerSettings {
-    /// Machine UID
-    #[clap(long, default_value = MACHINE_UID_DEFAULT, env = "MACHINE_UID")]
-    pub(crate) machine_uid: String,
-}
-
 /// An environment variable read as a string.
 #[derive(Clone)]
 pub(crate) struct StringEnvVar {
@@ -318,7 +311,7 @@ impl StringEnvVar {
         }
         choices.push(']');
 
-        let tls = match T::from_str(
+        let value = match T::from_str(
             StringEnvVar::new(
                 var_name,
                 (default.to_string().as_str(), redacted, choices.as_str()).into(),
@@ -332,7 +325,39 @@ impl StringEnvVar {
             },
         };
 
-        tls
+        value
+    }
+
+    /// Convert an env var into an integer in the bounded range.
+    fn new_as_i64(var_name: &str, default: i64, min: i64, max: i64) -> i64 {
+        let choices = format!("A value in the range {min} to {max} inclusive");
+
+        let raw_value = StringEnvVar::new(
+            var_name,
+            (default.to_string().as_str(), false, choices.as_str()).into(),
+        )
+        .as_string();
+
+        let value = match raw_value.parse::<i64>() {
+            Ok(value) => {
+                if value < min {
+                    error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {min}");
+                    min
+                } else if value > max {
+                    error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {max}");
+                    max
+                } else {
+                    value
+                }
+            },
+            Err(error) => {
+                error!(error=%error, default=default, "{var_name} not an integer. Range = {min} to {max} inclusive. Defaulted");
+                default
+            },
+        };
+
+        value
     }
 
     /// Get the read env var as a str.
@@ -413,21 +438,6 @@ impl CassandraEnvVars {
             false,
         );
 
-        // let tls = match TlsChoice::from_str(
-        //     StringEnvVar::new(&format!("CASSANDRA_{name}_TLS"), "Disabled".into()).as_str(),
-        // ) {
-        //     Ok(tls) => tls,
-        //     Err(error) => {
-        //         error!(error=%error, default=%TlsChoice::Disabled, "Invalid TLS choice. Using
-        // Default."); TlsChoice::Disabled
-        //     },
-        // };
-
-        // let compression = CompressionChoice::from_str(
-        //     StringEnvVar::new(&format!("CASSANDRA_{name}_COMPRESSION"), "Lz4".into()).as_str(),
-        // )
-        // .unwrap_or(CompressionChoice::Lz4);
-
         Self {
             url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url.into()),
             namespace,
@@ -465,6 +475,43 @@ impl CassandraEnvVars {
     }
 }
 
+/// Configuration for the chain follower.
+#[derive(Clone)]
+pub(crate) struct ChainFollowerEnvVars {
+    /// The Blockchain we sync from.
+    pub(crate) chain: Network,
+
+    /// The maximum number of sync tasks.
+    pub(crate) sync_tasks: u8,
+}
+
+impl ChainFollowerEnvVars {
+    /// Create a config for the chain follower.
+ fn new() -> Self { + let chain = + StringEnvVar::new_as_enum(&format!("CHAIN_NETWORK"), CHAIN_FOLLOWER_DEFAULT, false); + let sync_tasks: u8 = StringEnvVar::new_as_i64( + &format!("CHAIN_FOLLOWER_SYNC_TASKS"), + CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT, + 1, + 255, + ) + .try_into() + .unwrap_or(CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT as u8); + + Self { chain, sync_tasks } + } + + /// Log the configuration of this Chain Follower + pub(crate) fn log(&self) { + info!( + chain = self.chain.to_string(), + sync_tasks = self.sync_tasks, + "Chain Follower Configuration" + ); + } +} + /// All the `EnvVars` used by the service. struct EnvVars { /// The github repo owner @@ -503,6 +550,9 @@ struct EnvVars { /// The Config of the Volatile Cassandra DB. cassandra_volatile_db: CassandraEnvVars, + /// The Chain Follower configuration + chain_follower: ChainFollowerEnvVars, + /// Tick every N seconds until config exists in db #[allow(unused)] check_config_tick: Duration, @@ -554,10 +604,26 @@ static ENV_VARS: Lazy = Lazy::new(|| { CASSANDRA_VOLATILE_DB_URL_DEFAULT, CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT, ), + chain_follower: ChainFollowerEnvVars::new(), check_config_tick, } }); +impl EnvVars { + /// Validate env vars in ways we couldn't when they were first loaded. + pub(crate) fn validate() -> anyhow::Result<()> { + let mut status = Ok(()); + + let url = ENV_VARS.event_db_url.as_str(); + if let Err(error) = tokio_postgres::config::Config::from_str(url) { + error!(error=%error, url=url, "Invalid Postgres DB URL."); + status = Err(anyhow!("Environment Variable Validation Error.")); + } + + status + } +} + /// All Settings/Options for the Service. static SERVICE_SETTINGS: OnceLock = OnceLock::new(); @@ -579,9 +645,8 @@ impl Settings { log_build_info(); - db::index::session::init(); - - db::event::establish_connection() + // Validate any settings we couldn't validate when loaded. + EnvVars::validate() } /// Get the current Event DB settings for this service. @@ -608,6 +673,11 @@ impl Settings { ) } + /// Get the configuration of the chain follower. 
+ pub(crate) fn follower_cfg() -> ChainFollowerEnvVars { + ENV_VARS.chain_follower.clone() + } + /// The API Url prefix pub(crate) fn api_url_prefix() -> &'static str { ENV_VARS.api_url_prefix.as_str() From a9bb395b99f2cf3a58ab40cd4130b24085ac2919 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 25 Jul 2024 21:25:24 +0700 Subject: [PATCH 12/69] fix(rust): wip --- catalyst-gateway/Cargo.toml | 1 + catalyst-gateway/bin/Cargo.toml | 1 + catalyst-gateway/bin/src/db/index/block.rs | 45 +++++++++++++++++++ catalyst-gateway/bin/src/db/index/mod.rs | 1 + .../bin/src/db/index/queries/insert_txo.cql | 12 +++++ .../db/index/schema/txo_by_stake_table.cql | 4 +- catalyst-gateway/bin/src/db/index/session.rs | 10 +++++ 7 files changed, 72 insertions(+), 2 deletions(-) create mode 100644 catalyst-gateway/bin/src/db/index/block.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/insert_txo.cql diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index 5e14c278676..39f2524c539 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -60,6 +60,7 @@ scylla = { version = "0.13.1", features = ["ssl", "full-serialization"]} strum = { version = "0.26.3", features = ["derive"] } strum_macros = "0.26.4" openssl = { version = "0.10.66", features = ["vendored"] } +num-bigint = "0.4.6" [workspace.lints.rust] warnings = "deny" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index aeb9ea408c0..3c04049c9b0 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -76,6 +76,7 @@ scylla.workspace = true strum.workspace = true strum_macros.workspace = true openssl.workspace = true +num-bigint.workspace = true [build-dependencies] build-info-build = { workspace = true } diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs new file mode 100644 index 00000000000..e49cb3ccc28 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block.rs @@ -0,0 +1,45 @@ +//! Index a block + +use cardano_chain_follower::MultiEraBlock; +use scylla::{batch::Batch, SerializeRow}; + +use super::session::session; + + +/// TXO by Stake Address Table Schema +const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql"); + +/// Insert TXO Query Parameters +#[derive(SerializeRow)] +struct TxoInsertParams { + stake_address: String, + slot_no: num_bigint::BigInt, + txn: i16, + txo: i16, + address: String, + value: num_bigint::BigInt, + txn_hash: Vec +} + +/// Add all data needed from the block into the indexes. +pub(crate) async fn index_block(block: MultiEraBlock) -> anyhow::Result<()>{ + + // Get the session. This should never fail. + let Some(session) = session(block.immutable()) else { + anyhow::bail!("Failed to get Index DB Session. 
+    };
+
+    // Create a batch statement
+    let mut batch: Batch = Default::default();
+    let mut values = Vec::<TxoInsertParams>::new();
+
+    batch.append_statement(INSERT_TXO_QUERY);
+
+    // Prepare all statements in the batch at once
+    let prepared_batch: Batch = session.prepare_batch(&batch).await?;
+
+    // Run the prepared batch
+    session.batch(&prepared_batch, values).await?;
+
+    Ok(())
+}
\ No newline at end of file
diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs
index 5f3ad5020a6..28bf6523396 100644
--- a/catalyst-gateway/bin/src/db/index/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/mod.rs
@@ -2,3 +2,4 @@

 pub(crate) mod schema;
 pub(crate) mod session;
+pub(crate) mod block;
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql
new file mode 100644
index 00000000000..9461753e712
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql
@@ -0,0 +1,12 @@
+-- Create the TXO Record for a stake address,
+-- But only if it does not already exist.
+INSERT INTO txo_by_stake (
+    stake_address,
+    slot_no,
+    txn,
+    txo,
+    address,
+    value,
+    txn_hash,)
+VALUES(?, ?, ?, ?, ?, ?, ?)
+IF NOT EXISTS;
\ No newline at end of file
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
index 05f5e304470..5446a7a2d3b 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
@@ -3,9 +3,9 @@ CREATE TABLE IF NOT EXISTS txo_by_stake (
     -- Primary Key Fields
     stake_address ascii,  -- stake address (CIP19 Formatted Text)
-    slot_no bigint,       -- slot number the txo was created in.
+    slot_no varint,       -- slot number the txo was created in.
     txn smallint,         -- Which Transaction in the Slot is the TXO.
-    txo_index smallint,   -- offset in the txo list of the transaction the txo is in.
+    txo smallint,         -- offset in the txo list of the transaction the txo is in.

     -- Transaction Output Data
     address ascii,        -- TXO address (CIP19 Formatted Text).
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index eb030ef1302..a71405a244c 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -186,3 +186,13 @@ pub(crate) fn init() {
 pub(crate) fn is_ready() -> bool {
     PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some()
 }
+
+
+/// Get the session needed to perform a query.
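+/// (`persistent` selects the persistent Index DB session; otherwise the volatile session is returned.)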
+pub(crate) fn session(persistent: bool) -> Option<CassandraSession> {
+    if persistent {
+        PERSISTENT_SESSION.get().cloned()
+    } else {
+        VOLATILE_SESSION.get().cloned()
+    }
+}
\ No newline at end of file

From d71c06349f29f7e8ae5d8933ebc945ecf98bca8d Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Mon, 29 Jul 2024 13:46:28 +0700
Subject: [PATCH 13/69] fix(ci): Bump rust compiler version to match CI

---
 catalyst-gateway-crates/c509-certificate/rust-toolchain.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml b/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml
index ea7ce47679b..20a42f2a9f7 100644
--- a/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml
+++ b/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "1.79"
+channel = "1.80"
 profile = "default"
\ No newline at end of file

From 7deebc30caf4982af14339c9edd2003e7c379745 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Mon, 29 Jul 2024 18:51:09 +0700
Subject: [PATCH 14/69] ci(backend): Bump rust version to match CI

---
 catalyst-gateway/rust-toolchain.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalyst-gateway/rust-toolchain.toml b/catalyst-gateway/rust-toolchain.toml
index f175cc34180..3b61003ab09 100644
--- a/catalyst-gateway/rust-toolchain.toml
+++ b/catalyst-gateway/rust-toolchain.toml
@@ -1,5 +1,5 @@
 [toolchain]
-channel = "1.75.0"
+channel = "1.80.0"
 profile = "default"
 components = []
 targets = ["x86_64-unknown-linux-musl"]
\ No newline at end of file

From 63e0ac826a2d3b6356acb5d6b15c1cebb29d5795 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Mon, 29 Jul 2024 18:58:41 +0700
Subject: [PATCH 15/69] fix(backend): Fix code format and lints

---
 catalyst-gateway/bin/src/db/index/mod.rs     |  2 +-
 catalyst-gateway/bin/src/db/index/session.rs |  3 +--
 catalyst-gateway/bin/src/settings.rs         | 17 +++++++----------
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs
index 28bf6523396..5a37ebd0905 100644
--- a/catalyst-gateway/bin/src/db/index/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/mod.rs
@@ -1,5 +1,5 @@
 //! Blockchain Index Database

+pub(crate) mod block;
 pub(crate) mod schema;
 pub(crate) mod session;
-pub(crate) mod block;
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index a71405a244c..1aae71a3613 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -187,7 +187,6 @@ pub(crate) fn is_ready() -> bool {
     PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some()
 }

-
 /// Get the session needed to perform a query.
 pub(crate) fn session(persistent: bool) -> Option<CassandraSession> {
@@ -195,4 +194,4 @@ pub(crate) fn session(persistent: bool) -> Option<CassandraSession> {
     } else {
         VOLATILE_SESSION.get().cloned()
     }
-}
\ No newline at end of file
+}
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 7f0e88440fe..40b3b5eb660 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -75,7 +75,7 @@ const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile";
 const CHAIN_FOLLOWER_DEFAULT: Network = Network::Preprod;

 /// Default number of sync tasks (must be in the range 1 to 255 inclusive.)
-const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: i64 = 16;
+const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u8 = 16;

 /// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4
 /// UUID.
@@ -339,7 +339,7 @@ where {
     )
     .as_string();

-    let value = match raw_value.parse::<i64>() {
+    match raw_value.parse::<i64>() {
         Ok(value) => {
             if value < min {
                 error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {min}");
@@ -355,9 +355,7 @@ where {
             error!(error=%error, default=default, "{var_name} not an integer. Range = {min} to {max} inclusive. Defaulted");
             default
         },
-    };
-
-    value
+    }
 }

 /// Get the read env var as a str.
@@ -488,16 +486,15 @@ pub(crate) struct ChainFollowerEnvVars {
 impl ChainFollowerEnvVars {
     /// Create a config for the chain follower, identified by its env vars.
     fn new() -> Self {
-        let chain =
-            StringEnvVar::new_as_enum(&format!("CHAIN_NETWORK"), CHAIN_FOLLOWER_DEFAULT, false);
+        let chain = StringEnvVar::new_as_enum("CHAIN_NETWORK", CHAIN_FOLLOWER_DEFAULT, false);
         let sync_tasks: u8 = StringEnvVar::new_as_i64(
-            &format!("CHAIN_FOLLOWER_SYNC_TASKS"),
-            CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT,
+            "CHAIN_FOLLOWER_SYNC_TASKS",
+            CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT.into(),
             1,
             255,
         )
         .try_into()
-        .unwrap_or(CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT as u8);
+        .unwrap_or(CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT);

         Self { chain, sync_tasks }
     }

From 0463d929a7ceff529217e7f89d77cc60f8472ff0 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Mon, 29 Jul 2024 18:59:10 +0700
Subject: [PATCH 16/69] feat(backend): simple new block indexer just to test
 the logic works

---
 catalyst-gateway/bin/src/cardano/mod.rs       |   8 +-
 catalyst-gateway/bin/src/db/index/block.rs    | 214 ++++++++++++++++--
 .../bin/src/db/index/queries/insert_txi.cql   |   8 +
 .../bin/src/db/index/queries/insert_txo.cql   |   2 +-
 .../src/db/index/queries/insert_txo_asset.cql |  13 ++
 .../db/index/schema/txi_by_txn_hash_table.cql |   2 +-
 .../schema/txo_assets_by_stake_table.cql      |  10 +-
 .../db/index/schema/txo_by_stake_table.cql    |   2 +-
 8 files changed, 235 insertions(+), 24 deletions(-)
 create mode 100644 catalyst-gateway/bin/src/db/index/queries/insert_txi.cql
 create mode 100644 catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql

diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs
index dc98e3ab756..a227cd5d23a 100644
--- a/catalyst-gateway/bin/src/cardano/mod.rs
+++ b/catalyst-gateway/bin/src/cardano/mod.rs
@@ -3,7 +3,7 @@
 use cardano_chain_follower::{ChainFollower, ChainSyncConfig, Network, ORIGIN_POINT, TIP_POINT};
 use tracing::{error, info, warn};

-use crate::settings::Settings;
+use crate::{db::index::block::index_block, settings::Settings};

 pub(crate) mod cip36_registration;
 pub(crate) mod util;
@@ -50,7 +50,11 @@ pub(crate) async fn start_followers() -> anyhow::Result<()> {
                     warn!("TODO: Immutable Chain roll forward");
                 },
                 cardano_chain_follower::Kind::Block => {
-                    let block = chain_update.block_data().decode();
+                    let block = chain_update.block_data();
+                    if let Err(error) = index_block(block).await {
+                        error!(chain=%cfg.chain, error=%error, "Failed to index block");
+                        return;
+                    }
                 },
                 cardano_chain_follower::Kind::Rollback => {
                     warn!("TODO: Immutable Chain rollback");
diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs
index e49cb3ccc28..08557d9d986 100644
--- a/catalyst-gateway/bin/src/db/index/block.rs
+++ b/catalyst-gateway/bin/src/db/index/block.rs
@@ -2,44 +2,232 @@

 use cardano_chain_follower::MultiEraBlock;
 use scylla::{batch::Batch, SerializeRow};
+use tokio::try_join;
+use tracing::{error, warn};

 use super::session::session;

-
-/// TXO by Stake Address Table Schema
+/// TXO by Stake Address Indexing query
 const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql");
+/// TXO Asset by Stake Address Indexing Query
+const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql");
+/// TXI by Txn hash Index
+const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql");

 /// Insert TXO Query Parameters
 #[derive(SerializeRow)]
 struct TxoInsertParams {
-    stake_address: String,
+    /// Stake Address - Binary 28 bytes. 0 bytes = not staked.
+    stake_address: Vec<u8>,
+    /// Block Slot Number
     slot_no: num_bigint::BigInt,
+    /// Transaction Offset inside the block.
     txn: i16,
+    /// Transaction Output Offset inside the transaction.
     txo: i16,
+    /// Actual full TXO Address
     address: String,
+    /// Actual TXO Value in lovelace
     value: num_bigint::BigInt,
-    txn_hash: Vec<u8>
+    /// Transactions hash.
+    txn_hash: Vec<u8>,
 }

+/// Insert TXO Asset Query Parameters
+#[derive(SerializeRow)]
+struct TxoAssetInsertParams {
+    /// Stake Address - Binary 28 bytes. 0 bytes = not staked.
+    stake_address: Vec<u8>,
+    /// Block Slot Number
+    slot_no: num_bigint::BigInt,
+    /// Transaction Offset inside the block.
+    txn: i16,
+    /// Transaction Output Offset inside the transaction.
+    txo: i16,
+    /// Policy hash of the asset
+    policy_id: Vec<u8>,
+    /// Policy name of the asset
+    policy_name: String,
+    /// Value of the asset
+    value: num_bigint::BigInt,
+    /// Transactions hash.
+    txn_hash: Vec<u8>,
+}
+
+/// Insert TXI Query Parameters
+#[derive(SerializeRow)]
+struct TxiInsertParams {
+    /// Spent Transactions Hash
+    txn_hash: Vec<u8>,
+    /// TXO Index spent.
+    txo: i16,
+    /// Block Slot Number when spend occurred.
+    slot_no: num_bigint::BigInt,
+}
+
+/// Extracts a stake address from a TXO if possible.
+/// Returns None if it is not possible.
+/// If we want to index, but can not determine a stake key hash, then return an empty Vec.
+/// Otherwise return the stake key hash as a vec of 28 bytes.
+fn extract_stake_address(
+    txo: &pallas::ledger::traverse::MultiEraOutput,
+) -> Option<(Vec<u8>, String)> {
+    let stake_address = match txo.address() {
+        Ok(address) => {
+            let address_string = match address.to_bech32() {
+                Ok(address) => address,
+                Err(error) => {
+                    error!(error=%error,"Error converting to bech32: skipping.");
+                    return None;
+                },
+            };
+
+            match address {
+                // Byron addresses do not have stake addresses.
+                pallas::ledger::addresses::Address::Byron(_) => (Vec::<u8>::new(), address_string),
+                pallas::ledger::addresses::Address::Shelley(address) => {
+                    match address.delegation() {
+                        pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => {
+                            (hash.to_vec(), address_string)
+                        },
+                        pallas::ledger::addresses::ShelleyDelegationPart::Script(_) => {
+                            warn!("Script Stake address detected, not supported. Not indexing.");
+                            (Vec::<u8>::new(), address_string)
+                        },
+                        pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_) => {
+                            warn!("Pointer Stake address detected, not supported. Not indexing.");
+                            (Vec::<u8>::new(), address_string)
+                        },
+                        pallas::ledger::addresses::ShelleyDelegationPart::Null => {
+                            (Vec::<u8>::new(), address_string)
+                        },
+                    }
+                },
+                pallas::ledger::addresses::Address::Stake(_) => {
+                    // This should NOT appear in a TXO, so report if it does. But don't index it as
+                    // a stake address.
+ warn!("Unexpected Stake address found in TXO. Refusing to index."); + return None; + }, + } + }, + Err(error) => { + // This should not ever happen. + error!(error=%error, "Failed to get Address from TXO. Skipping TXO."); + return None; + }, + }; + + Some(stake_address) +} +/// Convert a usize to an i16 and saturate at `i16::MAX` +fn usize_to_i16(value: usize) -> i16 { + value.try_into().unwrap_or(i16::MAX) +} + +/// Add all data needed from the block into the indexes. +#[allow(clippy::similar_names)] +pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // Get the session. This should never fail. let Some(session) = session(block.immutable()) else { anyhow::bail!("Failed to get Index DB Session. Can not index block."); }; - + // Create a batch statement - let mut batch: Batch = Default::default(); - let mut values = Vec::::new(); + let mut txo_batch: Batch = Batch::default(); + let mut txo_values = Vec::::new(); + + let mut txo_asset_batch: Batch = Batch::default(); + let mut txo_asset_values = Vec::::new(); + + let mut txi_batch: Batch = Batch::default(); + let mut txi_values = Vec::::new(); + + let block_data = block.decode(); + let slot_no = block_data.slot(); + + for (txn_index, txs) in block_data.txs().iter().enumerate() { + let txn_hash = txs.hash().to_vec(); + + // Index the TXI's. + for txi in txs.inputs() { + let txn_hash = txi.hash().to_vec(); + let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX); - batch.append_statement(INSERT_TXO_QUERY); + txi_batch.append_statement(INSERT_TXI_QUERY); + txi_values.push(TxiInsertParams { + txn_hash, + txo, + slot_no: slot_no.into(), + }); + } + + // TODO: Index minting. + // let mint = txs.mints().iter() {}; + + // TODO: Index Metadata. + + // TODO: Index Stake address hash to stake address reverse lookups. + + // Index the TXO's. 
+ for (txo_index, txo) in txs.outputs().iter().enumerate() { + let Some((stake_address, address)) = extract_stake_address(txo) else { + continue; + }; + + let value = txo.lovelace_amount(); + + txo_batch.append_statement(INSERT_TXO_QUERY); + txo_values.push(TxoInsertParams { + stake_address: stake_address.clone(), + slot_no: slot_no.into(), + txn: usize_to_i16(txn_index), + txo: usize_to_i16(txo_index), + address, + value: value.into(), + txn_hash: txn_hash.clone(), + }); + + for asset in txo.non_ada_assets() { + let policy_id = asset.policy().to_vec(); + for policy_asset in asset.assets() { + if policy_asset.is_output() { + let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); + let value = policy_asset.any_coin(); + + txo_asset_batch.append_statement(INSERT_TXO_ASSET_QUERY); + txo_asset_values.push(TxoAssetInsertParams { + stake_address: stake_address.clone(), + slot_no: slot_no.into(), + txn: usize_to_i16(txn_index), + txo: usize_to_i16(txo_index), + policy_id: policy_id.clone(), + policy_name, + value: value.into(), + txn_hash: txn_hash.clone(), + }); + } else { + error!("Minting MultiAsset in TXO."); + } + } + } + } + } // Prepare all statements in the batch at once - let prepared_batch: Batch = session.prepare_batch(&batch).await?; + let (prepared_txo_batch, prepared_txo_asset_batch, prepared_txi_batch) = try_join!( + session.prepare_batch(&txo_batch), + session.prepare_batch(&txo_asset_batch), + session.prepare_batch(&txi_batch), + )?; // Run the prepared batch - session.batch(&prepared_batch, values).await?; + let _res = try_join!( + session.batch(&prepared_txo_batch, txo_values), + session.batch(&prepared_txo_asset_batch, txo_asset_values), + session.batch(&prepared_txi_batch, txi_values), + )?; Ok(()) -} \ No newline at end of file +} diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql new file mode 100644 index 00000000000..1e37abbf4f2 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql @@ -0,0 +1,8 @@ +-- Create the TXI Record for a transaction hash, +-- But only if it does not already exist. +INSERT INTO txi_by_txn_hash ( + txn_hash, + txo, + slot_no) +VALUES(?, ?, ?) +IF NOT EXISTS; \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql index 9461753e712..b1cccb448af 100644 --- a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql @@ -7,6 +7,6 @@ INSERT INTO txo_by_stake ( txo, address, value, - txn_hash,) + txn_hash) VALUES(?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS; \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql new file mode 100644 index 00000000000..a5a01011856 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql @@ -0,0 +1,13 @@ +-- Create the TXO Record for a stake address, +-- But only if it does not already exist. +INSERT INTO txo_by_stake ( + stake_address, + slot_no, + txn, + txo, + policy_id, + policy_name, + value, + txn_hash) +VALUES(?, ?, ?, ?, ?, ?, ?, ?) 
+IF NOT EXISTS;
\ No newline at end of file
diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
index e7314c0325f..df0e915cdb8 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
@@ -2,7 +2,7 @@
 -- This can represent a spend on either immutable data or volatile data.
 CREATE TABLE IF NOT EXISTS txi_by_txn_hash (
     txn_hash blob,    -- 32 Bytes Transaction Hash that was spent.
-    txo_index int,    -- Index of the TXO which was spent
+    txo smallint,     -- Index of the TXO which was spent
     slot_no varint,   -- slot number when the spend occurred.

     PRIMARY KEY (txn_hash, txo_index, slot_no)
 );
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
index 9e8f17a9601..d5190a33e20 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -2,13 +2,11 @@
 -- Unstaked ADA address is an empty string.
 CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     -- Primary Key Fields
-    stake_address ascii,  -- stake address (CIP19 Formatted Text)
+    stake_address blob,   -- stake address hash (28 bytes stake address hash, zero length if not staked.)
     slot_no bigint,       -- slot number the txo was created in.
     txn smallint,         -- Which Transaction in the Slot is the TXO.
-    txo_index smallint,   -- offset in the txo list of the transaction the txo is in.
-    policy_hash ascii,    -- asset policy hash (Hex Encoded Hash)
-    policy_id ascii,      -- id of the policy
-
+    txo smallint,         -- offset in the txo list of the transaction the txo is in.
+    policy_id blob,       -- asset policy hash (id) (28 byte binary hash)
     policy_name text,     -- name of the policy (UTF8)
     value varint,         -- Value of the asset (u64)

@@ -22,5 +20,5 @@ CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     -- It is also only updated when the reference is the same type
     -- ie, an immutable txo can only record an immutable spend.

-    PRIMARY KEY (stake_address, slot_no, txn, ofs, policy_hash, policy_id)
+    PRIMARY KEY (stake_address, slot_no, txn, txo, policy_id, policy_name)
 );
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
index 5446a7a2d3b..e216f492f33 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
@@ -2,7 +2,7 @@
 -- Unstaked ADA address is an empty string.
 CREATE TABLE IF NOT EXISTS txo_by_stake (
     -- Primary Key Fields
-    stake_address ascii,  -- stake address (CIP19 Formatted Text)
+    stake_address blob,   -- stake address hash (28 bytes stake address hash, zero length if not staked.)
     slot_no varint,       -- slot number the txo was created in.
     txn smallint,         -- Which Transaction in the Slot is the TXO.
     txo smallint,         -- offset in the txo list of the transaction the txo is in.
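Note: `prepare_batch` plus `session.batch` (as used in PATCH 16 above) sends every statement in the batch in a single round trip, while each `IF NOT EXISTS` row is a Cassandra/ScyllaDB lightweight transaction with Paxos overhead. A minimal, self-contained sketch of the same driver calls, assuming a local ScyllaDB node, the default `volatile` keyspace from the settings, and the `txi_by_txn_hash` schema above already applied (illustrative only, not part of the patch):

    use num_bigint::BigInt;
    use scylla::{batch::Batch, SessionBuilder};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Connect to a local node and select the keyspace the schema was applied to.
        let session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?;
        session.use_keyspace("volatile", false).await?;

        // One statement per batch entry; values are supplied as one tuple per statement.
        let mut batch: Batch = Batch::default();
        batch.append_statement(
            "INSERT INTO txi_by_txn_hash (txn_hash, txo, slot_no) VALUES (?, ?, ?) IF NOT EXISTS",
        );
        let prepared = session.prepare_batch(&batch).await?;

        // `varint` columns map to `num_bigint::BigInt` in the driver.
        session.batch(&prepared, ((vec![0u8; 32], 0i16, BigInt::from(42)),)).await?;
        Ok(())
    }

(The lightweight-transaction cost is part of why PATCH 17 below replaces these batched inserts with plain idempotent `UPDATE` writes.)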
From 9c5f21ac7eca1ddfa172b4a41cc189afdb435208 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Tue, 30 Jul 2024 12:12:41 +0700 Subject: [PATCH 17/69] feat(gateway): Simple indexing with cassandra seems to work --- .config/dictionaries/project.dic | 3 +- catalyst-gateway/bin/src/cardano/mod.rs | 13 + catalyst-gateway/bin/src/db/index/block.rs | 279 ++++++++++++------ .../bin/src/db/index/queries/insert_txi.cql | 12 +- .../bin/src/db/index/queries/insert_txo.cql | 19 +- .../src/db/index/queries/insert_txo_asset.cql | 23 +- .../db/index/schema/txi_by_txn_hash_table.cql | 4 +- .../schema/txo_assets_by_stake_table.cql | 4 +- .../db/index/schema/txo_by_stake_table.cql | 2 +- 9 files changed, 232 insertions(+), 127 deletions(-) diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 75a00e07701..07d633702db 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -218,6 +218,8 @@ trailings TXNZD Typer unawaited +unchunk +Unlogged unmanaged Unstaked utxo @@ -248,4 +250,3 @@ xctest xctestrun xcworkspace yoroi -unchunk diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index a227cd5d23a..2fa840b2315 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -44,6 +44,9 @@ pub(crate) async fn start_followers() -> anyhow::Result<()> { info!(chain = %cfg.chain, "Following"); let mut follower = ChainFollower::new(cfg.chain, ORIGIN_POINT, TIP_POINT).await; + let mut blocks: u64 = 0; + let mut hit_tip: bool = false; + while let Some(chain_update) = follower.next().await { match chain_update.kind { cardano_chain_follower::Kind::ImmutableBlockRollForward => { @@ -51,6 +54,16 @@ pub(crate) async fn start_followers() -> anyhow::Result<()> { }, cardano_chain_follower::Kind::Block => { let block = chain_update.block_data(); + if blocks == 0 { + info!("Indexing first block."); + } + blocks += 1; + + if chain_update.tip && !hit_tip { + hit_tip = true; + info!("Hit tip after {blocks} blocks."); + } + if let Err(error) = index_block(block).await { error!(chain=%cfg.chain, error=%error, "Failed to index block"); return; diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs index 08557d9d986..dc15b0c103a 100644 --- a/catalyst-gateway/bin/src/db/index/block.rs +++ b/catalyst-gateway/bin/src/db/index/block.rs @@ -1,9 +1,11 @@ //! Index a block +use std::sync::Arc; + use cardano_chain_follower::MultiEraBlock; -use scylla::{batch::Batch, SerializeRow}; -use tokio::try_join; -use tracing::{error, warn}; +use scylla::SerializeRow; +use tokio::join; +use tracing::{debug, error, warn}; use super::session::session; @@ -13,6 +15,9 @@ const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql"); const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql"); /// TXI by Txn hash Index const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql"); +/// This is used to indicate that there is no stake address, and still meet the +/// requirement for the index primary key to be non empty. +const NO_STAKE_ADDRESS: &[u8] = &[0; 1]; /// Insert TXO Query Parameters #[derive(SerializeRow)] @@ -67,39 +72,40 @@ struct TxiInsertParams { /// Extracts a stake address from a TXO if possible. /// Returns None if it is not possible. -/// If we want to index, but can not determine a stake key hash, then return an empty Vec. -/// Otherwise return the stake key hash as a vec of 28 bytes. 
+/// If we want to index, but can not determine a stake key hash, then return a Vec with a
+/// single 0 byte. This is because the index DB needs data in the primary key, so we
+/// use a single byte of 0 to indicate that there is no stake address, and still have a
+/// primary key on the table. Otherwise return the stake key hash as a vec of 28 bytes.
 fn extract_stake_address(
     txo: &pallas::ledger::traverse::MultiEraOutput,
 ) -> Option<(Vec<u8>, String)> {
     let stake_address = match txo.address() {
         Ok(address) => {
-            let address_string = match address.to_bech32() {
-                Ok(address) => address,
-                Err(error) => {
-                    error!(error=%error,"Error converting to bech32: skipping.");
+            match address {
+                // Byron addresses do not have stake addresses and are not supported.
+                pallas::ledger::addresses::Address::Byron(_) => {
                     return None;
                 },
-            };
-
-            match address {
-                // Byron addresses do not have stake addresses.
-                pallas::ledger::addresses::Address::Byron(_) => (Vec::<u8>::new(), address_string),
                 pallas::ledger::addresses::Address::Shelley(address) => {
+                    let address_string = match address.to_bech32() {
+                        Ok(address) => address,
+                        Err(error) => {
+                            error!(error=%error,"Error converting to bech32: skipping.");
+                            return None;
+                        },
+                    };
+
                     match address.delegation() {
-                        pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => {
+                        pallas::ledger::addresses::ShelleyDelegationPart::Script(hash)
+                        | pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => {
                             (hash.to_vec(), address_string)
                         },
-                        pallas::ledger::addresses::ShelleyDelegationPart::Script(_) => {
-                            warn!("Script Stake address detected, not supported. Not indexing.");
-                            (Vec::<u8>::new(), address_string)
-                        },
                         pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_) => {
-                            warn!("Pointer Stake address detected, not supported. Not indexing.");
-                            (Vec::<u8>::new(), address_string)
+                            warn!("Pointer Stake address detected, not supported. Treat as if there is no stake address.");
+                            (NO_STAKE_ADDRESS.to_vec(), address_string)
                         },
                         pallas::ledger::addresses::ShelleyDelegationPart::Null => {
-                            (Vec::<u8>::new(), address_string)
+                            (NO_STAKE_ADDRESS.to_vec(), address_string)
                         },
                     }
                 },
@@ -126,6 +132,108 @@ fn usize_to_i16(value: usize) -> i16 {
     value.try_into().unwrap_or(i16::MAX)
 }

+/// Index the transaction Inputs.
+fn index_txi(
+    session: &Arc<scylla::Session>, txi_query: &scylla::prepared_statement::PreparedStatement,
+    txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64,
+) -> Vec<tokio::task::JoinHandle<Result<scylla::QueryResult, scylla::transport::errors::QueryError>>>
+{
+    let mut query_handles: Vec<
+        tokio::task::JoinHandle<Result<scylla::QueryResult, scylla::transport::errors::QueryError>>,
+    > = Vec::new();
+
+    // Index the TXI's.
+    for txi in txs.inputs() {
+        let txn_hash = txi.hash().to_vec();
+        let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX);
+
+        let nested_txi_query = txi_query.clone();
+        let nested_session = session.clone();
+        query_handles.push(tokio::spawn(async move {
+            nested_session
+                .execute(&nested_txi_query, TxiInsertParams {
+                    txn_hash,
+                    txo,
+                    slot_no: slot_no.into(),
+                })
+                .await
+        }));
+    }
+
+    query_handles
+}
+
+/// Index the transaction Outputs.
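+/// (Spawns one write task per TXO and per native asset; the caller awaits the returned handles.)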
+fn index_txo(
+    session: &Arc<scylla::Session>, txo_query: &scylla::prepared_statement::PreparedStatement,
+    txo_asset_query: &scylla::prepared_statement::PreparedStatement,
+    txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn_hash: &[u8], txn_index: i16,
+) -> Vec<tokio::task::JoinHandle<Result<scylla::QueryResult, scylla::transport::errors::QueryError>>>
+{
+    let mut query_handles: Vec<
+        tokio::task::JoinHandle<Result<scylla::QueryResult, scylla::transport::errors::QueryError>>,
+    > = Vec::new();
+
+    for (txo_index, txo) in txs.outputs().iter().enumerate() {
+        let Some((stake_address, address)) = extract_stake_address(txo) else {
+            continue;
+        };
+
+        let value = txo.lovelace_amount();
+
+        let nested_txo_query = txo_query.clone();
+        let nested_session = session.clone();
+        let nested_txn_hash = txn_hash.to_vec();
+        let nested_stake_address = stake_address.clone();
+        query_handles.push(tokio::spawn(async move {
+            nested_session
+                .execute(&nested_txo_query, TxoInsertParams {
+                    stake_address: nested_stake_address,
+                    slot_no: slot_no.into(),
+                    txn: txn_index,
+                    txo: usize_to_i16(txo_index),
+                    address,
+                    value: value.into(),
+                    txn_hash: nested_txn_hash,
+                })
+                .await
+        }));
+
+        for asset in txo.non_ada_assets() {
+            let policy_id = asset.policy().to_vec();
+            for policy_asset in asset.assets() {
+                if policy_asset.is_output() {
+                    let policy_name = policy_asset.to_ascii_name().unwrap_or_default();
+                    let value = policy_asset.any_coin();
+
+                    let nested_txo_asset_query = txo_asset_query.clone();
+                    let nested_session = session.clone();
+                    let nested_txn_hash = txn_hash.to_vec();
+                    let nested_stake_address = stake_address.clone();
+                    let nested_policy_id = policy_id.clone();
+                    query_handles.push(tokio::spawn(async move {
+                        nested_session
+                            .execute(&nested_txo_asset_query, TxoAssetInsertParams {
+                                stake_address: nested_stake_address,
+                                slot_no: slot_no.into(),
+                                txn: txn_index,
+                                txo: usize_to_i16(txo_index),
+                                policy_id: nested_policy_id,
+                                policy_name,
+                                value: value.into(),
+                                txn_hash: nested_txn_hash,
+                            })
+                            .await
+                    }));
+                } else {
+                    error!("Minting MultiAsset in TXO.");
+                }
+            }
+        }
+    }
+    query_handles
+}
+
 /// Add all data needed from the block into the indexes.
 #[allow(clippy::similar_names)]
 pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {
@@ -134,15 +242,42 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {
         anyhow::bail!("Failed to get Index DB Session. Can not index block.");
     };

-    // Create a batch statement
-    let mut txo_batch: Batch = Batch::default();
-    let mut txo_values = Vec::<TxoInsertParams>::new();
+    // As our indexing operations span multiple partitions, they can not be batched.
+    // So use tokio threads to allow multiple writes to be dispatched simultaneously.
+    let mut query_handles: Vec<
+        tokio::task::JoinHandle<Result<scylla::QueryResult, scylla::transport::errors::QueryError>>,
+    > = Vec::new();
+
+    // Pre-prepare our queries.
+    let (txo_query, txo_asset_query, txi_query) = join!(
+        session.prepare(INSERT_TXO_QUERY),
+        session.prepare(INSERT_TXO_ASSET_QUERY),
+        session.prepare(INSERT_TXI_QUERY),
+    );
+
+    if let Err(ref error) = txo_query {
+        error!(error=%error,"Failed to prepare Insert TXO Query.");
+    };
+    if let Err(ref error) = txo_asset_query {
+        error!(error=%error,"Failed to prepare Insert TXO Asset Query.");
+    };
+    if let Err(ref error) = txi_query {
+        error!(error=%error,"Failed to prepare Insert TXI Query.");
+    };
+
+    let mut txo_query = txo_query?;
+    let mut txo_asset_query = txo_asset_query?;
+    let mut txi_query = txi_query?;

-    let mut txo_asset_batch: Batch = Batch::default();
-    let mut txo_asset_values = Vec::<TxoAssetInsertParams>::new();
+    // We just want to write as fast as possible, consistency at this stage isn't required.
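+    // (`Consistency::Any` acknowledges a write once any single node, even via hinted handoff, accepts it.)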
+    txo_query.set_consistency(scylla::statement::Consistency::Any);
+    txo_asset_query.set_consistency(scylla::statement::Consistency::Any);
+    txi_query.set_consistency(scylla::statement::Consistency::Any);

-    let mut txi_batch: Batch = Batch::default();
-    let mut txi_values = Vec::<TxiInsertParams>::new();
+    // These operations are idempotent, because they are always the same data.
+    txo_query.set_is_idempotent(true);
+    txo_asset_query.set_is_idempotent(true);
+    txi_query.set_is_idempotent(true);

     let block_data = block.decode();
     let slot_no = block_data.slot();
@@ -151,17 +286,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {
         let txn_hash = txs.hash().to_vec();

         // Index the TXI's.
-        for txi in txs.inputs() {
-            let txn_hash = txi.hash().to_vec();
-            let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX);
-
-            txi_batch.append_statement(INSERT_TXI_QUERY);
-            txi_values.push(TxiInsertParams {
-                txn_hash,
-                txo,
-                slot_no: slot_no.into(),
-            });
-        }
+        query_handles.append(&mut index_txi(&session, &txi_query, txs, slot_no));

         // TODO: Index minting.
         // let mint = txs.mints().iter() {};
@@ -171,63 +296,29 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {

         // TODO: Index Stake address hash to stake address reverse lookups.

         // Index the TXO's.
-        for (txo_index, txo) in txs.outputs().iter().enumerate() {
-            let Some((stake_address, address)) = extract_stake_address(txo) else {
-                continue;
-            };
-
-            let value = txo.lovelace_amount();
-
-            txo_batch.append_statement(INSERT_TXO_QUERY);
-            txo_values.push(TxoInsertParams {
-                stake_address: stake_address.clone(),
-                slot_no: slot_no.into(),
-                txn: usize_to_i16(txn_index),
-                txo: usize_to_i16(txo_index),
-                address,
-                value: value.into(),
-                txn_hash: txn_hash.clone(),
-            });
-
-            for asset in txo.non_ada_assets() {
-                let policy_id = asset.policy().to_vec();
-                for policy_asset in asset.assets() {
-                    if policy_asset.is_output() {
-                        let policy_name = policy_asset.to_ascii_name().unwrap_or_default();
-                        let value = policy_asset.any_coin();
-
-                        txo_asset_batch.append_statement(INSERT_TXO_ASSET_QUERY);
-                        txo_asset_values.push(TxoAssetInsertParams {
-                            stake_address: stake_address.clone(),
-                            slot_no: slot_no.into(),
-                            txn: usize_to_i16(txn_index),
-                            txo: usize_to_i16(txo_index),
-                            policy_id: policy_id.clone(),
-                            policy_name,
-                            value: value.into(),
-                            txn_hash: txn_hash.clone(),
-                        });
-                    } else {
-                        error!("Minting MultiAsset in TXO.");
-                    }
-                }
-            }
-        }
+        query_handles.append(&mut index_txo(
+            &session,
+            &txo_query,
+            &txo_asset_query,
+            txs,
+            slot_no,
+            &txn_hash,
+            usize_to_i16(txn_index),
+        ));
+    }
+
+    // Wait for operations to complete, and display any errors
+    for handle in query_handles {
+        match handle.await {
+            Ok(join_res) => {
+                match join_res {
+                    Ok(res) => debug!(res=?res,"Query OK"),
+                    Err(error) => error!(error=%error,"Query Failed"),
+                }
+            },
+            Err(error) => error!(error=%error,"Query Join Failed"),
+        }
     }

-    // Prepare all statements in the batch at once
-    let (prepared_txo_batch, prepared_txo_asset_batch, prepared_txi_batch) = try_join!(
-        session.prepare_batch(&txo_batch),
-        session.prepare_batch(&txo_asset_batch),
-        session.prepare_batch(&txi_batch),
-    )?;
-
-    // Run the prepared batch
-    let _res = try_join!(
-        session.batch(&prepared_txo_batch, txo_values),
-        session.batch(&prepared_txo_asset_batch, txo_asset_values),
-        session.batch(&prepared_txi_batch, txi_values),
-    )?;
-
     Ok(())
 }
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql
index 1e37abbf4f2..7b6b6227cea 100644
--- a/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql
@@ -1,8 +1,6 @@
 -- Create the TXI Record for a transaction hash,
--- But only if it does not already exist.
-INSERT INTO txi_by_txn_hash (
-    txn_hash,
-    txo,
-    slot_no)
-VALUES(?, ?, ?)
-IF NOT EXISTS;
\ No newline at end of file
+UPDATE txi_by_txn_hash SET
+    slot_no = :slot_no
+WHERE
+    txn_hash = :txn_hash AND
+    txo = :txo ;
\ No newline at end of file
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql
index b1cccb448af..de22b94c132 100644
--- a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql
@@ -1,12 +1,11 @@
 -- Create the TXO Record for a stake address,
 -- But only if it does not already exist.
-INSERT INTO txo_by_stake (
-    stake_address,
-    slot_no,
-    txn,
-    txo,
-    address,
-    value,
-    txn_hash)
-VALUES(?, ?, ?, ?, ?, ?, ?)
-IF NOT EXISTS;
\ No newline at end of file
+UPDATE txo_by_stake SET
+    address = :address,
+    value = :value,
+    txn_hash = :txn_hash
+WHERE
+    stake_address = :stake_address AND
+    slot_no = :slot_no AND
+    txn = :txn and
+    txo = :txo ;
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql
index a5a01011856..a03e25b99a6 100644
--- a/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql
@@ -1,13 +1,12 @@
 -- Create the TXO Record for a stake address,
--- But only if it does not already exist.
-INSERT INTO txo_by_stake (
-    stake_address,
-    slot_no,
-    txn,
-    txo,
-    policy_id,
-    policy_name,
-    value,
-    txn_hash)
-VALUES(?, ?, ?, ?, ?, ?, ?, ?)
-IF NOT EXISTS;
\ No newline at end of file
+-- Will not overwrite anything if it already exists.
+UPDATE txo_assets_by_stake SET
+    value = :value,
+    txn_hash = :txn_hash
+WHERE
+    stake_address = :stake_address AND
+    slot_no = :slot_no AND
+    txn = :txn AND
+    txo = :txo AND
+    policy_id = :policy_id AND
+    policy_name = :policy_name ;
diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
index df0e915cdb8..5f67ffcfba9 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql
@@ -3,7 +3,9 @@ CREATE TABLE IF NOT EXISTS txi_by_txn_hash (
     txn_hash blob,    -- 32 Bytes Transaction Hash that was spent.
     txo smallint,     -- Index of the TXO which was spent
+
+    -- Non key data, we can only spend a transaction hash/txo once, so this should be unique in any event.
     slot_no varint,   -- slot number when the spend occurred.

-    PRIMARY KEY (txn_hash, txo_index, slot_no)
+    PRIMARY KEY (txn_hash, txo)
 );
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
index d5190a33e20..f4bdd12cc6b 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -3,11 +3,13 @@
 -- Unstaked ADA address is an empty string.
 CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     -- Primary Key Fields
     stake_address blob,   -- stake address hash (28 bytes stake address hash, zero length if not staked.)
-    slot_no bigint,       -- slot number the txo was created in.
+    slot_no varint,       -- slot number the txo was created in.
     txn smallint,         -- Which Transaction in the Slot is the TXO.
     txo smallint,         -- offset in the txo list of the transaction the txo is in.
     policy_id blob,       -- asset policy hash (id) (28 byte binary hash)
     policy_name text,     -- name of the policy (UTF8)
+
+    -- Non Key Data of the asset.
     value varint,         -- Value of the asset (u64)

     -- Data needed to correlate a spent TXO.
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
index e216f492f33..8221138e126 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
@@ -19,5 +19,5 @@ CREATE TABLE IF NOT EXISTS txo_by_stake (
     -- when first detected in a query lookup.
     -- It serves as an optimization on subsequent queries.

-    PRIMARY KEY (stake_address, slot_no, txn, ofs)
+    PRIMARY KEY (stake_address, slot_no, txn, txo)
 );

From 316fdfdd05451bf7079082af6df0348fa8e122e8 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 31 Jul 2024 12:52:32 +0700
Subject: [PATCH 18/69] refactor(backend): Remove lazy and once_cell in favor
 of new standard library replacements

---
 catalyst-gateway/Cargo.toml                   |  48 ++++-----
 catalyst-gateway/bin/Cargo.toml               |   2 -
 catalyst-gateway/bin/src/cardano/mod.rs       |   2 +-
 catalyst-gateway/bin/src/db/index/block.rs    |  32 +-----
 catalyst-gateway/bin/src/db/index/mod.rs      |   1 +
 catalyst-gateway/bin/src/db/index/queries.rs  |  69 ++++++++++++
 catalyst-gateway/bin/src/db/index/session.rs  |  31 ++++--
 .../utilities/middleware/tracing_mw.rs        | 101 +++++++++---------
 catalyst-gateway/bin/src/settings.rs          |   5 +-
 utilities/local-cluster/justfile              |   3 +-
 10 files changed, 179 insertions(+), 115 deletions(-)
 create mode 100644 catalyst-gateway/bin/src/db/index/queries.rs

diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml
index 39f2524c539..deb2fb4102b 100644
--- a/catalyst-gateway/Cargo.toml
+++ b/catalyst-gateway/Cargo.toml
@@ -16,43 +16,41 @@ repository = "https://github.com/input-output-hk/catalyst-voices"
 license = "MIT OR Apache-2.0"

 [workspace.dependencies]
-clap = "4"
-tracing = "0.1.37"
-tracing-subscriber = "0.3.16"
-serde = "1.0"
-serde_json = "1.0"
-poem = "3.0.0"
-poem-openapi = "5.0.0"
-prometheus = "0.13.0"
+clap = "4.5.11"
+tracing = "0.1.40"
+tracing-subscriber = "0.3.18"
+serde = "1.0.204"
+serde_json = "1.0.121"
+poem = "3.0.4"
+poem-openapi = "5.0.3"
+prometheus = "0.13.4"
 cryptoxide = "0.4.4"
-uuid = "1"
-lazy_static = "1.4"
+uuid = "1.10.0"
 panic-message = "0.3"
 cpu-time = "1.0"
-ulid = "1.0.1"
-rust-embed = "8"
-url = "2.4.1"
-thiserror = "1.0"
-chrono = "0.4"
-async-trait = "0.1.64"
-rust_decimal = "1.29"
-bb8 = "0.8.1"
+ulid = "1.1.3"
+rust-embed = "8.5.0"
+url = "2.5.2"
+thiserror = "1.0.63"
+chrono = "0.4.38"
+async-trait = "0.1.81"
+rust_decimal = "1.35.0"
+bb8 = "0.8.5"
 bb8-postgres = "0.8.1"
-tokio-postgres = "0.7.10"
-tokio = "1"
-dotenvy = "0.15"
+tokio-postgres = "0.7.11"
+tokio = "1.39.2"
+dotenvy = "0.15.7"
 local-ip-address = "0.6.1"
 gethostname = "0.5.0"
 hex = "0.4.3"
 handlebars = "6.0.0"
-anyhow = "1.0.71"
-cddl = "0.9.2"
-ciborium = "0.2"
+anyhow = "1.0.86"
+cddl = "0.9.4"
+ciborium = "0.2.2"
 pallas = "0.29.0"
 cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", branch = "feat/auto-sync-mithril", version="0.1.0" }
 stringzilla = "3.8.4"
 duration-string = "0.4.0"
-once_cell = "1.19.0"
 build-info = "0.0.37"
 build-info-build = "0.0.37"
 ed25519-dalek = "2.1.1"
"2.1.1" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 3c04049c9b0..a2e41a8e42d 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -52,7 +52,6 @@ poem-openapi = { workspace = true, features = [ prometheus = { workspace = true } cryptoxide = { workspace = true } uuid = { workspace = true, features = ["v4", "serde"] } -lazy_static = { workspace = true } url = { workspace = true } dotenvy = { workspace = true } panic-message = { workspace = true } @@ -71,7 +70,6 @@ ciborium = { workspace = true } ed25519-dalek.workspace = true stringzilla = { workspace = true } duration-string.workspace = true -once_cell.workspace = true scylla.workspace = true strum.workspace = true strum_macros.workspace = true diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index 2fa840b2315..424a0b5c706 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -70,7 +70,7 @@ pub(crate) async fn start_followers() -> anyhow::Result<()> { } }, cardano_chain_follower::Kind::Rollback => { - warn!("TODO: Immutable Chain rollback"); + warn!("TODO: Live Chain rollback"); }, } } diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs index dc15b0c103a..c622bf8d979 100644 --- a/catalyst-gateway/bin/src/db/index/block.rs +++ b/catalyst-gateway/bin/src/db/index/block.rs @@ -4,17 +4,10 @@ use std::sync::Arc; use cardano_chain_follower::MultiEraBlock; use scylla::SerializeRow; -use tokio::join; use tracing::{debug, error, warn}; use super::session::session; -/// TXO by Stake Address Indexing query -const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql"); -/// TXO Asset by Stake Address Indexing Query -const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql"); -/// TXI by Txn hash Index -const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql"); /// This is used to indicate that there is no stake address, and still meet the /// requirement for the index primary key to be non empty. const NO_STAKE_ADDRESS: &[u8] = &[0; 1]; @@ -238,7 +231,7 @@ fn index_txo( #[allow(clippy::similar_names)] pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // Get the session. This should never fail. - let Some(session) = session(block.immutable()) else { + let Some((session, queries)) = session(block.immutable()) else { anyhow::bail!("Failed to get Index DB Session. Can not index block."); }; @@ -248,26 +241,9 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { tokio::task::JoinHandle>, > = Vec::new(); - // Pre-prepare our queries. 
-    let (txo_query, txo_asset_query, txi_query) = join!(
-        session.prepare(INSERT_TXO_QUERY),
-        session.prepare(INSERT_TXO_ASSET_QUERY),
-        session.prepare(INSERT_TXI_QUERY),
-    );
-
-    if let Err(ref error) = txo_query {
-        error!(error=%error,"Failed to prepare Insert TXO Query.");
-    };
-    if let Err(ref error) = txo_asset_query {
-        error!(error=%error,"Failed to prepare Insert TXO Asset Query.");
-    };
-    if let Err(ref error) = txi_query {
-        error!(error=%error,"Failed to prepare Insert TXI Query.");
-    };
-
-    let mut txo_query = txo_query?;
-    let mut txo_asset_query = txo_asset_query?;
-    let mut txi_query = txi_query?;
+    let mut txo_query = queries.txo_insert_query.clone();
+    let mut txo_asset_query = queries.txo_asset_insert_query.clone();
+    let mut txi_query = queries.txi_insert_query.clone();

     // We just want to write as fast as possible, consistency at this stage isn't required.
     txo_query.set_consistency(scylla::statement::Consistency::Any);
diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs
index 5a37ebd0905..f4157be8550 100644
--- a/catalyst-gateway/bin/src/db/index/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/mod.rs
@@ -1,5 +1,6 @@
 //! Blockchain Index Database

 pub(crate) mod block;
+pub(crate) mod queries;
 pub(crate) mod schema;
 pub(crate) mod session;
diff --git a/catalyst-gateway/bin/src/db/index/queries.rs b/catalyst-gateway/bin/src/db/index/queries.rs
new file mode 100644
index 00000000000..5d22815e03c
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries.rs
@@ -0,0 +1,69 @@
+//! Pre-prepare queries for a given session.
+//!
+//! This improves query execution time.
+
+use scylla::prepared_statement::PreparedStatement;
+use tokio::join;
+use tracing::error;
+
+use super::session::CassandraSession;
+
+/// TXO by Stake Address Indexing query
+const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql");
+/// TXO Asset by Stake Address Indexing Query
+const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql");
+/// TXI by Txn hash Index
+const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql");
+
+/// All prepared queries for a session.
+#[allow(clippy::struct_field_names)]
+pub(crate) struct PreparedQueries {
+    /// TXO Insert query.
+    pub txo_insert_query: PreparedStatement,
+    /// TXO Asset Insert query.
+    pub txo_asset_insert_query: PreparedStatement,
+    /// TXI Insert query.
+    pub txi_insert_query: PreparedStatement,
+}
+
+impl PreparedQueries {
+    /// Create new prepared queries for a given session.
+    pub(crate) async fn new(session: &CassandraSession) -> anyhow::Result<Self> {
+        // Pre-prepare our queries.
+        let (txo_query, txo_asset_query, txi_query) = join!(
+            session.prepare(INSERT_TXO_QUERY),
+            session.prepare(INSERT_TXO_ASSET_QUERY),
+            session.prepare(INSERT_TXI_QUERY),
+        );
+
+        if let Err(ref error) = txo_query {
+            error!(error=%error,"Failed to prepare Insert TXO Query.");
+        };
+        if let Err(ref error) = txo_asset_query {
+            error!(error=%error,"Failed to prepare Insert TXO Asset Query.");
+        };
+        if let Err(ref error) = txi_query {
+            error!(error=%error,"Failed to prepare Insert TXI Query.");
+        };
+
+        let mut txo_query = txo_query?;
+        let mut txo_asset_query = txo_asset_query?;
+        let mut txi_query = txi_query?;
+
+        // We just want to write as fast as possible, consistency at this stage isn't required.
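+        // (Lost or delayed acknowledgements are tolerable here: the writes are idempotent and re-runnable.)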
+        txo_query.set_consistency(scylla::statement::Consistency::Any);
+        txo_asset_query.set_consistency(scylla::statement::Consistency::Any);
+        txi_query.set_consistency(scylla::statement::Consistency::Any);
+
+        // These operations are idempotent, because they are always the same data.
+        txo_query.set_is_idempotent(true);
+        txo_asset_query.set_is_idempotent(true);
+        txi_query.set_is_idempotent(true);
+
+        Ok(Self {
+            txo_insert_query: txo_query,
+            txo_asset_insert_query: txo_asset_query,
+            txi_insert_query: txi_query,
+        })
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index 1aae71a3613..1947028f719 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -11,8 +11,11 @@ use scylla::{frame::Compression, ExecutionProfile, Session, SessionBuilder};
 use tokio::fs;
 use tracing::{error, info};

-use super::schema::create_schema;
-use crate::settings::{CassandraEnvVars, Settings};
+use super::{queries::PreparedQueries, schema::create_schema};
+use crate::{
+    db::index::queries,
+    settings::{CassandraEnvVars, Settings},
+};

 /// Configuration Choices for compression
 #[derive(Clone, strum::EnumString, strum::Display, strum::VariantNames)]
@@ -110,9 +113,10 @@ async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result<CassandraSession>

 /// Persistent DB Session.
-static PERSISTENT_SESSION: OnceLock<CassandraSession> = OnceLock::new();
+static PERSISTENT_SESSION: OnceLock<(CassandraSession, Arc<PreparedQueries>)> = OnceLock::new();
+
 /// Volatile DB Session.
-static VOLATILE_SESSION: OnceLock<CassandraSession> = OnceLock::new();
+static VOLATILE_SESSION: OnceLock<(CassandraSession, Arc<PreparedQueries>)> = OnceLock::new();

 /// Continuously try and init the DB, if it fails, backoff.
 ///
@@ -156,14 +160,27 @@ async fn retry_init(cfg: CassandraEnvVars, persistent: bool) {
             error = error,
             "Failed to Create Cassandra DB Schema"
         );
+        continue;
     }

+    let queries = match queries::PreparedQueries::new(&session).await {
+        Ok(queries) => Arc::new(queries),
+        Err(error) => {
+            error!(
+                db_type = db_type,
+                error = %error,
+                "Failed to Create Cassandra Prepared Queries"
+            );
+            continue;
+        },
+    };
+
     // Save the session so we can execute queries on the DB
     if persistent {
-        if PERSISTENT_SESSION.set(session).is_err() {
+        if PERSISTENT_SESSION.set((session, queries)).is_err() {
             error!("Persistent Session already set. This should not happen.");
         };
-    } else if VOLATILE_SESSION.set(session).is_err() {
+    } else if VOLATILE_SESSION.set((session, queries)).is_err() {
         error!("Volatile Session already set. This should not happen.");
     };

@@ -188,7 +205,7 @@ pub(crate) fn is_ready() -> bool {
 }

 /// Get the session needed to perform a query.
-pub(crate) fn session(persistent: bool) -> Option<CassandraSession> {
+pub(crate) fn session(persistent: bool) -> Option<(CassandraSession, Arc<PreparedQueries>)> {
     if persistent {
         PERSISTENT_SESSION.get().cloned()
     } else {
diff --git a/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs b/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs
index 5710de17904..7ac2cfbfcea 100644
--- a/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs
+++ b/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs
@@ -1,9 +1,8 @@
 //! Full Tracing and metrics middleware.
-use std::time::Instant;
+use std::{sync::LazyLock, time::Instant};

 use cpu_time::ProcessTime; // ThreadTime doesn't work.
 use cryptoxide::{blake2b::Blake2b, digest::Digest};
-use lazy_static::lazy_static;
 use poem::{
     http::{header, HeaderMap},
     web::RealIp,
@@ -26,78 +25,84 @@ const METRIC_LABELS: [&str; 3] = ["endpoint", "method", "status_code"];
 const CLIENT_METRIC_LABELS: [&str; 2] = ["client", "status_code"];

 // Prometheus Metrics maintained by the service
-lazy_static! {
-    static ref HTTP_REQ_DURATION_MS: HistogramVec =
+
+/// HTTP Request duration histogram.
+static HTTP_REQ_DURATION_MS: LazyLock<HistogramVec> = LazyLock::new(|| {
     #[allow(clippy::ignored_unit_patterns)]
     register_histogram_vec!(
         "http_request_duration_ms",
         "Duration of HTTP requests in milliseconds",
         &METRIC_LABELS
     )
-    .unwrap();
+    .unwrap()
+});

-    static ref HTTP_REQ_CPU_TIME_MS: HistogramVec =
+/// HTTP Request CPU Time histogram.
+static HTTP_REQ_CPU_TIME_MS: LazyLock<HistogramVec> = LazyLock::new(|| {
     #[allow(clippy::ignored_unit_patterns)]
     register_histogram_vec!(
         "http_request_cpu_time_ms",
         "CPU Time of HTTP requests in milliseconds",
         &METRIC_LABELS
     )
-    .unwrap();
-
-    // No Tacho implemented to enable this.
-    /*
-    static ref HTTP_REQUEST_RATE: GaugeVec = register_gauge_vec!(
-        "http_request_rate",
-        "Rate of HTTP requests per second",
-        &METRIC_LABELS
-    )
-    .unwrap();
-    */
-
-    static ref HTTP_REQUEST_COUNT: IntCounterVec =
+    .unwrap()
+});
+
+// No Tacho implemented to enable this.
+// static ref HTTP_REQUEST_RATE: GaugeVec = register_gauge_vec!(
+//     "http_request_rate",
+//     "Rate of HTTP requests per second",
+//     &METRIC_LABELS
+// )
+// .unwrap();
+
+/// HTTP Request counter.
+static HTTP_REQUEST_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
     #[allow(clippy::ignored_unit_patterns)]
     register_int_counter_vec!(
         "http_request_count",
         "Number of HTTP requests",
         &METRIC_LABELS
     )
-    .unwrap();
+    .unwrap()
+});

-    static ref CLIENT_REQUEST_COUNT: IntCounterVec =
+/// Client Request counter.
+static CLIENT_REQUEST_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
     #[allow(clippy::ignored_unit_patterns)]
     register_int_counter_vec!(
         "client_request_count",
         "Number of HTTP requests per client",
         &CLIENT_METRIC_LABELS
     )
-    .unwrap();
-
-    static ref PANIC_REQUEST_COUNT: IntCounterVec =
-    #[allow(clippy::ignored_unit_patterns)]
-    register_int_counter_vec!(
-        "panic_request_count",
-        "Number of HTTP requests that panicked",
-        &METRIC_LABELS
-    )
-    .unwrap();
-
-    // Currently no way to get these values without reading the whole response which is BAD.
-    /*
-    static ref HTTP_REQUEST_SIZE_BYTES: HistogramVec = register_histogram_vec!(
-        "http_request_size_bytes",
-        "Size of HTTP requests in bytes",
-        &METRIC_LABELS
-    )
-    .unwrap();
-    static ref HTTP_RESPONSE_SIZE_BYTES: HistogramVec = register_histogram_vec!(
-        "http_response_size_bytes",
-        "Size of HTTP responses in bytes",
-        &METRIC_LABELS
-    )
-    .unwrap();
-    */
-}
+    .unwrap()
+});
+
+// Currently no way to get these values. TODO.
+// Panic Request counter.
+// static PANIC_REQUEST_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
+// #[allow(clippy::ignored_unit_patterns)]
+// register_int_counter_vec!(
+//     "panic_request_count",
+//     "Number of HTTP requests that panicked",
+//     &METRIC_LABELS
+// )
+// .unwrap()
+// });
+
+// Currently no way to get these values without reading the whole response which is BAD.
+// static ref HTTP_REQUEST_SIZE_BYTES: HistogramVec = register_histogram_vec!(
+//     "http_request_size_bytes",
+//     "Size of HTTP requests in bytes",
+//     &METRIC_LABELS
+// )
+// .unwrap();
+// static ref HTTP_RESPONSE_SIZE_BYTES: HistogramVec = register_histogram_vec!(
+//     "http_response_size_bytes",
+//     "Size of HTTP responses in bytes",
+//     &METRIC_LABELS
+// )
+// .unwrap();

 /// Middleware for [`tracing`](https://crates.io/crates/tracing).
 #[derive(Default)]
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 40b3b5eb660..6e52fd76ce2 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -5,7 +5,7 @@ use std::{
     net::{IpAddr, Ipv4Addr, SocketAddr},
     path::PathBuf,
     str::FromStr,
-    sync::OnceLock,
+    sync::{LazyLock, OnceLock},
     time::Duration,
 };

@@ -15,7 +15,6 @@ use clap::Args;
 use cryptoxide::{blake2b::Blake2b, mac::Mac};
 use dotenvy::dotenv;
 use duration_string::DurationString;
-use once_cell::sync::Lazy;
 use strum::VariantNames;
 use tracing::{error, info};
 use url::Url;
@@ -562,7 +561,7 @@ struct EnvVars {
 // development

 /// Handle to the mithril sync thread. One for each Network ONLY.
-static ENV_VARS: Lazy<EnvVars> = Lazy::new(|| {
+static ENV_VARS: LazyLock<EnvVars> = LazyLock::new(|| {
     // Support env vars in a `.env` file, doesn't need to exist.
     dotenv().ok();

diff --git a/utilities/local-cluster/justfile b/utilities/local-cluster/justfile
index 1debd8763f2..b037de7c4ef 100644
--- a/utilities/local-cluster/justfile
+++ b/utilities/local-cluster/justfile
@@ -59,7 +59,8 @@ get-all-logs:
 # TODO: Get the cluster scylla DB exposed on port 9042 of the cluster.
 temp-scylla-dev-db:
     mkdir -p /var/lib/scylla/data /var/lib/scylla/commitlog /var/lib/scylla/hints /var/lib/scylla/view_hints
-    docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla --developer-mode=1 --smp 8
+    # docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla:latest --developer-mode=1 --smp 8
+    docker run --rm --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla:latest --smp 8 --io-setup 1 --developer-mode=0
     docker logs scylla-dev -f

 stop-temp-scylla-dev-db:

From 1c01aad128d5c1e0012c7861d17a4f9e74bb2dac Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Thu, 1 Aug 2024 23:13:15 +0700
Subject: [PATCH 19/69] fix(backend): WIP indexing for stake addresses and
 unstaked ada

---
 .config/dictionaries/project.dic              |   2 +
 .../c509-certificate/deny.toml                |   2 +-
 catalyst-gateway/Cargo.toml                   |   7 +-
 catalyst-gateway/bin/Cargo.toml               |   3 +
 catalyst-gateway/bin/src/cardano/mod.rs       | 359 ++++++++++++--
 catalyst-gateway/bin/src/db/index/block.rs    | 469 +++++++++++++++---
 catalyst-gateway/bin/src/db/index/queries.rs  |  28 +-
 .../db/index/queries/insert_unstaked_txo.cql  |  11 +
 .../queries/insert_unstaked_txo_asset.cql     |  12 +
 catalyst-gateway/bin/src/db/index/schema.rs   |  54 ++
 .../schema/stake_hash_to_stake_address.cql    |  17 +
 .../schema/txo_assets_by_stake_table.cql      |   2 +-
 .../db/index/schema/txo_by_stake_table.cql    |   2 +-
 .../unstaked_txo_assets_by_txn_hash.cql       |  25 +
 .../index/schema/unstaked_txo_by_txn_hash.cql |  22 +
 catalyst-gateway/bin/src/db/index/session.rs  |  11 +
 catalyst-gateway/bin/src/settings.rs          |  12 +-
 catalyst-gateway/deny.toml                    |   2 +-
 utilities/local-cluster/Vagrantfile           |   2 +-
 19 files changed, 937 insertions(+), 105 deletions(-)
 create mode 100644
catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql create mode 100644 catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 07d633702db..8c7938ddb9b 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -176,6 +176,7 @@ reqwest rfwtxt rgloader ripgrep +rngs RPATH rustc rustdoc @@ -190,6 +191,7 @@ Schemathesis Scripthash ScyllaDB seckey +Seedable sendfile slotno sqlfluff diff --git a/catalyst-gateway-crates/c509-certificate/deny.toml b/catalyst-gateway-crates/c509-certificate/deny.toml index d290f243fdc..5455931b26c 100644 --- a/catalyst-gateway-crates/c509-certificate/deny.toml +++ b/catalyst-gateway-crates/c509-certificate/deny.toml @@ -18,7 +18,7 @@ targets = [ version = 2 ignore = [ { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." }, - { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don;t use a custom global allocator." }, + { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don't use a custom global allocator." }, ] [bans] diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index deb2fb4102b..e70ffc3c126 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -16,7 +16,7 @@ repository = "https://github.com/input-output-hk/catalyst-voices" license = "MIT OR Apache-2.0" [workspace.dependencies] -clap = "4.5.11" +clap = "4.5.13" tracing = "0.1.40" tracing-subscriber = "0.3.18" serde = "1.0.204" @@ -48,7 +48,7 @@ anyhow = "1.0.86" cddl = "0.9.4" ciborium = "0.2.2" pallas = "0.29.0" -cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", branch = "feat/auto-sync-mithril", version="0.1.0" } +cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", branch = "feat/auto-sync-mithril", version="0.2.0" } stringzilla = "3.8.4" duration-string = "0.4.0" build-info = "0.0.37" @@ -59,6 +59,9 @@ strum = { version = "0.26.3", features = ["derive"] } strum_macros = "0.26.4" openssl = { version = "0.10.66", features = ["vendored"] } num-bigint = "0.4.6" +futures = "0.3.30" +rand = "0.8.5" +moka = { version = "0.12.8", features=["future"] } [workspace.lints.rust] warnings = "deny" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index a2e41a8e42d..49e97eb5029 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -75,6 +75,9 @@ strum.workspace = true strum_macros.workspace = true openssl.workspace = true num-bigint.workspace = true +futures.workspace = true +rand.workspace = true +moka.workspace = true [build-dependencies] build-info-build = { workspace = true } diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index 424a0b5c706..3abb03aae68 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -1,9 +1,19 @@ //! 
Logic for orchestrating followers -use cardano_chain_follower::{ChainFollower, ChainSyncConfig, Network, ORIGIN_POINT, TIP_POINT}; +use std::{fmt::Display, time::Duration}; + +use cardano_chain_follower::{ + ChainFollower, ChainSyncConfig, Network, Point, ORIGIN_POINT, TIP_POINT, +}; +use duration_string::DurationString; +use futures::{stream::FuturesUnordered, StreamExt}; +use rand::{Rng, SeedableRng}; use tracing::{error, info, warn}; -use crate::{db::index::block::index_block, settings::Settings}; +use crate::{ + db::index::{block::index_block, session::wait_is_ready}, + settings::Settings, +}; pub(crate) mod cip36_registration; pub(crate) mod util; @@ -12,10 +22,13 @@ pub(crate) mod util; #[allow(dead_code)] const MAX_BLOCKS_BATCH_LEN: usize = 1024; +/// How long we wait between checks for connection to the indexing DB to be ready. +const INDEXING_DB_READY_WAIT_INTERVAL: Duration = Duration::from_secs(1); + /// Start syncing a particular network async fn start_sync_for(chain: Network) -> anyhow::Result<()> { let cfg = ChainSyncConfig::default_for(chain); - info!(chain = cfg.chain.to_string(), "Starting Sync"); + info!(chain = %cfg.chain, "Starting Blockchain Sync"); if let Err(error) = cfg.run().await { error!(chain=%chain, error=%error, "Failed to start chain sync task"); @@ -25,55 +38,343 @@ async fn start_sync_for(chain: Network) -> anyhow::Result<()> { Ok(()) } -/// Start followers as per defined in the config -#[allow(unused)] -pub(crate) async fn start_followers() -> anyhow::Result<()> { - let cfg = Settings::follower_cfg(); +/// Data we return from a sync task. +struct SyncParams { + /// What blockchain are we syncing. + chain: Network, + /// The starting point of this sync. + start: Point, + /// The ending point of this sync. + end: Point, + /// The first block we successfully synced. + first_indexed_block: Option, + /// The last block we successfully synced. + last_indexed_block: Option, + /// The number of blocks we successfully synced. + total_blocks_synced: u64, + /// The number of blocks we successfully synced. + last_blocks_synced: u64, + /// The number of retries so far on this sync task. + retries: u64, + /// The number of retries so far on this sync task. + backoff_delay: Option, + /// If the sync completed without error or not. + result: Option>, +} - cfg.log(); +impl Display for SyncParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.result.is_none() { + write!(f, "Sync_Params {{ ")?; + } else { + write!(f, "Sync_Result {{ ")?; + } - start_sync_for(cfg.chain).await?; + write!(f, "start: {}, end: {}", self.start, self.end)?; + + if let Some(first) = self.first_indexed_block.as_ref() { + write!(f, ", first_indexed_block: {first}")?; + } + + if let Some(last) = self.last_indexed_block.as_ref() { + write!(f, ", last_indexed_block: {last}")?; + } + + if self.retries > 0 { + write!(f, ", retries: {}", self.retries)?; + } + + if self.retries > 0 || self.result.is_some() { + write!(f, ", synced_blocks: {}", self.total_blocks_synced)?; + } + + if self.result.is_some() { + write!(f, ", last_sync: {}", self.last_blocks_synced)?; + } + + if let Some(backoff) = self.backoff_delay.as_ref() { + write!(f, ", backoff: {}", DurationString::from(*backoff))?; + } + + if let Some(result) = self.result.as_ref() { + match result { + Ok(()) => write!(f, ", Success")?, + Err(error) => write!(f, ", {error}")?, + }; + } + + f.write_str(" }") + } +} + +/// The range we generate random backoffs within given a base backoff value. 
+const BACKOFF_RANGE_MULTIPLIER: u32 = 3; + +impl SyncParams { + /// Create a new `SyncParams`. + fn new(chain: Network, start: Point, end: Point) -> Self { + Self { + chain, + start, + end, + first_indexed_block: None, + last_indexed_block: None, + total_blocks_synced: 0, + last_blocks_synced: 0, + retries: 0, + backoff_delay: None, + result: None, + } + } + + /// Convert a result back into parameters for a retry. + fn retry(&self) -> Self { + let retry_count = self.retries + 1; + + let mut backoff = None; + + // If we did sync any blocks last time, first retry is immediate. + // Otherwise we backoff progressively more as we do more retries. + if self.last_blocks_synced == 0 { + // Calculate backoff based on number of retries so far. + backoff = match retry_count { + 1 => Some(Duration::from_secs(1)), // 1-3 seconds + 2..5 => Some(Duration::from_secs(10)), // 10-30 seconds + _ => Some(Duration::from_secs(30)), // 30-90 seconds. + }; + } + + Self { + chain: self.chain, + start: self.start.clone(), + end: self.end.clone(), + first_indexed_block: self.first_indexed_block.clone(), + last_indexed_block: self.last_indexed_block.clone(), + total_blocks_synced: self.total_blocks_synced, + last_blocks_synced: 0, + retries: retry_count, + backoff_delay: backoff, + result: None, + } + } + + /// Convert Params into the result of the sync. + fn done( + &self, first: Option, last: Option, synced: u64, result: anyhow::Result<()>, + ) -> Self { + Self { + chain: self.chain, + start: self.start.clone(), + end: self.end.clone(), + first_indexed_block: first, + last_indexed_block: last, + total_blocks_synced: synced + self.total_blocks_synced, + last_blocks_synced: synced, + retries: self.retries, + backoff_delay: self.backoff_delay, + result: Some(result), + } + } + + /// Get where this sync run actually needs to start from. + fn actual_start(&self) -> Point { + self.last_indexed_block + .as_ref() + .unwrap_or(&self.start) + .clone() + } + /// Do the backoff delay processing. + /// + /// The actual delay is a random time from the Delay itself to + /// `BACKOFF_RANGE_MULTIPLIER` times the delay. This is to prevent hammering the + /// service at regular intervals. + async fn backoff(&self) { + if let Some(backoff) = self.backoff_delay { + let mut rng = rand::rngs::StdRng::from_entropy(); + let actual_backoff = + rng.gen_range(backoff..backoff.saturating_mul(BACKOFF_RANGE_MULTIPLIER)); + + tokio::time::sleep(actual_backoff).await; + } + } +} + +/// Sync a portion of the blockchain. +/// Set end to `TIP_POINT` to sync the tip continuously. +fn sync_subchain(params: SyncParams) -> tokio::task::JoinHandle { tokio::spawn(async move { - // We can't sync until the local chain data is synced. - // This call will wait until we sync. + info!(chain = %params.chain, params=%params, "Indexing Blockchain"); + + // Backoff hitting the database if we need to. + params.backoff().await; - // Initially simple pure follower. - // TODO, break the initial sync follower into multiple followers syncing the chain - // to the index DB in parallel. - info!(chain = %cfg.chain, "Following"); - let mut follower = ChainFollower::new(cfg.chain, ORIGIN_POINT, TIP_POINT).await; + // Wait for indexing DB to be ready before continuing. 
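The retry backoff above combines a base delay with random jitter so that retrying followers do not all hit the database at the same instant. A self-contained sketch of the same idea, using the `rand` and `tokio` APIs the patch already depends on (`BACKOFF_RANGE_MULTIPLIER` mirrors the constant above):

```rust
use std::time::Duration;

use rand::{rngs::StdRng, Rng, SeedableRng};

/// From the patch: the real delay is 1x to 3x the base delay.
const BACKOFF_RANGE_MULTIPLIER: u32 = 3;

/// Sleep for a uniformly random duration in `[base, base * 3)`.
async fn jittered_backoff(base: Duration) {
    let mut rng = StdRng::from_entropy();
    let delay = rng.gen_range(base..base.saturating_mul(BACKOFF_RANGE_MULTIPLIER));
    tokio::time::sleep(delay).await;
}

#[tokio::main]
async fn main() {
    jittered_backoff(Duration::from_secs(1)).await; // sleeps 1-3 seconds
}
```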
+ wait_is_ready(INDEXING_DB_READY_WAIT_INTERVAL).await; + info!(chain=%params.chain, params=%params,"Indexing DB is ready"); - let mut blocks: u64 = 0; - let mut hit_tip: bool = false; + let mut first_indexed_block = params.first_indexed_block.clone(); + let mut last_indexed_block = params.last_indexed_block.clone(); + let mut blocks_synced = 0u64; + let mut follower = + ChainFollower::new(params.chain, params.actual_start(), params.end.clone()).await; while let Some(chain_update) = follower.next().await { match chain_update.kind { cardano_chain_follower::Kind::ImmutableBlockRollForward => { - warn!("TODO: Immutable Chain roll forward"); + // We only process these on the follower tracking the TIP. + if params.end == TIP_POINT { + warn!("TODO: Immutable Chain roll forward"); + }; }, cardano_chain_follower::Kind::Block => { let block = chain_update.block_data(); - if blocks == 0 { - info!("Indexing first block."); - } - blocks += 1; - if chain_update.tip && !hit_tip { - hit_tip = true; - info!("Hit tip after {blocks} blocks."); + if let Err(error) = index_block(block).await { + let error_msg = format!("Failed to index block {}", block.point()); + error!(chain=%params.chain, error=%error, params=%params, error_msg); + return params.done( + first_indexed_block, + last_indexed_block, + blocks_synced, + Err(error.context(error_msg)), + ); } - if let Err(error) = index_block(block).await { - error!(chain=%cfg.chain, error=%error, "Failed to index block"); - return; + if first_indexed_block.is_none() { + first_indexed_block = Some(block.point()); } + last_indexed_block = Some(block.point()); + blocks_synced += 1; }, cardano_chain_follower::Kind::Rollback => { warn!("TODO: Live Chain rollback"); }, } } + + let result = params.done( + first_indexed_block, + last_indexed_block, + blocks_synced, + Ok(()), + ); + + info!(chain = %params.chain, result=%result, "Indexing Blockchain Completed: OK"); + + result + }) +} + +/// Start followers as per defined in the config +#[allow(unused)] +pub(crate) async fn start_followers() -> anyhow::Result<()> { + let cfg = Settings::follower_cfg(); + + // Log the chain follower configuration. + cfg.log(); + + // Start Syncing the blockchain, so we can consume its data as required. + start_sync_for(cfg.chain).await?; + info!(chain=%cfg.chain,"Chain Sync is started."); + + tokio::spawn(async move { + // We can't sync until the local chain data is synced. + // This call will wait until we sync. + let tips = cardano_chain_follower::ChainFollower::get_tips(cfg.chain).await; + let immutable_tip_slot = tips.0.slot_or_default(); + let live_tip_slot = tips.1.slot_or_default(); + info!(chain=%cfg.chain, immutable_tip=immutable_tip_slot, live_tip=live_tip_slot, "Blockchain ready to sync from."); + + let mut sync_tasks: FuturesUnordered> = + FuturesUnordered::new(); + + // Start the Immutable Chain sync tasks. + // If the number of sync tasks is zero, just have one. + // Note: this shouldn't be possible, but easy to handle if it is. + let sub_chain_slots = immutable_tip_slot + .checked_div(cfg.sync_tasks.into()) + .unwrap_or(immutable_tip_slot); + // Need steps in a usize, in the highly unlikely event the steps are > max usize, make + // them max usize. 
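The slot arithmetic above splits the immutable chain into roughly equal ranges, one per sync task; the loop that follows steps through them. A sketch of the same partitioning, with an added `.max(1)` guard (an assumption, not in the patch) so a chain shorter than the task count cannot produce a zero step:

```rust
/// Split `0..tip` into roughly `tasks` equal slot ranges.
fn partition(tip: u64, tasks: u16) -> Vec<(u64, u64)> {
    let span = tip.checked_div(u64::from(tasks)).unwrap_or(tip).max(1);
    let step = usize::try_from(span).unwrap_or(usize::MAX);

    let mut ranges = Vec::new();
    let mut start = 0u64;
    for end in (span..tip).step_by(step) {
        ranges.push((start, end));
        start = end; // next start == last end
    }
    ranges.push((start, tip)); // the final range syncs up to the tip
    ranges
}

fn main() {
    // e.g. a 1000-slot immutable chain split across 4 tasks:
    for (s, e) in partition(1000, 4) {
        println!("sync slots {s}..{e}");
    }
}
```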
+ let sub_chain_steps: usize = sub_chain_slots.try_into().unwrap_or(usize::MAX); + + let mut start_point = ORIGIN_POINT; + for slot_end in (sub_chain_slots..immutable_tip_slot).step_by(sub_chain_steps) { + let next_point = cardano_chain_follower::Point::fuzzy(slot_end); + + sync_tasks.push(sync_subchain(SyncParams::new( + cfg.chain, + start_point, + next_point.clone(), + ))); + + // Next start == last end. + start_point = next_point; + } + + // Start the Live Chain sync task - This never stops syncing. + sync_tasks.push(sync_subchain(SyncParams::new( + cfg.chain, + start_point, + TIP_POINT, + ))); + + // Wait Sync tasks to complete. If they fail and have not completed, reschedule them. + // They will return from this iterator in the order they complete. + while let Some(completed) = sync_tasks.next().await { + let remaining_followers = sync_tasks.len(); + + match completed { + Ok(finished) => { + // Sync task finished. Check if it completed OK or had an error. + // If it failed, we need to reschedule it. + + let last_block = finished + .last_indexed_block + .clone() + .map_or("None".to_string(), |v| v.to_string()); + + let first_block = finished + .first_indexed_block + .clone() + .map_or("None".to_string(), |v| v.to_string()); + + // The TIP follower should NEVER end, even without error, so report that as an + // error. It can fail if the index DB goes down in some way. + // Restart it always. + if finished.end == TIP_POINT { + error!(chain=%cfg.chain, report=%finished, + "The TIP follower failed, restarting it."); + + // Start the Live Chain sync task again from where it left off. + sync_tasks.push(sync_subchain(finished.retry())); + } else if let Some(result) = finished.result.as_ref() { + match result { + Ok(()) => { + info!(chain=%cfg.chain, report=%finished, + "The Immutable follower completed successfully."); + }, + Err(error) => { + // let report = &finished.to_string(); + error!(chain=%cfg.chain, report=%finished, + "An Immutable follower failed, restarting it."); + // Start the Live Chain sync task again from where it left off. + sync_tasks.push(sync_subchain(finished.retry())); + }, + } + } else { + error!(chain=%cfg.chain, report=%finished, + "The Immutable follower completed, but without a proper result."); + } + }, + Err(error) => { + error!(error=%error, "Sync task failed. Can not restart it, not enough information. Sync is probably failed at this point."); + }, + } + } + + error!("Sync tasks have all stopped. This is an unexpected error!"); }); Ok(()) diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs index c622bf8d979..0fd69fb8d7a 100644 --- a/catalyst-gateway/bin/src/db/index/block.rs +++ b/catalyst-gateway/bin/src/db/index/block.rs @@ -1,16 +1,18 @@ //! Index a block -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; -use cardano_chain_follower::MultiEraBlock; +use anyhow::bail; +use cardano_chain_follower::{ChainFollower, MultiEraBlock, Point}; +use moka::future::{Cache, CacheBuilder}; use scylla::SerializeRow; use tracing::{debug, error, warn}; -use super::session::session; +use super::{queries::PreparedQueries, session::session}; +use crate::settings::Settings; -/// This is used to indicate that there is no stake address, and still meet the -/// requirement for the index primary key to be non empty. -const NO_STAKE_ADDRESS: &[u8] = &[0; 1]; +/// This is used to indicate that there is no stake address. 
+const NO_STAKE_ADDRESS: &[u8] = &[]; /// Insert TXO Query Parameters #[derive(SerializeRow)] @@ -63,6 +65,99 @@ struct TxiInsertParams { slot_no: num_bigint::BigInt, } +#[allow(dead_code)] +static POINTER_ADDRESS_CACHE: LazyLock, Arc>>> = + LazyLock::new(|| CacheBuilder::default().max_capacity(1024).build()); + +/// Dereference the Pointer and return the Stake Address if possible. +/// Returns an error if it can not be found for some reason. +/// +/// We probably don't need to support this, but keep code incase we do. +#[allow(dead_code)] +async fn deref_stake_pointer( + pointer: &pallas::ledger::addresses::Pointer, +) -> anyhow::Result>> { + // OK, we can look this up because we have a full chain to query, so + // try. + let cfg = Settings::follower_cfg(); + + let pointer_block_point = Point::fuzzy(pointer.slot()); + + let pointer_txn_offset: usize = pointer.tx_idx().try_into().unwrap_or(usize::MAX); + let pointer_cert_offset: usize = pointer.cert_idx().try_into().unwrap_or(usize::MAX); + + if let Some(pointer_block) = ChainFollower::get_block(cfg.chain, pointer_block_point).await { + let block_data = pointer_block.block_data().decode(); + if let Some(txn) = block_data.txs().get(pointer_txn_offset) { + if let Some(cert) = txn.certs().get(pointer_cert_offset) { + match cert { + pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { + match cert.clone().into_owned() { + pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(cred) | + pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(cred) | + pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(cred, _) => { + match cred { + pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(hash) | + pallas::ledger::primitives::conway::StakeCredential::Scripthash(hash) => { + return Ok(Arc::new(hash.to_vec())); + }, + } + }, + _ => { + bail!("Alonzo cert type not a stake address."); + } + } + }, + pallas::ledger::traverse::MultiEraCert::Conway(cert) => { + match cert.clone().into_owned() { + pallas::ledger::primitives::conway::Certificate::StakeRegistration(cred) | + pallas::ledger::primitives::conway::Certificate::StakeDeregistration(cred) | + pallas::ledger::primitives::conway::Certificate::StakeDelegation(cred, _) | + pallas::ledger::primitives::conway::Certificate::Reg(cred, _) | + pallas::ledger::primitives::conway::Certificate::UnReg(cred, _) | + pallas::ledger::primitives::conway::Certificate::VoteDeleg(cred, _) | + pallas::ledger::primitives::conway::Certificate::StakeVoteDeleg(cred, _, _) | + pallas::ledger::primitives::conway::Certificate::StakeRegDeleg(cred, _, _) | + pallas::ledger::primitives::conway::Certificate::VoteRegDeleg(cred, _, _) | + pallas::ledger::primitives::conway::Certificate::StakeVoteRegDeleg(cred, _, _, _) | + pallas::ledger::primitives::conway::Certificate::UpdateDRepCert(cred, _) => { + match cred { + pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(hash) | + pallas::ledger::primitives::conway::StakeCredential::Scripthash(hash)=> { + return Ok(Arc::new(hash.to_vec())); + }, + } + }, + _ => { + bail!("Conway cert type not a stake address."); + }, + } + }, + _ => { + bail!("Certificate type unknown."); + }, + } + } + bail!( + "Certificate index not found in block/txn. Treat as if there is no stake address." + ); + } + bail!("Pointer Stake address detected, but txn index not found in block. Treat as if there is no stake address."); + } + bail!("Pointer Stake address detected, but block not found. 
Treat as if there is no stake address."); +} + +/// Make a pointer pretty print. +#[allow(dead_code)] +fn fmt_pointer(pointer: &pallas::ledger::addresses::Pointer) -> String { + format!( + "Slot:{},Tx:{},Cert:{}", + pointer.slot(), + pointer.tx_idx(), + pointer.cert_idx() + ) +} + /// Extracts a stake address from a TXO if possible. /// Returns None if it is not possible. /// If we want to index, but can not determine a stake key hash, then return a Vec with a @@ -70,7 +165,7 @@ struct TxiInsertParams { /// use a single byte of 0 to indicate that there is no stake address, and still have a /// primary key on the table. Otherwise return the stake key hash as a vec of 28 bytes. fn extract_stake_address( - txo: &pallas::ledger::traverse::MultiEraOutput, + txo: &pallas::ledger::traverse::MultiEraOutput<'_>, slot_no: u64, txn_id: &str, ) -> Option<(Vec, String)> { let stake_address = match txo.address() { Ok(address) => { @@ -83,7 +178,7 @@ fn extract_stake_address( let address_string = match address.to_bech32() { Ok(address) => address, Err(error) => { - error!(error=%error,"Error converting to bech32: skipping."); + error!(error=%error, slot=slot_no, txn=txn_id,"Error converting to bech32: skipping."); return None; }, }; @@ -93,9 +188,42 @@ fn extract_stake_address( | pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => { (hash.to_vec(), address_string) }, - pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_) => { - warn!("Pointer Stake address detected, not supported. Treat as if there is no stake address."); + pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_pointer) => { + // These are not supported from Conway, so we don't support them either. (NO_STAKE_ADDRESS.to_vec(), address_string) + /* + let pointer_string = fmt_pointer(pointer); + info!( + slot = slot_no, + txn = txn_id, + pointer = pointer_string, + "Block has stake address pointer" + ); + + // First check if its cached, and if not look it up. + // Pointer addresses always resolve to the same result, so they are safe + // to cache. + match POINTER_ADDRESS_CACHE + .try_get_with( + pointer.to_vec().clone(), + deref_stake_pointer(pointer), + ) + .await + { + Ok(hash) => (hash.to_vec(), address_string), + Err(error) => { + error!(error=%error, slot=slot_no, txn=txn_id, pointer=pointer_string, + "Error looking up stake address via pointer: Treating as if there is no stake address."); + POINTER_ADDRESS_CACHE + .insert( + pointer.to_vec(), + Arc::new(NO_STAKE_ADDRESS.to_vec()), + ) + .await; + (NO_STAKE_ADDRESS.to_vec(), address_string) + }, + } + */ }, pallas::ledger::addresses::ShelleyDelegationPart::Null => { (NO_STAKE_ADDRESS.to_vec(), address_string) @@ -105,14 +233,18 @@ fn extract_stake_address( pallas::ledger::addresses::Address::Stake(_) => { // This should NOT appear in a TXO, so report if it does. But don't index it as // a stake address. - warn!("Unexpected Stake address found in TXO. Refusing to index."); + warn!( + slot = slot_no, + txn = txn_id, + "Unexpected Stake address found in TXO. Refusing to index." + ); return None; }, } }, Err(error) => { // This should not ever happen. - error!(error=%error, "Failed to get Address from TXO. Skipping TXO."); + error!(error=%error, slot = slot_no, txn = txn_id, "Failed to get Address from TXO. Skipping TXO."); return None; }, }; @@ -127,7 +259,7 @@ fn usize_to_i16(value: usize) -> i16 { /// Index the transaction Inputs. 
fn index_txi( - session: &Arc, txi_query: &scylla::prepared_statement::PreparedStatement, + session: &Arc, queries: &Arc, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, ) -> Vec>> { @@ -140,15 +272,18 @@ fn index_txi( let txn_hash = txi.hash().to_vec(); let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX); - let nested_txi_query = txi_query.clone(); + let nested_txi_query = queries.txi_insert_query.clone(); let nested_session = session.clone(); query_handles.push(tokio::spawn(async move { nested_session - .execute(&nested_txi_query, TxiInsertParams { - txn_hash, - txo, - slot_no: slot_no.into(), - }) + .execute( + &nested_txi_query, + TxiInsertParams { + txn_hash, + txo, + slot_no: slot_no.into(), + }, + ) .await })); } @@ -156,39 +291,205 @@ fn index_txi( query_handles } +/// This TXO is NOT Staked, so index it as such. +#[allow(clippy::too_many_arguments)] +fn index_unstaked_txo( + session: &Arc, stake_address: &[u8], txo_index: usize, address: String, + value: u64, txo: &pallas::ledger::traverse::MultiEraOutput<'_>, queries: &Arc, + slot_no: u64, txn_hash: &[u8], txn_index: i16, +) -> Vec>> +{ + let mut query_handles: Vec< + tokio::task::JoinHandle>, + > = Vec::new(); + + let nested_txo_query = queries.txo_insert_query.clone(); + let nested_session = session.clone(); + let nested_txn_hash = txn_hash.to_vec(); + let nested_stake_address = stake_address.to_vec(); + query_handles.push(tokio::spawn(async move { + nested_session + .execute( + &nested_txo_query, + TxoInsertParams { + stake_address: nested_stake_address.clone(), + slot_no: slot_no.into(), + txn: txn_index, + txo: usize_to_i16(txo_index), + address, + value: value.into(), + txn_hash: nested_txn_hash, + }, + ) + .await + })); + + let inner_stake_address = stake_address.to_vec(); + for asset in txo.non_ada_assets() { + let policy_id = asset.policy().to_vec(); + for policy_asset in asset.assets() { + if policy_asset.is_output() { + let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); + let value = policy_asset.any_coin(); + + let nested_txo_asset_query = queries.txo_asset_insert_query.clone(); + let nested_session = session.clone(); + let nested_txn_hash = txn_hash.to_vec(); + let nested_stake_address = inner_stake_address.clone(); + let nested_policy_id = policy_id.clone(); + query_handles.push(tokio::spawn(async move { + nested_session + .execute( + &nested_txo_asset_query, + TxoAssetInsertParams { + stake_address: nested_stake_address, + slot_no: slot_no.into(), + txn: txn_index, + txo: usize_to_i16(txo_index), + policy_id: nested_policy_id, + policy_name, + value: value.into(), + txn_hash: nested_txn_hash, + }, + ) + .await + })); + } else { + error!("Minting MultiAsset in TXO."); + } + } + } + + query_handles +} + +/// This TXO is Staked, so index it as such. 
+#[allow(clippy::too_many_arguments)] +fn index_staked_txo( + session: &Arc, stake_address: &[u8], txo_index: usize, address: String, + value: u64, txo: &pallas::ledger::traverse::MultiEraOutput<'_>, queries: &Arc, + slot_no: u64, txn_hash: &[u8], txn_index: i16, +) -> Vec>> +{ + let mut query_handles: Vec< + tokio::task::JoinHandle>, + > = Vec::new(); + + let nested_txo_query = queries.txo_insert_query.clone(); + let nested_session = session.clone(); + let nested_txn_hash = txn_hash.to_vec(); + let nested_stake_address = stake_address.to_vec(); + query_handles.push(tokio::spawn(async move { + nested_session + .execute( + &nested_txo_query, + TxoInsertParams { + stake_address: nested_stake_address.clone(), + slot_no: slot_no.into(), + txn: txn_index, + txo: usize_to_i16(txo_index), + address, + value: value.into(), + txn_hash: nested_txn_hash, + }, + ) + .await + })); + + let inner_stake_address = stake_address.to_vec(); + for asset in txo.non_ada_assets() { + let policy_id = asset.policy().to_vec(); + for policy_asset in asset.assets() { + if policy_asset.is_output() { + let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); + let value = policy_asset.any_coin(); + + let nested_txo_asset_query = queries.txo_asset_insert_query.clone(); + let nested_session = session.clone(); + let nested_txn_hash = txn_hash.to_vec(); + let nested_stake_address = inner_stake_address.clone(); + let nested_policy_id = policy_id.clone(); + query_handles.push(tokio::spawn(async move { + nested_session + .execute( + &nested_txo_asset_query, + TxoAssetInsertParams { + stake_address: nested_stake_address, + slot_no: slot_no.into(), + txn: txn_index, + txo: usize_to_i16(txo_index), + policy_id: nested_policy_id, + policy_name, + value: value.into(), + txn_hash: nested_txn_hash, + }, + ) + .await + })); + } else { + error!("Minting MultiAsset in TXO."); + } + } + } + + query_handles +} + /// Index the transaction Outputs. 
fn index_txo( - session: &Arc, txo_query: &scylla::prepared_statement::PreparedStatement, - txo_asset_query: &scylla::prepared_statement::PreparedStatement, + session: &Arc, queries: &Arc, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn_hash: &[u8], txn_index: i16, ) -> Vec>> { + let txn_id = hex::encode_upper(txn_hash); + let mut query_handles: Vec< tokio::task::JoinHandle>, > = Vec::new(); for (txo_index, txo) in txs.outputs().iter().enumerate() { - let Some((stake_address, address)) = extract_stake_address(txo) else { + let Some((stake_address, address)) = extract_stake_address(txo, slot_no, &txn_id) else { continue; }; let value = txo.lovelace_amount(); + if stake_address == NO_STAKE_ADDRESS { + } else { + query_handles.extend(index_staked_txo( + session, + &stake_address, + txo_index, + address, + value, + txo, + queries, + slot_no, + txn_hash, + txn_index, + )); + } + /* + let value = txo.lovelace_amount(); + let nested_txo_query = txo_query.clone(); let nested_session = session.clone(); let nested_txn_hash = txn_hash.to_vec(); let nested_stake_address = stake_address.clone(); query_handles.push(tokio::spawn(async move { nested_session - .execute(&nested_txo_query, TxoInsertParams { - stake_address: nested_stake_address, - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - address, - value: value.into(), - txn_hash: nested_txn_hash, - }) + .execute( + &nested_txo_query, + TxoInsertParams { + stake_address: nested_stake_address, + slot_no: slot_no.into(), + txn: txn_index, + txo: usize_to_i16(txo_index), + address, + value: value.into(), + txn_hash: nested_txn_hash, + }, + ) .await })); @@ -206,16 +507,19 @@ fn index_txo( let nested_policy_id = policy_id.clone(); query_handles.push(tokio::spawn(async move { nested_session - .execute(&nested_txo_asset_query, TxoAssetInsertParams { - stake_address: nested_stake_address, - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - policy_id: nested_policy_id, - policy_name, - value: value.into(), - txn_hash: nested_txn_hash, - }) + .execute( + &nested_txo_asset_query, + TxoAssetInsertParams { + stake_address: nested_stake_address, + slot_no: slot_no.into(), + txn: txn_index, + txo: usize_to_i16(txo_index), + policy_id: nested_policy_id, + policy_name, + value: value.into(), + txn_hash: nested_txn_hash, + }, + ) .await })); } else { @@ -223,6 +527,7 @@ fn index_txo( } } } + */ } query_handles } @@ -241,20 +546,6 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { tokio::task::JoinHandle>, > = Vec::new(); - let mut txo_query = queries.txo_insert_query.clone(); - let mut txo_asset_query = queries.txo_asset_insert_query.clone(); - let mut txi_query = queries.txi_insert_query.clone(); - - // We just want to write as fast as possible, consistency at this stage isn't required. - txo_query.set_consistency(scylla::statement::Consistency::Any); - txo_asset_query.set_consistency(scylla::statement::Consistency::Any); - txi_query.set_consistency(scylla::statement::Consistency::Any); - - // These operations are idempotent, because they are always the same data. - txo_query.set_is_idempotent(true); - txo_asset_query.set_is_idempotent(true); - txi_query.set_is_idempotent(true); - let block_data = block.decode(); let slot_no = block_data.slot(); @@ -262,7 +553,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { let txn_hash = txs.hash().to_vec(); // Index the TXI's. 
- query_handles.append(&mut index_txi(&session, &txi_query, txs, slot_no)); + query_handles.extend(index_txi(&session, &queries, txs, slot_no)); // TODO: Index minting. // let mint = txs.mints().iter() {}; @@ -270,12 +561,52 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // TODO: Index Metadata. // TODO: Index Stake address hash to stake address reverse lookups. + // Actually Index Certs, first ones to index are stake addresses. + let x = txs.required_signers(); + let x = txs.vkey_witnesses(); + txs.certs().iter().for_each(|cert| { + match cert { + pallas::ledger::traverse::MultiEraCert::NotApplicable => todo!(), + pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { + match cert.clone().into_owned() { + pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(_) => todo!(), + pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(_) => todo!(), + pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(_, _) => todo!(), + pallas::ledger::primitives::alonzo::Certificate::PoolRegistration { operator, vrf_keyhash, pledge, cost, margin, reward_account, pool_owners, relays, pool_metadata } => todo!(), + pallas::ledger::primitives::alonzo::Certificate::PoolRetirement(_, _) => todo!(), + pallas::ledger::primitives::alonzo::Certificate::GenesisKeyDelegation(_, _, _) => todo!(), + pallas::ledger::primitives::alonzo::Certificate::MoveInstantaneousRewardsCert(_) => todo!(), + } + }, + pallas::ledger::traverse::MultiEraCert::Conway(cert) => { + match cert.clone().into_owned() { + pallas::ledger::primitives::conway::Certificate::StakeRegistration(_) => todo!(), + pallas::ledger::primitives::conway::Certificate::StakeDeregistration(_) => todo!(), + pallas::ledger::primitives::conway::Certificate::StakeDelegation(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::PoolRegistration { operator, vrf_keyhash, pledge, cost, margin, reward_account, pool_owners, relays, pool_metadata } => todo!(), + pallas::ledger::primitives::conway::Certificate::PoolRetirement(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::Reg(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::UnReg(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::VoteDeleg(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::StakeVoteDeleg(_, _, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::StakeRegDeleg(_, _, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::VoteRegDeleg(_, _, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::StakeVoteRegDeleg(_, _, _, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::AuthCommitteeHot(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::ResignCommitteeCold(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::RegDRepCert(_, _, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::UnRegDRepCert(_, _) => todo!(), + pallas::ledger::primitives::conway::Certificate::UpdateDRepCert(_, _) => todo!(), + } + }, + _ => todo!(), + } + }); // Index the TXO's. 
- query_handles.append(&mut index_txo( + query_handles.extend(index_txo( &session, - &txo_query, - &txo_asset_query, + &queries, txs, slot_no, &txn_hash, @@ -283,18 +614,32 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { )); } + let mut result: anyhow::Result<()> = Ok(()); + // Wait for operations to complete, and display any errors for handle in query_handles { + if result.is_err() { + // Try and cancel all futures waiting tasks and return the first error we encountered. + handle.abort(); + continue; + } match handle.await { Ok(join_res) => { match join_res { Ok(res) => debug!(res=?res,"Query OK"), - Err(error) => error!(error=%error,"Query Failed"), + Err(error) => { + // IF a query fails, assume everything else is broken. + error!(error=%error,"Query Failed"); + result = Err(error.into()); + }, } }, - Err(error) => error!(error=%error,"Query Join Failed"), + Err(error) => { + error!(error=%error,"Query Join Failed"); + result = Err(error.into()); + }, } } - Ok(()) + result } diff --git a/catalyst-gateway/bin/src/db/index/queries.rs b/catalyst-gateway/bin/src/db/index/queries.rs index 5d22815e03c..3aa1ed9b6ce 100644 --- a/catalyst-gateway/bin/src/db/index/queries.rs +++ b/catalyst-gateway/bin/src/db/index/queries.rs @@ -15,6 +15,12 @@ const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cq /// TXI by Txn hash Index const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql"); +/// Unstaked TXO by Stake Address Indexing query +const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./queries/insert_unstaked_txo.cql"); +/// Unstaked TXO Asset by Stake Address Indexing Query +const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str = + include_str!("./queries/insert_unstaked_txo_asset.cql"); + /// All prepared queries for a session. #[allow(clippy::struct_field_names)] pub(crate) struct PreparedQueries { @@ -22,6 +28,10 @@ pub(crate) struct PreparedQueries { pub txo_insert_query: PreparedStatement, /// TXO Asset Insert query. pub txo_asset_insert_query: PreparedStatement, + /// Unstaked TXO Insert query. + pub unstaked_txo_insert_query: PreparedStatement, + /// Unstaked TXO Asset Insert query. + pub unstaked_txo_asset_insert_query: PreparedStatement, /// TXI Insert query. pub txi_insert_query: PreparedStatement, } @@ -30,9 +40,11 @@ impl PreparedQueries { /// Create new prepared queries for a given session. pub(crate) async fn new(session: &CassandraSession) -> anyhow::Result { // Pre-prepare our queries. 
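All of these queries follow the same preparation pattern, shown in full below: prepare once per session, relax consistency (index writes are replays of identical data), and mark the statement idempotent so the driver may safely retry it. A condensed sketch of that pattern using the same `scylla` calls:

```rust
use scylla::{prepared_statement::PreparedStatement, Session};

/// Prepare one insert statement for fast, retryable index writes.
/// The CQL text would come from one of the `include_str!` constants above.
async fn prepare_insert(session: &Session, cql: &str) -> anyhow::Result<PreparedStatement> {
    let mut stmt = session.prepare(cql).await?;
    // Indexing always writes the same data for a block, so the weakest
    // consistency is acceptable and the statement is safe to retry.
    stmt.set_consistency(scylla::statement::Consistency::Any);
    stmt.set_is_idempotent(true);
    Ok(stmt)
}
```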
-    let (txo_query, txo_asset_query, txi_query) = join!(
+    let (txo_query, txo_asset_query, unstaked_txo_query, unstaked_txo_asset_query, txi_query) = join!(
         session.prepare(INSERT_TXO_QUERY),
         session.prepare(INSERT_TXO_ASSET_QUERY),
+        session.prepare(INSERT_UNSTAKED_TXO_QUERY),
+        session.prepare(INSERT_UNSTAKED_TXO_ASSET_QUERY),
         session.prepare(INSERT_TXI_QUERY),
     );
 
@@ -42,27 +54,41 @@ impl PreparedQueries {
         if let Err(ref error) = txo_asset_query {
             error!(error=%error,"Failed to prepare Insert TXO Asset Query.");
         };
+        if let Err(ref error) = unstaked_txo_query {
+            error!(error=%error,"Failed to prepare Insert Unstaked TXO Query.");
+        };
+        if let Err(ref error) = unstaked_txo_asset_query {
+            error!(error=%error,"Failed to prepare Insert Unstaked TXO Asset Query.");
+        };
         if let Err(ref error) = txi_query {
             error!(error=%error,"Failed to prepare Insert TXI Query.");
         };
 
         let mut txo_query = txo_query?;
         let mut txo_asset_query = txo_asset_query?;
+        let mut txo_unstaked_query = unstaked_txo_query?;
+        let mut txo_unstaked_asset_query = unstaked_txo_asset_query?;
         let mut txi_query = txi_query?;
 
         // We just want to write as fast as possible, consistency at this stage isn't required.
         txo_query.set_consistency(scylla::statement::Consistency::Any);
         txo_asset_query.set_consistency(scylla::statement::Consistency::Any);
+        txo_unstaked_query.set_consistency(scylla::statement::Consistency::Any);
+        txo_unstaked_asset_query.set_consistency(scylla::statement::Consistency::Any);
         txi_query.set_consistency(scylla::statement::Consistency::Any);
 
         // These operations are idempotent, because they are always the same data.
         txo_query.set_is_idempotent(true);
         txo_asset_query.set_is_idempotent(true);
+        txo_unstaked_query.set_is_idempotent(true);
+        txo_unstaked_asset_query.set_is_idempotent(true);
         txi_query.set_is_idempotent(true);
 
         Ok(Self {
             txo_insert_query: txo_query,
             txo_asset_insert_query: txo_asset_query,
+            unstaked_txo_insert_query: txo_unstaked_query,
+            unstaked_txo_asset_insert_query: txo_unstaked_asset_query,
             txi_insert_query: txi_query,
         })
     }
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql
new file mode 100644
index 00000000000..de22b94c132
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql
@@ -0,0 +1,11 @@
+-- Create the unstaked TXO Record,
+-- but only if it does not already exist.
+UPDATE txo_by_stake SET
+    address = :address,
+    value = :value,
+    txn_hash = :txn_hash
+WHERE
+    stake_address = :stake_address AND
+    slot_no = :slot_no AND
+    txn = :txn AND
+    txo = :txo ;
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql
new file mode 100644
index 00000000000..a03e25b99a6
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql
@@ -0,0 +1,12 @@
+-- Create the unstaked TXO Asset Record,
+-- Will not overwrite anything if it already exists.
+UPDATE txo_assets_by_stake SET
+    value = :value,
+    txn_hash = :txn_hash
+WHERE
+    stake_address = :stake_address AND
+    slot_no = :slot_no AND
+    txn = :txn AND
+    txo = :txo AND
+    policy_id = :policy_id AND
+    policy_name = :policy_name ;
diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs
index bccf768f5e9..e4f2f1eacad 100644
--- a/catalyst-gateway/bin/src/db/index/schema.rs
+++ b/catalyst-gateway/bin/src/db/index/schema.rs
@@ -19,6 +19,17 @@ const CREATE_TABLE_TXO_ASSETS_BY_STAKE_ADDRESS_CQL: &str =
 /// TXI by Stake Address Table Schema
 const CREATE_TABLE_TXI_BY_TXN_HASH_CQL: &str = include_str!("./schema/txi_by_txn_hash_table.cql");
 
+/// Unstaked TXO by Txn Hash Table Schema
+const CREATE_TABLE_UNSTAKED_TXO_BY_TXN_HASH_CQL: &str =
+    include_str!("./schema/unstaked_txo_by_txn_hash.cql");
+/// Unstaked TXO Assets by Txn Hash Table Schema
+const CREATE_TABLE_UNSTAKED_TXO_ASSETS_BY_TXN_HASH_CQL: &str =
+    include_str!("./schema/unstaked_txo_assets_by_txn_hash.cql");
+
+/// Stake Address/Registration Table Schema
+const CREATE_TABLE_STAKE_HASH_TO_STAKE_ADDRESS_CQL: &str =
+    include_str!("./schema/stake_hash_to_stake_address.cql");
+
 /// The version of the Schema we are using.
 /// Must be incremented if there is a breaking change in any schema tables below.
 pub(crate) const SCHEMA_VERSION: u64 = 1;
@@ -80,6 +91,29 @@ async fn create_txo_tables(session: &mut CassandraSession) -> anyhow::Result<()>
     Ok(())
 }
 
+/// Create tables for holding unstaked TXO data.
+async fn create_unstaked_txo_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
+    let stmt = session
+        .prepare(CREATE_TABLE_UNSTAKED_TXO_BY_TXN_HASH_CQL)
+        .await
+        .context("Create Table Unstaked TXO By Txn Hash: Prepared")?;
+    session
+        .execute(&stmt, ())
+        .await
+        .context("Create Table Unstaked TXO By Txn Hash: Executed")?;
+
+    let stmt = session
+        .prepare(CREATE_TABLE_UNSTAKED_TXO_ASSETS_BY_TXN_HASH_CQL)
+        .await
+        .context("Create Table Unstaked TXO Assets By Txn Hash: Prepared")?;
+    session
+        .execute(&stmt, ())
+        .await
+        .context("Create Table Unstaked TXO Assets By Txn Hash: Executed")?;
+
+    Ok(())
+}
+
 /// Create tables for holding volatile TXI data
 async fn create_txi_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
     let stmt = session
@@ -95,6 +129,21 @@ async fn create_txi_tables(session: &mut CassandraSession) -> anyhow::Result<()>
     Ok(())
 }
 
+/// Create tables for mapping stake key hashes to stake addresses.
+async fn create_stake_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
+    let stmt = session
+        .prepare(CREATE_TABLE_STAKE_HASH_TO_STAKE_ADDRESS_CQL)
+        .await
+        .context("Create Table Stake Hash to Stake Address: Prepared")?;
+
+    session
+        .execute(&stmt, ())
+        .await
+        .context("Create Table Stake Hash to Stake Address: Executed")?;
+
+    Ok(())
+}
+
 /// Create the Schema on the connected Cassandra DB
 pub(crate) async fn create_schema(
     session: &mut CassandraSession, cfg: &CassandraEnvVars,
@@ -103,7 +152,12 @@ pub(crate) async fn create_schema(
 
     create_txo_tables(session).await?;
 
+    create_unstaked_txo_tables(session).await?;
+
     create_txi_tables(session).await?;
+
+    create_stake_tables(session).await?;
+
     // Wait for the Schema to be ready.
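Each `create_*_tables` function above repeats the same prepare/execute/context shape. A generic sketch of that helper pattern (the `anyhow::Context` usage matches the patch; the helper function itself is hypothetical):

```rust
use anyhow::Context;
use scylla::Session;

/// Hypothetical helper: create one table from its CQL schema text,
/// labelling any failure with the table being created.
async fn create_table(session: &mut Session, cql: &str, name: &str) -> anyhow::Result<()> {
    let stmt = session
        .prepare(cql)
        .await
        .with_context(|| format!("Create Table {name}: Prepared"))?;
    session
        .execute(&stmt, ())
        .await
        .with_context(|| format!("Create Table {name}: Executed"))?;
    Ok(())
}
```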
     session.await_schema_agreement().await?;
 
diff --git a/catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql b/catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql
new file mode 100644
index 00000000000..25c4df61326
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql
@@ -0,0 +1,17 @@
+-- Maps a stake key hash to its full stake address and registration state.
+-- This can represent a registration in either immutable data or volatile data.
+CREATE TABLE IF NOT EXISTS stake_hash_to_stake_address (
+    -- Primary Key Data
+    stake_hash blob, -- 28 Bytes Stake Key Hash.
+    slot_no varint, -- slot number when the key was registered.
+    txn smallint, -- Index of the TX which holds the registration.
+
+    -- Non key data, the registration state recorded for this stake key hash.
+    stake_address blob, -- Stake address - not present for scripts
+
+    register boolean, -- True if the stake was registered in this transaction.
+    deregister boolean, -- True if the stake key was deregistered in this transaction.
+    pool_delegation blob, -- Stake was delegated to this Pool address.
+
+    PRIMARY KEY (stake_hash, slot_no, txn)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
index f4bdd12cc6b..bcacbb3f845 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -1,5 +1,5 @@
 -- Transaction Outputs (Native Assets) per stake address.
--- Unstaked ADA address is an empty string.
+-- Unstaked Assets are not present in this table.
 CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     -- Priamry Key Fields
     stake_address blob, -- stake address hash (28 bytes stake address hash, zero length if not staked.)
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
index 8221138e126..fc331e14de6 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql
@@ -1,5 +1,5 @@
 -- Transaction Outputs (ADA) per stake address.
--- Unstaked ADA address is an empty string.
+-- Unstaked ADA is not present in this table.
 CREATE TABLE IF NOT EXISTS txo_by_stake (
     -- Priamry Key Fields
     stake_address blob, -- stake address hash (28 bytes stake address hash, zero length if not staked.)
diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql
new file mode 100644
index 00000000000..2c1211175c1
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql
@@ -0,0 +1,25 @@
+-- Transaction Outputs (Native Assets) that are not staked, by their transaction hash.
+CREATE TABLE IF NOT EXISTS unstaked_txo_assets_by_txn_hash (
+    -- Primary Key Fields
+    txn_hash blob, -- 32 byte hash of this transaction.
+    txo smallint, -- offset in the txo list of the transaction the txo is in.
+    policy_id blob, -- asset policy hash (id) (28 byte binary hash)
+    policy_name text, -- name of the policy (UTF8)
+
+    -- Secondary location information
+    slot_no varint, -- slot number the txo was created in.
+    txn smallint, -- Which Transaction in the Slot is the TXO.
+
+    -- Value of the asset.
+    value varint, -- Value of the asset (u64)
+
+    -- Data needed to correlate a spent TXO.
+    spent_slot varint, -- Slot this TXO was spent in.
+                       -- This is ONLY calculated/stored
+                       -- when first detected in a query lookup.
+                       -- It serves as an optimization on subsequent queries.
+                       -- It is also only updated when the reference is the same type,
+                       -- i.e. an immutable txo can only record an immutable spend.
+
+    PRIMARY KEY (txn_hash, txo, policy_id, policy_name)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql
new file mode 100644
index 00000000000..1ff2ab2a96f
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql
@@ -0,0 +1,22 @@
+-- Transaction Outputs (ADA) that are not staked, by their transaction hash.
+CREATE TABLE IF NOT EXISTS unstaked_txo_by_txn_hash (
+    -- Primary Key Fields
+    txn_hash blob, -- 32 byte hash of this transaction.
+    txo smallint, -- offset in the txo list of the transaction the txo is in.
+
+    -- Secondary Location information for the transaction.
+    slot_no varint, -- slot number the txo was created in.
+    txn smallint, -- Which Transaction in the Slot is the TXO.
+
+
+    -- Transaction Output Data
+    address ascii, -- TXO address (CIP19 Formatted Text).
+    value varint, -- Lovelace value of the TXO (u64).
+
+    spent_slot varint, -- Slot this TXO was spent in.
+                       -- This is ONLY calculated/stored
+                       -- when first detected in a query lookup.
+                       -- It serves as an optimization on subsequent queries.
+
+    PRIMARY KEY (txn_hash, txo)
+);
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index 1947028f719..08461f8ad1f 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -204,6 +204,17 @@ pub(crate) fn is_ready() -> bool {
     PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some()
 }
 
+/// Wait for the Cassandra Indexing DB to be ready before continuing.
+pub(crate) async fn wait_is_ready(interval: Duration) {
+    loop {
+        if is_ready() {
+            break;
+        }
+
+        tokio::time::sleep(interval).await;
+    }
+}
+
 /// Get the session needed to perform a query.
 pub(crate) fn session(persistent: bool) -> Option<(CassandraSession, Arc<PreparedQueries>)> {
     if persistent {
diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs
index 6e52fd76ce2..6acc2d81086 100644
--- a/catalyst-gateway/bin/src/settings.rs
+++ b/catalyst-gateway/bin/src/settings.rs
@@ -71,10 +71,10 @@ const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042";
 const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile";
 
 /// Default chain to follow.
-const CHAIN_FOLLOWER_DEFAULT: Network = Network::Preprod;
+const CHAIN_FOLLOWER_DEFAULT: Network = Network::Mainnet;
 
 /// Default number of sync tasks (must be in the range 1 to 255 inclusive.)
-const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u8 = 16;
+const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u16 = 16;
 
 /// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4
 /// UUID.
@@ -478,19 +478,19 @@ pub(crate) struct ChainFollowerEnvVars {
     /// The Blockchain we sync from.
     pub(crate) chain: Network,
 
-    /// Yje maximum number of sync tasks.
-    pub(crate) sync_tasks: u8,
+    /// The maximum number of sync tasks.
+    pub(crate) sync_tasks: u16,
 }
 
 impl ChainFollowerEnvVars {
     /// Create a config for a cassandra cluster, identified by a default namespace.
fn new() -> Self { let chain = StringEnvVar::new_as_enum("CHAIN_NETWORK", CHAIN_FOLLOWER_DEFAULT, false); - let sync_tasks: u8 = StringEnvVar::new_as_i64( + let sync_tasks: u16 = StringEnvVar::new_as_i64( "CHAIN_FOLLOWER_SYNC_TASKS", CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT.into(), 1, - 255, + u16::MAX.into(), ) .try_into() .unwrap_or(CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT); diff --git a/catalyst-gateway/deny.toml b/catalyst-gateway/deny.toml index 7e6bd8d8121..54089796af8 100644 --- a/catalyst-gateway/deny.toml +++ b/catalyst-gateway/deny.toml @@ -18,7 +18,7 @@ targets = [ version = 2 ignore = [ { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." }, - { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don;t use a custom global allocator." }, + { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don't use a custom global allocator." }, ] [bans] diff --git a/utilities/local-cluster/Vagrantfile b/utilities/local-cluster/Vagrantfile index bca02ee7620..68c633b9041 100644 --- a/utilities/local-cluster/Vagrantfile +++ b/utilities/local-cluster/Vagrantfile @@ -180,7 +180,7 @@ Vagrant.configure("2") do |config| control.vm.provision "shell", inline: helm_install_script control.vm.provision "shell", inline: control_plane_script control.vm.provision "shell", inline: cert_manager_install_script - # We use longhorn, so don;t setup the local-path-provisioner + # We use longhorn, so don't setup the local-path-provisioner # control.vm.provision "shell", inline: local_path_provisioner_script control.vm.provision "shell", inline: longhorn_install_script control.vm.provision "shell", inline: monitoring_install_script From a3f47df9074ad7a3faae1cf8a1d044233d992cce Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Tue, 6 Aug 2024 22:46:54 +0700 Subject: [PATCH 20/69] fix(backend): indexing WIP --- catalyst-gateway/Cargo.toml | 1 + catalyst-gateway/bin/Cargo.toml | 2 + catalyst-gateway/bin/src/cardano/mod.rs | 4 +- catalyst-gateway/bin/src/cardano/util.rs | 38 +- catalyst-gateway/bin/src/cli.rs | 4 +- catalyst-gateway/bin/src/db/index/block.rs | 619 +----------------- .../bin/src/db/index/index_certs.rs | 270 ++++++++ .../bin/src/db/index/index_txi.rs | 105 +++ .../bin/src/db/index/index_txo.rs | 508 ++++++++++++++ catalyst-gateway/bin/src/db/index/mod.rs | 3 + catalyst-gateway/bin/src/db/index/queries.rs | 196 ++++-- .../queries/insert_stake_registration.cql | 20 + .../bin/src/db/index/queries/insert_txi.cql | 14 +- .../bin/src/db/index/queries/insert_txo.cql | 27 +- .../src/db/index/queries/insert_txo_asset.cql | 27 +- .../db/index/queries/insert_unstaked_txo.cql | 27 +- .../queries/insert_unstaked_txo_asset.cql | 30 +- catalyst-gateway/bin/src/db/index/schema.rs | 153 ++--- .../schema/stake_hash_to_stake_address.cql | 17 - .../db/index/schema/stake_registration.cql | 20 + .../schema/txo_assets_by_stake_table.cql | 10 - .../unstaked_txo_assets_by_txn_hash.cql | 14 +- catalyst-gateway/bin/src/db/index/session.rs | 139 ++-- .../bin/src/service/api/health/live_get.rs | 4 +- catalyst-gateway/bin/src/settings.rs | 24 +- 25 files changed, 1361 insertions(+), 915 deletions(-) create mode 100644 catalyst-gateway/bin/src/db/index/index_certs.rs create mode 100644 catalyst-gateway/bin/src/db/index/index_txi.rs create mode 100644 catalyst-gateway/bin/src/db/index/index_txo.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/insert_stake_registration.cql delete mode 100644 
catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/stake_registration.cql diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index e70ffc3c126..caf304a7b63 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -62,6 +62,7 @@ num-bigint = "0.4.6" futures = "0.3.30" rand = "0.8.5" moka = { version = "0.12.8", features=["future"] } +crossbeam-skiplist = "0.1.3" [workspace.lints.rust] warnings = "deny" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 49e97eb5029..81be5212fc0 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -78,6 +78,8 @@ num-bigint.workspace = true futures.workspace = true rand.workspace = true moka.workspace = true +crossbeam-skiplist.workspace = true + [build-dependencies] build-info-build = { workspace = true } diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index 3abb03aae68..166994e9cab 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -11,7 +11,7 @@ use rand::{Rng, SeedableRng}; use tracing::{error, info, warn}; use crate::{ - db::index::{block::index_block, session::wait_is_ready}, + db::index::{block::index_block, session::CassandraSession}, settings::Settings, }; @@ -210,7 +210,7 @@ fn sync_subchain(params: SyncParams) -> tokio::task::JoinHandle { params.backoff().await; // Wait for indexing DB to be ready before continuing. - wait_is_ready(INDEXING_DB_READY_WAIT_INTERVAL).await; + CassandraSession::wait_is_ready(INDEXING_DB_READY_WAIT_INTERVAL).await; info!(chain=%params.chain, params=%params,"Indexing DB is ready"); let mut first_indexed_block = params.first_indexed_block.clone(); diff --git a/catalyst-gateway/bin/src/cardano/util.rs b/catalyst-gateway/bin/src/cardano/util.rs index fb54d3217a4..75a9f48e289 100644 --- a/catalyst-gateway/bin/src/cardano/util.rs +++ b/catalyst-gateway/bin/src/cardano/util.rs @@ -1,5 +1,7 @@ //! Block stream parsing and filtering utils +use std::collections::HashMap; + use cryptoxide::{blake2b::Blake2b, digest::Digest}; use pallas::ledger::{ primitives::conway::{StakeCredential, VKeyWitness}, @@ -127,27 +129,31 @@ pub fn extract_stake_credentials_from_certs( stake_credentials } +/// Get a Blake2b-224 (28 byte) hash of some bytes +pub(crate) fn blake2b_224(value: &[u8]) -> [u8; 28] { + let mut digest = [0u8; 28]; + let mut context = Blake2b::new(28); + context.input(value); + context.result(&mut digest); + digest +} + +/// A map of hashed witnesses. +pub(crate) type HashedWitnesses = HashMap<[u8; 28], Vec>; + /// Extract witness pub keys and pair with blake2b hash of the pub key. -/// Hashes are generally 32-byte long on Cardano (or 256 bits), -/// except for credentials (i.e. keys or scripts) which are 28-byte long (or 224 bits) -#[allow(dead_code)] -pub fn extract_hashed_witnesses( - witnesses: &[VKeyWitness], -) -> anyhow::Result> { - let mut hashed_witnesses = Vec::new(); +/// This converts raw Addresses to their hashes as used on Cardano (Blake2b-224). +/// And allows them to be easily cross referenced. 
+pub(crate) fn extract_hashed_witnesses(witnesses: &[VKeyWitness]) -> HashedWitnesses { + let mut hashed_witnesses = HashMap::new(); for witness in witnesses { - let pub_key_bytes: [u8; 32] = witness.vkey.as_slice().try_into()?; - - let pub_key_hex = hex::encode(pub_key_bytes); + let pub_key = witness.vkey.to_vec(); + let hash = blake2b_224(&pub_key); - let mut digest = [0u8; 28]; - let mut context = Blake2b::new(28); - context.input(&pub_key_bytes); - context.result(&mut digest); - hashed_witnesses.push((pub_key_hex, hex::encode(digest))); + hashed_witnesses.insert(hash, pub_key); } - Ok(hashed_witnesses) + hashed_witnesses } /// Match hashed witness pub keys with hashed stake credentials from the TX certificates diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index 24fbbe823d3..e24f938befb 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -6,7 +6,7 @@ use tracing::{error, info}; use crate::{ cardano::start_followers, - db, + db::{self, index::session::CassandraSession}, service::{self, started}, settings::{DocsSettings, ServiceSettings, Settings}, }; @@ -44,7 +44,7 @@ impl Cli { info!("Catalyst Gateway - Starting"); // Start the DB's - db::index::session::init(); + CassandraSession::init(); db::event::establish_connection(); // Start the chain indexing follower. diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs index 0fd69fb8d7a..72b895a658b 100644 --- a/catalyst-gateway/bin/src/db/index/block.rs +++ b/catalyst-gateway/bin/src/db/index/block.rs @@ -1,619 +1,66 @@ //! Index a block -use std::sync::{Arc, LazyLock}; +use cardano_chain_follower::MultiEraBlock; +use tracing::{debug, error}; -use anyhow::bail; -use cardano_chain_follower::{ChainFollower, MultiEraBlock, Point}; -use moka::future::{Cache, CacheBuilder}; -use scylla::SerializeRow; -use tracing::{debug, error, warn}; - -use super::{queries::PreparedQueries, session::session}; -use crate::settings::Settings; - -/// This is used to indicate that there is no stake address. -const NO_STAKE_ADDRESS: &[u8] = &[]; - -/// Insert TXO Query Parameters -#[derive(SerializeRow)] -struct TxoInsertParams { - /// Stake Address - Binary 28 bytes. 0 bytes = not staked. - stake_address: Vec, - /// Block Slot Number - slot_no: num_bigint::BigInt, - /// Transaction Offset inside the block. - txn: i16, - /// Transaction Output Offset inside the transaction. - txo: i16, - /// Actual full TXO Address - address: String, - /// Actual TXO Value in lovelace - value: num_bigint::BigInt, - /// Transactions hash. - txn_hash: Vec, -} - -/// Insert TXO Asset Query Parameters -#[derive(SerializeRow)] -struct TxoAssetInsertParams { - /// Stake Address - Binary 28 bytes. 0 bytes = not staked. - stake_address: Vec, - /// Block Slot Number - slot_no: num_bigint::BigInt, - /// Transaction Offset inside the block. - txn: i16, - /// Transaction Output Offset inside the transaction. - txo: i16, - /// Policy hash of the asset - policy_id: Vec, - /// Policy name of the asset - policy_name: String, - /// Value of the asset - value: num_bigint::BigInt, - /// Transactions hash. - txn_hash: Vec, -} - -/// Insert TXI Query Parameters -#[derive(SerializeRow)] -struct TxiInsertParams { - /// Spent Transactions Hash - txn_hash: Vec, - /// TXO Index spent. - txo: i16, - /// Block Slot Number when spend occurred. 
- slot_no: num_bigint::BigInt, -} - -#[allow(dead_code)] -static POINTER_ADDRESS_CACHE: LazyLock, Arc>>> = - LazyLock::new(|| CacheBuilder::default().max_capacity(1024).build()); - -/// Dereference the Pointer and return the Stake Address if possible. -/// Returns an error if it can not be found for some reason. -/// -/// We probably don't need to support this, but keep code incase we do. -#[allow(dead_code)] -async fn deref_stake_pointer( - pointer: &pallas::ledger::addresses::Pointer, -) -> anyhow::Result>> { - // OK, we can look this up because we have a full chain to query, so - // try. - let cfg = Settings::follower_cfg(); - - let pointer_block_point = Point::fuzzy(pointer.slot()); - - let pointer_txn_offset: usize = pointer.tx_idx().try_into().unwrap_or(usize::MAX); - let pointer_cert_offset: usize = pointer.cert_idx().try_into().unwrap_or(usize::MAX); - - if let Some(pointer_block) = ChainFollower::get_block(cfg.chain, pointer_block_point).await { - let block_data = pointer_block.block_data().decode(); - if let Some(txn) = block_data.txs().get(pointer_txn_offset) { - if let Some(cert) = txn.certs().get(pointer_cert_offset) { - match cert { - pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { - match cert.clone().into_owned() { - pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(cred) | - pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(cred) | - pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(cred, _) => { - match cred { - pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(hash) | - pallas::ledger::primitives::conway::StakeCredential::Scripthash(hash) => { - return Ok(Arc::new(hash.to_vec())); - }, - } - }, - _ => { - bail!("Alonzo cert type not a stake address."); - } - } - }, - pallas::ledger::traverse::MultiEraCert::Conway(cert) => { - match cert.clone().into_owned() { - pallas::ledger::primitives::conway::Certificate::StakeRegistration(cred) | - pallas::ledger::primitives::conway::Certificate::StakeDeregistration(cred) | - pallas::ledger::primitives::conway::Certificate::StakeDelegation(cred, _) | - pallas::ledger::primitives::conway::Certificate::Reg(cred, _) | - pallas::ledger::primitives::conway::Certificate::UnReg(cred, _) | - pallas::ledger::primitives::conway::Certificate::VoteDeleg(cred, _) | - pallas::ledger::primitives::conway::Certificate::StakeVoteDeleg(cred, _, _) | - pallas::ledger::primitives::conway::Certificate::StakeRegDeleg(cred, _, _) | - pallas::ledger::primitives::conway::Certificate::VoteRegDeleg(cred, _, _) | - pallas::ledger::primitives::conway::Certificate::StakeVoteRegDeleg(cred, _, _, _) | - pallas::ledger::primitives::conway::Certificate::UpdateDRepCert(cred, _) => { - match cred { - pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(hash) | - pallas::ledger::primitives::conway::StakeCredential::Scripthash(hash)=> { - return Ok(Arc::new(hash.to_vec())); - }, - } - }, - _ => { - bail!("Conway cert type not a stake address."); - }, - } - }, - _ => { - bail!("Certificate type unknown."); - }, - } - } - bail!( - "Certificate index not found in block/txn. Treat as if there is no stake address." - ); - } - bail!("Pointer Stake address detected, but txn index not found in block. Treat as if there is no stake address."); - } - bail!("Pointer Stake address detected, but block not found. Treat as if there is no stake address."); -} - -/// Make a pointer pretty print. 
-#[allow(dead_code)] -fn fmt_pointer(pointer: &pallas::ledger::addresses::Pointer) -> String { - format!( - "Slot:{},Tx:{},Cert:{}", - pointer.slot(), - pointer.tx_idx(), - pointer.cert_idx() - ) -} - -/// Extracts a stake address from a TXO if possible. -/// Returns None if it is not possible. -/// If we want to index, but can not determine a stake key hash, then return a Vec with a -/// single 0 byte. This is because the index DB needs data in the primary key, so we -/// use a single byte of 0 to indicate that there is no stake address, and still have a -/// primary key on the table. Otherwise return the stake key hash as a vec of 28 bytes. -fn extract_stake_address( - txo: &pallas::ledger::traverse::MultiEraOutput<'_>, slot_no: u64, txn_id: &str, -) -> Option<(Vec, String)> { - let stake_address = match txo.address() { - Ok(address) => { - match address { - // Byron addresses do not have stake addresses and are not supported. - pallas::ledger::addresses::Address::Byron(_) => { - return None; - }, - pallas::ledger::addresses::Address::Shelley(address) => { - let address_string = match address.to_bech32() { - Ok(address) => address, - Err(error) => { - error!(error=%error, slot=slot_no, txn=txn_id,"Error converting to bech32: skipping."); - return None; - }, - }; - - match address.delegation() { - pallas::ledger::addresses::ShelleyDelegationPart::Script(hash) - | pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => { - (hash.to_vec(), address_string) - }, - pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_pointer) => { - // These are not supported from Conway, so we don't support them either. - (NO_STAKE_ADDRESS.to_vec(), address_string) - /* - let pointer_string = fmt_pointer(pointer); - info!( - slot = slot_no, - txn = txn_id, - pointer = pointer_string, - "Block has stake address pointer" - ); - - // First check if its cached, and if not look it up. - // Pointer addresses always resolve to the same result, so they are safe - // to cache. - match POINTER_ADDRESS_CACHE - .try_get_with( - pointer.to_vec().clone(), - deref_stake_pointer(pointer), - ) - .await - { - Ok(hash) => (hash.to_vec(), address_string), - Err(error) => { - error!(error=%error, slot=slot_no, txn=txn_id, pointer=pointer_string, - "Error looking up stake address via pointer: Treating as if there is no stake address."); - POINTER_ADDRESS_CACHE - .insert( - pointer.to_vec(), - Arc::new(NO_STAKE_ADDRESS.to_vec()), - ) - .await; - (NO_STAKE_ADDRESS.to_vec(), address_string) - }, - } - */ - }, - pallas::ledger::addresses::ShelleyDelegationPart::Null => { - (NO_STAKE_ADDRESS.to_vec(), address_string) - }, - } - }, - pallas::ledger::addresses::Address::Stake(_) => { - // This should NOT appear in a TXO, so report if it does. But don't index it as - // a stake address. - warn!( - slot = slot_no, - txn = txn_id, - "Unexpected Stake address found in TXO. Refusing to index." - ); - return None; - }, - } - }, - Err(error) => { - // This should not ever happen. - error!(error=%error, slot = slot_no, txn = txn_id, "Failed to get Address from TXO. 
Skipping TXO."); - return None; - }, - }; - - Some(stake_address) -} +use super::{ + index_certs::CertInsertQuery, index_txi::TxiInsertQuery, index_txo::TxoInsertQuery, + queries::FallibleQueryTasks, session::CassandraSession, +}; +use crate::cardano::util::extract_hashed_witnesses; /// Convert a usize to an i16 and saturate at `i16::MAX` -fn usize_to_i16(value: usize) -> i16 { +pub(crate) fn usize_to_i16(value: usize) -> i16 { value.try_into().unwrap_or(i16::MAX) } -/// Index the transaction Inputs. -fn index_txi( - session: &Arc, queries: &Arc, - txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, -) -> Vec>> -{ - let mut query_handles: Vec< - tokio::task::JoinHandle>, - > = Vec::new(); - - // Index the TXI's. - for txi in txs.inputs() { - let txn_hash = txi.hash().to_vec(); - let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX); - - let nested_txi_query = queries.txi_insert_query.clone(); - let nested_session = session.clone(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txi_query, - TxiInsertParams { - txn_hash, - txo, - slot_no: slot_no.into(), - }, - ) - .await - })); - } - - query_handles -} - -/// This TXO is NOT Staked, so index it as such. -#[allow(clippy::too_many_arguments)] -fn index_unstaked_txo( - session: &Arc, stake_address: &[u8], txo_index: usize, address: String, - value: u64, txo: &pallas::ledger::traverse::MultiEraOutput<'_>, queries: &Arc, - slot_no: u64, txn_hash: &[u8], txn_index: i16, -) -> Vec>> -{ - let mut query_handles: Vec< - tokio::task::JoinHandle>, - > = Vec::new(); - - let nested_txo_query = queries.txo_insert_query.clone(); - let nested_session = session.clone(); - let nested_txn_hash = txn_hash.to_vec(); - let nested_stake_address = stake_address.to_vec(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txo_query, - TxoInsertParams { - stake_address: nested_stake_address.clone(), - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - address, - value: value.into(), - txn_hash: nested_txn_hash, - }, - ) - .await - })); - - let inner_stake_address = stake_address.to_vec(); - for asset in txo.non_ada_assets() { - let policy_id = asset.policy().to_vec(); - for policy_asset in asset.assets() { - if policy_asset.is_output() { - let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); - let value = policy_asset.any_coin(); - - let nested_txo_asset_query = queries.txo_asset_insert_query.clone(); - let nested_session = session.clone(); - let nested_txn_hash = txn_hash.to_vec(); - let nested_stake_address = inner_stake_address.clone(); - let nested_policy_id = policy_id.clone(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txo_asset_query, - TxoAssetInsertParams { - stake_address: nested_stake_address, - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - policy_id: nested_policy_id, - policy_name, - value: value.into(), - txn_hash: nested_txn_hash, - }, - ) - .await - })); - } else { - error!("Minting MultiAsset in TXO."); - } - } - } - - query_handles -} - -/// This TXO is Staked, so index it as such. 
-#[allow(clippy::too_many_arguments)] -fn index_staked_txo( - session: &Arc, stake_address: &[u8], txo_index: usize, address: String, - value: u64, txo: &pallas::ledger::traverse::MultiEraOutput<'_>, queries: &Arc, - slot_no: u64, txn_hash: &[u8], txn_index: i16, -) -> Vec>> -{ - let mut query_handles: Vec< - tokio::task::JoinHandle>, - > = Vec::new(); - - let nested_txo_query = queries.txo_insert_query.clone(); - let nested_session = session.clone(); - let nested_txn_hash = txn_hash.to_vec(); - let nested_stake_address = stake_address.to_vec(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txo_query, - TxoInsertParams { - stake_address: nested_stake_address.clone(), - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - address, - value: value.into(), - txn_hash: nested_txn_hash, - }, - ) - .await - })); - - let inner_stake_address = stake_address.to_vec(); - for asset in txo.non_ada_assets() { - let policy_id = asset.policy().to_vec(); - for policy_asset in asset.assets() { - if policy_asset.is_output() { - let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); - let value = policy_asset.any_coin(); - - let nested_txo_asset_query = queries.txo_asset_insert_query.clone(); - let nested_session = session.clone(); - let nested_txn_hash = txn_hash.to_vec(); - let nested_stake_address = inner_stake_address.clone(); - let nested_policy_id = policy_id.clone(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txo_asset_query, - TxoAssetInsertParams { - stake_address: nested_stake_address, - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - policy_id: nested_policy_id, - policy_name, - value: value.into(), - txn_hash: nested_txn_hash, - }, - ) - .await - })); - } else { - error!("Minting MultiAsset in TXO."); - } - } - } - - query_handles -} - -/// Index the transaction Outputs. 
-fn index_txo( - session: &Arc, queries: &Arc, - txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn_hash: &[u8], txn_index: i16, -) -> Vec>> -{ - let txn_id = hex::encode_upper(txn_hash); - - let mut query_handles: Vec< - tokio::task::JoinHandle>, - > = Vec::new(); - - for (txo_index, txo) in txs.outputs().iter().enumerate() { - let Some((stake_address, address)) = extract_stake_address(txo, slot_no, &txn_id) else { - continue; - }; - - let value = txo.lovelace_amount(); - - if stake_address == NO_STAKE_ADDRESS { - } else { - query_handles.extend(index_staked_txo( - session, - &stake_address, - txo_index, - address, - value, - txo, - queries, - slot_no, - txn_hash, - txn_index, - )); - } - /* - let value = txo.lovelace_amount(); - - let nested_txo_query = txo_query.clone(); - let nested_session = session.clone(); - let nested_txn_hash = txn_hash.to_vec(); - let nested_stake_address = stake_address.clone(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txo_query, - TxoInsertParams { - stake_address: nested_stake_address, - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - address, - value: value.into(), - txn_hash: nested_txn_hash, - }, - ) - .await - })); - - for asset in txo.non_ada_assets() { - let policy_id = asset.policy().to_vec(); - for policy_asset in asset.assets() { - if policy_asset.is_output() { - let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); - let value = policy_asset.any_coin(); - - let nested_txo_asset_query = txo_asset_query.clone(); - let nested_session = session.clone(); - let nested_txn_hash = txn_hash.to_vec(); - let nested_stake_address = stake_address.clone(); - let nested_policy_id = policy_id.clone(); - query_handles.push(tokio::spawn(async move { - nested_session - .execute( - &nested_txo_asset_query, - TxoAssetInsertParams { - stake_address: nested_stake_address, - slot_no: slot_no.into(), - txn: txn_index, - txo: usize_to_i16(txo_index), - policy_id: nested_policy_id, - policy_name, - value: value.into(), - txn_hash: nested_txn_hash, - }, - ) - .await - })); - } else { - error!("Minting MultiAsset in TXO."); - } - } - } - */ - } - query_handles -} - /// Add all data needed from the block into the indexes. #[allow(clippy::similar_names)] pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // Get the session. This should never fail. - let Some((session, queries)) = session(block.immutable()) else { + let Some(session) = CassandraSession::get(block.immutable()) else { anyhow::bail!("Failed to get Index DB Session. Can not index block."); }; - // As our indexing operations span multiple partitions, they can not be batched. - // So use tokio threads to allow multiple writes to be dispatched simultaneously. - let mut query_handles: Vec< - tokio::task::JoinHandle>, - > = Vec::new(); + let mut cert_index = CertInsertQuery::new(); + let mut txi_index = TxiInsertQuery::new(); + let mut txo_index = TxoInsertQuery::new(); let block_data = block.decode(); let slot_no = block_data.slot(); + // We add all transactions in the block to their respective index data sets. for (txn_index, txs) in block_data.txs().iter().enumerate() { + let txn = usize_to_i16(txn_index); + let txn_hash = txs.hash().to_vec(); - // Index the TXI's. - query_handles.extend(index_txi(&session, &queries, txs, slot_no)); + // Hash all the witnesses for easy lookup. + let witnesses = extract_hashed_witnesses(txs.vkey_witnesses()); + + // Index the TXIs. 
+ txi_index.index(txs, slot_no); // TODO: Index minting. // let mint = txs.mints().iter() {}; // TODO: Index Metadata. - // TODO: Index Stake address hash to stake address reverse lookups. - // Actually Index Certs, first ones to index are stake addresses. - let x = txs.required_signers(); - let x = txs.vkey_witnesses(); - txs.certs().iter().for_each(|cert| { - match cert { - pallas::ledger::traverse::MultiEraCert::NotApplicable => todo!(), - pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { - match cert.clone().into_owned() { - pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(_) => todo!(), - pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(_) => todo!(), - pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(_, _) => todo!(), - pallas::ledger::primitives::alonzo::Certificate::PoolRegistration { operator, vrf_keyhash, pledge, cost, margin, reward_account, pool_owners, relays, pool_metadata } => todo!(), - pallas::ledger::primitives::alonzo::Certificate::PoolRetirement(_, _) => todo!(), - pallas::ledger::primitives::alonzo::Certificate::GenesisKeyDelegation(_, _, _) => todo!(), - pallas::ledger::primitives::alonzo::Certificate::MoveInstantaneousRewardsCert(_) => todo!(), - } - }, - pallas::ledger::traverse::MultiEraCert::Conway(cert) => { - match cert.clone().into_owned() { - pallas::ledger::primitives::conway::Certificate::StakeRegistration(_) => todo!(), - pallas::ledger::primitives::conway::Certificate::StakeDeregistration(_) => todo!(), - pallas::ledger::primitives::conway::Certificate::StakeDelegation(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::PoolRegistration { operator, vrf_keyhash, pledge, cost, margin, reward_account, pool_owners, relays, pool_metadata } => todo!(), - pallas::ledger::primitives::conway::Certificate::PoolRetirement(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::Reg(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::UnReg(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::VoteDeleg(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::StakeVoteDeleg(_, _, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::StakeRegDeleg(_, _, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::VoteRegDeleg(_, _, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::StakeVoteRegDeleg(_, _, _, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::AuthCommitteeHot(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::ResignCommitteeCold(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::RegDRepCert(_, _, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::UnRegDRepCert(_, _) => todo!(), - pallas::ledger::primitives::conway::Certificate::UpdateDRepCert(_, _) => todo!(), - } - }, - _ => todo!(), - } - }); + // Index Certificates inside the transaction. + cert_index.index(txs, slot_no, txn, &witnesses); - // Index the TXO's. - query_handles.extend(index_txo( - &session, - &queries, - txs, - slot_no, - &txn_hash, - usize_to_i16(txn_index), - )); + // Index the TXOs. + txo_index.index(txs, slot_no, &txn_hash, txn); } + // We then execute each batch of data from the block. + // This maximizes batching opportunities. 
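+    // (Behavioural note: each `execute` call below spawns independent tokio
+    // tasks, so the TXO, TXI and Certificate batches are written to the index
+    // DB concurrently.)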
+    let mut query_handles: FallibleQueryTasks = Vec::new();
+
+    query_handles.extend(txo_index.execute(&session));
+    query_handles.extend(txi_index.execute(&session));
+    query_handles.extend(cert_index.execute(&session));
+
     let mut result: anyhow::Result<()> = Ok(());
 
     // Wait for operations to complete, and display any errors
@@ -630,7 +77,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {
                 Err(error) => {
                     // IF a query fails, assume everything else is broken.
                     error!(error=%error,"Query Failed");
-                    result = Err(error.into());
+                    result = Err(error);
                 },
             }
         },
diff --git a/catalyst-gateway/bin/src/db/index/index_certs.rs b/catalyst-gateway/bin/src/db/index/index_certs.rs
new file mode 100644
index 00000000000..b39cf8fdace
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/index_certs.rs
@@ -0,0 +1,270 @@
+//! Index certs found in a transaction.
+
+use std::sync::Arc;
+
+use pallas::ledger::primitives::{alonzo, conway};
+use scylla::{frame::value::MaybeUnset, SerializeRow, Session};
+use tracing::error;
+
+use super::{
+    queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch},
+    session::CassandraSession,
+};
+use crate::{cardano::util::HashedWitnesses, settings::CassandraEnvVars};
+
+/// Insert Stake Registration Query and Parameters
+#[derive(SerializeRow)]
+pub(crate) struct StakeRegistrationInsertQuery {
+    /// Stake key hash
+    stake_hash: Vec<u8>,
+    /// Slot Number the cert is in.
+    slot_no: num_bigint::BigInt,
+    /// Transaction Index.
+    txn: i16,
+    /// Full Stake Address (not hashed, 32 byte ED25519 Public key).
+    stake_address: MaybeUnset<Vec<u8>>,
+    /// Is the stake address a script or not.
+    script: bool,
+    /// Is the Certificate Registered?
+    register: MaybeUnset<bool>,
+    /// Is the Certificate Deregistered?
+    deregister: MaybeUnset<bool>,
+    /// Pool Delegation Address
+    pool_delegation: MaybeUnset<Vec<u8>>,
+}
+
+/// Stake Registration Indexing query
+const INSERT_STAKE_REGISTRATION_QUERY: &str =
+    include_str!("./queries/insert_stake_registration.cql");
+
+impl StakeRegistrationInsertQuery {
+    /// Create a new Insert Query.
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        stake_hash: Vec<u8>, slot_no: u64, txn: i16, stake_address: Vec<u8>, script: bool,
+        register: bool, deregister: bool, pool_delegation: Option<Vec<u8>>,
+    ) -> Self {
+        StakeRegistrationInsertQuery {
+            stake_hash,
+            slot_no: slot_no.into(),
+            txn,
+            stake_address: if stake_address.is_empty() {
+                MaybeUnset::Unset
+            } else {
+                MaybeUnset::Set(stake_address)
+            },
+            script,
+            register: if register {
+                MaybeUnset::Set(true)
+            } else {
+                MaybeUnset::Unset
+            },
+            deregister: if deregister {
+                MaybeUnset::Set(true)
+            } else {
+                MaybeUnset::Unset
+            },
+            pool_delegation: if let Some(pool_delegation) = pool_delegation {
+                MaybeUnset::Set(pool_delegation)
+            } else {
+                MaybeUnset::Unset
+            },
+        }
+    }
+
+    /// Prepare Batch of Insert Stake Registration Index Data Queries
+    pub(crate) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_STAKE_REGISTRATION_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = insert_queries {
+            error!(error=%error,"Failed to prepare Insert Stake Registration Query.");
+        };
+
+        insert_queries
+    }
+}
+
+/// Insert Cert Queries
+pub(crate) struct CertInsertQuery {
+    /// Stake Registration Data captured during indexing.
+    stake_reg_data: Vec<StakeRegistrationInsertQuery>,
+}
+
+impl CertInsertQuery {
+    /// Create new data set for Cert Insert Query Batch.
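+    ///
+    /// A usage sketch (illustrative only, not part of this patch; `tx`,
+    /// `slot_no`, `txn` and `witnesses` are assumed to be in scope):
+    /// ```ignore
+    /// let mut certs = CertInsertQuery::new();
+    /// certs.index(&tx, slot_no, txn, &witnesses);
+    /// let handles = certs.execute(&session); // consumes the accumulated data
+    /// ```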
+    pub(crate) fn new() -> Self {
+        CertInsertQuery {
+            stake_reg_data: Vec::new(),
+        }
+    }
+
+    /// Prepare Batch of Insert Certificate Index Data Queries
+    pub(crate) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        // Note: for now we have one query, but there are many certs, and later we may have more
+        // to add here.
+        StakeRegistrationInsertQuery::prepare_batch(session, cfg).await
+    }
+
+    /// Get the stake address for a hash, return an empty address if one cannot be found.
+    #[allow(clippy::too_many_arguments)]
+    fn stake_address(
+        &mut self, cred: &alonzo::StakeCredential, slot_no: u64, txn: i16, register: bool,
+        deregister: bool, delegation: Option<Vec<u8>>, witnesses: &HashedWitnesses,
+    ) {
+        let default_addr = Vec::new();
+        let (key_hash, pubkey, script) = match cred {
+            pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(cred) => {
+                let addr = witnesses.get(cred.as_ref()).unwrap_or(&default_addr);
+                // Note: it is totally possible for the Registration Certificate to not be
+                // witnessed.
+                (cred.to_vec(), addr.clone(), false)
+            },
+            pallas::ledger::primitives::conway::StakeCredential::Scripthash(script) => {
+                (script.to_vec(), default_addr, true)
+            },
+        };
+
+        if pubkey.is_empty() && !script && deregister {
+            error!(
+                "Stake Deregistration Certificate {:?} is NOT Witnessed.",
+                key_hash
+            );
+        }
+
+        if pubkey.is_empty() && !script && delegation.is_some() {
+            error!(
+                "Stake Delegation Certificate {:?} is NOT Witnessed.",
+                key_hash
+            );
+        }
+
+        // This may not be witnessed; it's normal but disappointing.
+        self.stake_reg_data.push(StakeRegistrationInsertQuery::new(
+            key_hash, slot_no, txn, pubkey, script, register, deregister, delegation,
+        ));
+    }
+
+    /// Index an Alonzo Era certificate into the database.
+    fn index_alonzo_cert(
+        &mut self, cert: &alonzo::Certificate, slot_no: u64, txn: i16, witnesses: &HashedWitnesses,
+    ) {
+        #[allow(clippy::match_same_arms)]
+        match cert {
+            pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(cred) => {
+                // This may not be witnessed; it's normal but disappointing.
+                self.stake_address(cred, slot_no, txn, true, false, None, witnesses);
+            },
+            pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(cred) => {
+                self.stake_address(cred, slot_no, txn, false, true, None, witnesses);
+            },
+            pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(cred, pool) => {
+                self.stake_address(
+                    cred,
+                    slot_no,
+                    txn,
+                    false,
+                    false,
+                    Some(pool.to_vec()),
+                    witnesses,
+                );
+            },
+            pallas::ledger::primitives::alonzo::Certificate::PoolRegistration { .. } => {},
+            pallas::ledger::primitives::alonzo::Certificate::PoolRetirement(..) => {},
+            pallas::ledger::primitives::alonzo::Certificate::GenesisKeyDelegation(..) => {},
+            pallas::ledger::primitives::alonzo::Certificate::MoveInstantaneousRewardsCert(_) => {},
+        }
+    }
+
+    /// Index a certificate from a conway transaction.
+    fn index_conway_cert(
+        &mut self, cert: &conway::Certificate, slot_no: u64, txn: i16, witnesses: &HashedWitnesses,
+    ) {
+        #[allow(clippy::match_same_arms)]
+        match cert {
+            pallas::ledger::primitives::conway::Certificate::StakeRegistration(cred) => {
+                // This may not be witnessed; it's normal but disappointing.
+                self.stake_address(cred, slot_no, txn, true, false, None, witnesses);
+            },
+            pallas::ledger::primitives::conway::Certificate::StakeDeregistration(cred) => {
+                self.stake_address(cred, slot_no, txn, false, true, None, witnesses);
+            },
+            pallas::ledger::primitives::conway::Certificate::StakeDelegation(cred, pool) => {
+                self.stake_address(
+                    cred,
+                    slot_no,
+                    txn,
+                    false,
+                    false,
+                    Some(pool.to_vec()),
+                    witnesses,
+                );
+            },
+            pallas::ledger::primitives::conway::Certificate::PoolRegistration { .. } => {},
+            pallas::ledger::primitives::conway::Certificate::PoolRetirement(..) => {},
+            pallas::ledger::primitives::conway::Certificate::Reg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::UnReg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::VoteDeleg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::StakeVoteDeleg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::StakeRegDeleg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::VoteRegDeleg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::StakeVoteRegDeleg(..) => {},
+            pallas::ledger::primitives::conway::Certificate::AuthCommitteeHot(..) => {},
+            pallas::ledger::primitives::conway::Certificate::ResignCommitteeCold(..) => {},
+            pallas::ledger::primitives::conway::Certificate::RegDRepCert(..) => {},
+            pallas::ledger::primitives::conway::Certificate::UnRegDRepCert(..) => {},
+            pallas::ledger::primitives::conway::Certificate::UpdateDRepCert(..) => {},
+        }
+    }
+
+    /// Index the certificates in a transaction.
+    pub(crate) fn index(
+        &mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn: i16,
+        witnesses: &HashedWitnesses,
+    ) {
+        #[allow(clippy::match_same_arms)]
+        txs.certs().iter().for_each(|cert| {
+            match cert {
+                pallas::ledger::traverse::MultiEraCert::NotApplicable => {},
+                pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => {
+                    self.index_alonzo_cert(cert, slot_no, txn, witnesses);
+                },
+                pallas::ledger::traverse::MultiEraCert::Conway(cert) => {
+                    self.index_conway_cert(cert, slot_no, txn, witnesses);
+                },
+                _ => {},
+            }
+        });
+    }
+
+    /// Execute the Certificate Indexing Queries.
+    ///
+    /// Consumes the `self` and returns a vector of futures.
+    pub(crate) fn execute(self, session: &Arc<CassandraSession>) -> FallibleQueryTasks {
+        let mut query_handles: FallibleQueryTasks = Vec::new();
+
+        let inner_session = session.clone();
+
+        query_handles.push(tokio::spawn(async move {
+            inner_session
+                .execute_batch(
+                    PreparedQuery::StakeRegistrationInsertQuery,
+                    self.stake_reg_data,
+                )
+                .await
+        }));
+
+        query_handles
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/index_txi.rs b/catalyst-gateway/bin/src/db/index/index_txi.rs
new file mode 100644
index 00000000000..20d527cfce6
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/index_txi.rs
@@ -0,0 +1,105 @@
+//! Insert TXI Index Data Queries.
+
+use std::sync::Arc;
+
+use scylla::{SerializeRow, Session};
+use tracing::error;
+
+use super::{
+    queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch},
+    session::CassandraSession,
+};
+use crate::settings::CassandraEnvVars;
+
+/// Insert TXI Query and Parameters
+#[derive(SerializeRow)]
+pub(crate) struct TxiInsertParams {
+    /// Spent Transactions Hash
+    txn_hash: Vec<u8>,
+    /// TXO Index spent.
+    txo: i16,
+    /// Block Slot Number when spend occurred.
+    slot_no: num_bigint::BigInt,
+}
+
+impl TxiInsertParams {
+    /// Create a new record for this transaction.
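+    ///
+    /// For example (illustrative values only):
+    /// ```ignore
+    /// // Records that output 0 of `txn_hash` was spent at slot 1234.
+    /// let params = TxiInsertParams::new(&txn_hash, 0, 1234);
+    /// ```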
+    pub fn new(txn_hash: &[u8], txo: i16, slot_no: u64) -> Self {
+        Self {
+            txn_hash: txn_hash.to_vec(),
+            txo,
+            slot_no: slot_no.into(),
+        }
+    }
+}
+
+/// Insert TXI Query and Parameters
+pub(crate) struct TxiInsertQuery {
+    /// Transaction Input Data to be inserted.
+    txi_data: Vec<TxiInsertParams>,
+}
+
+/// TXI by Txn hash Index
+const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql");
+
+impl TxiInsertQuery {
+    /// Create a new record for this transaction.
+    pub(crate) fn new() -> Self {
+        Self {
+            txi_data: Vec::new(),
+        }
+    }
+
+    /// Prepare Batch of Insert TXI Index Data Queries
+    pub(crate) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let txi_insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_TXI_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = txi_insert_queries {
+            error!(error=%error,"Failed to prepare Insert TXI Query.");
+        };
+
+        txi_insert_queries
+    }
+
+    /// Index the transaction Inputs.
+    pub(crate) fn index(&mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64) {
+        // Index the TXIs.
+        for txi in txs.inputs() {
+            let txn_hash = txi.hash().to_vec();
+            let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX);
+
+            self.txi_data.push(TxiInsertParams {
+                txn_hash,
+                txo,
+                slot_no: slot_no.into(),
+            });
+        }
+    }
+
+    /// Execute the TXI Indexing Queries.
+    ///
+    /// Consumes the `self` and returns a vector of futures.
+    pub(crate) fn execute(self, session: &Arc<CassandraSession>) -> FallibleQueryTasks {
+        let mut query_handles: FallibleQueryTasks = Vec::new();
+
+        let inner_session = session.clone();
+
+        query_handles.push(tokio::spawn(async move {
+            inner_session
+                .execute_batch(PreparedQuery::TxiInsertQuery, self.txi_data)
+                .await
+        }));
+
+        query_handles
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/index_txo.rs b/catalyst-gateway/bin/src/db/index/index_txo.rs
new file mode 100644
index 00000000000..411a4467d47
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/index_txo.rs
@@ -0,0 +1,508 @@
+//! Insert TXO Indexed Data Queries.
+//!
+//! Note, there are multiple ways TXO Data is indexed and they all happen in here.
+
+use std::sync::Arc;
+
+use scylla::{SerializeRow, Session};
+use tracing::{error, warn};
+
+use super::{
+    block::usize_to_i16,
+    queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch},
+    session::CassandraSession,
+};
+use crate::settings::CassandraEnvVars;
+
+/// This is used to indicate that there is no stake address.
+const NO_STAKE_ADDRESS: &[u8] = &[];
+
+/// TXO by Stake Address Indexing query
+const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql");
+
+/// Insert TXO Query Parameters
+/// (Superset of data to support both Staked and Unstaked TXO records.)
+#[derive(SerializeRow)]
+struct TxoInsertParams {
+    /// Stake Address - Binary 28 bytes. 0 bytes = not staked.
+    stake_address: Vec<u8>,
+    /// Block Slot Number
+    slot_no: num_bigint::BigInt,
+    /// Transaction Offset inside the block.
+    txn: i16,
+    /// Transaction Output Offset inside the transaction.
+    txo: i16,
+    /// Actual full TXO Address
+    address: String,
+    /// Actual TXO Value in lovelace
+    value: num_bigint::BigInt,
+    /// Transactions hash.
+    txn_hash: Vec<u8>,
+}
+
+impl TxoInsertParams {
+    /// Create a new record for this transaction.
+    pub(crate) fn new(
+        stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, address: &str, value: u64,
+        txn_hash: &[u8],
+    ) -> Self {
+        Self {
+            stake_address: stake_address.to_vec(),
+            slot_no: slot_no.into(),
+            txn,
+            txo,
+            address: address.to_string(),
+            value: value.into(),
+            txn_hash: txn_hash.to_vec(),
+        }
+    }
+
+    /// Prepare Batch of Staked Insert TXO Index Data Queries
+    async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let txo_insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_TXO_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = txo_insert_queries {
+            error!(error=%error,"Failed to prepare Insert TXO Query.");
+        };
+
+        txo_insert_queries
+    }
+}
+
+/// Unstaked TXO by Txn Hash Indexing query
+const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./queries/insert_unstaked_txo.cql");
+
+/// Insert TXO Unstaked Query Parameters
+/// (Superset of data to support both Staked and Unstaked TXO records.)
+#[derive(SerializeRow)]
+struct TxoUnstakedInsertParams {
+    /// Transactions hash.
+    txn_hash: Vec<u8>,
+    /// Transaction Output Offset inside the transaction.
+    txo: i16,
+    /// Block Slot Number
+    slot_no: num_bigint::BigInt,
+    /// Transaction Offset inside the block.
+    txn: i16,
+    /// Actual full TXO Address
+    address: String,
+    /// Actual TXO Value in lovelace
+    value: num_bigint::BigInt,
+}
+
+impl TxoUnstakedInsertParams {
+    /// Create a new record for this transaction.
+    pub(crate) fn new(
+        txn_hash: &[u8], txo: i16, slot_no: u64, txn: i16, address: &str, value: u64,
+    ) -> Self {
+        Self {
+            txn_hash: txn_hash.to_vec(),
+            txo,
+            slot_no: slot_no.into(),
+            txn,
+            address: address.to_string(),
+            value: value.into(),
+        }
+    }
+
+    /// Prepare Batch of Unstaked Insert TXO Index Data Queries
+    async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let txo_insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_UNSTAKED_TXO_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = txo_insert_queries {
+            error!(error=%error,"Failed to prepare Insert Unstaked TXO Query.");
+        };
+
+        txo_insert_queries
+    }
+}
+
+/// TXO Asset by Stake Address Indexing Query
+const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql");
+
+/// Insert TXO Asset Query Parameters
+/// (Superset of data to support both Staked and Unstaked TXO records.)
+#[derive(SerializeRow)]
+struct TxoAssetInsertParams {
+    /// Stake Address - Binary 28 bytes. 0 bytes = not staked.
+    stake_address: Vec<u8>,
+    /// Block Slot Number
+    slot_no: num_bigint::BigInt,
+    /// Transaction Offset inside the block.
+    txn: i16,
+    /// Transaction Output Offset inside the transaction.
+    txo: i16,
+    /// Policy hash of the asset
+    policy_id: Vec<u8>,
+    /// Policy name of the asset
+    policy_name: String,
+    /// Value of the asset
+    value: num_bigint::BigInt,
+}
+
+impl TxoAssetInsertParams {
+    /// Create a new record for this transaction.
+    ///
+    /// Note Value can be either a u64 or an i64, so use an i128 to represent all possible
+    /// values.
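+    ///
+    /// An illustrative widening sketch (not part of this patch):
+    /// ```ignore
+    /// let big: i128 = i128::from(u64::MAX); // largest unsigned value fits
+    /// let neg: i128 = i128::from(i64::MIN); // most negative signed value fits too
+    /// ```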
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, policy_id: &[u8],
+        policy_name: &str, value: i128,
+    ) -> Self {
+        Self {
+            stake_address: stake_address.to_vec(),
+            slot_no: slot_no.into(),
+            txn,
+            txo,
+            policy_id: policy_id.to_vec(),
+            policy_name: policy_name.to_owned(),
+            value: value.into(),
+        }
+    }
+
+    /// Prepare Batch of Staked Insert TXO Asset Index Data Queries
+    async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let txo_insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_TXO_ASSET_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = txo_insert_queries {
+            error!(error=%error,"Failed to prepare Insert TXO Asset Query.");
+        };
+
+        txo_insert_queries
+    }
+}
+
+/// Unstaked TXO Asset by Txn Hash Indexing Query
+const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str =
+    include_str!("./queries/insert_unstaked_txo_asset.cql");
+
+/// Insert TXO Asset Query Parameters
+/// (Superset of data to support both Staked and Unstaked TXO records.)
+#[derive(SerializeRow)]
+struct TxoUnstakedAssetInsertParams {
+    /// Transactions hash.
+    txn_hash: Vec<u8>,
+    /// Transaction Output Offset inside the transaction.
+    txo: i16,
+    /// Policy hash of the asset
+    policy_id: Vec<u8>,
+    /// Policy name of the asset
+    policy_name: String,
+    /// Block Slot Number
+    slot_no: num_bigint::BigInt,
+    /// Transaction Offset inside the block.
+    txn: i16,
+    /// Value of the asset
+    value: num_bigint::BigInt,
+}
+
+impl TxoUnstakedAssetInsertParams {
+    /// Create a new record for this transaction.
+    ///
+    /// Note Value can be either a u64 or an i64, so use an i128 to represent all possible
+    /// values.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        txn_hash: &[u8], txo: i16, policy_id: &[u8], policy_name: &str, slot_no: u64, txn: i16,
+        value: i128,
+    ) -> Self {
+        Self {
+            txn_hash: txn_hash.to_vec(),
+            txo,
+            policy_id: policy_id.to_vec(),
+            policy_name: policy_name.to_owned(),
+            slot_no: slot_no.into(),
+            txn,
+            value: value.into(),
+        }
+    }
+
+    /// Prepare Batch of Unstaked Insert TXO Asset Index Data Queries
+    async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let txo_insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_UNSTAKED_TXO_ASSET_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = txo_insert_queries {
+            error!(error=%error,"Failed to prepare Insert Unstaked TXO Asset Query.");
+        };
+
+        txo_insert_queries
+    }
+}
+
+/// Insert TXO Query and Parameters
+///
+/// There are multiple possible parameters to a query, which are represented separately.
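+///
+/// A rough usage sketch (illustrative only; `tx`, `slot_no`, `txn_hash` and `txn`
+/// are assumed to be in scope):
+/// ```ignore
+/// let mut txo = TxoInsertQuery::new();
+/// txo.index(&tx, slot_no, &txn_hash, txn);
+/// let handles = txo.execute(&session); // one batch task per non-empty data set
+/// ```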
+#[allow(dead_code)]
+pub(crate) struct TxoInsertQuery {
+    /// Staked TXO Data Parameters
+    staked_txo: Vec<TxoInsertParams>,
+    /// Unstaked TXO Data Parameters
+    unstaked_txo: Vec<TxoUnstakedInsertParams>,
+    /// Staked TXO Asset Data Parameters
+    staked_txo_asset: Vec<TxoAssetInsertParams>,
+    /// Unstaked TXO Asset Data Parameters
+    unstaked_txo_asset: Vec<TxoUnstakedAssetInsertParams>,
+}
+
+impl TxoInsertQuery {
+    /// Create a new Insert TXO Query Batch
+    pub(crate) fn new() -> Self {
+        TxoInsertQuery {
+            staked_txo: Vec::new(),
+            unstaked_txo: Vec::new(),
+            staked_txo_asset: Vec::new(),
+            unstaked_txo_asset: Vec::new(),
+        }
+    }
+
+    /// Prepare Batch of Insert TXO Index Data Queries
+    pub(crate) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch, SizedBatch)> {
+        let txo_staked_insert_batch = TxoInsertParams::prepare_batch(session, cfg).await;
+        let txo_unstaked_insert_batch = TxoUnstakedInsertParams::prepare_batch(session, cfg).await;
+        let txo_staked_asset_insert_batch = TxoAssetInsertParams::prepare_batch(session, cfg).await;
+        let txo_unstaked_asset_insert_batch =
+            TxoUnstakedAssetInsertParams::prepare_batch(session, cfg).await;
+
+        Ok((
+            txo_staked_insert_batch?,
+            txo_unstaked_insert_batch?,
+            txo_staked_asset_insert_batch?,
+            txo_unstaked_asset_insert_batch?,
+        ))
+    }
+
+    /// Extracts a stake address from a TXO if possible.
+    /// Returns None if it is not possible.
+    /// If we want to index, but cannot determine a stake key hash, then return a Vec
+    /// with a single 0 byte.  This is because the index DB needs data in the
+    /// primary key, so we use a single byte of 0 to indicate that there is no
+    /// stake address, and still have a primary key on the table.  Otherwise return the
+    /// stake key hash as a vec of 28 bytes.
+    fn extract_stake_address(
+        txo: &pallas::ledger::traverse::MultiEraOutput<'_>, slot_no: u64, txn_id: &str,
+    ) -> Option<(Vec<u8>, String)> {
+        let stake_address = match txo.address() {
+            Ok(address) => {
+                match address {
+                    // Byron addresses do not have stake addresses and are not supported.
+                    pallas::ledger::addresses::Address::Byron(_) => {
+                        return None;
+                    },
+                    pallas::ledger::addresses::Address::Shelley(address) => {
+                        let address_string = match address.to_bech32() {
+                            Ok(address) => address,
+                            Err(error) => {
+                                // Shouldn't happen, but if it does error and don't index.
+                                error!(error=%error, slot=slot_no, txn=txn_id,"Error converting to bech32: skipping.");
+                                return None;
+                            },
+                        };
+
+                        match address.delegation() {
+                            pallas::ledger::addresses::ShelleyDelegationPart::Script(hash)
+                            | pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => {
+                                (hash.to_vec(), address_string)
+                            },
+                            pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_pointer) => {
+                                // These are not supported from Conway, so we don't support them
+                                // either.
+                                (NO_STAKE_ADDRESS.to_vec(), address_string)
+                            },
+                            pallas::ledger::addresses::ShelleyDelegationPart::Null => {
+                                (NO_STAKE_ADDRESS.to_vec(), address_string)
+                            },
+                        }
+                    },
+                    pallas::ledger::addresses::Address::Stake(_) => {
+                        // This should NOT appear in a TXO, so report if it does. But don't index it
+                        // as a stake address.
+                        warn!(
+                            slot = slot_no,
+                            txn = txn_id,
+                            "Unexpected Stake address found in TXO. Refusing to index."
+                        );
+                        return None;
+                    },
+                }
+            },
+            Err(error) => {
+                // This should not ever happen.
+                error!(error=%error, slot = slot_no, txn = txn_id, "Failed to get Address from TXO. Skipping TXO.");
+                return None;
+            },
+        };
+
+        Some(stake_address)
+    }
+
+    /// Index the transaction Outputs.
+    pub(crate) fn index(
+        &mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn_hash: &[u8],
+        txn: i16,
+    ) {
+        let txn_id = hex::encode_upper(txn_hash);
+
+        // Accumulate all the data we want to insert from this transaction here.
+        for (txo_index, txo) in txs.outputs().iter().enumerate() {
+            // This will only return None if the TXO is not to be indexed (Byron Addresses)
+            let Some((stake_address, address)) = Self::extract_stake_address(txo, slot_no, &txn_id)
+            else {
+                continue;
+            };
+
+            let staked = stake_address != NO_STAKE_ADDRESS;
+            let txo_index = usize_to_i16(txo_index);
+
+            if staked {
+                let params = TxoInsertParams::new(
+                    &stake_address,
+                    slot_no,
+                    txn,
+                    txo_index,
+                    &address,
+                    txo.lovelace_amount(),
+                    txn_hash,
+                );
+
+                self.staked_txo.push(params);
+            } else {
+                let params = TxoUnstakedInsertParams::new(
+                    txn_hash,
+                    txo_index,
+                    slot_no,
+                    txn,
+                    &address,
+                    txo.lovelace_amount(),
+                );
+
+                self.unstaked_txo.push(params);
+            }
+
+            for asset in txo.non_ada_assets() {
+                let policy_id = asset.policy().to_vec();
+                for policy_asset in asset.assets() {
+                    if policy_asset.is_output() {
+                        let policy_name = policy_asset.to_ascii_name().unwrap_or_default();
+                        let value = policy_asset.any_coin();
+
+                        if staked {
+                            let params = TxoAssetInsertParams::new(
+                                &stake_address,
+                                slot_no,
+                                txn,
+                                txo_index,
+                                &policy_id,
+                                &policy_name,
+                                value,
+                            );
+                            self.staked_txo_asset.push(params);
+                        } else {
+                            let params = TxoUnstakedAssetInsertParams::new(
+                                txn_hash,
+                                txo_index,
+                                &policy_id,
+                                &policy_name,
+                                slot_no,
+                                txn,
+                                value,
+                            );
+                            self.unstaked_txo_asset.push(params);
+                        }
+                    } else {
+                        error!("Minting MultiAsset in TXO.");
+                    }
+                }
+            }
+        }
+    }
+
+    /// Execute the TXO Indexing Queries.
+    ///
+    /// Consumes `self` and returns a vector of futures.
+    pub(crate) fn execute(self, session: &Arc<CassandraSession>) -> FallibleQueryTasks {
+        let mut query_handles: FallibleQueryTasks = Vec::new();
+
+        if !self.staked_txo.is_empty() {
+            let inner_session = session.clone();
+            query_handles.push(tokio::spawn(async move {
+                inner_session
+                    .execute_batch(PreparedQuery::TxoAdaInsertQuery, self.staked_txo)
+                    .await
+            }));
+        }
+
+        if !self.unstaked_txo.is_empty() {
+            let inner_session = session.clone();
+            query_handles.push(tokio::spawn(async move {
+                inner_session
+                    .execute_batch(PreparedQuery::UnstakedTxoAdaInsertQuery, self.unstaked_txo)
+                    .await
+            }));
+        }
+
+        if !self.staked_txo_asset.is_empty() {
+            let inner_session = session.clone();
+            query_handles.push(tokio::spawn(async move {
+                inner_session
+                    .execute_batch(PreparedQuery::TxoAssetInsertQuery, self.staked_txo_asset)
+                    .await
+            }));
+        }
+        if !self.unstaked_txo_asset.is_empty() {
+            let inner_session = session.clone();
+            query_handles.push(tokio::spawn(async move {
+                inner_session
+                    .execute_batch(
+                        PreparedQuery::UnstakedTxoAssetInsertQuery,
+                        self.unstaked_txo_asset,
+                    )
+                    .await
+            }));
+        }
+
+        query_handles
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs
index f4157be8550..9bbaf16066a 100644
--- a/catalyst-gateway/bin/src/db/index/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/mod.rs
@@ -1,6 +1,9 @@
 //! Blockchain Index Database
 
 pub(crate) mod block;
+pub(crate) mod index_certs;
+pub(crate) mod index_txi;
+pub(crate) mod index_txo;
 pub(crate) mod queries;
 pub(crate) mod schema;
 pub(crate) mod session;
diff --git a/catalyst-gateway/bin/src/db/index/queries.rs b/catalyst-gateway/bin/src/db/index/queries.rs
index 3aa1ed9b6ce..879cfbd5aa2 100644
--- a/catalyst-gateway/bin/src/db/index/queries.rs
+++ b/catalyst-gateway/bin/src/db/index/queries.rs
@@ -2,94 +2,152 @@
 //!
 //! This improves query execution time.
 
-use scylla::prepared_statement::PreparedStatement;
-use tokio::join;
-use tracing::error;
+use std::sync::Arc;
 
-use super::session::CassandraSession;
+use anyhow::bail;
+use crossbeam_skiplist::SkipMap;
+use scylla::{batch::Batch, serialize::row::SerializeRow, QueryResult, Session};
 
-/// TXO by Stake Address Indexing query
-const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql");
-/// TXO Asset by Stake Address Indexing Query
-const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql");
-/// TXI by Txn hash Index
-const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql");
+use super::{index_certs::CertInsertQuery, index_txi::TxiInsertQuery, index_txo::TxoInsertQuery};
+use crate::settings::{CassandraEnvVars, CASSANDRA_MIN_BATCH_SIZE};
 
-/// Unstaked TXO by Stake Address Indexing query
-const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./queries/insert_unstaked_txo.cql");
-/// Unstaked TXO Asset by Stake Address Indexing Query
-const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str =
-    include_str!("./queries/insert_unstaked_txo_asset.cql");
+/// Batches of different sizes, prepared and ready for use.
+pub(crate) type SizedBatch = SkipMap<u16, Arc<Batch>>;
+
+/// All Prepared Queries that we know about.
+#[allow(clippy::enum_variant_names, dead_code)]
+pub(crate) enum PreparedQuery {
+    /// TXO Insert query.
+    TxoAdaInsertQuery,
+    /// TXO Asset Insert query.
+    TxoAssetInsertQuery,
+    /// Unstaked TXO Insert query.
+    UnstakedTxoAdaInsertQuery,
+    /// Unstaked TXO Asset Insert query.
+    UnstakedTxoAssetInsertQuery,
+    /// TXI Insert query.
+    TxiInsertQuery,
+    /// Stake Registration Insert query.
+    StakeRegistrationInsertQuery,
+}
 
 /// All prepared queries for a session.
 #[allow(clippy::struct_field_names)]
 pub(crate) struct PreparedQueries {
     /// TXO Insert query.
-    pub txo_insert_query: PreparedStatement,
+    txo_insert_queries: SizedBatch,
     /// TXO Asset Insert query.
-    pub txo_asset_insert_query: PreparedStatement,
+    txo_asset_insert_queries: SizedBatch,
     /// Unstaked TXO Insert query.
-    pub unstaked_txo_insert_query: PreparedStatement,
+    unstaked_txo_insert_queries: SizedBatch,
     /// Unstaked TXO Asset Insert query.
-    pub unstaked_txo_asset_insert_query: PreparedStatement,
+    unstaked_txo_asset_insert_queries: SizedBatch,
     /// TXI Insert query.
-    pub txi_insert_query: PreparedStatement,
+    txi_insert_queries: SizedBatch,
+    /// Stake Registration Insert query.
+    stake_registration_insert_queries: SizedBatch,
 }
 
+/// An individual query response that can fail
+#[allow(dead_code)]
+pub(crate) type FallibleQueryResult = anyhow::Result<QueryResult>;
+/// A set of query responses that can fail.
+pub(crate) type FallibleQueryResults = anyhow::Result<Vec<QueryResult>>;
+/// A set of query responses from tasks that can fail.
+pub(crate) type FallibleQueryTasks = Vec<tokio::task::JoinHandle<FallibleQueryResults>>;
+
 impl PreparedQueries {
     /// Create new prepared queries for a given session.
-    pub(crate) async fn new(session: &CassandraSession) -> anyhow::Result<Self> {
-        // Pre-prepare our queries.
-        let (txo_query, txo_asset_query, unstaked_txo_query, unstaked_txo_asset_query, txi_query) = join!(
-            session.prepare(INSERT_TXO_QUERY),
-            session.prepare(INSERT_TXO_ASSET_QUERY),
-            session.prepare(INSERT_UNSTAKED_TXO_QUERY),
-            session.prepare(INSERT_UNSTAKED_TXO_ASSET_QUERY),
-            session.prepare(INSERT_TXI_QUERY),
-        );
-
-        if let Err(ref error) = txo_query {
-            error!(error=%error,"Failed to prepare Insert TXO Query.");
-        };
-        if let Err(ref error) = txo_asset_query {
-            error!(error=%error,"Failed to prepare Insert TXO Asset Query.");
-        };
-        if let Err(ref error) = unstaked_txo_query {
-            error!(error=%error,"Failed to prepare Insert Unstaked TXO Query.");
-        };
-        if let Err(ref error) = unstaked_txo_asset_query {
-            error!(error=%error,"Failed to prepare Insert Unstaked TXO Asset Query.");
-        };
-        if let Err(ref error) = txi_query {
-            error!(error=%error,"Failed to prepare Insert TXI Query.");
-        };
+    pub(crate) async fn new(session: Arc<Session>, cfg: &CassandraEnvVars) -> anyhow::Result<Self> {
+        // We initialize like this, so that all errors preparing queries get shown before aborting.
+        let txi_insert_queries = TxiInsertQuery::prepare_batch(&session, cfg).await;
+        let all_txo_queries = TxoInsertQuery::prepare_batch(&session, cfg).await;
+        let stake_registration_insert_queries = CertInsertQuery::prepare_batch(&session, cfg).await;
 
-        let mut txo_query = txo_query?;
-        let mut txo_asset_query = txo_asset_query?;
-        let mut txo_unstaked_query = unstaked_txo_query?;
-        let mut txo_unstaked_asset_query = unstaked_txo_asset_query?;
-        let mut txi_query = txi_query?;
-
-        // We just want to write as fast as possible, consistency at this stage isn't required.
-        txo_query.set_consistency(scylla::statement::Consistency::Any);
-        txo_asset_query.set_consistency(scylla::statement::Consistency::Any);
-        txo_unstaked_query.set_consistency(scylla::statement::Consistency::Any);
-        txo_unstaked_asset_query.set_consistency(scylla::statement::Consistency::Any);
-        txi_query.set_consistency(scylla::statement::Consistency::Any);
-
-        // These operations are idempotent, because they are always the same data.
-        txo_query.set_is_idempotent(true);
-        txo_asset_query.set_is_idempotent(true);
-        txo_unstaked_query.set_is_idempotent(true);
-        txo_unstaked_asset_query.set_is_idempotent(true);
-        txi_query.set_is_idempotent(true);
+        let (
+            txo_insert_queries,
+            unstaked_txo_insert_queries,
+            txo_asset_insert_queries,
+            unstaked_txo_asset_insert_queries,
+        ) = all_txo_queries?;
 
         Ok(Self {
-            txo_insert_query: txo_query,
-            txo_asset_insert_query: txo_asset_query,
-            unstaked_txo_insert_query: txo_unstaked_query,
-            unstaked_txo_asset_insert_query: txo_unstaked_asset_query,
-            txi_insert_query: txi_query,
+            txo_insert_queries,
+            txo_asset_insert_queries,
+            unstaked_txo_insert_queries,
+            unstaked_txo_asset_insert_queries,
+            txi_insert_queries: txi_insert_queries?,
+            stake_registration_insert_queries: stake_registration_insert_queries?,
         })
     }
+
+    /// Prepares all permutations of the batch from 1 to max.
+    /// It is necessary to do this because batches are pre-sized; they cannot be dynamic.
+    /// Preparing the batches in advance is a very large performance increase.
+    pub(crate) async fn prepare_batch(
+        session: Arc<Session>, query: &str, cfg: &CassandraEnvVars,
+        consistency: scylla::statement::Consistency, idempotent: bool, logged: bool,
+    ) -> anyhow::Result<SizedBatch> {
+        let sized_batches: SizedBatch = SkipMap::new();
+
+        // First prepare the query. Only needs to be done once, all queries on a batch are the
+        // same.
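+        // (Design note: a single prepared statement can be appended to a batch
+        // multiple times, so one preparation serves every batch size below.)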
+        let mut prepared = session.prepare(query).await?;
+        prepared.set_consistency(consistency);
+        prepared.set_is_idempotent(idempotent);
+
+        for batch_size in CASSANDRA_MIN_BATCH_SIZE..=cfg.max_batch_size {
+            let mut batch: Batch = Batch::new(if logged {
+                scylla::batch::BatchType::Logged
+            } else {
+                scylla::batch::BatchType::Unlogged
+            });
+            batch.set_consistency(consistency);
+            batch.set_is_idempotent(idempotent);
+            for _ in CASSANDRA_MIN_BATCH_SIZE..=batch_size {
+                batch.append_statement(prepared.clone());
+            }
+
+            sized_batches.insert(batch_size.try_into()?, Arc::new(batch));
+        }
+
+        Ok(sized_batches)
+    }
+
+    /// Execute a Batch query with the given parameters.
+    ///
+    /// Values should be a Vec of values which implement `SerializeRow`. They MUST all
+    /// be the same type, and must match the query being executed.
+    ///
+    /// This will divide the batch into optimal sized chunks and execute them until all
+    /// values have been executed or the first error is encountered.
+    pub(crate) async fn execute_batch<T: SerializeRow>(
+        &self, session: Arc<Session>, cfg: Arc<CassandraEnvVars>, query: PreparedQuery,
+        values: Vec<T>,
+    ) -> FallibleQueryResults {
+        let query_map = match query {
+            PreparedQuery::TxoAdaInsertQuery => &self.txo_insert_queries,
+            PreparedQuery::TxoAssetInsertQuery => &self.txo_asset_insert_queries,
+            PreparedQuery::UnstakedTxoAdaInsertQuery => &self.unstaked_txo_insert_queries,
+            PreparedQuery::UnstakedTxoAssetInsertQuery => &self.unstaked_txo_asset_insert_queries,
+            PreparedQuery::TxiInsertQuery => &self.txi_insert_queries,
+            PreparedQuery::StakeRegistrationInsertQuery => &self.stake_registration_insert_queries,
+        };
+
+        let mut results: Vec<QueryResult> = Vec::new();
+
+        let chunks = values.chunks(cfg.max_batch_size.try_into().unwrap_or(1));
+
+        for chunk in chunks {
+            let chunk_size: u16 = chunk.len().try_into()?;
+            let Some(batch_query) = query_map.get(&chunk_size) else {
+                // This should not actually occur.
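+                // (Batches were prepared for every size from CASSANDRA_MIN_BATCH_SIZE
+                // up to max_batch_size, and `chunks()` never yields an empty or
+                // oversized chunk, so a miss here indicates a logic error.)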
+ bail!("No batch query found for size {}", chunk_size); + }; + let batch_query_statements = batch_query.value().clone(); + results.push(session.batch(&batch_query_statements, chunk).await?); + } + + Ok(results) + } } diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_stake_registration.cql b/catalyst-gateway/bin/src/db/index/queries/insert_stake_registration.cql new file mode 100644 index 00000000000..76907c19dbb --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/insert_stake_registration.cql @@ -0,0 +1,20 @@ +-- Index Stake Registrations +INSERT INTO stake_registration ( + stake_hash, + slot_no, + txn, + stake_address, + script, + register, + deregister, + pool_delegation +) VALUES ( + :stake_hash, + :slot_no, + :txn, + :stake_address, + :script, + :register, + :deregister, + :pool_delegation +); \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql index 7b6b6227cea..c25bb41a1ff 100644 --- a/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql @@ -1,6 +1,10 @@ -- Create the TXI Record for a transaction hash, -UPDATE txi_by_txn_hash SET - slot_no = :slot_no -WHERE - txn_hash = :txn_hash AND - txo = :txo ; \ No newline at end of file +INSERT INTO txi_by_txn_hash ( + txn_hash, + txo, + slot_no +) VALUES ( + :txn_hash, + :txo, + :slot_no +); diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql index de22b94c132..630f431c975 100644 --- a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql @@ -1,11 +1,18 @@ -- Create the TXO Record for a stake address, --- But nlyif it does not already exist. -UPDATE txo_by_stake SET - address = :address, - value = :value, - txn_hash = :txn_hash -WHERE - stake_address = :stake_address AND - slot_no = :slot_no AND - txn = :txn and - txo = :txo ; +INSERT INTO txo_by_stake ( + stake_address, + slot_no, + txn, + txo, + address, + value, + txn_hash +) VALUES ( + :stake_address, + :slot_no, + :txn, + :txo, + :address, + :value, + :txn_hash +); \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql index a03e25b99a6..3bdb6342c4b 100644 --- a/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql @@ -1,12 +1,19 @@ -- Create the TXO Record for a stake address, -- Will not overwrite anything if it already exists. 
-UPDATE txo_assets_by_stake SET
-    value = :value,
-    txn_hash = :txn_hash
-WHERE
-    stake_address = :stake_address AND
-    slot_no = :slot_no AND
-    txn = :txn AND
-    txo = :txo AND
-    policy_id = :policy_id AND
-    policy_name = :policy_name ;
+INSERT INTO txo_assets_by_stake (
+    stake_address,
+    slot_no,
+    txn,
+    txo,
+    policy_id,
+    policy_name,
+    value
+) VALUES (
+    :stake_address,
+    :slot_no,
+    :txn,
+    :txo,
+    :policy_id,
+    :policy_name,
+    :value
+);
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql
index de22b94c132..08cc91808e3 100644
--- a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql
@@ -1,11 +1,16 @@
--- Create the TXO Record for a stake address,
--- But nlyif it does not already exist.
-UPDATE txo_by_stake SET
-    address = :address,
-    value = :value,
-    txn_hash = :txn_hash
-WHERE
-    stake_address = :stake_address AND
-    slot_no = :slot_no AND
-    txn = :txn and
-    txo = :txo ;
+-- Create the TXO Record for when it's not staked.
+INSERT INTO unstaked_txo_by_txn_hash (
+    txn_hash,
+    txo,
+    slot_no,
+    txn,
+    address,
+    value
+) VALUES (
+    :txn_hash,
+    :txo,
+    :slot_no,
+    :txn,
+    :address,
+    :value
+);
diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql
index a03e25b99a6..e170a0b46c2 100644
--- a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql
+++ b/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql
@@ -1,12 +1,18 @@
--- Create the TXO Record for a stake address,
--- Will not overwrite anything if it already exists.
-UPDATE txo_assets_by_stake SET
-    value = :value,
-    txn_hash = :txn_hash
-WHERE
-    stake_address = :stake_address AND
-    slot_no = :slot_no AND
-    txn = :txn AND
-    txo = :txo AND
-    policy_id = :policy_id AND
-    policy_name = :policy_name ;
+-- Create the TXO Record for an unstaked TXO Asset.
+INSERT INTO unstaked_txo_assets_by_txn_hash (
+    txn_hash,
+    txo,
+    policy_id,
+    policy_name,
+    slot_no,
+    txn,
+    value
+) VALUES (
+    :txn_hash,
+    :txo,
+    :policy_id,
+    :policy_name,
+    :slot_no,
+    :txn,
+    :value
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs
index e4f2f1eacad..f854fc7329a 100644
--- a/catalyst-gateway/bin/src/db/index/schema.rs
+++ b/catalyst-gateway/bin/src/db/index/schema.rs
@@ -1,39 +1,56 @@
 //! Index Schema
+use std::sync::Arc;
+
 use anyhow::Context;
 use handlebars::Handlebars;
+use scylla::Session;
 use serde_json::json;
 use tracing::error;
 
-use super::session::CassandraSession;
 use crate::settings::CassandraEnvVars;
 
 /// Keyspace Create (Templated)
 const CREATE_NAMESPACE_CQL: &str = include_str!("./schema/namespace.cql");
 
-/// TXO by Stake Address Table Schema
-const CREATE_TABLE_TXO_BY_STAKE_ADDRESS_CQL: &str = include_str!("./schema/txo_by_stake_table.cql");
-/// TXO Assets by Stake Address Table Schema
-const CREATE_TABLE_TXO_ASSETS_BY_STAKE_ADDRESS_CQL: &str =
-    include_str!("./schema/txo_assets_by_stake_table.cql");
-/// TXI by Stake Address Table Schema
-const CREATE_TABLE_TXI_BY_TXN_HASH_CQL: &str = include_str!("./schema/txi_by_txn_hash_table.cql");
-
-/// TXO by Stake Address Table Schema
-const CREATE_TABLE_UNSTAKED_TXO_BY_TXN_HASH_CQL: &str =
-    include_str!("./schema/unstaked_txo_by_txn_hash.cql");
-/// TXO Assets by Stake Address Table Schema
-const CREATE_TABLE_UNSTAKED_TXO_ASSETS_BY_TXN_HASH_CQL: &str =
-    include_str!("./schema/unstaked_txo_assets_by_txn_hash.cql");
-
-/// Stake Address/Registration Table Schema
-const CREATE_TABLE_STAKE_HASH_TO_STAKE_ADDRESS_CQL: &str =
-    include_str!("./schema/stake_hash_to_stake_address.cql");
-
 /// The version of the Schema we are using.
 /// Must be incremented if there is a breaking change in any schema tables below.
 pub(crate) const SCHEMA_VERSION: u64 = 1;
 
+/// All Schema Creation Statements
+const SCHEMAS: &[(&str, &str)] = &[
+    (
+        // TXO by Stake Address Table Schema
+        include_str!("./schema/txo_by_stake_table.cql"),
+        "Create Table TXO By Stake Address",
+    ),
+    (
+        // TXO Assets by Stake Address Table Schema
+        include_str!("./schema/txo_assets_by_stake_table.cql"),
+        "Create Table TXO Assets By Stake Address",
+    ),
+    (
+        // TXO Unstaked Table Schema
+        include_str!("./schema/unstaked_txo_by_txn_hash.cql"),
+        "Create Table Unstaked TXO By Txn Hash",
+    ),
+    (
+        // TXO Unstaked Assets Table Schema
+        include_str!("./schema/unstaked_txo_assets_by_txn_hash.cql"),
+        "Create Table Unstaked TXO Assets By Txn Hash",
+    ),
+    (
+        // TXI by Stake Address Table Schema
+        include_str!("./schema/txi_by_txn_hash_table.cql"),
+        "Create Table TXI By Stake Address",
+    ),
+    (
+        // Stake Address/Registration Table Schema
+        include_str!("./schema/stake_registration.cql"),
+        "Create Table Stake Registration",
+    ),
+];
+
 /// Get the namespace for a particular db configuration
 pub(crate) fn namespace(cfg: &CassandraEnvVars) -> String {
     // Build and set the Keyspace to use.
@@ -43,7 +60,7 @@ pub(crate) fn namespace(cfg: &CassandraEnvVars) -> String {
 /// Create the namespace we will use for this session
 /// Ok to run this if the namespace already exists.
 async fn create_namespace(
-    session: &mut CassandraSession, cfg: &CassandraEnvVars,
+    session: &mut Arc<Session>, cfg: &CassandraEnvVars,
 ) -> anyhow::Result<()> {
     let keyspace = namespace(cfg);
@@ -68,95 +85,23 @@ async fn create_namespace(
     Ok(())
 }
 
-/// Create tables for holding TXO data.
-async fn create_txo_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
-    let stmt = session
-        .prepare(CREATE_TABLE_TXO_BY_STAKE_ADDRESS_CQL)
-        .await
-        .context("Create Table TXO By Stake Address: Prepared")?;
-    session
-        .execute(&stmt, ())
-        .await
-        .context("Create Table TXO By Stake Address: Executed")?;
-
-    let stmt = session
-        .prepare(CREATE_TABLE_TXO_ASSETS_BY_STAKE_ADDRESS_CQL)
-        .await
-        .context("Create Table TXO Assets By Stake Address: Prepared")?;
-    session
-        .execute(&stmt, ())
-        .await
-        .context("Create Table TXO Assets By Stake Address: Executed")?;
-
-    Ok(())
-}
-
-/// Create tables for holding unstaked TXO data.
-async fn create_unstaked_txo_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
-    let stmt = session
-        .prepare(CREATE_TABLE_UNSTAKED_TXO_BY_TXN_HASH_CQL)
-        .await
-        .context("Create Table Unstaked TXO By Txn Hash: Prepared")?;
-    session
-        .execute(&stmt, ())
-        .await
-        .context("Create Table Unstaked TXO By Txn Hash: Executed")?;
-
-    let stmt = session
-        .prepare(CREATE_TABLE_UNSTAKED_TXO_ASSETS_BY_TXN_HASH_CQL)
-        .await
-        .context("Create Table Unstaked TXO Assets By Txn Hash: Prepared")?;
-    session
-        .execute(&stmt, ())
-        .await
-        .context("Create Table Unstaked TXO Assets By Txn Hash: Executed")?;
-
-    Ok(())
-}
-
-/// Create tables for holding volatile TXI data
-async fn create_txi_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
-    let stmt = session
-        .prepare(CREATE_TABLE_TXI_BY_TXN_HASH_CQL)
-        .await
-        .context("Create Table TXI By Stake Address: Prepared")?;
-
-    session
-        .execute(&stmt, ())
-        .await
-        .context("Create Table TXI By Stake Address: Executed")?;
-
-    Ok(())
-}
-
-/// Create tables for holding volatile TXI data
-async fn create_stake_tables(session: &mut CassandraSession) -> anyhow::Result<()> {
-    let stmt = session
-        .prepare(CREATE_TABLE_STAKE_HASH_TO_STAKE_ADDRESS_CQL)
-        .await
-        .context("Create Table Stake Hash to Stake Address: Prepared")?;
-
-    session
-        .execute(&stmt, ())
-        .await
-        .context("Create Table Stake Hash to Stake Address: Executed")?;
-
-    Ok(())
-}
-
 /// Create the Schema on the connected Cassandra DB
 pub(crate) async fn create_schema(
-    session: &mut CassandraSession, cfg: &CassandraEnvVars,
+    session: &mut Arc<Session>, cfg: &CassandraEnvVars,
 ) -> anyhow::Result<()> {
     create_namespace(session, cfg).await?;
 
-    create_txo_tables(session).await?;
+    for schema in SCHEMAS {
+        let stmt = session
+            .prepare(schema.0)
+            .await
+            .context(format!("{} : Prepared", schema.1))?;
 
-    create_unstaked_txo_tables(session).await?;
-
-    create_txi_tables(session).await?;
-
-    create_stake_tables(session).await?;
+        session
+            .execute(&stmt, ())
+            .await
+            .context(format!("{} : Executed", schema.1))?;
+    }
 
     // Wait for the Schema to be ready.
     session.await_schema_agreement().await?;
diff --git a/catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql b/catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql
deleted file mode 100644
index 25c4df61326..00000000000
--- a/catalyst-gateway/bin/src/db/index/schema/stake_hash_to_stake_address.cql
+++ /dev/null
@@ -1,17 +0,0 @@
--- This could be ADA or a native asset being spent.
--- This can represent a spend on either immutable data or volatile data.
-CREATE TABLE IF NOT EXISTS stake_hash_to_stake_address (
-    -- Primary Key Data
-    stake_hash blob, -- 32 Bytes Stake Key Hash.
-    slot_no varint, -- slot number when the key_was_registered.
-    txn smallint, -- Index of the TX which washolds the registration
-
-    -- Non key data, we can only spend a transaction hash/txo once, so this should be unique in any event.
-    stake_address blob, -- 32 Bytes Stake address - not present for scripts
-
-    register boolean, -- True if the stake was registered in this transaction.
-    deregister boolean, -- True if the stake key was deregisterd in this transaction.
-    pool_delegation blob, -- Stake was delegated to this Pool address.
-
-    PRIMARY KEY (stake_hash, slot_no, txn)
-);
diff --git a/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql b/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql
new file mode 100644
index 00000000000..65915fa1d91
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql
@@ -0,0 +1,20 @@
+-- Index of stake registrations.
+-- Can also be used to convert a known stake key hash back to a full stake address.
+CREATE TABLE IF NOT EXISTS stake_registration (
+    -- Primary Key Data
+    stake_hash blob, -- 28 Bytes Stake Key Hash.
+    slot_no varint, -- slot number when the key_was_registered/re-registered.
+    txn smallint, -- Index of the TX which holds the registration data.
+
+    -- Non-Key Data
+    stake_address blob, -- 32 Bytes Stake address - not present for scripts and may not be present for `register`.
+
+    -- Stake key lifecycle data, shows what happened with the stake key at this slot#.
+    script boolean, -- Is the address a script address.
+    register boolean, -- True if the stake was registered in this transaction.
+    deregister boolean, -- True if the stake key was deregistered in this transaction.
+    pool_delegation blob, -- Stake was delegated to this Pool address.
+                          -- Not present if delegation did not change.
+
+    PRIMARY KEY (stake_hash, script, slot_no, txn)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
index bcacbb3f845..3ab1bcad003 100644
--- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql
@@ -12,15 +12,5 @@ CREATE TABLE IF NOT EXISTS txo_assets_by_stake (
     -- None Key Data of the asset.
     value varint, -- Value of the asset (u64)
 
-    -- Data needed to correlate a spent TXO.
-    txn_hash blob, -- 32 byte hash of this transaction.
-
-    spent_slot varint, -- Slot this TXO was spent in.
-    -- This is ONLY calculated/stored
-    -- when first detected in a query lookup.
-    -- It serves as an optimization on subsequnt queries.
-    -- It is also only updated when the refenc is the same type
-    -- ie, an immutable txo can only record an immutable spend.
-
     PRIMARY KEY (stake_address, slot_no, txn, txo, policy_id, policy_name)
 );
diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql
index 2c1211175c1..723db9350c1 100644
--- a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql
@@ -6,20 +6,12 @@ CREATE TABLE IF NOT EXISTS unstaked_txo_assets_by_txn_hash (
     policy_id blob, -- asset policy hash (id) (28 byte binary hash)
     policy_name text, -- name of the policy (UTF8)
 
-    -- Secondary location information
-    slot_no varint, -- slot number the txo was created in.
-    txn smallint, -- Which Transaction in the Slot is the TXO.
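As an aside, the `SCHEMAS` table plus loop shown in `schema.rs` above reduces every table to a `(CQL, description)` pair, where the description becomes the error context. A hedged, minimal sketch of that data-driven pattern, assuming only the `anyhow` crate (the closure stands in for the real prepare-and-execute calls):

```rust
use anyhow::Context;

/// Each schema statement is paired with a human-readable description,
/// which becomes the error context if creation fails.
const SCHEMAS: &[(&str, &str)] = &[
    ("CREATE TABLE IF NOT EXISTS txi (k int PRIMARY KEY);", "Create Table TXI"),
    ("CREATE TABLE IF NOT EXISTS txo (k int PRIMARY KEY);", "Create Table TXO"),
];

/// Apply every statement in order; the first failure aborts with context.
fn create_all(run: impl Fn(&str) -> anyhow::Result<()>) -> anyhow::Result<()> {
    for (cql, desc) in SCHEMAS {
        run(cql).context(format!("{desc} : Executed"))?;
    }
    Ok(())
}

fn main() -> anyhow::Result<()> {
    // A dummy runner that always succeeds.
    create_all(|cql| {
        println!("running: {cql}");
        Ok(())
    })
}
```

Adding a table then becomes a one-entry change to `SCHEMAS` instead of a new hand-written function.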
+    -- Secondary Location information for the transaction.
+    slot_no varint, -- slot number the txo was created in.
+    txn smallint, -- Which Transaction in the Slot is the TXO.
 
     -- Value of the asset.
     value varint, -- Value of the asset (u64)
 
-    -- Data needed to correlate a spent TXO.
-    spent_slot varint, -- Slot this TXO was spent in.
-    -- This is ONLY calculated/stored
-    -- when first detected in a query lookup.
-    -- It serves as an optimization on subsequnt queries.
-    -- It is also only updated when the refenc is the same type
-    -- ie, an immutable txo can only record an immutable spend.
-
     PRIMARY KEY (txn_hash, txo, policy_id, policy_name)
 );
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index 08461f8ad1f..4e50e3a5392 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -7,11 +7,16 @@ use std::{
 };
 
 use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode};
-use scylla::{frame::Compression, ExecutionProfile, Session, SessionBuilder};
+use scylla::{
+    frame::Compression, serialize::row::SerializeRow, ExecutionProfile, Session, SessionBuilder,
+};
 use tokio::fs;
 use tracing::{error, info};
 
-use super::{queries::PreparedQueries, schema::create_schema};
+use super::{
+    queries::{FallibleQueryResults, PreparedQueries, PreparedQuery},
+    schema::create_schema,
+};
 use crate::{
     db::index::queries,
     settings::{CassandraEnvVars, Settings},
@@ -41,10 +46,82 @@ pub(crate) enum TlsChoice {
     Unverified,
 }
 
-/// A Session on the cassandra database
-pub(crate) type CassandraSession = Arc<Session>;
+/// All interaction with cassandra goes through this struct.
+#[derive(Clone)]
+pub(crate) struct CassandraSession {
+    /// Is the session to the persistent or volatile DB?
+    #[allow(dead_code)]
+    persistent: bool,
+    /// Configuration for this session.
+    cfg: Arc<CassandraEnvVars>,
+    /// The actual session.
+    session: Arc<Session>,
+    /// All prepared queries we can use on this session.
+    queries: Arc<PreparedQueries>,
+}
+
+/// Persistent DB Session.
+static PERSISTENT_SESSION: OnceLock<Arc<CassandraSession>> = OnceLock::new();
+
+/// Volatile DB Session.
+static VOLATILE_SESSION: OnceLock<Arc<CassandraSession>> = OnceLock::new();
+
+impl CassandraSession {
+    /// Initialise the Cassandra Cluster Connections.
+    pub(crate) fn init() {
+        let (persistent, volatile) = Settings::cassandra_db_cfg();
+
+        let _join_handle = tokio::task::spawn(async move { retry_init(persistent, true).await });
+        let _join_handle = tokio::task::spawn(async move { retry_init(volatile, false).await });
+    }
+
+    /// Check to see if the Cassandra Indexing DB is ready for use
+    pub(crate) fn is_ready() -> bool {
+        PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some()
+    }
+
+    /// Wait for the Cassandra Indexing DB to be ready before continuing
+    pub(crate) async fn wait_is_ready(interval: Duration) {
+        loop {
+            if Self::is_ready() {
+                break;
+            }
+
+            tokio::time::sleep(interval).await;
+        }
+    }
+
+    /// Get the session needed to perform a query.
+    pub(crate) fn get(persistent: bool) -> Option<Arc<CassandraSession>> {
+        if persistent {
+            PERSISTENT_SESSION.get().cloned()
+        } else {
+            VOLATILE_SESSION.get().cloned()
+        }
+    }
+
+    /// Execute a Batch query with the given parameters.
+    ///
+    /// Values should be a Vec of values which implement `SerializeRow`; they MUST all
+    /// be the same type, and must match the query being executed.
+    ///
+    /// This will divide the batch into optimal sized chunks and execute them until all
+    /// values have been executed or the first error is encountered.
+    pub(crate) async fn execute_batch<T: SerializeRow>(
+        &self, query: PreparedQuery, values: Vec<T>,
+    ) -> FallibleQueryResults {
+        let session = self.session.clone();
+        let cfg = self.cfg.clone();
+        let queries = self.queries.clone();
+
+        queries.execute_batch(session, cfg, query, values).await
+    }
+}
 
 /// Create a new execution profile based on the given configuration.
+///
+/// The intention here is that we should be able to tune this based on configuration,
+/// but for now we don't, so the `cfg` is not used yet.
 fn make_execution_profile(_cfg: &CassandraEnvVars) -> ExecutionProfile {
     ExecutionProfile::builder()
         .consistency(scylla::statement::Consistency::LocalQuorum)
@@ -65,7 +142,7 @@ fn make_execution_profile(_cfg: &CassandraEnvVars) -> ExecutionProfile {
 }
 
 /// Construct a session based on the given configuration.
-async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result<CassandraSession> {
+async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result<Arc<Session>> {
     let cluster_urls: Vec<&str> = cfg.url.as_str().split(',').collect();
 
     let mut sb = SessionBuilder::new()
@@ -112,12 +189,6 @@ async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result<Arc<Session>> {
-/// Persistent DB Session.
-static PERSISTENT_SESSION: OnceLock<(CassandraSession, Arc<PreparedQueries>)> = OnceLock::new();
-
-/// Volatile DB Session.
-static VOLATILE_SESSION: OnceLock<(CassandraSession, Arc<PreparedQueries>)> = OnceLock::new();
-
 /// Continuously try and init the DB, if it fails, backoff.
 ///
 /// Display reasonable logs to help diagnose DB connection issues.
@@ -163,7 +234,7 @@ async fn retry_init(cfg: CassandraEnvVars, persistent: bool) {
             continue;
         }
 
-        let queries = match queries::PreparedQueries::new(&session).await {
+        let queries = match queries::PreparedQueries::new(session.clone(), &cfg).await {
            Ok(queries) => Arc::new(queries),
            Err(error) => {
                error!(
@@ -175,12 +246,19 @@ async fn retry_init(cfg: CassandraEnvVars, persistent: bool) {
             },
         };
 
+        let cassandra_session = CassandraSession {
+            persistent,
+            cfg: Arc::new(cfg),
+            session,
+            queries,
+        };
+
         // Save the session so we can execute queries on the DB
         if persistent {
-            if PERSISTENT_SESSION.set((session, queries)).is_err() {
+            if PERSISTENT_SESSION.set(Arc::new(cassandra_session)).is_err() {
                 error!("Persistent Session already set. This should not happen.");
             };
-        } else if VOLATILE_SESSION.set((session, queries)).is_err() {
+        } else if VOLATILE_SESSION.set(Arc::new(cassandra_session)).is_err() {
             error!("Volatile Session already set. This should not happen.");
         };
 
         info!(db_type = db_type, "Index DB Session Creation: OK.");
     }
 }
-
-/// Initialise the Cassandra Cluster Connections.
-pub(crate) fn init() {
-    let (persistent, volatile) = Settings::cassandra_db_cfg();
-
-    let _join_handle = tokio::task::spawn(async move { retry_init(persistent, true).await });
-    let _join_handle = tokio::task::spawn(async move { retry_init(volatile, false).await });
-}
-
-/// Check to see if the Cassandra Indexing DB is ready for use
-pub(crate) fn is_ready() -> bool {
-    PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some()
-}
-
-/// Wait for the Cassandra Indexing DB to be ready before continuing
-pub(crate) async fn wait_is_ready(interval: Duration) {
-    loop {
-        if is_ready() {
-            break;
-        }
-
-        tokio::time::sleep(interval).await;
-    }
-}
-
-/// Get the session needed to perform a query.
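The session globals above follow a standard Rust singleton pattern: a `OnceLock` that the background init task sets exactly once, and that every other caller reads lock-free. A stripped-down, standard-library-only sketch of just that pattern (the `Session` type here is a stand-in, not the real driver session):

```rust
use std::sync::{Arc, OnceLock};

/// Stand-in for the real session type.
struct Session;

/// Set once at startup, read from any thread afterwards.
static PERSISTENT: OnceLock<Arc<Session>> = OnceLock::new();

/// Store the session; a second call is an error we only log.
fn set_session(session: Session) {
    if PERSISTENT.set(Arc::new(session)).is_err() {
        eprintln!("Persistent Session already set. This should not happen.");
    }
}

/// Callers get a cheap `Arc` clone, or `None` until init completes.
fn get_session() -> Option<Arc<Session>> {
    PERSISTENT.get().cloned()
}

fn main() {
    assert!(get_session().is_none());
    set_session(Session);
    assert!(get_session().is_some());
}
```

Because `get` returns `Option`, readiness probes such as `is_ready` fall out naturally: the service is ready exactly when both globals are populated.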
-pub(crate) fn session(persistent: bool) -> Option<(CassandraSession, Arc)> { - if persistent { - PERSISTENT_SESSION.get().cloned() - } else { - VOLATILE_SESSION.get().cloned() - } -} diff --git a/catalyst-gateway/bin/src/service/api/health/live_get.rs b/catalyst-gateway/bin/src/service/api/health/live_get.rs index 29f43538646..84a6cafaf0a 100644 --- a/catalyst-gateway/bin/src/service/api/health/live_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/live_get.rs @@ -4,7 +4,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; use poem_openapi::ApiResponse; -use crate::{db, service::common::responses::WithErrorResponses}; +use crate::{db::index::session::CassandraSession, service::common::responses::WithErrorResponses}; /// Flag to determine if the service has started static IS_LIVE: AtomicBool = AtomicBool::new(true); @@ -17,7 +17,7 @@ pub(crate) fn set_live(flag: bool) { /// Get the started flag #[allow(dead_code)] fn is_live() -> bool { - IS_LIVE.load(Ordering::Acquire) && db::index::session::is_ready() + IS_LIVE.load(Ordering::Acquire) && CassandraSession::is_ready() } /// Endpoint responses. diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs index 6acc2d81086..919a9b95dbb 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings.rs @@ -70,8 +70,21 @@ const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; /// Default Cassandra DB URL for the Persistent DB. const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile"; +/// Default maximum batch size. +/// This comes from: +/// +/// Scylla may support larger batches for better performance. +/// Larger batches will incur more memory overhead to store the prepared batches. +const CASSANDRA_MAX_BATCH_SIZE_DEFAULT: i64 = 30; + +/// Minimum possible batch size. +pub(crate) const CASSANDRA_MIN_BATCH_SIZE: i64 = 1; + +/// Maximum possible batch size. +const CASSANDRA_MAX_BATCH_SIZE: i64 = 256; + /// Default chain to follow. -const CHAIN_FOLLOWER_DEFAULT: Network = Network::Mainnet; +const CHAIN_FOLLOWER_DEFAULT: Network = Network::Preprod; /// Default number of sync tasks (must be in the range 1 to 255 inclusive.) const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u16 = 16; @@ -417,6 +430,9 @@ pub(crate) struct CassandraEnvVars { /// Compression to use. pub(crate) compression: CompressionChoice, + + /// Maximum Configured Batch size. + pub(crate) max_batch_size: i64, } impl CassandraEnvVars { @@ -443,6 +459,12 @@ impl CassandraEnvVars { tls, tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT"), false), compression, + max_batch_size: StringEnvVar::new_as_i64( + &format!("CASSANDRA_{name}_BATCH_SIZE"), + CASSANDRA_MAX_BATCH_SIZE_DEFAULT, + CASSANDRA_MIN_BATCH_SIZE, + CASSANDRA_MAX_BATCH_SIZE, + ), } } From 194d7341796de235a67246b562d101faa46a2b0a Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 16 Aug 2024 17:37:22 +0700 Subject: [PATCH 21/69] fix(backend): Add support for log control with env vars, default to mainnet, adjust `justfile` to properly select preprod and also refresh git dependencies. 
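The interesting piece of this patch is the `EnvFilter` layer added to the logger, which is what makes directives such as `RUST_LOG="error,cat-gateway=debug"` in the `justfile` recipes take effect. A minimal, hedged sketch of that wiring (assuming the `tracing-subscriber` crate with its `env-filter` and `fmt` features enabled; the gateway's real logger composes additional layers):

```rust
use tracing_subscriber::{filter::LevelFilter, prelude::*, EnvFilter};

fn main() {
    // Read directives from RUST_LOG, falling back to INFO when the
    // variable is unset or contains an unparsable directive.
    let env_filter = EnvFilter::builder()
        .with_default_directive(LevelFilter::INFO.into())
        .from_env_lossy();

    tracing_subscriber::registry()
        .with(tracing_subscriber::fmt::layer())
        .with(env_filter)
        .init();
}
```

`from_env_lossy` is the forgiving variant: a bad directive is skipped rather than aborting startup, which suits a service configured through deployment environment variables.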
--- catalyst-gateway/Cargo.toml | 6 +++--- catalyst-gateway/bin/src/logger.rs | 5 +++++ catalyst-gateway/bin/src/settings.rs | 17 ++++++++--------- justfile | 16 +++++++++++++--- 4 files changed, 29 insertions(+), 15 deletions(-) diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index caf304a7b63..ccb8356c2c3 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -18,7 +18,7 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] clap = "4.5.13" tracing = "0.1.40" -tracing-subscriber = "0.3.18" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } serde = "1.0.204" serde_json = "1.0.121" poem = "3.0.4" @@ -51,8 +51,8 @@ pallas = "0.29.0" cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", branch = "feat/auto-sync-mithril", version="0.2.0" } stringzilla = "3.8.4" duration-string = "0.4.0" -build-info = "0.0.37" -build-info-build = "0.0.37" +build-info = "0.0.38" +build-info-build = "0.0.38" ed25519-dalek = "2.1.1" scylla = { version = "0.13.1", features = ["ssl", "full-serialization"]} strum = { version = "0.26.3", features = ["derive"] } diff --git a/catalyst-gateway/bin/src/logger.rs b/catalyst-gateway/bin/src/logger.rs index b02dbe1b15f..104c3cce6fe 100644 --- a/catalyst-gateway/bin/src/logger.rs +++ b/catalyst-gateway/bin/src/logger.rs @@ -104,6 +104,11 @@ pub(crate) fn init(log_level: LogLevel) { tracing_subscriber::registry() .with(filter) .with(layer) + .with( + tracing_subscriber::EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .from_env_lossy(), + ) .init(); // Logging is globally disabled by default, so globally enable it to the required level. diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs index 919a9b95dbb..b6d2d99b8f1 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings.rs @@ -84,7 +84,7 @@ pub(crate) const CASSANDRA_MIN_BATCH_SIZE: i64 = 1; const CASSANDRA_MAX_BATCH_SIZE: i64 = 256; /// Default chain to follow. -const CHAIN_FOLLOWER_DEFAULT: Network = Network::Preprod; +const CHAIN_FOLLOWER_DEFAULT: Network = Network::Mainnet; /// Default number of sync tasks (must be in the range 1 to 255 inclusive.) const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u16 = 16; @@ -323,16 +323,15 @@ impl StringEnvVar { } choices.push(']'); - let value = match T::from_str( - StringEnvVar::new( - var_name, - (default.to_string().as_str(), redacted, choices.as_str()).into(), - ) - .as_str(), - ) { + let choice = StringEnvVar::new( + var_name, + (default.to_string().as_str(), redacted, choices.as_str()).into(), + ); + + let value = match T::from_str(choice.as_str()) { Ok(var) => var, Err(error) => { - error!(error=%error, default=%default, choices=choices, "Invalid choice. Using Default."); + error!(error=%error, default=%default, choices=choices, choice=%choice, "Invalid choice. 
Using Default."); default }, }; diff --git a/justfile b/justfile index 6bfc8f19ff6..fa9c72f7c25 100644 --- a/justfile +++ b/justfile @@ -10,7 +10,17 @@ default: code_format: cd catalyst-gateway && cargo +nightly fmtfix -# Start the development cluster - linux/windows x86_64 +# Run cat-gateway natively on preprod run-cat-gateway: code_format - cd catalyst-gateway && cargo build -r - ./catalyst-gateway/target/release/cat-gateway run + cd catalyst-gateway && cargo update && cargo build -r + CHAIN_FOLLOWER_SYNC_TASKS="16" \ + RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ + CHAIN_NETWORK="Preprod" \ + ./catalyst-gateway/target/release/cat-gateway run --log-level debug + +# Run cat-gateway natively on mainnet +run-cat-gateway-mainnet: code_format + cd catalyst-gateway && cargo update && cargo build -r + CHAIN_FOLLOWER_SYNC_TASKS="1" \ + RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ + ./catalyst-gateway/target/release/cat-gateway run --log-level debug From 318060ad7026b52f514cdc22805422f73a29b575 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Mon, 19 Aug 2024 18:51:06 +0700 Subject: [PATCH 22/69] feat(backend): Make local test scylla db run with 4 nodes, not 1 --- utilities/local-cluster/justfile | 18 +- utilities/local-scylla/Readme.md | 165 ++++++ utilities/local-scylla/docker-compose.yml | 56 ++ utilities/local-scylla/justfile | 38 ++ utilities/local-scylla/node1-scylla.yaml | 625 ++++++++++++++++++++++ utilities/local-scylla/node2-scylla.yaml | 622 +++++++++++++++++++++ utilities/local-scylla/node3-scylla.yaml | 622 +++++++++++++++++++++ utilities/local-scylla/node4-scylla.yaml | 622 +++++++++++++++++++++ 8 files changed, 2756 insertions(+), 12 deletions(-) create mode 100644 utilities/local-scylla/Readme.md create mode 100644 utilities/local-scylla/docker-compose.yml create mode 100644 utilities/local-scylla/justfile create mode 100644 utilities/local-scylla/node1-scylla.yaml create mode 100644 utilities/local-scylla/node2-scylla.yaml create mode 100644 utilities/local-scylla/node3-scylla.yaml create mode 100644 utilities/local-scylla/node4-scylla.yaml diff --git a/utilities/local-cluster/justfile b/utilities/local-cluster/justfile index b037de7c4ef..0a202096dec 100644 --- a/utilities/local-cluster/justfile +++ b/utilities/local-cluster/justfile @@ -3,6 +3,12 @@ # Catalyst Voices Test Cluster basic Control export KUBECONFIG := "shared/k3s.yaml" +#set dotenv-required := true +#set dotenv-path := "./scylla/.env" +#set dotenv-load + +host_ip := `hostname -i | cut -d " " -f 1` + # cspell: words prereqs, commitlog default: @@ -54,15 +60,3 @@ ssh-into-agent99: get-all-logs: mkdir -p logs kail --log-level=error --since 6h > cluster.logs - -# Temporary local scylla dev DB until we can get it exposed from the cluster. -# TODO: Get the cluster scylla DB exposed on port 9042 of the cluster. 
-temp-scylla-dev-db:
-    mkdir -p /var/lib/scylla/data /var/lib/scylla/commitlog /var/lib/scylla/hints /var/lib/scylla/view_hints
-    # docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla:latest --developer-mode=1 --smp 8
-    docker run --rm --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla:latest --smp 8 --io-setup 1 --developer-mode=0
-    docker logs scylla-dev -f
-
-stop-temp-scylla-dev-db:
-    docker stop scylla-dev
-    docker rm scylla-dev
\ No newline at end of file
diff --git a/utilities/local-scylla/Readme.md b/utilities/local-scylla/Readme.md
new file mode 100644
index 00000000000..881b935e817
--- /dev/null
+++ b/utilities/local-scylla/Readme.md
@@ -0,0 +1,165 @@
+# Local Scylla Cluster
+
+This is a setup for a local Scylla cluster using Docker for local testing and development.
+
+## Prerequisites
+
+* [Just](https://github.com/casey/just)
+* [Docker](https://www.docker.com/)
+
+## Cluster Architecture
+
+The Cluster is based on Scylla's published docker images.
+
+The Cluster is 4 nodes, each with 2 cores and 1 GB of RAM.
+They are exposed on ports 9042-9045.
+
+## Starting the Scylla Cluster
+
+```sh
+just
+```
+
+## Getting Cluster Status and Metrics
+
+### Setup hosts on Windows
+
+On Windows, you need to set up the hosts before starting the cluster.
+From the Windows terminal, open the hosts file:
+
+```sh
+notepad %SystemRoot%\System32\drivers\etc\hosts
+```
+
+and copy the hosts from `./shared/extra.hosts` into the Windows host file.
+
+### Startup
+
+#### Linux/Windows
+
+From the root of the repo:
+
+```sh
+just start-cluster
+```
+
+#### macOS
+
+From the root of the repo:
+
+```sh
+just start-cluster-aarch64-macos
+```
+
+### Getting Basic Cluster details
+
+From the root of the repo:
+
+```sh
+just show-cluster
+```
+
+Note the report is **VERY** wide.
+Best viewed with a small terminal font.
+
+### Suspending the Cluster
+
+The cluster can be suspended to save local system resources, without tearing it down.
+
+```sh
+just suspend-cluster
+```
+
+### Resuming a suspended Cluster
+
+The suspended cluster can then be resumed with:
+
+```sh
+just resume-cluster
+```
+
+### Stopping the Cluster
+
+```sh
+just stop-cluster
+```
+
+## Catalyst Voices Services
+
+These services are not deployed by default.
+
+* [Catalyst Voices Frontend](http://voices.cluster.test/)
+  * [HTTPS](https://voices.cluster.test/)
+* [Catalyst Voices Backend](http://voices.cluster.test/api/)
+  * [HTTPS](https://voices.cluster.test/api/)
+* [Catalyst Voices Documentation](http://docs.voices.cluster.test/)
+  * [HTTPS](https://docs.voices.cluster.test/)
+
+### Deploying Catalyst Voices Frontend and Backend Services
+
+TODO.
+
+### Deploying Catalyst Voices Documentation Service
+
+From the root of the repo:
+
+1. Make sure the documentation is built, and its container pushed to the container repo:
+
+    ```sh
+    earthly --push ./docs+local
+    ```
+
+2. Deploy the Documentation Service:
+
+    ```sh
+    earthly ./utilities/local-cluster+deploy-docs
+    ```
+
+3. Stop the Documentation Service:
+
+    ```sh
+    earthly ./utilities/local-cluster+stop-docs
+    ```
+
+## Debugging the cluster
+
+### SSH into a running VM
+
+To SSH into a VM running the cluster, use `vagrant`:
+
+```sh
+vagrant ssh control
+```
+
+```sh
+vagrant ssh agent86
+```
+
+```sh
+vagrant ssh agent99
+```
+
+## Local UI to access ScyllaDB
+
+Found (and tested) a description of how to connect using only open-source tools via DBeaver:
+
+1.
Download DBeaver (Community Edition) +2. Download Cassandra JDBC jar files: + (Downloading and Testing the Driver Binaries section have links to binary and source) +3. extract Cassandra JDBC zip +4. run DBeaver +5. go to Database > Driver Manager +6. click New +7. Fill in details as follows: + * Driver Name: `Cassandra` (or whatever you want it to say) + * Driver Type: `Generic` + * Class Name: `com.dbschema.CassandraJdbcDriver` + * URL Template: `jdbc:cassandra://{host}[:{port}][/{database}]` + * Default Port: `9042` + * Embedded: `no` + * Category: + * Description: `Cassandra` (or whatever you want it to say) +8. click Add File and add all the jars in the Cassandra JDBC zip file. +9. click Find Class to make sure the Class Name is found okay +10. click OK +11. Create New Connection, selecting the database driver you just added diff --git a/utilities/local-scylla/docker-compose.yml b/utilities/local-scylla/docker-compose.yml new file mode 100644 index 00000000000..fdb237c914d --- /dev/null +++ b/utilities/local-scylla/docker-compose.yml @@ -0,0 +1,56 @@ +services: + scylla-node1: + container_name: scylla-node1 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9042:9042" + volumes: + - "/var/lib/scylla/1:/var/lib/scylla" + - "./node1-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + + scylla-node2: + container_name: scylla-node2 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9043:9043" + volumes: + - "/var/lib/scylla/2:/var/lib/scylla" + - "./node2-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + + scylla-node3: + container_name: scylla-node3 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9044:9044" + volumes: + - "/var/lib/scylla/3:/var/lib/scylla" + - "./node3-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + + scylla-node4: + container_name: scylla-node4 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9045:9045" + volumes: + - "/var/lib/scylla/4:/var/lib/scylla" + - "./node4-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + +networks: + cluster: + driver: bridge diff --git a/utilities/local-scylla/justfile b/utilities/local-scylla/justfile new file mode 100644 index 00000000000..ace8b8758c4 --- /dev/null +++ b/utilities/local-scylla/justfile @@ -0,0 +1,38 @@ +# use with https://github.com/casey/just +# + +host_ip := `hostname -i | cut -d " " -f 1` + +default: + @just --list --unsorted + +# Local scylla dev DB - Starts with pre-existing data. +scylla-dev-db: + HOST_IP="{{host_ip}}" \ + docker compose up + +# Reset the cluster storage and start a new dev scylla cluster +scylla-dev-db-reset: scylla-dev-db-purge scylla-dev-db + +# Run CQLSH on the dev Scylla cluster +scylla-dev-db-cqlsh: + docker run --rm -it scylladb/scylla-cqlsh `hostname` 9043 + +# Run Nodetool on the dev Scylla cluster to dump status info. 
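With the cluster from the compose file above running, the four nodes are reachable on `localhost` ports 9042-9045. As a quick connectivity check before reaching for the `justfile` recipes below, here is a minimal sketch using the `scylla` crate already in the workspace (plus `tokio`); treat it as an illustration, not part of the gateway:

```rust
use scylla::{Session, SessionBuilder};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Any one node is enough to discover the ring, but listing all
    // four lets the driver connect even if the first one is down.
    let session: Session = SessionBuilder::new()
        .known_nodes([
            "127.0.0.1:9042",
            "127.0.0.1:9043",
            "127.0.0.1:9044",
            "127.0.0.1:9045",
        ])
        .build()
        .await?;

    // A trivial query every Scylla/Cassandra node can answer.
    let version = session
        .query("SELECT release_version FROM system.local", ())
        .await?
        .single_row_typed::<(String,)>()?;
    println!("connected, release_version = {}", version.0);
    Ok(())
}
```

If this prints a version string, the `broadcast_rpc_address`/`HOST_IP` plumbing in the compose file is working.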
+scylla-dev-db-nodetool: + docker exec -it scylla-node1 nodetool status + docker exec -it scylla-node1 nodetool info + docker exec -it scylla-node1 nodetool tablestats + docker exec -it scylla-node1 nodetool sstableinfo + +# Shell into running node 1. +scylla-dev-db-shell: + docker exec -it scylla-node1 sh + +# Purge the storage used by the local test scylla cluster +scylla-dev-db-purge: + sudo rm -rf /var/lib/scylla/* + mkdir -p /var/lib/scylla/1/data /var/lib/scylla/1/commitlog /var/lib/scylla/1/hints /var/lib/scylla/1/view_hints + mkdir -p /var/lib/scylla/2/data /var/lib/scylla/2/commitlog /var/lib/scylla/2/hints /var/lib/scylla/2/view_hints + mkdir -p /var/lib/scylla/3/data /var/lib/scylla/3/commitlog /var/lib/scylla/3/hints /var/lib/scylla/3/view_hints + mkdir -p /var/lib/scylla/4/data /var/lib/scylla/4/commitlog /var/lib/scylla/4/hints /var/lib/scylla/4/view_hints diff --git a/utilities/local-scylla/node1-scylla.yaml b/utilities/local-scylla/node1-scylla.yaml new file mode 100644 index 00000000000..96a78e0cbaa --- /dev/null +++ b/utilities/local-scylla/node1-scylla.yaml @@ -0,0 +1,625 @@ +# Scylla storage config YAML + +####################################### +# This file is split to two sections: +# 1. Supported parameters +# 2. Unsupported parameters: reserved for future use or backwards +# compatibility. +# Scylla will only read and use the first segment +####################################### + +### Supported Parameters + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +# It is recommended to change the default value when creating a new cluster. +# You can NOT modify this value for an existing cluster +#cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +num_tokens: 256 + +# Directory where Scylla should store all its files, which are commitlog, +# data, hints, view_hints and saved_caches subdirectories. All of these +# subs can be overridden by the respective options below. +# If unset, the value defaults to /var/lib/scylla +# workdir: /var/lib/scylla + +# Directory where Scylla should store data on disk. +# data_file_directories: +# - /var/lib/scylla/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# commitlog_directory: /var/lib/scylla/commitlog + +# schema commit log. A special commitlog instance +# used for schema and system tables. +# When running on magnetic HDD, this should be a +# separate spindle than the data directories. +# schema_commitlog_directory: /var/lib/scylla/commitlog/schema + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Scylla won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. 
+commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# The size of the individual schema commitlog file segments. +# +# The default size is 128, which is 4 times larger than the default +# size of the data commitlog. It's because the segment size puts +# a limit on the mutation size that can be written at once, and some +# schema mutation writes are much larger than average. +schema_commitlog_segment_size_in_mb: 128 + +# seed_provider class_name is saved for future use. +# A seed address is mandatory. +seed_provider: + # The addresses of hosts that will serve as contact points for the joining node. + # It allows the node to discover the cluster ring topology on startup (when + # joining the cluster). + # Once the node has joined the cluster, the seed list has no function. + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # In a new cluster, provide the address of the first node. + # In an existing cluster, specify the address of at least one existing node. + # If you specify addresses of more than one node, use a comma to separate them. + # For example: ",," + - seeds: "127.0.0.1" + +# Address to bind to and tell other Scylla nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# If you leave broadcast_address (below) empty, then setting listen_address +# to 0.0.0.0 is wrong as other nodes will not know how to reach this node. +# If you set broadcast_address, then you can set listen_address to 0.0.0.0. +listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9042 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19042 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. 
+#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. +#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. 
it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. +# authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for +# authorizing permission management. Otherwise, it allows all. Use for upgrading +# clusters' auth. +# authorizer: AllowAllAuthorizer + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# RPC address to broadcast to drivers and other Scylla nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. 
+# broadcast_rpc_address: 1.2.3.4 + +# Uncomment to enable experimental features +# experimental_features: +# - udf +# - alternator-streams +# - broadcast-tables +# - keyspace-storage-options +# - tablets + +# The directory where hints files are stored if hinted handoff is enabled. +# hints_directory: /var/lib/scylla/hints + +# The directory where hints files are stored for materialized-view updates +# view_hints_directory: /var/lib/scylla/view_hints + +# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +# hinted_handoff_enabled: true + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +# max_hint_window_in_ms: 10800000 # 3 hours + + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 10000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 10000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this also must have +# a non-zero value. Defaults to 2000. It's recommended to set this value to +# be at least 3 times smaller than the permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Murmur3Partitioner is currently the only supported partitioner, +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Total space to use for commitlogs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. 
+# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. 
+# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. +# client_encryption_options: +# enabled: false +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +# internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +# inter_dc_tcp_nodelay: false + +# Relaxation of environment checks. +# +# Scylla places certain requirements on its environment. If these requirements are +# not met, performance and reliability can be degraded. +# +# These requirements include: +# - A filesystem with good support for asynchronous I/O (AIO). Currently, +# this means XFS. 
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Bypass in-memory data cache (the row cache) when performing reversed queries.
+# reversed_reads_auto_bypass_cache: false
+
+# Use a new optimized algorithm for performing reversed reads.
+# Set to `false` to fall back to the old algorithm.
+# enable_optimized_reversed_reads: true
+
+# Use a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <scylla's workdir>/cql.m,
+#   where <scylla's workdir> is a path defined by the workdir configuration option,
+# * <path>: the node will open the maintenance socket on the path <path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# This enables tablets on newly created keyspaces
+enable_tablets: true
+api_ui_dir: /opt/scylladb/swagger-ui/dist/
+api_doc_dir: /opt/scylladb/api/api-doc/
\ No newline at end of file
diff --git a/utilities/local-scylla/node2-scylla.yaml b/utilities/local-scylla/node2-scylla.yaml
new file mode 100644
index 00000000000..98e75fb478c
--- /dev/null
+++ b/utilities/local-scylla/node2-scylla.yaml
@@ -0,0 +1,622 @@
+# Scylla storage config YAML
+
+#######################################
+# This file is split into two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories.
All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# seed_provider class_name is saved for future use.
+# A seed address is mandatory.
+seed_provider:
+    # The addresses of hosts that will serve as contact points for the joining node.
+    # It allows the node to discover the cluster ring topology on startup (when
+    # joining the cluster).
+    # Once the node has joined the cluster, the seed list has no function.
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # In a new cluster, provide the address of the first node.
+          # In an existing cluster, specify the address of at least one existing node.
+          # If you specify addresses of more than one node, use a comma to separate them.
+          # For example: "<ip1>,<ip2>,<ip3>"
+          - seeds: "127.0.0.1"
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
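+#
+# For example, a node behind NAT might bind on all interfaces but advertise a
+# routable address (both addresses below are hypothetical placeholders):
+# listen_address: 0.0.0.0
+# broadcast_address: 203.0.113.10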
+listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9043 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19043 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. +#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. 
+# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. 
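+#
+# For example, to require username/password login instead of the open default
+# (a hypothetical hardening step; create the roles before relying on it):
+# authenticator: PasswordAuthenticator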
+# authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+# authorizing permission management. Otherwise, it allows all. Use for upgrading
+# clusters' auth.
+# authorizer: AllowAllAuthorizer
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# RPC address to broadcast to drivers and other Scylla nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+# broadcast_rpc_address: 1.2.3.4
+
+# Uncomment to enable experimental features
+# experimental_features:
+# - udf
+# - alternator-streams
+# - broadcast-tables
+# - keyspace-storage-options
+# - tablets
+
+# The directory where hints files are stored if hinted handoff is enabled.
+# hints_directory: /var/lib/scylla/hints
+
+# The directory where hints files are stored for materialized-view updates
+# view_hints_directory: /var/lib/scylla/view_hints
+
+# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# hinted_handoff_enabled: true
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+# max_hint_window_in_ms: 10800000 # 3 hours
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 10000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+# permissions_validity_in_ms: 10000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this also must have
+# a non-zero value. Defaults to 2000. It's recommended to set this value to
+# be at least 3 times smaller than the permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Murmur3Partitioner is currently the only supported partitioner,
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Total space to use for commitlogs.
+# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. 
The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. +# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. 
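+# If enabled, clients must also connect over TLS; with cqlsh, for example, this
+# typically means passing --ssl (assuming the certificate below is trusted client-side).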
+# client_encryption_options:
+# enabled: false
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+# internode_compression: none
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+# inter_dc_tcp_nodelay: false
+
+# Relaxation of environment checks.
+#
+# Scylla places certain requirements on its environment. If these requirements are
+# not met, performance and reliability can be degraded.
+#
+# These requirements include:
+# - A filesystem with good support for asynchronous I/O (AIO). Currently,
+# this means XFS.
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Bypass in-memory data cache (the row cache) when performing reversed queries.
+# reversed_reads_auto_bypass_cache: false
+
+# Use a new optimized algorithm for performing reversed reads.
+# Set to `false` to fall back to the old algorithm.
+# enable_optimized_reversed_reads: true
+
+# Use a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <scylla's workdir>/cql.m,
+#   where <scylla's workdir> is a path defined by the workdir configuration option,
+# * <path>: the node will open the maintenance socket on the path <path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# This enables tablets on newly created keyspaces
+enable_tablets: true
+api_ui_dir: /opt/scylladb/swagger-ui/dist/
+api_doc_dir: /opt/scylladb/api/api-doc/
diff --git a/utilities/local-scylla/node3-scylla.yaml b/utilities/local-scylla/node3-scylla.yaml
new file mode 100644
index 00000000000..d5742ec8542
--- /dev/null
+++ b/utilities/local-scylla/node3-scylla.yaml
@@ -0,0 +1,622 @@
+# Scylla storage config YAML
+
+#######################################
+# This file is split into two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories. All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# seed_provider class_name is saved for future use.
+# A seed address is mandatory.
+seed_provider:
+    # The addresses of hosts that will serve as contact points for the joining node.
+    # It allows the node to discover the cluster ring topology on startup (when
+    # joining the cluster).
+    # Once the node has joined the cluster, the seed list has no function.
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # In a new cluster, provide the address of the first node.
+          # In an existing cluster, specify the address of at least one existing node.
+          # If you specify addresses of more than one node, use a comma to separate them.
+          # For example: "<ip1>,<ip2>,<ip3>"
+          - seeds: "127.0.0.1"
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
+listen_address: localhost
+
+# Address to broadcast to other Scylla nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# When using multiple physical network interfaces, set this to true to listen on broadcast_address
+# in addition to the listen_address, allowing nodes to communicate in both interfaces.
+# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2.
+#
+# listen_on_broadcast_address: false
+
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# To disable the CQL native transport, remove this option and configure native_transport_port_ssl.
+native_transport_port: 9044
+
+# Like native_transport_port, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+native_shard_aware_transport_port: 19044
+
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+#native_transport_port_ssl: 9142
+
+# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
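+#
+# Note: because this node uses the non-default CQL port 9044 (above), clients
+# must pass the port explicitly. A hypothetical smoke test from the host,
+# assuming cqlsh is installed:
+#
+#   cqlsh 127.0.0.1 9044
+#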
+#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. 
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address: localhost
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# port for REST API server
+api_port: 10000
+
+# IP for the REST API server
+api_address: 127.0.0.1
+
+# Log WARN on any batch size exceeding this value. 128 kiB per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 128
+
+# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 1024
+
+# Authentication backend, identifying users
+# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair
+# to authenticate in the same manner as PasswordAuthenticator, but improper credentials
+# result in being logged in as an anonymous user. Use for upgrading clusters' auth.
+# authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+# authorizing permission management. Otherwise, it allows all. Use for upgrading
+# clusters' auth.
+# authorizer: AllowAllAuthorizer
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# RPC address to broadcast to drivers and other Scylla nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+# broadcast_rpc_address: 1.2.3.4
+
+# Uncomment to enable experimental features
+# experimental_features:
+# - udf
+# - alternator-streams
+# - broadcast-tables
+# - keyspace-storage-options
+# - tablets
+
+# The directory where hints files are stored if hinted handoff is enabled.
+# hints_directory: /var/lib/scylla/hints + +# The directory where hints files are stored for materialized-view updates +# view_hints_directory: /var/lib/scylla/view_hints + +# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +# hinted_handoff_enabled: true + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +# max_hint_window_in_ms: 10800000 # 3 hours + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 10000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 10000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this also must have +# a non-zero value. Defaults to 2000. It's recommended to set this value to +# be at least 3 times smaller than the permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Murmur3Partitioner is currently the only supported partitioner, +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Total space to use for commitlogs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. 
Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. 
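+#
+# For example, to disable promoted index auto-scaling as described above:
+# column_index_auto_scale_threshold_in_kb: 0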
+# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. +# client_encryption_options: +# enabled: false +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +# internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +# inter_dc_tcp_nodelay: false + +# Relaxation of environment checks. +# +# Scylla places certain requirements on its environment. If these requirements are +# not met, performance and reliability can be degraded. +# +# These requirements include: +# - A filesystem with good support for asynchronous I/O (AIO). Currently, +# this means XFS. 
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Bypass in-memory data cache (the row cache) when performing reversed queries.
+# reversed_reads_auto_bypass_cache: false
+
+# Use a new optimized algorithm for performing reversed reads.
+# Set to `false` to fall back to the old algorithm.
+# enable_optimized_reversed_reads: true
+
+# Use a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for the maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <workdir>/cql.m,
+# where <workdir> is a path defined by the workdir configuration option,
+# * <socket path>: the node will open the maintenance socket on the path <socket path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# This enables tablets on newly created keyspaces
+enable_tablets: true
+api_ui_dir: /opt/scylladb/swagger-ui/dist/
+api_doc_dir: /opt/scylladb/api/api-doc/
diff --git a/utilities/local-scylla/node4-scylla.yaml b/utilities/local-scylla/node4-scylla.yaml
new file mode 100644
index 00000000000..9e502036a9f
--- /dev/null
+++ b/utilities/local-scylla/node4-scylla.yaml
@@ -0,0 +1,622 @@
+# Scylla storage config YAML
+
+#######################################
+# This file is split to two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories. All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# seed_provider class_name is saved for future use.
+# A seed address is mandatory.
+seed_provider:
+    # The addresses of hosts that will serve as contact points for the joining node.
+    # It allows the node to discover the cluster ring topology on startup (when
+    # joining the cluster).
+    # Once the node has joined the cluster, the seed list has no function.
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # In a new cluster, provide the address of the first node.
+          # In an existing cluster, specify the address of at least one existing node.
+          # If you specify addresses of more than one node, use a comma to separate them.
+          # For example: "<ip1>,<ip2>,<ip3>"
+          - seeds: "127.0.0.1"
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
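+#
+# Illustrative sketch only (these addresses are hypothetical and not part of
+# this local cluster): a node on a private LAN that must also be reachable
+# via a routed address could pair the two options like so:
+#
+# listen_address: 10.0.0.5
+# broadcast_address: 203.0.113.7
+#
+# This local development node simply binds to localhost: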
+listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9045 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19045 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. +#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. 
+# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. 
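+#
+# For example (a hypothetical hardening of this development node, not what is
+# configured below), password authentication could be required with:
+#
+# authenticator: PasswordAuthenticator
+#
+# The commented-out line below shows the default, which performs no checks: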
+# authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+# authorizing permission management. Otherwise, it allows all. Use for upgrading
+# clusters' auth.
+# authorizer: AllowAllAuthorizer
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# RPC address to broadcast to drivers and other Scylla nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+# broadcast_rpc_address: 1.2.3.4
+
+# Uncomment to enable experimental features
+# experimental_features:
+# - udf
+# - alternator-streams
+# - broadcast-tables
+# - keyspace-storage-options
+# - tablets
+
+# The directory where hints files are stored if hinted handoff is enabled.
+# hints_directory: /var/lib/scylla/hints
+
+# The directory where hints files are stored for materialized-view updates
+# view_hints_directory: /var/lib/scylla/view_hints
+
+# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# hinted_handoff_enabled: true
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+# max_hint_window_in_ms: 10800000 # 3 hours
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 10000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+# permissions_validity_in_ms: 10000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this also must have
+# a non-zero value. Defaults to 2000. It's recommended to set this value to
+# be at least 3 times smaller than the permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Murmur3Partitioner is currently the only supported partitioner,
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Total space to use for commitlogs.
+# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. 
The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. +# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. 
+# client_encryption_options:
+# enabled: false
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+# internode_compression: none
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+# inter_dc_tcp_nodelay: false
+
+# Relaxation of environment checks.
+#
+# Scylla places certain requirements on its environment. If these requirements are
+# not met, performance and reliability can be degraded.
+#
+# These requirements include:
+# - A filesystem with good support for asynchronous I/O (AIO). Currently,
+# this means XFS.
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Bypass in-memory data cache (the row cache) when performing reversed queries.
+# reversed_reads_auto_bypass_cache: false
+
+# Use a new optimized algorithm for performing reversed reads.
+# Set to `false` to fall back to the old algorithm.
+# enable_optimized_reversed_reads: true
+
+# Use a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for the maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <workdir>/cql.m,
+# where <workdir> is a path defined by the workdir configuration option,
+# * <socket path>: the node will open the maintenance socket on the path <socket path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are: +# 1) SimpleStrategy, +# 2) NetworkTopologyStrategy, +# 3) LocalStrategy, +# 4) EverywhereStrategy +# +# replication_strategy_warn_list: +# - SimpleStrategy +# replication_strategy_fail_list: + +# This enables tablets on newly created keyspaces +enable_tablets: true +api_ui_dir: /opt/scylladb/swagger-ui/dist/ +api_doc_dir: /opt/scylladb/api/api-doc/ From 2f7f893ade07b23609c30f3dcedbc2c7a159397b Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Tue, 27 Aug 2024 15:15:00 +0700 Subject: [PATCH 23/69] fix(backend-lib): Add stop for cassandra db cluster --- utilities/local-scylla/justfile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utilities/local-scylla/justfile b/utilities/local-scylla/justfile index ace8b8758c4..3abf7ccc131 100644 --- a/utilities/local-scylla/justfile +++ b/utilities/local-scylla/justfile @@ -11,6 +11,11 @@ scylla-dev-db: HOST_IP="{{host_ip}}" \ docker compose up +# Stop the scylla development DB +scylla-dev-db-stop: + HOST_IP="{{host_ip}}" \ + docker compose down + # Reset the cluster storage and start a new dev scylla cluster scylla-dev-db-reset: scylla-dev-db-purge scylla-dev-db From def54cac38a930323bc7197b8d1be3fff368354c Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 5 Sep 2024 21:56:54 +0700 Subject: [PATCH 24/69] refactor(backend-lib): Remove c509-certificate because its moved to catalyst-libs --- .../c509-certificate/.cargo/config.toml | 93 ---- .../c509-certificate/.config/nextest.toml | 49 -- .../c509-certificate/.gitignore | 11 - .../c509-certificate/Cargo.toml | 80 --- .../c509-certificate/Earthfile | 31 -- .../c509-certificate/clippy.toml | 1 - .../c509-certificate/deny.toml | 117 ---- .../examples/cli/data/cert_sample_1.json | 29 - .../c509-certificate/examples/cli/main.rs | 338 ------------ .../c509-certificate/examples/web/index.html | 14 - .../c509-certificate/examples/web/index.js | 69 --- .../c509-certificate/rust-toolchain.toml | 3 - .../c509-certificate/rustfmt.toml | 68 --- .../c509-certificate/src/c509.rs | 62 --- .../src/c509_algo_identifier.rs | 92 ---- .../src/c509_attributes/attribute.rs | 257 --------- .../src/c509_attributes/data.rs | 88 --- .../src/c509_attributes/mod.rs | 128 ----- .../c509-certificate/src/c509_big_uint.rs | 95 ---- .../src/c509_extensions/alt_name.rs | 161 ------ .../src/c509_extensions/extension/data.rs | 115 ---- .../src/c509_extensions/extension/mod.rs | 344 ------------ .../src/c509_extensions/mod.rs | 225 -------- .../src/c509_general_names/data.rs | 132 ----- .../src/c509_general_names/general_name.rs | 330 ----------- .../src/c509_general_names/mod.rs | 168 ------ .../other_name_hw_module.rs | 55 -- .../src/c509_issuer_sig_algo/data.rs | 82 --- .../src/c509_issuer_sig_algo/mod.rs | 178 ------ .../c509-certificate/src/c509_name/mod.rs | 521 ------------------ .../c509-certificate/src/c509_name/rdn.rs | 171 ------ .../c509-certificate/src/c509_oid.rs | 243 -------- .../src/c509_subject_pub_key_algo/data.rs | 75 --- .../src/c509_subject_pub_key_algo/mod.rs | 178 ------ .../c509-certificate/src/c509_time.rs | 101 ---- .../c509-certificate/src/lib.rs | 134 ----- .../c509-certificate/src/signing.rs | 227 -------- .../c509-certificate/src/tables.rs | 78 --- .../c509-certificate/src/tbs_cert.rs | 477 ---------------- .../c509-certificate/src/wasm_binding.rs | 76 --- 40 files changed, 5696 deletions(-) delete mode 100644 catalyst-gateway-crates/c509-certificate/.cargo/config.toml delete mode 100644 
catalyst-gateway-crates/c509-certificate/.config/nextest.toml delete mode 100644 catalyst-gateway-crates/c509-certificate/.gitignore delete mode 100644 catalyst-gateway-crates/c509-certificate/Cargo.toml delete mode 100644 catalyst-gateway-crates/c509-certificate/Earthfile delete mode 100644 catalyst-gateway-crates/c509-certificate/clippy.toml delete mode 100644 catalyst-gateway-crates/c509-certificate/deny.toml delete mode 100644 catalyst-gateway-crates/c509-certificate/examples/cli/data/cert_sample_1.json delete mode 100644 catalyst-gateway-crates/c509-certificate/examples/cli/main.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/examples/web/index.html delete mode 100644 catalyst-gateway-crates/c509-certificate/examples/web/index.js delete mode 100644 catalyst-gateway-crates/c509-certificate/rust-toolchain.toml delete mode 100644 catalyst-gateway-crates/c509-certificate/rustfmt.toml delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_algo_identifier.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_attributes/attribute.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_attributes/data.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_attributes/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_big_uint.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_extensions/alt_name.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/data.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_extensions/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_general_names/data.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_general_names/general_name.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_general_names/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_general_names/other_name_hw_module.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/data.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_name/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_name/rdn.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_oid.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/data.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/mod.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/c509_time.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/lib.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/signing.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/tables.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/tbs_cert.rs delete mode 100644 catalyst-gateway-crates/c509-certificate/src/wasm_binding.rs diff --git a/catalyst-gateway-crates/c509-certificate/.cargo/config.toml b/catalyst-gateway-crates/c509-certificate/.cargo/config.toml deleted file mode 100644 index 2764f1df4e2..00000000000 --- a/catalyst-gateway-crates/c509-certificate/.cargo/config.toml +++ /dev/null @@ -1,93 +0,0 @@ -# Use MOLD 
linker where possible, but ONLY in CI applicable targets.
-
-# Configure how Docker container targets build.
-
-# If you want to customize these targets for a local build, then customize them in your:
-# $CARGO_HOME/config.toml
-# NOT in the project itself.
-# These targets are ONLY the targets used by CI and inside docker builds.
-
-# DO NOT remove `"-C", "target-feature=+crt-static"` from the rustflags for these targets.
-
-# Should be the default to have fully static rust programs in CI
-[target.x86_64-unknown-linux-musl]
-linker = "clang"
-rustflags = [
-    "-C", "link-arg=-fuse-ld=/usr/bin/mold",
-    "-C", "target-feature=-crt-static"
-]
-
-# Should be the default to have fully static rust programs in CI
-[target.aarch64-unknown-linux-musl]
-linker = "clang"
-rustflags = [
-    "-C", "link-arg=-fuse-ld=/usr/bin/mold",
-    "-C", "target-feature=-crt-static"
-]
-
-[build]
-rustflags = []
-rustdocflags = [
-    "--enable-index-page",
-    "-Z",
-    "unstable-options",
-]
-
-[profile.dev]
-opt-level = 1
-debug = true
-debug-assertions = true
-overflow-checks = true
-lto = false
-panic = "unwind"
-incremental = true
-codegen-units = 256
-
-[profile.release]
-opt-level = 3
-debug = false
-debug-assertions = false
-overflow-checks = false
-lto = "thin"
-panic = "unwind"
-incremental = false
-codegen-units = 16
-
-[profile.test]
-opt-level = 3
-debug = true
-lto = false
-debug-assertions = true
-incremental = true
-codegen-units = 256
-
-[profile.bench]
-opt-level = 3
-debug = false
-debug-assertions = false
-overflow-checks = false
-lto = "thin"
-incremental = false
-codegen-units = 16
-
-[alias]
-lint = "clippy --all-targets"
-lintfix = "clippy --all-targets --fix --allow-dirty"
-lint-vscode = "clippy --message-format=json-diagnostic-rendered-ansi --all-targets"
-
-docs = "doc --release --no-deps --document-private-items --bins --lib --examples"
-# nightly docs build broken... when they aren't we can enable these docs... --unit-graph --timings=html,json -Z unstable-options"
-testunit = "nextest run --release --bins --lib --tests --benches --no-fail-fast -P ci"
-testcov = "llvm-cov nextest --release --bins --lib --tests --benches --no-fail-fast -P ci"
-testdocs = "test --doc --release"
-
-# Rust formatting, MUST be run with +nightly
-fmtchk = "fmt -- --check -v --color=always"
-fmtfix = "fmt -- -v"
-
-[term]
-quiet = false            # whether cargo output is quiet
-verbose = false          # whether cargo provides verbose output
-color = "auto"           # whether cargo colorizes output use `CARGO_TERM_COLOR="off"` to disable.
-progress.when = "never"  # whether cargo shows progress bar
-progress.width = 80      # width of progress bar
\ No newline at end of file
diff --git a/catalyst-gateway-crates/c509-certificate/.config/nextest.toml b/catalyst-gateway-crates/c509-certificate/.config/nextest.toml
deleted file mode 100644
index be3673830bb..00000000000
--- a/catalyst-gateway-crates/c509-certificate/.config/nextest.toml
+++ /dev/null
@@ -1,49 +0,0 @@
-# cspell: words scrollability testcase
-[store]
-# The directory under the workspace root at which nextest-related files are
-# written. Profile-specific storage is currently written to dir/<profile-name>.
-# dir = "target/nextest"
-
-[profile.default]
-# Print out output for failing tests as soon as they fail, and also at the end
-# of the run (for easy scrollability).
-failure-output = "immediate-final"
-
-# Cancel the test run on the first failure.
-fail-fast = true
-
-status-level = "all"
-final-status-level = "all"
-
-[profile.ci]
-# Print out output for failing tests as soon as they fail, and also at the end
-# of the run (for easy scrollability).
-failure-output = "immediate-final"
-# Do not cancel the test run on the first failure.
-fail-fast = false
-
-status-level = "all"
-final-status-level = "all"
-
-
-[profile.ci.junit]
-# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
-# If unspecified, JUnit is not written out.
-
-path = "junit.xml"
-
-# The name of the top-level "report" element in JUnit report. If aggregating
-# reports across different test runs, it may be useful to provide separate names
-# for each report.
-report-name = "nextest"
-
-# Whether standard output and standard error for passing tests should be stored in the JUnit report.
-# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
-store-success-output = true
-
-# Whether standard output and standard error for failing tests should be stored in the JUnit report.
-# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
-#
-# Note that if a description can be extracted from the output, it is always stored in the
-# <description> element.
-store-failure-output = true
diff --git a/catalyst-gateway-crates/c509-certificate/.gitignore b/catalyst-gateway-crates/c509-certificate/.gitignore
deleted file mode 100644
index c1b3e696893..00000000000
--- a/catalyst-gateway-crates/c509-certificate/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-### Rust ###
-# Generated by Cargo
-# will have compiled files and executables
-debug/
-target/
-
-# These are backup files generated by rustfmt
-**/*.rs.bk
-
-# MSVC Windows builds of rustc generate these, which store debugging information
-*.pdb
\ No newline at end of file
diff --git a/catalyst-gateway-crates/c509-certificate/Cargo.toml b/catalyst-gateway-crates/c509-certificate/Cargo.toml
deleted file mode 100644
index 935b29235d2..00000000000
--- a/catalyst-gateway-crates/c509-certificate/Cargo.toml
+++ /dev/null
@@ -1,80 +0,0 @@
-[package]
-name = "c509-certificate"
-description = "C509 certificate implementation"
-keywords = ["cardano", "catalyst", "c509 certificate", "certificate", "x509"]
-version = "0.0.1"
-authors = [
-    "Arissara Chotivichit "
-]
-homepage = "https://input-output-hk.github.io/catalyst-voices"
-repository = "https://github.com/input-output-hk/catalyst-voices"
-license = "MIT OR Apache-2.0"
-edition = "2021"
-
-[lib]
-crate-type = ["cdylib", "rlib"]
-
-[lints.rust]
-warnings = "deny"
-missing_docs = "deny"
-let_underscore_drop = "deny"
-non_ascii_idents = "deny"
-single_use_lifetimes = "deny"
-trivial_casts = "deny"
-trivial_numeric_casts = "deny"
-
-[lints.rustdoc]
-broken_intra_doc_links = "deny"
-invalid_codeblock_attributes = "deny"
-invalid_html_tags = "deny"
-invalid_rust_codeblocks = "deny"
-bare_urls = "deny"
-unescaped_backticks = "deny"
-
-[lints.clippy]
-pedantic = { level = "deny", priority = -1 }
-unwrap_used = "deny"
-expect_used = "deny"
-exit = "deny"
-get_unwrap = "deny"
-index_refutable_slice = "deny"
-indexing_slicing = "deny"
-match_on_vec_items = "deny"
-match_wild_err_arm = "deny"
-missing_panics_doc = "deny"
-panic = "deny"
-string_slice = "deny"
-unchecked_duration_subtraction = "deny"
-unreachable = "deny"
-missing_docs_in_private_items = "deny"
-
-[dependencies]
-minicbor = { version = "0.24", features = ["std"] }
-hex = "0.4.3"
-oid = "0.2.1"
-oid-registry = "0.7.0"
-asn1-rs = "0.6.0"
-anyhow = "1.0.86"
-bimap = "0.6.3"
-once_cell = "1.19.0"
-strum = "0.26.3"
-strum_macros = "0.26.3"
-regex = "1.10.5" -ed25519-dalek = { version = "2.1.1", features = ["pem"] } -thiserror = "1.0.56" -serde = { version = "1.0.204", features = ["derive"] } -wasm-bindgen = "0.2.92" -serde-wasm-bindgen = "0.6.5" - -[package.metadata.cargo-machete] -ignored = ["strum"] - -[dev-dependencies] -clap = { version = "4.5.9", features = ["derive"] } -serde_json = "1.0.120" -rand = "0.8.5" -chrono = "0.4.38" - -[[example]] -name = "c509" -path = "examples/cli/main.rs" diff --git a/catalyst-gateway-crates/c509-certificate/Earthfile b/catalyst-gateway-crates/c509-certificate/Earthfile deleted file mode 100644 index 9509b7671b3..00000000000 --- a/catalyst-gateway-crates/c509-certificate/Earthfile +++ /dev/null @@ -1,31 +0,0 @@ -VERSION 0.8 - -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci - -# builder : Set up our target toolchains, and copy our files. -builder: - DO rust-ci+SETUP - - COPY --dir .cargo .config Cargo.* clippy.toml deny.toml rustfmt.toml src examples . - -# check : Run basic check. -check: - FROM +builder - - DO rust-ci+EXECUTE --cmd="/scripts/std_checks.py" - -# build : Build the C509 library -build: - FROM +builder - - DO rust-ci+EXECUTE \ - --cmd="/scripts/std_build.py" \ - --args1="--libs=c509-certificate" - - RUN cargo install wasm-pack --version=0.12.1 --locked - -# js-wasm-package-locally : Generate the wasm package and save it locally -js-wasm-package-locally: - FROM +build - RUN wasm-pack build --target web - SAVE ARTIFACT ./pkg AS LOCAL ./pkg diff --git a/catalyst-gateway-crates/c509-certificate/clippy.toml b/catalyst-gateway-crates/c509-certificate/clippy.toml deleted file mode 100644 index 6933b816419..00000000000 --- a/catalyst-gateway-crates/c509-certificate/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -allow-expect-in-tests = true diff --git a/catalyst-gateway-crates/c509-certificate/deny.toml b/catalyst-gateway-crates/c509-certificate/deny.toml deleted file mode 100644 index 5455931b26c..00000000000 --- a/catalyst-gateway-crates/c509-certificate/deny.toml +++ /dev/null @@ -1,117 +0,0 @@ -# cspell: words msvc, wasip, RUSTSEC, rustls, libssh, reqwest, tinyvec, Leay, webpki - -[graph] -# cargo-deny is really only ever intended to run on the "normal" tier-1 targets -targets = [ - "x86_64-unknown-linux-gnu", - "aarch64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "aarch64-apple-darwin", - "x86_64-apple-darwin", - "x86_64-pc-windows-msvc", - "wasm32-unknown-unknown", - "wasm32-wasip1", - "wasm32-wasip2", -] - -[advisories] -version = 2 -ignore = [ - { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." }, - { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don't use a custom global allocator." 
}, -] - -[bans] -multiple-versions = "warn" -wildcards = 'deny' -deny = [ - # { crate = "git2", use-instead = "gix" }, - { crate = "openssl", use-instead = "rustls" }, - { crate = "openssl-sys", use-instead = "rustls" }, - "libssh2-sys", - # { crate = "cmake", use-instead = "cc" }, - # { crate = "windows", reason = "bloated and unnecessary", use-instead = "ideally inline bindings, practically, windows-sys" }, -] -skip = [ - # { crate = "bitflags@1.3.2", reason = "https://github.com/seanmonstar/reqwest/pull/2130 should be in the next version" }, - # { crate = "winnow@0.5.40", reason = "gix 0.59 was yanked, see https://github.com/Byron/gitoxide/issues/1309" }, - # { crate = "heck@0.4.1", reason = "strum_macros uses this old version" }, - # { crate = "base64@0.21.7", reason = "gix-transport pulls in this old version, as well as a newer version via reqwest" }, - # { crate = "byte-array-literalsase64@0.21.7", reason = "gix-transport pulls in this old version, as well as a newer version via reqwest" }, -] -skip-tree = [ - { crate = "windows-sys@0.48.0", reason = "a foundational crate for many that bumps far too frequently to ever have a shared version" }, -] - -[sources] -unknown-registry = "deny" -unknown-git = "deny" - -# List of URLs for allowed Git repositories -allow-git = [ - "https://github.com/input-output-hk/hermes.git", - "https://github.com/input-output-hk/catalyst-pallas.git", - "https://github.com/bytecodealliance/wasmtime", - "https://github.com/aldanor/hdf5-rust", -] - -[licenses] -version = 2 -# Don't warn if a listed license isn't found -unused-allowed-license="allow" -# We want really high confidence when inferring licenses from text -confidence-threshold = 0.93 -allow = [ - "MIT", - "Apache-2.0", - "Unicode-DFS-2016", - "BSD-3-Clause", - "BSD-2-Clause", - "BlueOak-1.0.0", - "Apache-2.0 WITH LLVM-exception", - "CC0-1.0", - "ISC", - "Unicode-3.0", - "MPL-2.0", -] -exceptions = [ - #{ allow = ["Zlib"], crate = "tinyvec" }, - #{ allow = ["Unicode-DFS-2016"], crate = "unicode-ident" }, - #{ allow = ["OpenSSL"], crate = "ring" }, -] - -[[licenses.clarify]] -crate = "byte-array-literals" -expression = "Apache-2.0 WITH LLVM-exception" -license-files = [{ path = "../../../LICENSE", hash = 0x001c7e6c }] - -[[licenses.clarify]] -crate = "hdf5-src" -expression = "MIT" -license-files = [{ path = "../LICENSE-MIT", hash = 0x001c7e6c }] - -[[licenses.clarify]] -crate = "ring" -expression = "MIT" -license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] - -# SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses -# https://spdx.org/licenses/OpenSSL.html -# ISC - Both BoringSSL and ring use this for their new files -# MIT - "Files in third_party/ have their own licenses, as described therein. The MIT -# license, for third_party/fiat, which, unlike other third_party directories, is -# compiled into non-test libraries, is included below." 
-# OpenSSL - Obviously
-#expression = "ISC AND MIT AND OpenSSL"
-#license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }]
-
-#[[licenses.clarify]]
-#crate = "webpki"
-#expression = "ISC"
-#license-files = [{ path = "LICENSE", hash = 0x001c7e6c }]
-
-# Actually "ISC-style"
-#[[licenses.clarify]]
-#crate = "rustls-webpki"
-#expression = "ISC"
-#license-files = [{ path = "LICENSE", hash = 0x001c7e6c }]
\ No newline at end of file
diff --git a/catalyst-gateway-crates/c509-certificate/examples/cli/data/cert_sample_1.json b/catalyst-gateway-crates/c509-certificate/examples/cli/data/cert_sample_1.json
deleted file mode 100644
index ca41ddabfab..00000000000
--- a/catalyst-gateway-crates/c509-certificate/examples/cli/data/cert_sample_1.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-    "self_signed": true,
-    "c509_certificate_type": 0,
-    "certificate_serial_number": 128269,
-    "issuer": [
-        {
-            "oid": "2.5.4.3",
-            "value": [{ "text": "RFC test CA" }]
-        }
-    ],
-    "validity_not_before": null,
-    "validity_not_after": null,
-    "subject": [
-        {
-            "oid": "2.5.4.3",
-            "value": [{ "text": "01-23-45-FF-FE-67-89-AB" }]
-        }
-    ],
-    "subject_public_key_algorithm": null,
-    "subject_public_key": "examples/cli/key/public_key.pem",
-    "extensions": [
-        {
-            "oid": "2.5.29.15",
-            "value": { "int": 1 },
-            "critical": false
-        }
-    ],
-    "issuer_signature_algorithm": null
-}
diff --git a/catalyst-gateway-crates/c509-certificate/examples/cli/main.rs b/catalyst-gateway-crates/c509-certificate/examples/cli/main.rs
deleted file mode 100644
index f344200f000..00000000000
--- a/catalyst-gateway-crates/c509-certificate/examples/cli/main.rs
+++ /dev/null
@@ -1,338 +0,0 @@
-//! C509 certificate CLI
-
-use std::{
-    fs::{self, File},
-    io::Write,
-    path::PathBuf,
-};
-
-use asn1_rs::{oid, Oid};
-use c509_certificate::{
-    c509_big_uint::UnwrappedBigUint,
-    c509_extensions::Extensions,
-    c509_issuer_sig_algo::IssuerSignatureAlgorithm,
-    c509_name::{rdn::RelativeDistinguishedName, Name, NameValue},
-    c509_subject_pub_key_algo::SubjectPubKeyAlgorithm,
-    c509_time::Time,
-    signing::{PrivateKey, PublicKey},
-    tbs_cert::TbsCert,
-};
-use chrono::{DateTime, Utc};
-use clap::Parser;
-use hex::ToHex;
-use minicbor::Decode;
-use rand::Rng;
-use serde::{Deserialize, Serialize};
-
-/// Commands for C509 certificate generation, verification and decoding
-#[derive(Parser)]
-#[command(version, about, long_about = None)]
-enum Cli {
-    /// Generate C509 certificate, if private key is provided, self-signed certificate
-    /// will be generated.
-    Generate {
-        /// JSON file with information to create C509 certificate.
-        #[clap(short = 'f', long)]
-        json_file: PathBuf,
-        /// Optional output path that the generated C509 will be written to.
-        #[clap(short, long)]
-        output: Option<PathBuf>,
-        /// Optional private key file, if provided, self-signed certificate will be
-        /// generated. Currently support only PEM format.
-        #[clap(long)]
-        private_key: Option<PathBuf>,
-        #[clap(long)]
-        /// Optional key type.
-        key_type: Option<String>,
-    },
-
-    /// C509 certificate signature verification.
-    Verify {
-        /// C509 certificate file
-        #[clap(short, long)]
-        file: PathBuf,
-        /// Public key file. Currently support only PEM format.
-        #[clap(long)]
-        public_key: PathBuf,
-    },
-
-    /// Decode C509 certificate back to JSON.
-    Decode {
-        /// C509 certificate file.
-        #[clap(short, long)]
-        file: PathBuf,
-        /// Optional output path of C509 certificate information in JSON format.
-        #[clap(short, long)]
-        output: Option<PathBuf>,
-    },
-}
-
-impl Cli {
-    /// Function to execute the commands.
-    pub(crate) fn exec() -> anyhow::Result<()> {
-        let cli = Cli::parse();
-
-        match cli {
-            Cli::Generate {
-                json_file,
-                output,
-                private_key,
-                key_type,
-            } => {
-                let sk = match private_key {
-                    Some(key) => Some(PrivateKey::from_file(key)?),
-                    None => None,
-                };
-
-                generate(&json_file, output, sk.as_ref(), &key_type)
-            },
-            Cli::Verify { file, public_key } => verify(&file, public_key),
-            Cli::Decode { file, output } => decode(&file, output),
-        }
-    }
-}
-
-/// A struct representing the JSON format of C509 certificate.
-#[derive(Deserialize, Serialize)]
-struct C509Json {
-    /// Indicate whether the certificate is self-signed.
-    self_signed: bool,
-    /// Optional certificate type, if not provided, set to 0 as self-signed.
-    certificate_type: Option<u8>,
-    /// Optional serial number of the certificate,
-    /// if not provided, a random number will be generated.
-    serial_number: Option<UnwrappedBigUint>,
-    /// Optional issuer of the certificate,
-    /// if not provided, issuer is the same as subject.
-    issuer: Option<RelativeDistinguishedName>,
-    /// Optional validity not before date,
-    /// if not provided, set to current time.
-    validity_not_before: Option<String>,
-    /// Optional validity not after date,
-    /// if not provided, set to no expire date 9999-12-31T23:59:59+00:00.
-    validity_not_after: Option<String>,
-    /// Relative distinguished name of the subject.
-    subject: RelativeDistinguishedName,
-    /// Optional subject public key algorithm of the certificate,
-    /// if not provided, set to Ed25519.
-    subject_public_key_algorithm: Option<SubjectPubKeyAlgorithm>,
-    /// A path to the public key file.
-    /// Currently support only PEM format.
-    subject_public_key: String,
-    /// Extensions of the certificate.
-    extensions: Extensions,
-    /// Optional issuer signature algorithm of the certificate,
-    /// if not provided, set to Ed25519.
-    issuer_signature_algorithm: Option<IssuerSignatureAlgorithm>,
-    /// Optional issuer signature value of the certificate.
-    #[serde(skip_deserializing)]
-    issuer_signature_value: Option<Vec<u8>>,
-}
-
-/// Ed25519 oid and parameter - default algorithm.
-const ED25519: (Oid, Option<String>) = (oid!(1.3.101 .112), None);
-
-/// Integer indicating that certificate is self-signed.
-/// 0 for Natively Signed C509 Certificate following X.509 v3
-/// 1 for CBOR re-encoding of X.509 v3 Certificate
-const SELF_SIGNED_INT: u8 = 0;
-
-// -------------------generate-----------------------
-
-/// A function to generate C509 certificate.
-fn generate(
-    file: &PathBuf, output: Option<PathBuf>, private_key: Option<&PrivateKey>,
-    key_type: &Option<String>,
-) -> anyhow::Result<()> {
-    let data = fs::read_to_string(file)?;
-    let c509_json: C509Json = serde_json::from_str(&data)?;
-
-    validate_certificate_type(c509_json.self_signed, c509_json.certificate_type)?;
-
-    let serial_number = parse_serial_number(c509_json.serial_number);
-
-    let issuer = determine_issuer(
-        c509_json.self_signed,
-        c509_json.issuer,
-        c509_json.subject.clone(),
-    )?;
-
-    // Parse validity dates or use defaults
-    // Now for not_before date
-    let not_before = parse_or_default_date(c509_json.validity_not_before, Utc::now().timestamp())?;
-    // Default to the expire date for not_after
-    // Expire date = 9999-12-31T23:59:59+00:00 as mentioned in the C509 document
-    let not_after = parse_or_default_date(
-        c509_json.validity_not_after,
-        parse_or_default_date(Some("9999-12-31T23:59:59+00:00".to_string()), 0)?,
-    )?;
-
-    let public_key = parse_public_key(&c509_json.subject_public_key)?;
-
-    let key_type = get_key_type(key_type)?;
-
-    // Create TbsCert instance
-    let tbs = TbsCert::new(
-        c509_json.certificate_type.unwrap_or(SELF_SIGNED_INT),
-        serial_number,
-        Name::new(NameValue::RelativeDistinguishedName(issuer)),
-        Time::new(not_before),
-        Time::new(not_after),
-        Name::new(NameValue::RelativeDistinguishedName(c509_json.subject)),
-        c509_json
-            .subject_public_key_algorithm
-            .unwrap_or(SubjectPubKeyAlgorithm::new(key_type.0.clone(), key_type.1)),
-        public_key.to_bytes(),
-        c509_json.extensions.clone(),
-        c509_json
-            .issuer_signature_algorithm
-            .unwrap_or(IssuerSignatureAlgorithm::new(key_type.0, ED25519.1)),
-    );
-
-    let cert = c509_certificate::generate(&tbs, private_key)?;
-
-    // If the output path is provided, write to the file
-    if let Some(output) = output {
-        write_to_output_file(output, &cert)?;
-    };
-
-    println!("Hex: {:?}", hex::encode(&cert));
-    println!("Bytes: {:?}", &cert);
-
-    Ok(())
-}
-
-/// Write data to a file given an output path.
-fn write_to_output_file(output: PathBuf, data: &[u8]) -> anyhow::Result<()> {
-    let mut file = File::create(output).map_err(|e| anyhow::anyhow!(e))?;
-    file.write_all(data).map_err(|e| anyhow::anyhow!(e))?;
-    Ok(())
-}
-
-/// Determine the issuer of the certificate.
-/// If self-signed is true, the issuer is the same as the subject.
-/// Otherwise, the issuer must be present.
-fn determine_issuer(
-    self_signed: bool, issuer: Option<RelativeDistinguishedName>,
-    subject: RelativeDistinguishedName,
-) -> anyhow::Result<RelativeDistinguishedName> {
-    if self_signed {
-        Ok(subject)
-    } else {
-        issuer.ok_or_else(|| anyhow::anyhow!("Issuer must be present if self-signed is false"))
-    }
-}
-
-/// Validate the certificate type.
-fn validate_certificate_type(
-    self_signed: bool, certificate_type: Option<u8>,
-) -> anyhow::Result<()> {
-    if self_signed && certificate_type.unwrap_or(SELF_SIGNED_INT) != SELF_SIGNED_INT {
-        return Err(anyhow::anyhow!(
-            "Certificate type must be 0 if self-signed is true"
-        ));
-    }
-    Ok(())
-}
-
-/// Parse a public key from a file path.
-fn parse_public_key(public_key: &str) -> anyhow::Result<PublicKey> {
-    let pk_path = PathBuf::from(public_key);
-    PublicKey::from_file(pk_path)
-}
-
-/// Get the key type. Currently supports only Ed25519.
-fn get_key_type(key_type: &Option<String>) -> anyhow::Result<(Oid<'static>, Option<String>)> {
-    match key_type.as_deref() {
-        Some("ed25519") | None => Ok(ED25519),
-        Some(_) => Err(anyhow::anyhow!("Currently only Ed25519 is supported")),
-    }
-}
-
-/// Parse a date string to i64. 
-fn parse_or_default_date(date_option: Option, default: i64) -> Result { - match date_option { - Some(date) => { - DateTime::parse_from_rfc3339(&date) - .map(|dt| dt.timestamp()) - .map_err(|e| anyhow::anyhow!(format!("Failed to parse date {date}: {e}",))) - }, - None => Ok(default), - } -} - -/// Generate random serial number if not provided -fn parse_serial_number(serial_number: Option) -> UnwrappedBigUint { - let random_number: u64 = rand::thread_rng().gen(); - serial_number.unwrap_or(UnwrappedBigUint::new(random_number)) -} - -// -------------------verify----------------------- - -/// Verify the signature of the certificate given public key file path. -fn verify(file: &PathBuf, public_key: PathBuf) -> anyhow::Result<()> { - let cert = fs::read(file)?; - let pk = PublicKey::from_file(public_key)?; - match c509_certificate::verify(&cert, &pk) { - Ok(()) => println!("Signature verified!"), - Err(e) => println!("Signature verification failed: {e}"), - }; - Ok(()) -} - -// -------------------decode----------------------- - -/// Decode the certificate to JSON. -fn decode(file: &PathBuf, output: Option) -> anyhow::Result<()> { - let cert = fs::read(file)?; - let mut d = minicbor::Decoder::new(&cert); - let c509 = c509_certificate::c509::C509::decode(&mut d, &mut ())?; - - let tbs_cert = c509.get_tbs_cert(); - let is_self_signed = tbs_cert.get_c509_certificate_type() == SELF_SIGNED_INT; - let c509_json = C509Json { - self_signed: is_self_signed, - certificate_type: Some(tbs_cert.get_c509_certificate_type()), - serial_number: Some(tbs_cert.get_certificate_serial_number().clone()), - issuer: Some(extract_relative_distinguished_name(tbs_cert.get_issuer())?), - validity_not_before: Some(time_to_string(tbs_cert.get_validity_not_before().to_i64())?), - validity_not_after: Some(time_to_string(tbs_cert.get_validity_not_after().to_i64())?), - subject: extract_relative_distinguished_name(tbs_cert.get_subject())?, - subject_public_key_algorithm: Some(tbs_cert.get_subject_public_key_algorithm().clone()), - // Return a hex formation of the public key - subject_public_key: tbs_cert.get_subject_public_key().encode_hex(), - extensions: tbs_cert.get_extensions().clone(), - issuer_signature_algorithm: Some(tbs_cert.get_issuer_signature_algorithm().clone()), - issuer_signature_value: c509.get_issuer_signature_value().clone(), - }; - - let data = serde_json::to_string(&c509_json)?; - // If the output path is provided, write to the file - if let Some(output) = output { - write_to_output_file(output, data.as_bytes())?; - }; - - println!("{data}"); - Ok(()) -} - -/// Extract a `RelativeDistinguishedName` from a `Name`. -fn extract_relative_distinguished_name(name: &Name) -> anyhow::Result { - match name.get_value() { - NameValue::RelativeDistinguishedName(rdn) => Ok(rdn.clone()), - _ => Err(anyhow::anyhow!("Expected RelativeDistinguishedName")), - } -} - -/// Convert time in i64 to string. -fn time_to_string(time: i64) -> anyhow::Result { - let datetime = - DateTime::from_timestamp(time, 0).ok_or_else(|| anyhow::anyhow!("Invalid timestamp"))?; - Ok(datetime.to_rfc3339()) -} - -// -------------------main----------------------- - -fn main() -> anyhow::Result<()> { - Cli::exec() -} diff --git a/catalyst-gateway-crates/c509-certificate/examples/web/index.html b/catalyst-gateway-crates/c509-certificate/examples/web/index.html deleted file mode 100644 index 7de6ca92294..00000000000 --- a/catalyst-gateway-crates/c509-certificate/examples/web/index.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - C509 certificate! 
- - - - - - - \ No newline at end of file diff --git a/catalyst-gateway-crates/c509-certificate/examples/web/index.js b/catalyst-gateway-crates/c509-certificate/examples/web/index.js deleted file mode 100644 index dd37a6f83f1..00000000000 --- a/catalyst-gateway-crates/c509-certificate/examples/web/index.js +++ /dev/null @@ -1,69 +0,0 @@ -// Testing the wasm binding JS functions. - -import init, { - generate, - verify, - decode, - PublicKey, - PrivateKey, -} from "../../pkg/c509_certificate.js"; - -const pem_sk = ` ------BEGIN PRIVATE KEY----- -MC4CAQAwBQYDK2VwBCIEIP1iI3LF7h89yY6QZmhDp4Y5FmTQ4oasbz2lEiaqqTzV ------END PRIVATE KEY----- -`; - -const pem_pk = ` ------BEGIN PUBLIC KEY----- -MCowBQYDK2VwAyEAtFuCleJwHS28jUCT+ulLl5c1+MXhehhDz2SimOhmWaI= ------END PUBLIC KEY----- -`; - -const tbs = { - c509_certificate_type: 0, - certificate_serial_number: 1000000n, - issuer: { - relative_distinguished_name: [ - { - oid: "2.5.4.3", - value: [{ text: "RFC test CA" }], - }, - ], - }, - validity_not_before: 1_672_531_200n, - validity_not_after: 1_767_225_600n, - subject: { text: "01-23-45-ff-fe-67-89-AB" }, - subject_public_key_algorithm: { - oid: "1.3.101.112", - }, - subject_public_key: [], - extensions: [ - { - oid: "2.5.29.19", - value: { int: -2n }, - critical: false, - }, - ], - issuer_signature_algorithm: { - oid: "1.3.101.112", - }, -}; - -async function run() { - await init(); - - let sk = PrivateKey.str_to_sk(pem_sk); - let pk = PublicKey.str_to_pk(pem_pk); - - // Call the generate with private key to create a signed version - let c509 = generate(tbs, sk); - console.log(c509); - // Verify the generated C509 with the public key - console.log(verify(c509, pk)); - // Decode the generated C509 back to readable format - let decoded_c509 = decode(c509); - console.log(decoded_c509.tbs_cert); -} - -run(); diff --git a/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml b/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml deleted file mode 100644 index 20a42f2a9f7..00000000000 --- a/catalyst-gateway-crates/c509-certificate/rust-toolchain.toml +++ /dev/null @@ -1,3 +0,0 @@ -[toolchain] -channel = "1.80" -profile = "default" \ No newline at end of file diff --git a/catalyst-gateway-crates/c509-certificate/rustfmt.toml b/catalyst-gateway-crates/c509-certificate/rustfmt.toml deleted file mode 100644 index 1a0573b222b..00000000000 --- a/catalyst-gateway-crates/c509-certificate/rustfmt.toml +++ /dev/null @@ -1,68 +0,0 @@ -# Enable unstable features: -# * imports_indent -# * imports_layout -# * imports_granularity -# * group_imports -# * reorder_impl_items -# * trailing_comma -# * where_single_line -# * wrap_comments -# * comment_width -# * blank_lines_upper_bound -# * condense_wildcard_suffixes -# * force_multiline_blocks -# * format_code_in_doc_comments -# * format_generated_files -# * hex_literal_case -# * inline_attribute_width -# * normalize_comments -# * normalize_doc_attributes -# * overflow_delimited_expr -unstable_features = true - -# Compatibility: -edition = "2021" - -# Tabs & spaces - Defaults, listed for clarity -tab_spaces = 4 -hard_tabs = false - -# Commas. -trailing_comma = "Vertical" -match_block_trailing_comma = true - -# General width constraints. -max_width = 100 - -# Comments: -normalize_comments = true -normalize_doc_attributes = true -wrap_comments = true -comment_width = 90 # small excess is okay but prefer 80 -format_code_in_doc_comments = true -format_generated_files = false - -# Imports. 
-imports_indent = "Block"
-imports_layout = "Mixed"
-group_imports = "StdExternalCrate"
-reorder_imports = true
-imports_granularity = "Crate"
-
-# Arguments:
-use_small_heuristics = "Default"
-fn_params_layout = "Compressed"
-overflow_delimited_expr = true
-where_single_line = true
-
-# Misc:
-inline_attribute_width = 0
-blank_lines_upper_bound = 1
-reorder_impl_items = true
-use_field_init_shorthand = true
-force_multiline_blocks = true
-condense_wildcard_suffixes = true
-hex_literal_case = "Upper"
-
-# Ignored files:
-ignore = []
\ No newline at end of file
diff --git a/catalyst-gateway-crates/c509-certificate/src/c509.rs b/catalyst-gateway-crates/c509-certificate/src/c509.rs
deleted file mode 100644
index cde35592bf2..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/c509.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-//! C509 Certificate
-
-use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder};
-use serde::{Deserialize, Serialize};
-
-use crate::tbs_cert::TbsCert;
-
-#[derive(Deserialize, Serialize)]
-/// A struct representing the `C509` Certificate.
-pub struct C509 {
-    /// A TBS Certificate.
-    tbs_cert: TbsCert,
-    /// An optional `IssuerSignatureValue` of the C509 Certificate.
-    issuer_signature_value: Option<Vec<u8>>,
-}
-
-impl C509 {
-    /// Create a new instance of C509 Certificate.
-    #[must_use]
-    pub fn new(tbs_cert: TbsCert, issuer_signature_value: Option<Vec<u8>>) -> Self {
-        Self {
-            tbs_cert,
-            issuer_signature_value,
-        }
-    }
-
-    /// Get the `TBSCertificate` of the C509 Certificate.
-    #[must_use]
-    pub fn get_tbs_cert(&self) -> &TbsCert {
-        &self.tbs_cert
-    }
-
-    /// Get the `IssuerSignatureValue` of the C509 Certificate.
-    #[must_use]
-    pub fn get_issuer_signature_value(&self) -> &Option<Vec<u8>> {
-        &self.issuer_signature_value
-    }
-}
-
-impl Encode<()> for C509 {
-    fn encode<W: Write>(
-        &self, e: &mut Encoder<W>, ctx: &mut (),
-    ) -> Result<(), minicbor::encode::Error<W::Error>> {
-        self.tbs_cert.encode(e, ctx)?;
-        match self.issuer_signature_value {
-            Some(ref value) => e.bytes(value)?,
-            None => e.null()?,
-        };
-        Ok(())
-    }
-}
-
-impl Decode<'_, ()> for C509 {
-    fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result<Self, minicbor::decode::Error> {
-        let tbs_cert = TbsCert::decode(d, ctx)?;
-        let issuer_signature_value = match d.datatype()? {
-            minicbor::data::Type::Bytes => Some(d.bytes()?.to_vec()),
-            _ => None,
-        };
-        Ok(Self::new(tbs_cert, issuer_signature_value))
-    }
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_algo_identifier.rs b/catalyst-gateway-crates/c509-certificate/src/c509_algo_identifier.rs
deleted file mode 100644
index b11153c8a9d..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/c509_algo_identifier.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-//! C509 Algorithm Identifier
-//!
-//! This module handles the `AlgorithmIdentifier` type where the OID does not fall into
-//! the table.
-//!
-//! ```cddl
-//! AlgorithmIdentifier = int / ~oid / [ algorithm: ~oid, parameters: bytes ]
-//! ```
-//!
-//! **Note** `AlgorithmIdentifier` values that have the same OID with different parameters
-//! are not implemented yet.
-//!
-//! For more information about `AlgorithmIdentifier`,
-//! visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/)
-
-use asn1_rs::Oid;
-use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder};
-use serde::{Deserialize, Serialize};
-
-use crate::c509_oid::C509oid;
-
-/// A struct representing the `AlgorithmIdentifier` type. 
-#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct AlgorithmIdentifier { - /// A `C509oid` - oid: C509oid, - /// An optional parameter string - param: Option, -} - -impl AlgorithmIdentifier { - /// Create new instance of `AlgorithmIdentifier`. - #[must_use] - pub fn new(oid: Oid<'static>, param: Option) -> Self { - Self { - oid: C509oid::new(oid), - param, - } - } - - /// Get the OID. - pub(crate) fn get_oid(&self) -> Oid<'static> { - self.oid.clone().get_oid() - } - - /// Get the parameter. - pub(crate) fn get_param(&self) -> &Option { - &self.param - } -} - -impl Encode<()> for AlgorithmIdentifier { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - match &self.param { - // [ algorithm: ~oid, parameters: bytes ] - Some(p) => { - e.array(2)?; - self.oid.encode(e, ctx)?; - e.bytes(p.as_bytes())?; - }, - // ~oid - None => { - self.oid.encode(e, ctx)?; - }, - } - Ok(()) - } -} - -impl Decode<'_, ()> for AlgorithmIdentifier { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - // [ algorithm: ~oid, parameters: bytes ] - if d.datatype()? == minicbor::data::Type::Array { - let len = d.array()?.ok_or(minicbor::decode::Error::message( - "Failed to get array length", - ))?; - if len != 2 { - return Err(minicbor::decode::Error::message("Array length must be 2")); - } - let c509_oid = C509oid::decode(d, ctx)?; - let param = - String::from_utf8(d.bytes()?.to_vec()).map_err(minicbor::decode::Error::message)?; - Ok(AlgorithmIdentifier::new(c509_oid.get_oid(), Some(param))) - // ~oid - } else { - let oid = C509oid::decode(d, ctx)?; - Ok(AlgorithmIdentifier::new(oid.get_oid(), None)) - } - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_attributes/attribute.rs b/catalyst-gateway-crates/c509-certificate/src/c509_attributes/attribute.rs deleted file mode 100644 index 507a5decb0a..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_attributes/attribute.rs +++ /dev/null @@ -1,257 +0,0 @@ -//! C509 Attribute -//! -//! ```cddl -//! Attribute = ( attributeType: int, attributeValue: text ) // -//! ( attributeType: ~oid, attributeValue: bytes ) // -//! ( attributeType: pen, attributeValue: bytes ) -//! ``` -//! -//! For more information about Attribute, -//! visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -use std::str::FromStr; - -use asn1_rs::Oid; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Deserializer, Serialize}; - -use super::data::{get_oid_from_int, ATTRIBUTES_LOOKUP}; -use crate::c509_oid::{C509oid, C509oidRegistered}; - -/// A struct of C509 `Attribute` -#[derive(Debug, Clone, PartialEq)] -pub struct Attribute { - /// A registered OID of C509 `Attribute`. - registered_oid: C509oidRegistered, - /// A flag to indicate whether the value can have multiple value. - multi_value: bool, - /// A value of C509 `Attribute` can be a vector of text or bytes. - value: Vec, -} - -impl Attribute { - /// Create a new instance of `Attribute`. - #[must_use] - pub fn new(oid: Oid<'static>) -> Self { - Self { - registered_oid: C509oidRegistered::new(oid, ATTRIBUTES_LOOKUP.get_int_to_oid_table()), - multi_value: false, - value: Vec::new(), - } - } - - /// Add a value to `Attribute`. - pub fn add_value(&mut self, value: AttributeValue) { - self.value.push(value); - } - - /// Get the registered OID of `Attribute`. 
- pub(crate) fn get_registered_oid(&self) -> &C509oidRegistered { - &self.registered_oid - } - - /// Get the value of `Attribute`. - pub(crate) fn get_value(&self) -> &Vec { - &self.value - } - - /// Set whether `Attribute` can be PEN encoded. - pub(crate) fn set_pen_supported(self) -> Self { - Self { - registered_oid: self.registered_oid.pen_encoded(), - multi_value: self.multi_value, - value: self.value, - } - } - - /// Set whether `Attribute` can have multiple value. - pub(crate) fn set_multi_value(mut self) -> Self { - self.multi_value = true; - self - } -} - -/// A helper struct for deserialize and serialize `Attribute`. -#[derive(Debug, Deserialize, Serialize)] -struct Helper { - /// An OID value in string. - oid: String, - /// A value of C509 `Attribute` can be a vector of text or bytes. - value: Vec, -} - -impl<'de> Deserialize<'de> for Attribute { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> { - let helper = Helper::deserialize(deserializer)?; - let oid = - Oid::from_str(&helper.oid).map_err(|e| serde::de::Error::custom(format!("{e:?}")))?; - let mut attr = Attribute::new(oid); - for value in helper.value { - attr.add_value(value); - } - Ok(attr) - } -} - -impl Serialize for Attribute { - fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - let helper = Helper { - oid: self.registered_oid.get_c509_oid().get_oid().to_string(), - value: self.value.clone(), - }; - helper.serialize(serializer) - } -} - -impl Encode<()> for Attribute { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - // Encode CBOR int if available - if let Some(&oid) = self - .registered_oid - .get_table() - .get_map() - .get_by_right(&self.registered_oid.get_c509_oid().get_oid()) - { - e.i16(oid)?; - } else { - // Encode unwrapped CBOR OID or CBOR PEN - self.registered_oid.get_c509_oid().encode(e, ctx)?; - } - - // Check if the attribute value is empty - if self.value.is_empty() { - return Err(minicbor::encode::Error::message("Attribute value is empty")); - } - - // If multi-value attributes, encode it as array - if self.multi_value { - e.array(self.value.len() as u64)?; - } - - // Encode each value in the attribute - for value in &self.value { - value.encode(e, ctx)?; - } - - Ok(()) - } -} - -impl Decode<'_, ()> for Attribute { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - // Handle CBOR int - let mut attr = if d.datatype()? == minicbor::data::Type::U8 { - let i = d.i16()?; - let oid = get_oid_from_int(i).map_err(minicbor::decode::Error::message)?; - Attribute::new(oid.clone()) - } else { - // Handle unwrapped CBOR OID or CBOR PEN - let c509_oid: C509oid = d.decode()?; - Attribute::new(c509_oid.get_oid()) - }; - - // Handle attribute value - if d.datatype()? == minicbor::data::Type::Array { - // When multi-value attribute - let len = d.array()?.ok_or_else(|| { - minicbor::decode::Error::message("Failed to get array length for attribute value") - })?; - - if len == 0 { - return Err(minicbor::decode::Error::message("Attribute value is empty")); - } - - for _ in 0..len { - attr.add_value(AttributeValue::decode(d, ctx)?); - } - attr = attr.set_multi_value(); - } else { - let value = AttributeValue::decode(d, ctx)?; - attr.add_value(value); - } - Ok(attr) - } -} - -// ------------------AttributeValue---------------------- - -/// An enum of possible value types for `Attribute`. 
-#[allow(clippy::module_name_repetitions)] -#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Deserialize, Serialize)] -#[serde(rename_all = "snake_case")] -pub enum AttributeValue { - /// A text string. - Text(String), - /// A byte vector. - Bytes(Vec), -} - -impl Encode<()> for AttributeValue { - fn encode( - &self, e: &mut Encoder, _ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - match self { - AttributeValue::Text(text) => e.str(text)?, - AttributeValue::Bytes(bytes) => e.bytes(bytes)?, - }; - Ok(()) - } -} - -impl Decode<'_, ()> for AttributeValue { - fn decode(d: &mut Decoder<'_>, _ctx: &mut ()) -> Result { - match d.datatype()? { - minicbor::data::Type::String => Ok(AttributeValue::Text(d.str()?.to_string())), - minicbor::data::Type::Bytes => Ok(AttributeValue::Bytes(d.bytes()?.to_vec())), - _ => { - Err(minicbor::decode::Error::message( - "Invalid AttributeValue, value should be either String or Bytes", - )) - }, - } - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_attribute { - use asn1_rs::oid; - - use super::*; - - #[test] - fn encode_decode_attribute_int() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let mut attribute = Attribute::new(oid!(1.2.840 .113549 .1 .9 .1)); - attribute.add_value(AttributeValue::Text("example@example.com".to_string())); - attribute - .encode(&mut encoder, &mut ()) - .expect("Failed to encode Attribute"); - // Email Address example@example.com: 0x00736578616d706c65406578616d706c652e636f6d - assert_eq!( - hex::encode(buffer.clone()), - "00736578616d706c65406578616d706c652e636f6d" - ); - - let mut decoder = Decoder::new(&buffer); - let attribute_decoded = - Attribute::decode(&mut decoder, &mut ()).expect("Failed to decode Attribute"); - assert_eq!(attribute_decoded, attribute); - } - - #[test] - fn empty_attribute_value() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let attribute = Attribute::new(oid!(1.2.840 .113549 .1 .9 .1)); - attribute - .encode(&mut encoder, &mut ()) - .expect_err("Failed to encode Attribute"); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_attributes/data.rs b/catalyst-gateway-crates/c509-certificate/src/c509_attributes/data.rs deleted file mode 100644 index b89fb429cb4..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_attributes/data.rs +++ /dev/null @@ -1,88 +0,0 @@ -//! Attribute data provides a necessary information for encoding and decoding of C509 -//! Attribute. See [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) -//! Section 9.3 C509 Attributes Registry for more information. 
-
-use anyhow::Error;
-use asn1_rs::{oid, Oid};
-use once_cell::sync::Lazy;
-
-use crate::tables::IntegerToOidTable;
-
-/// Type of `Attribute` data
-/// Int | OID | Name
-type AttributeDataTuple = (i16, Oid<'static>, &'static str);
-
-/// `Attribute` data table
-#[rustfmt::skip]
-const ATTRIBUTE_DATA: [AttributeDataTuple; 30] = [
-    // Int | OID | Name
-    (0, oid!(1.2.840.113549.1.9.1), "Email Address"),
-    (1, oid!(2.5.4.3), "Common Name"),
-    (2, oid!(2.5.4.4), "Surname"),
-    (3, oid!(2.5.4.5), "Serial Number"),
-    (4, oid!(2.5.4.6), "Country"),
-    (5, oid!(2.5.4.7), "Locality"),
-    (6, oid!(2.5.4.8), "State or Province"),
-    (7, oid!(2.5.4.9), "Street Address"),
-    (8, oid!(2.5.4.10), "Organization"),
-    (9, oid!(2.5.4.11), "Organizational Unit"),
-    (10, oid!(2.5.4.12), "Title"),
-    (11, oid!(2.5.4.15), "Business Category"),
-    (12, oid!(2.5.4.17), "Postal Code"),
-    (13, oid!(2.5.4.42), "Given Name"),
-    (14, oid!(2.5.4.43), "Initials"),
-    (15, oid!(2.5.4.44), "Generation Qualifier"),
-    (16, oid!(2.5.4.46), "DN Qualifier"),
-    (17, oid!(2.5.4.65), "Pseudonym"),
-    (18, oid!(2.5.4.97), "Organization Identifier"),
-    (19, oid!(1.3.6.1.4.1.311.60.2.1.1), "Inc. Locality"),
-    (20, oid!(1.3.6.1.4.1.311.60.2.1.2), "Inc. State or Province"),
-    (21, oid!(1.3.6.1.4.1.311.60.2.1.3), "Inc. Country"),
-    (22, oid!(0.9.2342.19200300.100.1.25), "Domain Component"),
-    (23, oid!(2.5.4.16), "Postal Address"),
-    (24, oid!(2.5.4.41), "Name"),
-    (25, oid!(2.5.4.20), "Telephone Number"),
-    (26, oid!(2.5.4.54), "Directory Management Domain Name"),
-    (27, oid!(0.9.2342.19200300.100.1.1), "userid"),
-    (28, oid!(1.2.840.113549.1.9.2), "Unstructured Name"),
-    (29, oid!(1.2.840.113549.1.9.8), "Unstructured Address"),
-];
-
-/// A struct of data that contains lookup tables for `Attribute`.
-pub(crate) struct AttributeData {
-    /// A table of integer to OID, providing a bidirectional lookup.
-    int_to_oid_table: IntegerToOidTable,
-}
-
-impl AttributeData {
-    /// Get the `IntegerToOidTable`.
-    pub(crate) fn get_int_to_oid_table(&self) -> &IntegerToOidTable {
-        &self.int_to_oid_table
-    }
-}
-
-/// Define static lookup for attributes table
-static ATTRIBUTES_TABLES: Lazy<AttributeData> = Lazy::new(|| {
-    let mut int_to_oid_table = IntegerToOidTable::new();
-
-    for data in ATTRIBUTE_DATA {
-        int_to_oid_table.add(data.0, data.1);
-    }
-
-    AttributeData { int_to_oid_table }
-});
-
-/// Static reference to the `AttributeData` lookup table.
-pub(crate) static ATTRIBUTES_LOOKUP: &Lazy<AttributeData> = &ATTRIBUTES_TABLES;
-
-/// Get the OID from the int value.
-pub(crate) fn get_oid_from_int(i: i16) -> Result<Oid<'static>, Error> {
-    ATTRIBUTES_TABLES
-        .get_int_to_oid_table()
-        .get_map()
-        .get_by_left(&i)
-        .ok_or(Error::msg(format!(
-            "OID int not found in the attribute registry table given {i}"
-        )))
-        .cloned()
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_attributes/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_attributes/mod.rs
deleted file mode 100644
index 30fa1684ae4..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/c509_attributes/mod.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-//! C509 `Attributes` containing `Attribute`
-//!
-//! ```cddl
-//! Attributes = ( attributeType: int, attributeValue: [+text] ) //
-//!              ( attributeType: ~oid, attributeValue: [+bytes] )
-//! ```
-//!
-//! Use case:
-//! ```cddl
-//! SubjectDirectoryAttributes = [+Attributes]
-//! ```
-//!
-//! For more information about `Attributes`,
-//! 
visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -use attribute::Attribute; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; - -pub mod attribute; -mod data; - -/// A struct of C509 `Attributes` containing a vector of `Attribute`. -#[derive(Debug, Clone, PartialEq)] -pub struct Attributes(Vec); - -impl Default for Attributes { - fn default() -> Self { - Self::new() - } -} - -impl Attributes { - /// Create a new instance of `Attributes` as empty vector. - #[must_use] - pub fn new() -> Self { - Self(Vec::new()) - } - - /// Add an `Attribute` to the `Attributes`. - /// and set `Attribute` value to support multiple value. - pub fn add_attr(&mut self, attribute: Attribute) { - self.0.push(attribute.set_multi_value()); - } -} - -impl Encode<()> for Attributes { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - if self.0.is_empty() { - return Err(minicbor::encode::Error::message( - "Attributes should not be empty", - )); - } - e.array(self.0.len() as u64)?; - for attribute in &self.0 { - attribute.encode(e, ctx)?; - } - Ok(()) - } -} - -impl Decode<'_, ()> for Attributes { - fn decode(d: &mut Decoder<'_>, _ctx: &mut ()) -> Result { - let len = d - .array()? - .ok_or_else(|| minicbor::decode::Error::message("Failed to get array length"))?; - if len == 0 { - return Err(minicbor::decode::Error::message("Attributes is empty")); - } - - let mut attributes = Attributes::new(); - - for _ in 0..len { - let attribute = Attribute::decode(d, &mut ())?; - attributes.add_attr(attribute); - } - - Ok(attributes) - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_attributes { - use asn1_rs::oid; - use attribute::AttributeValue; - - use super::*; - - #[test] - fn encode_decode_attributes_int() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let mut attr = Attribute::new(oid!(1.2.840 .113549 .1 .9 .1)); - attr.add_value(AttributeValue::Text("example@example.com".to_string())); - attr.add_value(AttributeValue::Text("example@example.com".to_string())); - let mut attributes = Attributes::new(); - attributes.add_attr(attr); - attributes - .encode(&mut encoder, &mut ()) - .expect("Failed to encode Attributes"); - // 1 Attribute value (array len 1): 0x81 - // Email Address: 0x00 - // Attribute value (array len 2): 0x82 - // example@example.com: 0x736578616d706c65406578616d706c652e636f6d - assert_eq!( - hex::encode(buffer.clone()), - "810082736578616d706c65406578616d706c652e636f6d736578616d706c65406578616d706c652e636f6d" - ); - - let mut decoder = Decoder::new(&buffer); - let attribute_decoded = - Attributes::decode(&mut decoder, &mut ()).expect("Failed to decode Attributes"); - assert_eq!(attribute_decoded, attributes); - } - - #[test] - fn empty_attributes() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let attributes = Attributes::new(); - attributes - .encode(&mut encoder, &mut ()) - .expect_err("Failed to encode Attributes"); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_big_uint.rs b/catalyst-gateway-crates/c509-certificate/src/c509_big_uint.rs deleted file mode 100644 index 8d844e2e8ad..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_big_uint.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! C509 Unwrapped CBOR Unsigned Bignum (~biguint) -//! -//! Please refer to [CDDL Wrapping](https://datatracker.ietf.org/doc/html/rfc8610#section-3.7) -//! for unwrapped types. 
-
-// cspell: words Bignum bignum biguint
-
-use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder};
-use serde::{Deserialize, Serialize};
-
-/// A struct representing an unwrapped CBOR unsigned bignum.
-#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
-pub struct UnwrappedBigUint(u64);
-
-impl UnwrappedBigUint {
-    /// Create a new instance of `UnwrappedBigUint`.
-    #[must_use]
-    pub fn new(uint: u64) -> Self {
-        Self(uint)
-    }
-}
-
-impl Encode<()> for UnwrappedBigUint {
-    fn encode<W: Write>(
-        &self, e: &mut Encoder<W>, _ctx: &mut (),
-    ) -> Result<(), minicbor::encode::Error<W::Error>> {
-        let bytes = self.0.to_be_bytes();
-        // Trim leading zeros
-        let significant_bytes = bytes
-            .iter()
-            .skip_while(|&&b| b == 0)
-            .copied()
-            .collect::<Vec<u8>>();
-
-        e.bytes(&significant_bytes)?;
-        Ok(())
-    }
-}
-
-impl Decode<'_, ()> for UnwrappedBigUint {
-    fn decode(d: &mut Decoder<'_>, _ctx: &mut ()) -> Result<Self, minicbor::decode::Error> {
-        // Turn bytes into u64
-        let b = d
-            .bytes()?
-            .iter()
-            .fold(0, |acc, &b| (acc << 8) | u64::from(b));
-        Ok(UnwrappedBigUint::new(b))
-    }
-}
-
-#[cfg(test)]
-mod test_big_uint {
-
-    use super::*;
-
-    // Test reference https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/
-    // A.1. Example RFC 7925 profiled X.509 Certificate
-    #[test]
-    fn test_encode_decode() {
-        let mut buffer = Vec::new();
-        let mut encoder = minicbor::Encoder::new(&mut buffer);
-        // Serial Number: 128269 (0x1f50d)
-        let b_uint = UnwrappedBigUint::new(128_269);
-        b_uint
-            .encode(&mut encoder, &mut ())
-            .expect("Failed to encode UnwrappedBigUint");
-        assert_eq!(hex::encode(buffer.clone()), "4301f50d");
-
-        let mut decoder = minicbor::Decoder::new(&buffer);
-        let decoded_b_uint = UnwrappedBigUint::decode(&mut decoder, &mut ())
-            .expect("Failed to decode UnwrappedBigUint");
-
-        assert_eq!(decoded_b_uint, b_uint);
-    }
-
-    // Test reference https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/
-    // A.2. Example IEEE 802.1AR profiled X.509 Certificate
-    #[test]
-    fn test_encode_decode_2() {
-        let mut buffer = Vec::new();
-        let mut encoder = minicbor::Encoder::new(&mut buffer);
-        // Serial Number: 9112578475118446130 (0x7e7661d7b54e4632)
-        let b_uint = UnwrappedBigUint::new(9_112_578_475_118_446_130);
-        b_uint
-            .encode(&mut encoder, &mut ())
-            .expect("Failed to encode UnwrappedBigUint");
-        assert_eq!(hex::encode(buffer.clone()), "487e7661d7b54e4632");
-
-        let mut decoder = minicbor::Decoder::new(&buffer);
-        let decoded_b_uint = UnwrappedBigUint::decode(&mut decoder, &mut ())
-            .expect("Failed to decode UnwrappedBigUint");
-
-        assert_eq!(decoded_b_uint, b_uint);
-    }
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/alt_name.rs b/catalyst-gateway-crates/c509-certificate/src/c509_extensions/alt_name.rs
deleted file mode 100644
index 7ab83513bca..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/alt_name.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-//! C509 Alternative Name used for the Subject Alternative Name extension and
-//! the Issuer Alternative Name extension.
-
-use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder};
-use serde::{Deserialize, Serialize};
-
-use crate::c509_general_names::{
-    general_name::{GeneralName, GeneralNameTypeRegistry, GeneralNameValue},
-    GeneralNames,
-};
-
-/// Alternative Name extension. 
-/// Can be interpreted as a `GeneralNames / text` -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct AlternativeName(GeneralNamesOrText); - -impl AlternativeName { - /// Create a new instance of `AlternativeName` given value. - #[must_use] - pub fn new(value: GeneralNamesOrText) -> Self { - Self(value) - } -} - -impl Encode<()> for AlternativeName { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - self.0.encode(e, ctx) - } -} - -impl Decode<'_, ()> for AlternativeName { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - GeneralNamesOrText::decode(d, ctx).map(AlternativeName::new) - } -} - -// ------------------GeneralNamesOrText-------------------- - -/// Enum for type that can be a `GeneralNames` or a text use in `AlternativeName`. -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "snake_case")] -pub enum GeneralNamesOrText { - /// A value of `GeneralNames`. - GeneralNames(GeneralNames), - /// A text string. - Text(String), -} - -impl Encode<()> for GeneralNamesOrText { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - match self { - GeneralNamesOrText::GeneralNames(gns) => { - let gn = gns - .get_gns() - .first() - .ok_or(minicbor::encode::Error::message("GeneralNames is empty"))?; - // Check whether there is only 1 item in the array which is a DNSName - if gns.get_gns().len() == 1 && gn.get_gn_type().is_dns_name() { - gn.get_gn_value().encode(e, ctx)?; - } else { - gns.encode(e, ctx)?; - } - }, - GeneralNamesOrText::Text(text) => { - e.str(text)?; - }, - } - Ok(()) - } -} - -impl Decode<'_, ()> for GeneralNamesOrText { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - match d.datatype()? 
{ - // If it is a string it is a GeneralNames with only 1 DNSName - minicbor::data::Type::String => { - let gn_dns = GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text(d.str()?.to_string()), - ); - let mut gns = GeneralNames::new(); - gns.add_gn(gn_dns); - Ok(GeneralNamesOrText::GeneralNames(gns)) - }, - minicbor::data::Type::Array => { - Ok(GeneralNamesOrText::GeneralNames(GeneralNames::decode( - d, ctx, - )?)) - }, - _ => { - Err(minicbor::decode::Error::message( - "Invalid type for AlternativeName", - )) - }, - } - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_alt_name { - use super::*; - use crate::c509_general_names::general_name::{ - GeneralName, GeneralNameTypeRegistry, GeneralNameValue, - }; - - #[test] - fn encode_only_dns() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let mut gns = GeneralNames::new(); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - )); - let alt_name = AlternativeName::new(GeneralNamesOrText::GeneralNames(gns)); - alt_name - .encode(&mut encoder, &mut ()) - .expect("Failed to encode AlternativeName"); - // "example.com": 0x6b6578616d706c652e636f6d - assert_eq!(hex::encode(buffer.clone()), "6b6578616d706c652e636f6d"); - - let mut decoder = Decoder::new(&buffer); - let decoded_alt_name = AlternativeName::decode(&mut decoder, &mut ()) - .expect("Failed to decode Alternative Name"); - assert_eq!(decoded_alt_name, alt_name); - } - - #[test] - fn encode_decode_text() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let alt_name = AlternativeName::new(GeneralNamesOrText::Text("example.com".to_string())); - alt_name - .encode(&mut encoder, &mut ()) - .expect("Failed to encode AlternativeName"); - // "example.com": 0x6b6578616d706c652e636f6d - assert_eq!(hex::encode(buffer.clone()), "6b6578616d706c652e636f6d"); - - // If only text, it should be GeneralNames with only 1 DNSName - let mut gns = GeneralNames::new(); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - )); - - let mut decoder = Decoder::new(&buffer); - let decoded_alt_name = AlternativeName::decode(&mut decoder, &mut ()) - .expect("Failed to decode Alternative Name"); - assert_eq!( - decoded_alt_name, - AlternativeName::new(GeneralNamesOrText::GeneralNames(gns)) - ); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/data.rs b/catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/data.rs deleted file mode 100644 index 9bf2b5a8fc8..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/data.rs +++ /dev/null @@ -1,115 +0,0 @@ -//! Extension data provides a necessary information for encoding and decoding of C509 -//! Extension. See [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) -//! Section 9.4 C509 Extensions Registry for more information. 
- -// cspell: words Evt - -use std::collections::HashMap; - -use anyhow::Error; -use asn1_rs::{oid, Oid}; -use once_cell::sync::Lazy; - -use super::ExtensionValueType; -use crate::tables::IntegerToOidTable; - -/// Type of `Extension` data -/// Int | OID | Type | Name -type ExtensionDataTuple = (i16, Oid<'static>, ExtensionValueType, &'static str); - -/// Create a type alias for `ExtensionValueType` -type Evt = ExtensionValueType; - -/// `Extension` data table -#[rustfmt::skip] -const EXTENSION_DATA: [ExtensionDataTuple; 25] = [ - // Int | OID | Type | Name - ( 1, oid!(2.5.29 .14), Evt::Bytes, "Subject Key Identifier"), - ( 2, oid!(2.5.29 .15), Evt::Int, "Key Usage"), - ( 3, oid!(2.5.29 .17), Evt::AlternativeName, "Subject Alternative Name"), - ( 4, oid!(2.5.29 .19), Evt::Int, "Basic Constraints"), - ( 5, oid!(2.5.29 .31), Evt::Unsupported, "CRL Distribution Points"), - ( 6, oid!(2.5.29 .32), Evt::Unsupported, "Certificate Policies"), - ( 7, oid!(2.5.29 .35), Evt::Unsupported, "Authority Key Identifier"), - ( 8, oid!(2.5.29 .37), Evt::Unsupported, "Extended Key Usage"), - ( 9, oid!(1.3.6 .1 .5 .5 .7 .1 .1), Evt::Unsupported, "Authority Information Access"), - (10, oid!(1.3.6 .1 .4 .1 .11129 .2 .4 .2), Evt::Unsupported, "Signed Certificate Timestamp List"), - (24, oid!(2.5.29 .9), Evt::Unsupported, "Subject Directory Attributes"), - (25, oid!(2.5.29 .18), Evt::AlternativeName, "Issuer Alternative Name"), - (26, oid!(2.5.29 .30), Evt::Unsupported, "Name Constraints"), - (27, oid!(2.5.29 .33), Evt::Unsupported, "Policy Mappings"), - (28, oid!(2.5.29 .36), Evt::Unsupported, "Policy Constraints"), - (29, oid!(2.5.29 .46), Evt::Unsupported, "Freshest CRL"), - (30, oid!(2.5.29 .54), Evt::Int, "Inhibit anyPolicy"), - (31, oid!(1.3.6 .1 .5 .5 .7 .1 .11), Evt::Unsupported, "Subject Information Access"), - (32, oid!(1.3.6 .1 .5 .5 .7 .1 .7), Evt::Unsupported, "IP Resources"), - (33, oid!(1.3.6 .1 .5 .5 .7 .1 .7), Evt::Unsupported, "AS Resource"), - (34, oid!(1.3.6 .1 .5 .5 .7 .1 .28), Evt::Unsupported, "IP Resources v2"), - (35, oid!(1.3.6 .1 .5 .5 .7 .1 .29), Evt::Unsupported, "AS Resources v2"), - (36, oid!(1.3.6 .1 .5 .5 .7 .1 .2), Evt::Unsupported, "Biometric Information"), - (37, oid!(1.3.6 .1 .4 .1 .11129 .2 .4 .4), Evt::Unsupported, "Precertificate Signing Certificate"), - (38, oid!(1.3.6 .1 .5 .5 .7 .48 .1 .5), Evt::Unsupported, "OCSP No Check"), -]; - -/// A struct of data that contains lookup tables for `Extension`. -pub(crate) struct ExtensionData { - /// A table of integer to OID, provide a bidirectional lookup. - int_to_oid_table: IntegerToOidTable, - /// A table of integer to `ExtensionValueType`, provide a lookup for `Extension` value - /// type. - int_to_type_table: HashMap, -} - -impl ExtensionData { - /// Get the `IntegerToOidTable`. - pub(crate) fn get_int_to_oid_table(&self) -> &IntegerToOidTable { - &self.int_to_oid_table - } - - /// Get the `int_to_type_table` - pub(crate) fn get_int_to_type_table(&self) -> &HashMap { - &self.int_to_type_table - } -} - -/// Define static lookup for extensions table -static EXTENSIONS_TABLES: Lazy = Lazy::new(|| { - let mut int_to_oid_table = IntegerToOidTable::new(); - let mut int_to_type_table = HashMap::::new(); - - for data in EXTENSION_DATA { - int_to_oid_table.add(data.0, data.1); - int_to_type_table.insert(data.0, data.2); - } - - ExtensionData { - int_to_oid_table, - int_to_type_table, - } -}); - -/// Static reference to the `ExtensionData` lookup table. 
-pub(crate) static EXTENSIONS_LOOKUP: &Lazy = &EXTENSIONS_TABLES; - -/// Get the OID from the int value. -pub(crate) fn get_oid_from_int(i: i16) -> Result, Error> { - EXTENSIONS_TABLES - .get_int_to_oid_table() - .get_map() - .get_by_left(&i) - .ok_or(Error::msg(format!( - "OID not found in the extension registry table given int {i}" - ))) - .cloned() -} - -/// Get the extension value type from the int value. -pub(crate) fn get_extension_type_from_int(i: i16) -> Result { - EXTENSIONS_TABLES - .get_int_to_type_table() - .get(&i) - .ok_or(Error::msg(format!( - "Extension value type not found in the extension registry table given int {i}" - ))) - .cloned() -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/mod.rs deleted file mode 100644 index 264b98d1a72..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/extension/mod.rs +++ /dev/null @@ -1,344 +0,0 @@ -//! C509 Extension use to construct an Extensions message field for C509 Certificate. - -mod data; -use std::{fmt::Debug, str::FromStr}; - -use asn1_rs::Oid; -use data::{get_extension_type_from_int, get_oid_from_int, EXTENSIONS_LOOKUP}; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Deserializer, Serialize}; -use strum_macros::EnumDiscriminants; - -use super::alt_name::AlternativeName; -use crate::c509_oid::{C509oid, C509oidRegistered}; - -/// A struct of C509 `Extension` -#[derive(Debug, Clone, PartialEq)] -pub struct Extension { - /// The registered OID of the `Extension`. - registered_oid: C509oidRegistered, - /// The critical flag of the `Extension` negative if critical is true, otherwise - /// positive. - critical: bool, - /// The value of the `Extension` in `ExtensionValue`. - value: ExtensionValue, -} - -impl Extension { - /// Create a new instance of `Extension` using `OID` and value. - #[must_use] - pub fn new(oid: Oid<'static>, value: ExtensionValue, critical: bool) -> Self { - Self { - registered_oid: C509oidRegistered::new(oid, EXTENSIONS_LOOKUP.get_int_to_oid_table()) - .pen_encoded(), - critical, - value, - } - } - - /// Get the value of the `Extension` in `ExtensionValue`. - #[must_use] - pub fn get_value(&self) -> &ExtensionValue { - &self.value - } - - /// Get the critical flag of the `Extension`. - #[must_use] - pub fn get_critical(&self) -> bool { - self.critical - } - - /// Get the registered OID of the `Extension`. - #[must_use] - pub fn get_registered_oid(&self) -> &C509oidRegistered { - &self.registered_oid - } -} - -/// A helper struct to deserialize and serialize `Extension`. 
-#[derive(Debug, Deserialize, Serialize)]
-struct Helper {
-    /// OID string value
-    oid: String,
-    /// Extension value
-    value: ExtensionValue,
-    /// Flag to indicate whether the extension is critical
-    critical: bool,
-}
-
-impl<'de> Deserialize<'de> for Extension {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where D: Deserializer<'de> {
-        let helper = Helper::deserialize(deserializer)?;
-        let oid =
-            Oid::from_str(&helper.oid).map_err(|e| serde::de::Error::custom(format!("{e:?}")))?;
-
-        Ok(Extension::new(oid, helper.value, helper.critical))
-    }
-}
-
-impl Serialize for Extension {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where S: serde::Serializer {
-        let helper = Helper {
-            oid: self.registered_oid.get_c509_oid().get_oid().to_string(),
-            value: self.value.clone(),
-            critical: self.critical,
-        };
-        helper.serialize(serializer)
-    }
-}
-
-impl Encode<()> for Extension {
-    // Extension can be encoded as:
-    // - (extensionID: int, extensionValue: any)
-    // - (extensionID: ~oid, ? critical: true, extensionValue: bytes)
-    // - (extensionID: pen, ? critical: true, extensionValue: bytes)
-    fn encode<W: Write>(
-        &self, e: &mut Encoder<W>, ctx: &mut (),
-    ) -> Result<(), minicbor::encode::Error<W::Error>> {
-        // Handle CBOR int based on OID mapping
-        if let Some(&mapped_oid) = self
-            .registered_oid
-            .get_table()
-            .get_map()
-            .get_by_right(&self.registered_oid.get_c509_oid().get_oid())
-        {
-            // Determine encoded OID value based on critical flag
-            let encoded_oid = if self.critical {
-                -mapped_oid
-            } else {
-                mapped_oid
-            };
-            e.i16(encoded_oid)?;
-        } else {
-            // Handle unwrapped CBOR OID or CBOR PEN
-            self.registered_oid.get_c509_oid().encode(e, ctx)?;
-            if self.critical {
-                e.bool(self.critical)?;
-            }
-        }
-        // Encode the extension value
-        self.value.encode(e, ctx)?;
-        Ok(())
-    }
-}
-
-impl Decode<'_, ()> for Extension {
-    fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result<Self, minicbor::decode::Error> {
-        match d.datatype()? {
-            // Check whether OID is an int
-            // Even though the encoding is i16, the minicbor decoder doesn't know what type we
-            // encoded, so we need to check every possible type.
-            minicbor::data::Type::U8
-            | minicbor::data::Type::U16
-            | minicbor::data::Type::I8
-            | minicbor::data::Type::I16 => {
-                let int_value = d.i16()?;
-                // OID can be negative due to the critical flag, so we need the absolute value
-                let abs_int_value = int_value.abs();
-                let oid =
-                    get_oid_from_int(abs_int_value).map_err(minicbor::decode::Error::message)?;
-                let value_type = get_extension_type_from_int(abs_int_value)
-                    .map_err(minicbor::decode::Error::message)?;
-
-                // Decode extension value
-                let extension_value = ExtensionValue::decode(d, &mut value_type.get_type())?;
-                Ok(Extension::new(
-                    oid.to_owned(),
-                    extension_value,
-                    int_value.is_negative(),
-                ))
-            },
-            _ => {
-                // Handle unwrapped CBOR OID or CBOR PEN
-                let c509_oid = C509oid::decode(d, ctx)?;
-                // The critical flag is optional, so if it exists, we have to decode it
-                let critical = if d.datatype()? == minicbor::data::Type::Bool {
-                    d.bool()?
-                } else {
-                    false
-                };
-
-                // Decode bytes for extension value
-                let extension_value = ExtensionValue::Bytes(d.bytes()?.to_vec());
-
-                Ok(Extension::new(
-                    c509_oid.get_oid(),
-                    extension_value,
-                    critical,
-                ))
-            },
-        }
-    }
-}
-
-// -----------------ExtensionValue------------------------
-
-/// Trait for `ExtensionValueType`
-trait ExtensionValueTypeTrait {
-    /// Get the type of the `ExtensionValueType`.
-    fn get_type(&self) -> ExtensionValueType;
-}
-
-/// An enum of possible value types for `Extension`. 
-#[allow(clippy::module_name_repetitions)] -#[derive(Debug, Clone, PartialEq, EnumDiscriminants, Deserialize, Serialize)] -#[strum_discriminants(name(ExtensionValueType))] -#[serde(rename_all = "snake_case")] -pub enum ExtensionValue { - /// An Integer in the range [-2^64, 2^64-1] - Int(i64), - /// A bytes. - Bytes(Vec), - /// An Alternative Name. - AlternativeName(AlternativeName), - /// An unsupported value. - Unsupported, -} - -impl ExtensionValueTypeTrait for ExtensionValueType { - fn get_type(&self) -> ExtensionValueType { - *self - } -} - -impl Encode<()> for ExtensionValue { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - match self { - ExtensionValue::Int(value) => { - e.i64(*value)?; - }, - ExtensionValue::Bytes(value) => { - e.bytes(value)?; - }, - ExtensionValue::AlternativeName(value) => { - value.encode(e, ctx)?; - }, - ExtensionValue::Unsupported => { - return Err(minicbor::encode::Error::message( - "Cannot encode unsupported Extension value", - )); - }, - } - Ok(()) - } -} - -impl Decode<'_, C> for ExtensionValue -where C: ExtensionValueTypeTrait + Debug -{ - fn decode(d: &mut Decoder<'_>, ctx: &mut C) -> Result { - match ctx.get_type() { - ExtensionValueType::Int => { - let value = d.i64()?; - Ok(ExtensionValue::Int(value)) - }, - ExtensionValueType::Bytes => { - let value = d.bytes()?.to_vec(); - Ok(ExtensionValue::Bytes(value)) - }, - ExtensionValueType::AlternativeName => { - let value = AlternativeName::decode(d, &mut ())?; - Ok(ExtensionValue::AlternativeName(value)) - }, - ExtensionValueType::Unsupported => { - Err(minicbor::decode::Error::message( - "Cannot decode Unsupported extension value", - )) - }, - } - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_extension { - use asn1_rs::oid; - - use super::*; - - #[test] - fn int_oid_inhibit_anypolicy_value_unsigned_int() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let ext = Extension::new(oid!(2.5.29 .54), ExtensionValue::Int(2), false); - ext.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extension"); - // Inhibit anyPolicy : 0x181e - // 2 : 0x02 - assert_eq!(hex::encode(buffer.clone()), "181e02"); - - let mut decoder = Decoder::new(&buffer); - let decoded_ext = - Extension::decode(&mut decoder, &mut ()).expect("Failed to decode Extension"); - assert_eq!(decoded_ext, ext); - } - - #[test] - fn unwrapped_oid_critical_key_usage_value_int() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let ext = Extension::new(oid!(2.5.29 .15), ExtensionValue::Int(-1), true); - ext.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extension"); - // Key Usage with critical true: 0x21 - // -1 : 0x20 - assert_eq!(hex::encode(buffer.clone()), "2120"); - - let mut decoder = Decoder::new(&buffer); - let decoded_ext = - Extension::decode(&mut decoder, &mut ()).expect("Failed to decode Extension"); - assert_eq!(decoded_ext, ext); - } - - #[test] - fn oid_unwrapped_value_bytes_string() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - // Not PEN OID and not in the registry table - // Value should be bytes - let ext = Extension::new( - oid!(2.16.840 .1 .101 .3 .4 .2 .1), - ExtensionValue::Bytes("test".as_bytes().to_vec()), - false, - ); - ext.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extension"); - // OID : 0x49608648016503040201 - // "test".as_bytes() : 0x4474657374 - assert_eq!( - hex::encode(buffer.clone()), - 
"496086480165030402014474657374" - ); - - let mut decoder = Decoder::new(&buffer); - let decoded_ext = - Extension::decode(&mut decoder, &mut ()).expect("Failed to decode Extension"); - assert_eq!(decoded_ext, ext); - } - - #[test] - fn encode_decode_mismatch_type() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - // Subject Key Identifier should be bytes - let ext = Extension::new(oid!(2.5.29 .14), ExtensionValue::Int(2), false); - ext.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extension"); - // SubjectKeyIdentifier : 0x01 - // 2 : 0x02 - assert_eq!(hex::encode(buffer.clone()), "0102"); - - let mut decoder = Decoder::new(&buffer); - // Decode should fail, because rely on the int value - Extension::decode(&mut decoder, &mut ()).expect_err("Failed to decode Extension"); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_extensions/mod.rs deleted file mode 100644 index 71d71d422ec..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_extensions/mod.rs +++ /dev/null @@ -1,225 +0,0 @@ -//! C509 Extension as a part of `TBSCertificate` used in C509 Certificate. -//! -//! Extension fallback of C509 OID extension -//! Given OID if not found in the registered OID table, it will be encoded as a PEN OID. -//! If the OID is not a PEN OID, it will be encoded as an unwrapped OID. -//! -//! ```cddl -//! Extensions and Extension can be encoded as the following: -//! Extensions = [ * Extension ] / int -//! Extension = ( extensionID: int, extensionValue: any ) // -//! ( extensionID: ~oid, ? critical: true, -//! extensionValue: bytes ) // -//! ( extensionID: pen, ? critical: true, -//! extensionValue: bytes ) -//! ``` -//! -//! For more information about Extensions, -//! visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -pub mod alt_name; -pub mod extension; - -use std::fmt::Debug; - -use asn1_rs::{oid, Oid}; -use extension::{Extension, ExtensionValue}; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Serialize}; - -/// OID of `KeyUsage` extension -static KEY_USAGE_OID: Oid<'static> = oid!(2.5.29 .15); - -/// A struct of C509 Extensions containing a vector of `Extension`. -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct Extensions(Vec); - -impl Default for Extensions { - fn default() -> Self { - Self::new() - } -} - -impl Extensions { - /// Create a new instance of `Extensions` as empty vector. - #[must_use] - pub fn new() -> Self { - Self(Vec::new()) - } - - /// Add an `Extension` to the `Extensions`. 
- pub fn add_ext(&mut self, extension: Extension) { - self.0.push(extension); - } -} - -impl Encode<()> for Extensions { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - // If there is only one extension and it is KeyUsage, encode as int - // encoding as absolute value of the second int and the sign of the first int - if let Some(extension) = self.0.first() { - if self.0.len() == 1 - && extension.get_registered_oid().get_c509_oid().get_oid() == KEY_USAGE_OID - { - match extension.get_value() { - ExtensionValue::Int(value) => { - let ku_value = if extension.get_critical() { - -value - } else { - *value - }; - e.i64(ku_value)?; - return Ok(()); - }, - _ => { - return Err(minicbor::encode::Error::message( - "KeyUsage extension value should be an integer", - )); - }, - } - } - } - // Else handle the array of `Extension` - e.array(self.0.len() as u64)?; - for extension in &self.0 { - extension.encode(e, ctx)?; - } - Ok(()) - } -} - -impl Decode<'_, ()> for Extensions { - fn decode(d: &mut Decoder<'_>, _ctx: &mut ()) -> Result { - // If only KeyUsage is in the extension -> will only contain an int - if d.datatype()? == minicbor::data::Type::U8 || d.datatype()? == minicbor::data::Type::I8 { - // Check if it's a negative number (critical extension) - let critical = d.datatype()? == minicbor::data::Type::I8; - // Note that 'KeyUsage' BIT STRING is interpreted as an unsigned integer, - // so we can absolute the value - let value = d.i64()?.abs(); - - let extension_value = ExtensionValue::Int(value); - let mut extensions = Extensions::new(); - extensions.add_ext(Extension::new( - KEY_USAGE_OID.clone(), - extension_value, - critical, - )); - return Ok(extensions); - } - // Handle array of extensions - let len = d - .array()? 
- .ok_or_else(|| minicbor::decode::Error::message("Failed to get array length"))?; - let mut extensions = Extensions::new(); - for _ in 0..len { - let extension = Extension::decode(d, &mut ())?; - extensions.add_ext(extension); - } - - Ok(extensions) - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_extensions { - use super::*; - - #[test] - fn one_extension_key_usage() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut exts = Extensions::new(); - exts.add_ext(Extension::new( - oid!(2.5.29 .15), - ExtensionValue::Int(2), - false, - )); - exts.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extensions"); - // 1 extension - // value 2 : 0x02 - assert_eq!(hex::encode(buffer.clone()), "02"); - - let mut decoder = Decoder::new(&buffer); - let decoded_exts = - Extensions::decode(&mut decoder, &mut ()).expect("Failed to decode Extensions"); - assert_eq!(decoded_exts, exts); - } - - #[test] - fn one_extension_key_usage_set_critical() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut exts = Extensions::new(); - exts.add_ext(Extension::new( - oid!(2.5.29 .15), - ExtensionValue::Int(2), - true, - )); - exts.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extensions"); - // 1 extension - // value -2 : 0x21 - assert_eq!(hex::encode(buffer.clone()), "21"); - - let mut decoder = Decoder::new(&buffer); - let decoded_exts = - Extensions::decode(&mut decoder, &mut ()).expect("Failed to decode Extensions"); - assert_eq!(decoded_exts, exts); - } - - #[test] - fn multiple_extensions() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut exts = Extensions::new(); - exts.add_ext(Extension::new( - oid!(2.5.29 .15), - ExtensionValue::Int(2), - false, - )); - - exts.add_ext(Extension::new( - oid!(2.5.29 .14), - ExtensionValue::Bytes([1, 2, 3, 4].to_vec()), - false, - )); - exts.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extensions"); - - // 2 extensions (array of 2): 0x82 - // KeyUsage with value 2: 0x0202 - // SubjectKeyIdentifier with value [1,2,3,4]: 0x0401020304 - assert_eq!(hex::encode(buffer.clone()), "820202014401020304"); - - let mut decoder = Decoder::new(&buffer); - let decoded_exts = - Extensions::decode(&mut decoder, &mut ()).expect("Failed to decode Extensions"); - assert_eq!(decoded_exts, exts); - } - - #[test] - fn zero_extensions() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let exts = Extensions::new(); - exts.encode(&mut encoder, &mut ()) - .expect("Failed to encode Extensions"); - assert_eq!(hex::encode(buffer.clone()), "80"); - - let mut decoder = Decoder::new(&buffer); - // Extensions can have 0 length - let decoded_exts = - Extensions::decode(&mut decoder, &mut ()).expect("Failed to decode Extensions"); - assert_eq!(decoded_exts, exts); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/data.rs b/catalyst-gateway-crates/c509-certificate/src/c509_general_names/data.rs deleted file mode 100644 index 71ff7649492..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/data.rs +++ /dev/null @@ -1,132 +0,0 @@ -//! General Name data provides a necessary information for encoding and decoding of C509 -//! General Name. See [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) -//! Section 9.9 C509 General Names Registry for more information. 
-
-// cspell: words Gntr Gnvt
-
-use std::collections::HashMap;
-
-use anyhow::Error;
-use bimap::BiMap;
-use once_cell::sync::Lazy;
-
-use super::general_name::{GeneralNameTypeRegistry, GeneralNameValueType};
-use crate::tables::{IntTable, TableTrait};
-
-/// Type of `GeneralName` data.
-/// Int | Name | Type
-type GeneralNameDataTuple = (i16, GeneralNameTypeRegistry, GeneralNameValueType);
-
-/// Create a type alias for `GeneralNameTypeRegistry`
-type Gntr = GeneralNameTypeRegistry;
-/// Create a type alias for `GeneralNameValueType`
-type Gnvt = GeneralNameValueType;
-
-/// `GeneralName` data table.
-#[rustfmt::skip]
-const GENERAL_NAME_DATA: [GeneralNameDataTuple; 10] = [
-    // Int |  Name                             | Type
-    (-3,     Gntr::OtherNameBundleEID,           Gnvt::Unsupported),
-    (-2,     Gntr::OtherNameSmtpUTF8Mailbox,     Gnvt::Text),
-    (-1,     Gntr::OtherNameHardwareModuleName,  Gnvt::OtherNameHWModuleName),
-    (0,      Gntr::OtherName,                    Gnvt::OtherNameHWModuleName),
-    (1,      Gntr::Rfc822Name,                   Gnvt::Text),
-    (2,      Gntr::DNSName,                      Gnvt::Text),
-    (4,      Gntr::DirectoryName,                Gnvt::Name),
-    (6,      Gntr::UniformResourceIdentifier,    Gnvt::Text),
-    (7,      Gntr::IPAddress,                    Gnvt::Bytes),
-    (8,      Gntr::RegisteredID,                 Gnvt::Oid),
-];
-
-/// A struct of data that contains lookup table for `GeneralName`.
-pub(crate) struct GeneralNameData {
-    /// A table of integer to `GeneralNameTypeRegistry`, provide a bidirectional lookup.
-    int_to_name_table: IntegerToGNTable,
-    /// A table of integer to `GeneralNameValueType`, provide a lookup for the type of
-    /// `GeneralName` value.
-    int_to_type_table: HashMap<i16, GeneralNameValueType>,
-}
-
-impl GeneralNameData {
-    /// Get the `int_to_name_table`.
-    pub(crate) fn get_int_to_name_table(&self) -> &IntegerToGNTable {
-        &self.int_to_name_table
-    }
-
-    /// Get the `int_to_type_table`.
-    pub(crate) fn get_int_to_type_table(&self) -> &HashMap<i16, GeneralNameValueType> {
-        &self.int_to_type_table
-    }
-}
-
-/// A struct of integer to `GeneralNameTypeRegistry` table.
-#[derive(Debug, Clone, PartialEq)]
-pub(crate) struct IntegerToGNTable(IntTable<GeneralNameTypeRegistry>);
-
-impl IntegerToGNTable {
-    /// Create a new instance of `IntegerToGNTable`.
-    pub(crate) fn new() -> Self {
-        Self(IntTable::<GeneralNameTypeRegistry>::new())
-    }
-
-    /// Add a new integer to `GeneralNameTypeRegistry` map table.
-    pub(crate) fn add(&mut self, k: i16, v: GeneralNameTypeRegistry) {
-        self.0.add(k, v);
-    }
-
-    /// Get the map table of integer to `GeneralNameTypeRegistry`.
-    pub(crate) fn get_map(&self) -> &BiMap<i16, GeneralNameTypeRegistry> {
-        self.0.get_map()
-    }
-}
-
-/// Define static lookup for general names table
-static GENERAL_NAME_TABLES: Lazy<GeneralNameData> = Lazy::new(|| {
-    let mut int_to_name_table = IntegerToGNTable::new();
-    let mut int_to_type_table = HashMap::new();
-
-    for data in GENERAL_NAME_DATA {
-        int_to_name_table.add(data.0, data.1);
-        int_to_type_table.insert(data.0, data.2);
-    }
-
-    GeneralNameData {
-        int_to_name_table,
-        int_to_type_table,
-    }
-});
-
-/// Get the general name from the int value.
-pub(crate) fn get_gn_from_int(i: i16) -> Result<GeneralNameTypeRegistry, Error> {
-    GENERAL_NAME_TABLES
-        .get_int_to_name_table()
-        .get_map()
-        .get_by_left(&i)
-        .ok_or(Error::msg(format!(
-            "GeneralName not found in the general name registry table given int {i}"
-        )))
-        .cloned()
-}
-
-/// Get the int value from the general name.
-pub(crate) fn get_int_from_gn(gn: GeneralNameTypeRegistry) -> Result<i16, Error> {
-    GENERAL_NAME_TABLES
-        .get_int_to_name_table()
-        .get_map()
-        .get_by_right(&gn)
-        .ok_or(Error::msg(format!(
-            "Int value not found in the general name registry table given GeneralName {gn:?}"
-        )))
-        .cloned()
-}
-
-/// Get the general name value type from the int value.
-pub(crate) fn get_gn_value_type_from_int(i: i16) -> Result { - GENERAL_NAME_TABLES - .get_int_to_type_table() - .get(&i) - .ok_or(Error::msg(format!( - "General name value type not found in the general name registry table given {i}" - ))) - .cloned() -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/general_name.rs b/catalyst-gateway-crates/c509-certificate/src/c509_general_names/general_name.rs deleted file mode 100644 index 9d7b06ecb2d..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/general_name.rs +++ /dev/null @@ -1,330 +0,0 @@ -//! C509 General Name -//! -//! For more information about `GeneralName`, -//! visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -use std::fmt::Debug; - -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Serialize}; -use strum_macros::{EnumDiscriminants, EnumIs}; - -use super::{ - data::{get_gn_from_int, get_gn_value_type_from_int, get_int_from_gn}, - other_name_hw_module::OtherNameHardwareModuleName, -}; -use crate::{c509_name::Name, c509_oid::C509oid}; - -/// A struct represents a `GeneralName`. -/// ```cddl -/// GeneralName = ( GeneralNameType : int, GeneralNameValue : any ) -/// ``` -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct GeneralName { - /// A registered general name type. - gn_type: GeneralNameTypeRegistry, - /// A general name value. - value: GeneralNameValue, -} - -#[allow(dead_code)] -impl GeneralName { - /// Create a new instance of `GeneralName`. - #[must_use] - pub fn new(gn_type: GeneralNameTypeRegistry, value: GeneralNameValue) -> Self { - Self { gn_type, value } - } - - /// Get the `GeneralName` type. - #[must_use] - pub fn get_gn_type(&self) -> &GeneralNameTypeRegistry { - &self.gn_type - } - - /// Get the value of the `GeneralName` in `GeneralNameValue`. - #[must_use] - pub fn get_gn_value(&self) -> &GeneralNameValue { - &self.value - } -} - -impl Encode<()> for GeneralName { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - // Encode GeneralNameType as int - let i = get_int_from_gn(self.gn_type).map_err(minicbor::encode::Error::message)?; - e.i16(i)?; - // Encode GeneralNameValue as its type - self.value.encode(e, ctx)?; - Ok(()) - } -} - -impl Decode<'_, ()> for GeneralName { - fn decode(d: &mut Decoder<'_>, _ctx: &mut ()) -> Result { - if minicbor::data::Type::U8 == d.datatype()? || minicbor::data::Type::I8 == d.datatype()? { - let i = d.i16()?; - let gn = get_gn_from_int(i).map_err(minicbor::decode::Error::message)?; - let value_type = - get_gn_value_type_from_int(i).map_err(minicbor::decode::Error::message)?; - Ok(GeneralName::new( - gn, - GeneralNameValue::decode(d, &mut value_type.get_type())?, - )) - } else { - // GeneralName is not type int - Err(minicbor::decode::Error::message( - "GeneralName id type invalid, expected int", - )) - } - } -} - -// -----------------GeneralNameTypeRegistry------------------------ - -/// Enum of `GeneralName` registered in table Section 9.9 C509. -#[allow(clippy::module_name_repetitions)] -#[derive(Debug, Copy, PartialEq, Clone, Eq, Hash, EnumIs, Deserialize, Serialize)] -pub enum GeneralNameTypeRegistry { - /// An otherName with `BundleEID`. - OtherNameBundleEID, // EID - /// An otherName with `SmtpUTF8Mailbox`. - OtherNameSmtpUTF8Mailbox, - /// An otherName with `HardwareModuleName`. - OtherNameHardwareModuleName, - /// An otherName. - OtherName, - /// A rfc822Name. 
- Rfc822Name, - /// A dNSName. - DNSName, - /// A directoryName. - DirectoryName, - /// A uniformResourceIdentifier. - UniformResourceIdentifier, - /// An iPAddress. - IPAddress, - /// A registeredID. - RegisteredID, -} - -// -------------------GeneralNameValue---------------------- - -/// An enum of possible value types for `GeneralName`. -#[allow(clippy::module_name_repetitions)] -#[derive(Debug, PartialEq, Clone, EnumDiscriminants, Deserialize, Serialize)] -#[strum_discriminants(name(GeneralNameValueType))] -#[serde(rename_all = "snake_case")] -pub enum GeneralNameValue { - /// A text string. - Text(String), - /// A otherName + hardwareModuleName. - OtherNameHWModuleName(OtherNameHardwareModuleName), - /// A bytes. - Bytes(Vec), - /// An OID - Oid(C509oid), - /// Name - Name(Name), - /// An unsupported value. - Unsupported, -} - -/// Trait for `GeneralNameValueType` -trait GeneralNameValueTrait { - /// Get the type of the `GeneralNameValueType`. - fn get_type(&self) -> GeneralNameValueType; -} - -impl GeneralNameValueTrait for GeneralNameValueType { - fn get_type(&self) -> GeneralNameValueType { - *self - } -} - -impl Encode<()> for GeneralNameValue { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - match self { - GeneralNameValue::Text(value) => { - e.str(value)?; - }, - GeneralNameValue::Bytes(value) => { - e.bytes(value)?; - }, - GeneralNameValue::Oid(value) => { - value.encode(e, ctx)?; - }, - GeneralNameValue::OtherNameHWModuleName(value) => { - value.encode(e, ctx)?; - }, - GeneralNameValue::Name(value) => { - Name::encode(value, e, ctx)?; - }, - GeneralNameValue::Unsupported => { - return Err(minicbor::encode::Error::message( - "Cannot encode unsupported GeneralName value", - )) - }, - }; - Ok(()) - } -} -impl Decode<'_, C> for GeneralNameValue -where C: GeneralNameValueTrait + Debug -{ - fn decode(d: &mut Decoder<'_>, ctx: &mut C) -> Result { - match ctx.get_type() { - GeneralNameValueType::Text => { - let value = d.str()?.to_string(); - Ok(GeneralNameValue::Text(value)) - }, - GeneralNameValueType::Bytes => { - let value = d.bytes()?.to_vec(); - Ok(GeneralNameValue::Bytes(value)) - }, - GeneralNameValueType::Oid => { - let value = C509oid::decode(d, &mut ())?; - Ok(GeneralNameValue::Oid(value)) - }, - GeneralNameValueType::OtherNameHWModuleName => { - let value = OtherNameHardwareModuleName::decode(d, &mut ())?; - Ok(GeneralNameValue::OtherNameHWModuleName(value)) - }, - GeneralNameValueType::Name => { - let value = Name::decode(d, &mut ())?; - Ok(GeneralNameValue::Name(value)) - }, - GeneralNameValueType::Unsupported => { - Err(minicbor::decode::Error::message( - "Cannot decode Unsupported GeneralName value", - )) - }, - } - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_general_name { - use std::net::Ipv4Addr; - - use asn1_rs::oid; - - use super::*; - - #[test] - fn encode_decode_text() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let gn = GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - ); - gn.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralName"); - // DNSName: 0x02 - // "example.com": 0x6b6578616d706c652e636f6d - assert_eq!(hex::encode(buffer.clone()), "026b6578616d706c652e636f6d"); - - let mut decoder = Decoder::new(&buffer); - let gn_decoded = - GeneralName::decode(&mut decoder, &mut ()).expect("Failed to decode GeneralName"); - assert_eq!(gn_decoded, gn); - } - - #[test] 
- fn encode_decode_hw_module_name() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let hw = OtherNameHardwareModuleName::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1), vec![ - 0x01, 0x02, 0x03, 0x04, - ]); - let gn = GeneralName::new( - GeneralNameTypeRegistry::OtherNameHardwareModuleName, - GeneralNameValue::OtherNameHWModuleName(hw), - ); - gn.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralName"); - // OtherNameHardwareModuleName: 0x20 - // [ ~oid, bytes ] = 0x82496086480165030402014401020304 - assert_eq!( - hex::encode(buffer.clone()), - "2082496086480165030402014401020304" - ); - - let mut decoder = Decoder::new(&buffer); - let gn_decoded = - GeneralName::decode(&mut decoder, &mut ()).expect("Failed to decode GeneralName"); - assert_eq!(gn_decoded, gn); - } - - #[test] - fn encode_decode_ip() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let ipv4 = Ipv4Addr::new(192, 168, 1, 1); - let gn = GeneralName::new( - GeneralNameTypeRegistry::IPAddress, - GeneralNameValue::Bytes(ipv4.octets().to_vec()), - ); - - gn.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralName"); - // IPAddress: 0x07 - // 192.168.1.1 bytes: 0x44c0a8010 - assert_eq!(hex::encode(buffer.clone()), "0744c0a80101"); - - let mut decoder = Decoder::new(&buffer); - let gn_decoded = - GeneralName::decode(&mut decoder, &mut ()).expect("Failed to decode GeneralName"); - assert_eq!(gn_decoded, gn); - } - - #[test] - fn encode_decode_oid() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let gn = GeneralName::new( - GeneralNameTypeRegistry::RegisteredID, - GeneralNameValue::Oid(C509oid::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1))), - ); - gn.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralName"); - // RegisteredID: 0x08 - // oid: 49608648016503040201 - assert_eq!(hex::encode(buffer.clone()), "0849608648016503040201"); - - let mut decoder = Decoder::new(&buffer); - let gn_decoded = - GeneralName::decode(&mut decoder, &mut ()).expect("Failed to decode GeneralName"); - assert_eq!(gn_decoded, gn); - } - - #[test] - fn encode_decode_mismatch_type() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let gn = GeneralName::new( - GeneralNameTypeRegistry::OtherNameSmtpUTF8Mailbox, - GeneralNameValue::Oid(C509oid::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1))), - ); - gn.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralName"); - // OtherNameSmtpUTF8Mailbox: 0x21 - // oid: 49608648016503040201 - assert_eq!(hex::encode(buffer.clone()), "2149608648016503040201"); - - let mut decoder = Decoder::new(&buffer); - // Decode should fail, because rely on the int value - GeneralName::decode(&mut decoder, &mut ()).expect_err("Failed to decode GeneralName"); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_general_names/mod.rs deleted file mode 100644 index f5778fb0142..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/mod.rs +++ /dev/null @@ -1,168 +0,0 @@ -//! C509 General Names -//! -//! For more information about `GeneralNames`, -//! 
visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -mod data; -pub mod general_name; -pub mod other_name_hw_module; -use general_name::GeneralName; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Serialize}; - -/// A struct represents an array of `GeneralName`. -/// -/// ```cddl -/// GeneralNames = [ + GeneralName ] -/// ``` -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct GeneralNames(Vec); - -impl Default for GeneralNames { - fn default() -> Self { - Self::new() - } -} - -impl GeneralNames { - /// Create a new instance of `GeneralNames` as empty vector. - #[must_use] - pub fn new() -> Self { - Self(Vec::new()) - } - - /// Add a new `GeneralName` to the `GeneralNames`. - pub fn add_gn(&mut self, gn: GeneralName) { - self.0.push(gn); - } - - /// Get the a vector of `GeneralName`. - pub(crate) fn get_gns(&self) -> &Vec { - &self.0 - } -} - -impl Encode<()> for GeneralNames { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - if self.0.is_empty() { - return Err(minicbor::encode::Error::message( - "GeneralNames should not be empty", - )); - } - // The general name type should be included in array too - e.array(self.0.len() as u64 * 2)?; - for gn in &self.0 { - gn.encode(e, ctx)?; - } - Ok(()) - } -} - -impl Decode<'_, ()> for GeneralNames { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - let len = d.array()?.ok_or(minicbor::decode::Error::message( - "GeneralNames should be an array", - ))?; - let mut gn = GeneralNames::new(); - for _ in 0..len / 2 { - gn.add_gn(GeneralName::decode(d, ctx)?); - } - Ok(gn) - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_general_names { - - use std::net::Ipv4Addr; - - use asn1_rs::oid; - use general_name::{GeneralNameTypeRegistry, GeneralNameValue}; - use other_name_hw_module::OtherNameHardwareModuleName; - - use super::*; - use crate::c509_oid::C509oid; - - #[test] - fn encode_decode_gns() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut gns = GeneralNames::new(); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - )); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::OtherNameHardwareModuleName, - GeneralNameValue::OtherNameHWModuleName(OtherNameHardwareModuleName::new( - oid!(2.16.840 .1 .101 .3 .4 .2 .1), - vec![0x01, 0x02, 0x03, 0x04], - )), - )); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::IPAddress, - GeneralNameValue::Bytes(Ipv4Addr::new(192, 168, 1, 1).octets().to_vec()), - )); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::RegisteredID, - GeneralNameValue::Oid(C509oid::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1))), - )); - gns.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralNames"); - // Array of 4 GeneralName (type, value) so 8 items: 0x88 - assert_eq!(hex::encode(buffer.clone()), "88026b6578616d706c652e636f6d20824960864801650304020144010203040744c0a801010849608648016503040201"); - - let mut decoder = Decoder::new(&buffer); - let gns_decoded = - GeneralNames::decode(&mut decoder, &mut ()).expect("Failed to decode GeneralName"); - assert_eq!(gns_decoded, gns); - } - - #[test] - fn encode_decode_gns_with_same_gn_type() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut gns = GeneralNames::new(); - gns.add_gn(GeneralName::new( - 
GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - )); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - )); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::DNSName, - GeneralNameValue::Text("example.com".to_string()), - )); - gns.encode(&mut encoder, &mut ()) - .expect("Failed to encode GeneralNames"); - // Array of 3 GeneralName (type, value) so 6 items: 0x86 - // DNSName with "example.com": 0x026b6578616d706c652e636f6d - assert_eq!( - hex::encode(buffer.clone()), - "86026b6578616d706c652e636f6d026b6578616d706c652e636f6d026b6578616d706c652e636f6d" - ); - - let mut decoder = Decoder::new(&buffer); - let gns_decoded = - GeneralNames::decode(&mut decoder, &mut ()).expect("Failed to decode GeneralName"); - assert_eq!(gns_decoded, gns); - } - - #[test] - fn encode_decode_gns_empty() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let gns = GeneralNames::new(); - gns.encode(&mut encoder, &mut ()) - .expect_err("GeneralNames should not be empty"); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/other_name_hw_module.rs b/catalyst-gateway-crates/c509-certificate/src/c509_general_names/other_name_hw_module.rs deleted file mode 100644 index 745b3cca0da..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_general_names/other_name_hw_module.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! `OtherNameHardwareModuleName`, special type for `hardwareModuleName` type of -//! otherName. When 'otherName + hardwareModuleName' is used, then `[ ~oid, bytes ]` is -//! used to contain the pair ( hwType, hwSerialNum ) directly as specified in -//! [RFC4108](https://datatracker.ietf.org/doc/rfc4108/) - -use asn1_rs::Oid; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Serialize}; - -use crate::c509_oid::C509oid; - -/// A struct represents the hardwareModuleName type of otherName. -/// Containing a pair of ( hwType, hwSerialNum ) as mentioned in -/// [RFC4108](https://datatracker.ietf.org/doc/rfc4108/) -#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] -pub struct OtherNameHardwareModuleName { - /// The hardware type OID. - hw_type: C509oid, - /// The hardware serial number represent in bytes. - hw_serial_num: Vec, -} - -impl OtherNameHardwareModuleName { - /// Create a new instance of `OtherNameHardwareModuleName`. - #[must_use] - pub fn new(hw_type: Oid<'static>, hw_serial_num: Vec) -> Self { - Self { - hw_type: C509oid::new(hw_type), - hw_serial_num, - } - } -} - -impl Encode<()> for OtherNameHardwareModuleName { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - e.array(2)?; - self.hw_type.encode(e, ctx)?; - e.bytes(&self.hw_serial_num)?; - Ok(()) - } -} - -impl<'a> Decode<'a, ()> for OtherNameHardwareModuleName { - fn decode(d: &mut Decoder<'a>, ctx: &mut ()) -> Result { - d.array()?; - let hw_type = C509oid::decode(d, ctx)?; - let hw_serial_num = d.bytes()?.to_vec(); - Ok(OtherNameHardwareModuleName::new( - hw_type.get_oid(), - hw_serial_num, - )) - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/data.rs b/catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/data.rs deleted file mode 100644 index 1d0e77bf392..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/data.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! 
Signature algorithm data provides the necessary information for encoding and decoding of
-//! C509 `issuerSignatureAlgorithm`. See [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/)
-//! Section 9.10 C509 Signature Algorithms Registry for more information.
-
-// cspell: words RSASSA XMSS
-
-use anyhow::Error;
-use asn1_rs::{oid, Oid};
-use once_cell::sync::Lazy;
-
-use crate::tables::IntegerToOidTable;
-
-/// Type of algorithm data
-/// INT | OID | Name
-type AlgorithmDataTuple = (i16, Oid<'static>, &'static str);
-
-/// Signature algorithm data table.
-#[rustfmt::skip]
-const SIG_ALGO_DATA: [AlgorithmDataTuple; 22] = [
-    // Int |    OID                                 | Name
-    (-256,  oid!(1.2.840.113549.1.1.5),     "RSASSA-PKCS1-v1_5 with SHA-1"),
-    (-255,  oid!(1.2.840.10045.4.1),        "ECDSA with SHA-1"),
-    (0,     oid!(1.2.840.10045.4.3.2),      "ECDSA with SHA-256"),
-    (1,     oid!(1.2.840.10045.4.3.3),      "ECDSA with SHA-384"),
-    (2,     oid!(1.2.840.10045.4.3.4),      "ECDSA with SHA-512"),
-    (3,     oid!(1.3.6.1.5.5.7.6.32),       "ECDSA with SHAKE128"),
-    (4,     oid!(1.3.6.1.5.5.7.6.33),       "ECDSA with SHAKE256"),
-    (12,    oid!(1.3.101.112),              "Ed25519"),
-    (13,    oid!(1.3.101.113),              "Ed448"),
-    (14,    oid!(1.3.6.1.5.5.7.6.26),       "SHA-256 with HMAC-SHA256"),
-    (15,    oid!(1.3.6.1.5.5.7.6.27),       "SHA-384 with HMAC-SHA384"),
-    (16,    oid!(1.3.6.1.5.5.7.6.28),       "SHA-512 with HMAC-SHA512"),
-    (23,    oid!(1.2.840.113549.1.1.11),    "RSASSA-PKCS1-v1_5 with SHA-256"),
-    (24,    oid!(1.2.840.113549.1.1.12),    "RSASSA-PKCS1-v1_5 with SHA-384"),
-    (25,    oid!(1.2.840.113549.1.1.13),    "RSASSA-PKCS1-v1_5 with SHA-512"),
-    (26,    oid!(1.2.840.113549.1.1.10),    "RSASSA-PSS with SHA-256"),
-    // (27, oid!(1.2.840.113549.1.1.10),    "RSASSA-PSS with SHA-384"),
-    // (28, oid!(1.2.840.113549.1.1.10),    "RSASSA-PSS with SHA-512"),
-    (29,    oid!(1.3.6.1.5.5.7.6.30),       "RSASSA-PSS with SHAKE128"),
-    (30,    oid!(1.3.6.1.5.5.7.6.31),       "RSASSA-PSS with SHAKE256"),
-    (42,    oid!(1.2.840.113549.1.9.16.3.17), "HSS / LMS"),
-    (43,    oid!(0.4.0.127.0.15.1.1.13.0),  "XMSS"),
-    (44,    oid!(0.4.0.127.0.15.1.1.14.0),  "XMSS^MT"),
-    (45,    oid!(1.2.156.10197.1.501),      "SM2 with SM3"),
-];
-
-/// A struct of data that contains lookup table of integer to OID in
-/// bidirectional way for `IssuerSignatureAlgorithm`.
-pub(crate) struct IssuerSigAlgoData(IntegerToOidTable);
-
-impl IssuerSigAlgoData {
-    /// Get the `IntegerToOidTable`
-    pub(crate) fn get_int_to_oid_table(&self) -> &IntegerToOidTable {
-        &self.0
-    }
-}
-
-/// Define static lookup for issuer signature algorithm table
-static ISSUER_SIG_ALGO_TABLE: Lazy<IssuerSigAlgoData> = Lazy::new(|| {
-    let mut int_to_oid_table = IntegerToOidTable::new();
-
-    for data in SIG_ALGO_DATA {
-        int_to_oid_table.add(data.0, data.1);
-    }
-
-    IssuerSigAlgoData(int_to_oid_table)
-});
-
-/// Static reference to the `IssuerSigAlgoData` lookup table.
-pub(crate) static ISSUER_SIG_ALGO_LOOKUP: &Lazy<IssuerSigAlgoData> = &ISSUER_SIG_ALGO_TABLE;
-
-/// Get the OID from the int value.
-pub(crate) fn get_oid_from_int(i: i16) -> Result<Oid<'static>, Error> {
-    ISSUER_SIG_ALGO_TABLE
-        .get_int_to_oid_table()
-        .get_map()
-        .get_by_left(&i)
-        .ok_or(Error::msg(format!(
-            "OID not found in the signature algorithms registry table given int {i}"
-        )))
-        .cloned()
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/mod.rs
deleted file mode 100644
index 33e67941c45..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/c509_issuer_sig_algo/mod.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-//!
C509 Issuer Signature Algorithm as a part of `TBSCertificate` used in C509 -//! Certificate. -//! -//! ```cddl -//! issuerSignatureAlgorithm: AlgorithmIdentifier -//! ``` - -mod data; - -use std::str::FromStr; - -use asn1_rs::Oid; -use data::{get_oid_from_int, ISSUER_SIG_ALGO_LOOKUP}; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Deserializer, Serialize}; - -use crate::{c509_algo_identifier::AlgorithmIdentifier, c509_oid::C509oidRegistered}; - -/// A struct represents the `IssuerSignatureAlgorithm` -#[derive(Debug, Clone, PartialEq)] -pub struct IssuerSignatureAlgorithm { - /// The registered OID of the `IssuerSignatureAlgorithm`. - registered_oid: C509oidRegistered, - /// An `AlgorithmIdentifier` type - algo_identifier: AlgorithmIdentifier, -} - -impl IssuerSignatureAlgorithm { - /// Create new instance of `IssuerSignatureAlgorithm` where it registered with - /// Issuer Signature Algorithm lookup table. - pub fn new(oid: Oid<'static>, param: Option) -> Self { - Self { - registered_oid: C509oidRegistered::new( - oid.clone(), - ISSUER_SIG_ALGO_LOOKUP.get_int_to_oid_table(), - ), - algo_identifier: AlgorithmIdentifier::new(oid, param), - } - } -} -/// Helper struct for deserialize and serialize `IssuerSignatureAlgorithm`. -#[derive(Debug, Deserialize, Serialize)] -struct Helper { - /// OID as string. - oid: String, - /// Optional parameter. - param: Option, -} - -impl<'de> Deserialize<'de> for IssuerSignatureAlgorithm { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> { - let helper = Helper::deserialize(deserializer)?; - let oid = - Oid::from_str(&helper.oid).map_err(|e| serde::de::Error::custom(format!("{e:?}")))?; - - Ok(IssuerSignatureAlgorithm::new(oid, helper.param)) - } -} - -impl Serialize for IssuerSignatureAlgorithm { - fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - let helper = Helper { - oid: self.registered_oid.get_c509_oid().get_oid().to_string(), - param: self.algo_identifier.get_param().clone(), - }; - helper.serialize(serializer) - } -} - -impl Encode<()> for IssuerSignatureAlgorithm { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - if let Some(&i) = self - .registered_oid - .get_table() - .get_map() - .get_by_right(&self.registered_oid.get_c509_oid().get_oid()) - { - e.i16(i)?; - } else { - AlgorithmIdentifier::encode(&self.algo_identifier, e, ctx)?; - } - Ok(()) - } -} - -impl Decode<'_, ()> for IssuerSignatureAlgorithm { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - match d.datatype()? 
{ - // Check i16 for -256 and -256 - minicbor::data::Type::U8 | minicbor::data::Type::I16 => { - let i = d.i16()?; - let oid = get_oid_from_int(i).map_err(minicbor::decode::Error::message)?; - Ok(Self::new(oid, None)) - }, - _ => { - let algo_identifier = AlgorithmIdentifier::decode(d, ctx)?; - Ok(IssuerSignatureAlgorithm::new( - algo_identifier.get_oid(), - algo_identifier.get_param().clone(), - )) - }, - } - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_issuer_signature_algorithm { - use asn1_rs::oid; - - use super::*; - - #[test] - fn test_registered_oid() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let isa = IssuerSignatureAlgorithm::new(oid!(1.3.101 .112), None); - isa.encode(&mut encoder, &mut ()) - .expect("Failed to encode IssuerSignatureAlgorithm"); - - // Ed25519 - int 12: 0x0c - assert_eq!(hex::encode(buffer.clone()), "0c"); - - let mut decoder = Decoder::new(&buffer); - let decoded_isa = IssuerSignatureAlgorithm::decode(&mut decoder, &mut ()) - .expect("Failed to decode IssuerSignatureAlgorithm"); - assert_eq!(decoded_isa, isa); - } - - #[test] - fn test_unregistered_oid() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let isa = IssuerSignatureAlgorithm::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1), None); - isa.encode(&mut encoder, &mut ()) - .expect("Failed to encode IssuerSignatureAlgorithm"); - - // 2.16.840 .1 .101 .3 .4 .2 .1: 0x49608648016503040201 - assert_eq!(hex::encode(buffer.clone()), "49608648016503040201"); - - let mut decoder = Decoder::new(&buffer); - let decoded_isa = IssuerSignatureAlgorithm::decode(&mut decoder, &mut ()) - .expect("Failed to decode IssuerSignatureAlgorithm"); - assert_eq!(decoded_isa, isa); - } - - #[test] - fn test_unregistered_oid_with_param() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let isa = IssuerSignatureAlgorithm::new( - oid!(2.16.840 .1 .101 .3 .4 .2 .1), - Some("example".to_string()), - ); - isa.encode(&mut encoder, &mut ()) - .expect("Failed to encode IssuerSignatureAlgorithm"); - // Array of 2 items: 0x82 - // 2.16.840 .1 .101 .3 .4 .2 .1: 0x49608648016503040201 - // bytes "example": 0x476578616d706c65 - assert_eq!( - hex::encode(buffer.clone()), - "8249608648016503040201476578616d706c65" - ); - - let mut decoder = Decoder::new(&buffer); - let decoded_isa = IssuerSignatureAlgorithm::decode(&mut decoder, &mut ()) - .expect("Failed to decode IssuerSignatureAlgorithm"); - assert_eq!(decoded_isa, isa); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_name/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_name/mod.rs deleted file mode 100644 index 5e0028dfdf4..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_name/mod.rs +++ /dev/null @@ -1,521 +0,0 @@ -//! C509 type Name -//! -//! Currently only support natively signed c509 certificate, so all text strings -//! are UTF-8 encoded and all attributeType should be non-negative. -//! -//! ```cddl -//! Name = [ * RelativeDistinguishedName ] / text / bytes -//! RelativeDistinguishedName = Attribute / [ 2* Attribute ] -//! Attribute = ( attributeType: int, attributeValue: text ) // -//! ( attributeType: ~oid, attributeValue: bytes ) // -//! ( attributeType: pen, attributeValue: bytes ) -//! ``` -//! -//! For more information about Name, -//! 
visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -// cspell: words rdns - -pub mod rdn; -use asn1_rs::{oid, Oid}; -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use rdn::RelativeDistinguishedName; -use regex::Regex; -use serde::{Deserialize, Serialize}; - -use crate::c509_attributes::attribute::{Attribute, AttributeValue}; - -/// OID of `CommonName` attribute. -const COMMON_NAME_OID: Oid<'static> = oid!(2.5.4 .3); -/// EUI-64 prefix. -const EUI64_PREFIX: u8 = 0x01; -/// Hex prefix. -const HEX_PREFIX: u8 = 0x00; -/// Total length of CBOR byte for EUI-64. -const EUI64_LEN: usize = 9; -/// Total length of CBOR byte for EUI-64 mapped from a 48-bit MAC address. -const EUI64_MAC_LEN: usize = 7; - -// ------------------Name---------------------- - -/// A struct of C509 Name with `NameValue`. -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct Name(NameValue); - -impl Name { - /// Create a new instance of `Name` its value. - #[must_use] - pub fn new(value: NameValue) -> Self { - Self(value) - } - - /// Get the value of the `Name`. - #[must_use] - pub fn get_value(&self) -> &NameValue { - &self.0 - } -} - -impl Encode<()> for Name { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - self.0.encode(e, ctx) - } -} - -impl Decode<'_, ()> for Name { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - NameValue::decode(d, ctx).map(Name::new) - } -} - -// ------------------NameValue---------------------- - -/// An enum of possible value types for `Name`. -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "snake_case")] -pub enum NameValue { - /// A relative distinguished name. - RelativeDistinguishedName(RelativeDistinguishedName), - /// A text. - Text(String), - /// bytes. - Bytes(Vec), -} - -impl Encode<()> for NameValue { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - match self { - NameValue::RelativeDistinguishedName(rdn) => { - let attr = rdn.get_attributes(); - let attr_first = attr.first().ok_or(minicbor::encode::Error::message( - "Cannot get the first Attribute", - ))?; - // If Name contains a single Attribute of type CommonName - if attr.len() == 1 - && attr_first.get_registered_oid().get_c509_oid().get_oid() == COMMON_NAME_OID - { - // Get the value of the attribute - let cn_value = - attr_first - .get_value() - .first() - .ok_or(minicbor::encode::Error::message( - "Cannot get the first Attribute value", - ))?; - - encode_cn_value(e, cn_value)?; - } else { - rdn.encode(e, ctx)?; - } - }, - NameValue::Text(text) => { - e.str(text)?; - }, - NameValue::Bytes(bytes) => { - e.bytes(bytes)?; - }, - } - Ok(()) - } -} - -impl Decode<'_, ()> for NameValue { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - match d.datatype()? { - minicbor::data::Type::Array => { - Ok(NameValue::RelativeDistinguishedName( - RelativeDistinguishedName::decode(d, ctx)?, - )) - }, - // If Name is a text string, the attribute is a CommonName - minicbor::data::Type::String => Ok(create_rdn_with_cn_attr(d.str()?.to_string())), - minicbor::data::Type::Bytes => decode_bytes(d), - _ => { - Err(minicbor::decode::Error::message( - "Name must be an array, text or bytes", - )) - }, - } - } -} - -/// Encode common name value. 
-fn encode_cn_value( - e: &mut Encoder, cn_value: &AttributeValue, -) -> Result<(), minicbor::encode::Error> { - let hex_regex = Regex::new(r"^[0-9a-f]+$").map_err(minicbor::encode::Error::message)?; - let eui64_regex = - Regex::new(r"^([0-9A-F]{2}-){7}[0-9A-F]{2}$").map_err(minicbor::encode::Error::message)?; - let mac_eui64_regex = Regex::new(r"^([0-9A-F]{2}-){3}FF-FE-([0-9A-F]{2}-){2}[0-9A-F]{2}$") - .map_err(minicbor::encode::Error::message)?; - - match cn_value { - AttributeValue::Text(s) => { - // If the text string has an even length ≥ 2 and contains only the - // symbols '0'–'9' or 'a'–'f', it is encoded as a CBOR byte - // string, prefixed with an initial byte set to '00' - if hex_regex.is_match(s) && s.len() % 2 == 0 { - let decoded_bytes = hex::decode(s).map_err(minicbor::encode::Error::message)?; - e.bytes(&[&[HEX_PREFIX], &decoded_bytes[..]].concat())?; - - // An EUI-64 mapped from a 48-bit MAC address (i.e., of the form - // "HH-HH-HH-FF-FE-HH-HH-HH) is encoded as a CBOR byte string prefixed with an - // initial byte set to '01', for a total length of 7. - } else if mac_eui64_regex.is_match(s) { - let clean_name = s.replace('-', ""); - let decoded_bytes = - hex::decode(clean_name).map_err(minicbor::encode::Error::message)?; - let chunk2 = decoded_bytes - .get(..3) - .ok_or(minicbor::encode::Error::message( - "Failed to get MAC EUI-64 bytes index 0 to 2", - ))?; - let chunk3 = decoded_bytes - .get(5..) - .ok_or(minicbor::encode::Error::message( - "Failed to get MAC EUI-64 bytes index 5 to 6", - ))?; - e.bytes(&[&[EUI64_PREFIX], chunk2, chunk3].concat())?; - - // an EUI-64 of the form "HH-HH-HH-HH-HH-HH-HH-HH" where 'H' - // is one of the symbols '0'–'9' or 'A'–'F' it is encoded as a - // CBOR byte string prefixed with an initial byte set to '01', for a total - // length of 9. - } else if eui64_regex.is_match(s) { - let clean_name = s.replace('-', ""); - let decoded_bytes = - hex::decode(clean_name).map_err(minicbor::encode::Error::message)?; - e.bytes(&[&[EUI64_PREFIX], &decoded_bytes[..]].concat())?; - } else { - e.str(s)?; - } - }, - AttributeValue::Bytes(_) => { - return Err(minicbor::encode::Error::message( - "CommonName attribute value must be a text string", - )); - }, - } - Ok(()) -} - -/// Format EUI bytes. -fn formatted_eui_bytes(data: &[u8]) -> String { - data.iter() - .map(|b| format!("{b:02X}")) - .collect::>() - .join("-") -} - -/// Decode bytes. -fn decode_bytes(d: &mut Decoder<'_>) -> Result { - let bytes = d.bytes()?; - - let first_i = bytes.first().ok_or(minicbor::decode::Error::message( - "Failed to get the first index of bytes", - ))?; - - // Bytes prefix - match *first_i { - // 0x00 for hex - HEX_PREFIX => decode_hex_cn_bytes(bytes), - // 0x01 for EUI - EUI64_PREFIX => decode_eui_cn_bytes(bytes), - _ => Ok(NameValue::Bytes(bytes.to_vec())), - } -} - -/// Decode common name hex bytes. -fn decode_hex_cn_bytes(bytes: &[u8]) -> Result { - let text = hex::encode(bytes.get(1..).ok_or(minicbor::decode::Error::message( - "Failed to get hex bytes index", - ))?); - Ok(create_rdn_with_cn_attr(text)) -} - -/// Decode common name EUI-64 bytes. 
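// Worked byte-level examples of the special CommonName text encodings produced
// by `encode_cn_value` above and reversed by the decode helpers below. The
// byte values are taken from the tests later in this file; the first byte of
// each output is the CBOR byte-string header:
//
//   "000123abcd"              => 46 00 000123abcd          (even-length lowercase
//                                 hex text: '00'-prefixed byte string)
//   "01-23-45-FF-FE-67-89-AB" => 47 01 0123456789ab        (EUI-64 mapped from a
//                                 48-bit MAC: FF-FE dropped, '01' prefix, 7 bytes)
//   "01-23-45-67-89-AB-00-01" => 49 01 0123456789ab0001    (plain EUI-64:
//                                 '01' prefix, 9 bytes)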
-fn decode_eui_cn_bytes(bytes: &[u8]) -> Result { - // Check the length of the bytes to determine what EUI type it is - match bytes.len() { - // EUI-64 mapped from a 48-bit MAC address - EUI64_MAC_LEN => { - let chunk1 = bytes.get(1..4).ok_or(minicbor::decode::Error::message( - "Failed to get EUI-64 bytes index 1 to 3", - ))?; - let chunk4 = bytes.get(4..).ok_or(minicbor::decode::Error::message( - "Failed to get EUI-64 bytes index 4 to 7", - ))?; - // Turn it into HH-HH-HH-FF-FE-HH-HH-HH - let data = [chunk1, &[0xFF], &[0xFE], chunk4].concat(); - let text = formatted_eui_bytes(&data); - Ok(create_rdn_with_cn_attr(text)) - }, - // EUI-64 - EUI64_LEN => { - let text = formatted_eui_bytes(bytes.get(1..).ok_or( - minicbor::decode::Error::message("Failed to get EUI-64 bytes index"), - )?); - Ok(create_rdn_with_cn_attr(text)) - }, - _ => { - Err(minicbor::decode::Error::message( - "EUI-64 or MAC address must be 7 or 9 bytes", - )) - }, - } -} - -/// Create a relative distinguished name with attribute common name from string. -fn create_rdn_with_cn_attr(text: String) -> NameValue { - let mut attr = Attribute::new(COMMON_NAME_OID); - attr.add_value(AttributeValue::Text(text)); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - NameValue::RelativeDistinguishedName(rdn) -} - -// ------------------Test---------------------- - -#[cfg(test)] -pub(crate) mod test_name { - use super::*; - use crate::c509_attributes::attribute::Attribute; - - // Test data from https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/ - // A.1.1. Example C509 Certificate Encoding - pub(crate) fn name_cn_text() -> (Name, String) { - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("RFC test CA".to_string())); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - - ( - Name::new(NameValue::RelativeDistinguishedName(rdn)), - // "RFC test CA" Text string: 6b5246432074657374204341 - "6b5246432074657374204341".to_string(), - ) - } - - #[test] - fn encode_decode_type_name_cn() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let name = name_cn_text().0; - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - - assert_eq!(hex::encode(buffer.clone()), name_cn_text().1); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - #[test] - fn encode_decode_type_name_hex() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("000123abcd".to_string())); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - - let name = Name::new(NameValue::RelativeDistinguishedName(rdn)); - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - - // Bytes of length 6: 0x46 - // Prefix of CommonName hex: 0x00 - // Bytes 000123abcd: 0x000123abcd - assert_eq!(hex::encode(buffer.clone()), "4600000123abcd"); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - #[test] - fn encode_decode_type_name_hex_cap() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("000123ABCD".to_string())); - let mut rdn = 
RelativeDistinguishedName::new(); - rdn.add_attr(attr); - - let name = Name::new(NameValue::RelativeDistinguishedName(rdn)); - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - - // String of len 10: 0x6a - // String 000123abcd: 30303031323341424344 - assert_eq!(hex::encode(buffer.clone()), "6a30303031323341424344"); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - // Test data from https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/ - // A.1. Example RFC 7925 profiled X.509 Certificate - pub(crate) fn name_cn_eui_mac() -> (Name, String) { - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("01-23-45-FF-FE-67-89-AB".to_string())); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - - ( - Name::new(NameValue::RelativeDistinguishedName(rdn)), - // Bytes of length 7: 0x47 - // "01-23-45-FF-FE-67-89-AB" special encode: 0x010123456789AB - "47010123456789ab".to_string(), - ) - } - - #[test] - fn encode_decode_type_name_cn_eui_mac() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let name = name_cn_eui_mac().0; - - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - assert_eq!(hex::encode(buffer.clone()), name_cn_eui_mac().1); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - #[test] - fn encode_decode_type_name_cn_eui_mac_un_cap() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("01-23-45-ff-fe-67-89-AB".to_string())); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - let name = Name::new(NameValue::RelativeDistinguishedName(rdn)); - - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - - // String of len 23: 0x77 - // "01-23-45-ff-fe-67-89-AB": 0x7730312d32332d34352d66662d66652d36372d38392d4142 - assert_eq!( - hex::encode(buffer.clone()), - "7730312d32332d34352d66662d66652d36372d38392d4142" - ); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - #[test] - fn encode_decode_type_name_cn_eui() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("01-23-45-67-89-AB-00-01".to_string())); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - - let name = Name::new(NameValue::RelativeDistinguishedName(rdn)); - - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - - assert_eq!(hex::encode(buffer.clone()), "49010123456789ab0001"); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - #[test] - fn encode_decode_type_name_cn_eui_un_cap() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr = Attribute::new(oid!(2.5.4 .3)); - attr.add_value(AttributeValue::Text("01-23-45-67-89-ab-00-01".to_string())); - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - - let name = 
Name::new(NameValue::RelativeDistinguishedName(rdn)); - - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - - // String of len 23: 0x77 - // "01-23-45-67-89-ab-00-01": 0x7730312d32332d34352d36372d38392d61622d30302d3031 - assert_eq!( - hex::encode(buffer.clone()), - "7730312d32332d34352d36372d38392d61622d30302d3031" - ); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } - - // Test data from https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/ - // A.2. Example IEEE 802.1AR profiled X.509 Certificate - // Issuer: C=US, ST=CA, O=Example Inc, OU=certification, CN=802.1AR CA - pub(crate) fn names() -> (Name, String) { - let mut attr1 = Attribute::new(oid!(2.5.4 .6)); - attr1.add_value(AttributeValue::Text("US".to_string())); - let mut attr2 = Attribute::new(oid!(2.5.4 .8)); - attr2.add_value(AttributeValue::Text("CA".to_string())); - let mut attr3 = Attribute::new(oid!(2.5.4 .10)); - attr3.add_value(AttributeValue::Text("Example Inc".to_string())); - let mut attr4 = Attribute::new(oid!(2.5.4 .11)); - attr4.add_value(AttributeValue::Text("certification".to_string())); - let mut attr5 = Attribute::new(oid!(2.5.4 .3)); - attr5.add_value(AttributeValue::Text("802.1AR CA".to_string())); - - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr1); - rdn.add_attr(attr2); - rdn.add_attr(attr3); - rdn.add_attr(attr4); - rdn.add_attr(attr5); - - ( - Name::new(NameValue::RelativeDistinguishedName(rdn)), - // Array of 10 items [4, "US", 6, "CA", 8, "Example Inc", 9, "certification", 1, "802.1AR CA"] : 0x8a - // attr1: 0x04625553 - // attr2: 0x06624341 - // attr3: 0x086b4578616d706c6520496e63 - // attr4: 0x096d63657274696669636174696f6e - // attr5: 0x016a3830322e314152204341 - "8a0462555306624341086b4578616d706c6520496e63096d63657274696669636174696f6e016a3830322e314152204341".to_string(), - ) - } - #[test] - fn encode_decode_type_name_rdns() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let name = names().0; - - name.encode(&mut encoder, &mut ()) - .expect("Failed to encode Name"); - assert_eq!(hex::encode(buffer.clone()), names().1); - - let mut decoder = Decoder::new(&buffer); - let name_decoded = Name::decode(&mut decoder, &mut ()).expect("Failed to decode Name"); - assert_eq!(name_decoded, name); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_name/rdn.rs b/catalyst-gateway-crates/c509-certificate/src/c509_name/rdn.rs deleted file mode 100644 index ef71481fa28..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_name/rdn.rs +++ /dev/null @@ -1,171 +0,0 @@ -//! C509 Relative Distinguished Name -//! -//! For more information about `RelativeDistinguishedName`, -//! visit [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) - -// cspell: words rdns - -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Serialize}; - -use crate::c509_attributes::attribute::Attribute; - -/// A struct represents a Relative Distinguished Name containing vector of `Attribute`. 
-/// -/// ```cddl -/// RelativeDistinguishedName = Attribute / [ 2* Attribute ] -/// ``` -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct RelativeDistinguishedName(Vec); - -impl Default for RelativeDistinguishedName { - fn default() -> Self { - Self::new() - } -} - -impl RelativeDistinguishedName { - /// Create a new instance of `RelativeDistinguishedName` as empty vector. - #[must_use] - pub fn new() -> Self { - Self(Vec::new()) - } - - /// Add an `Attribute` to the `RelativeDistinguishedName`. - pub fn add_attr(&mut self, attribute: Attribute) { - // RelativeDistinguishedName support pen encoding - self.0.push(attribute.set_pen_supported()); - } - - /// Get the a vector of `Attribute`. - pub(crate) fn get_attributes(&self) -> &Vec { - &self.0 - } -} - -impl Encode<()> for RelativeDistinguishedName { - // ```cddl - // RelativeDistinguishedName = Attribute / [ 2* Attribute ] - // ``` - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - // Should contain >= 1 attribute - if self.0.is_empty() { - return Err(minicbor::encode::Error::message( - "RelativeDistinguishedName should not be empty", - )); - } - - if self.0.len() == 1 { - self.0.first().encode(e, ctx)?; - } else { - // The attribute type should be included in array too - e.array(self.0.len() as u64 * 2)?; - for attr in &self.0 { - attr.encode(e, ctx)?; - } - } - Ok(()) - } -} - -impl Decode<'_, ()> for RelativeDistinguishedName { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - let mut rdn = RelativeDistinguishedName::new(); - - match d.datatype()? { - minicbor::data::Type::Array => { - let len = d.array()?.ok_or(minicbor::decode::Error::message( - "Failed to get array length for relative distinguished name", - ))?; - // Should contain >= 1 attribute - if len == 0 { - return Err(minicbor::decode::Error::message( - "RelativeDistinguishedName should not be empty", - )); - } - // The attribute type is included in an array, so divide by 2 - for _ in 0..len / 2 { - rdn.add_attr(Attribute::decode(d, ctx)?); - } - }, - _ => rdn.add_attr(Attribute::decode(d, ctx)?), - } - Ok(rdn) - } -} - -// -------------------Test---------------------- - -#[cfg(test)] -mod test_relative_distinguished_name { - - use asn1_rs::oid; - - use super::*; - use crate::c509_attributes::attribute::AttributeValue; - - #[test] - fn encode_decode_rdn() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr = Attribute::new(oid!(1.2.840 .113549 .1 .9 .1)); - attr.add_value(AttributeValue::Text("example@example.com".to_string())); - - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr); - rdn.encode(&mut encoder, &mut ()) - .expect("Failed to encode RDN"); - // Email Address: 0x00 - // "example@example.como": 736578616d706c65406578616d706c652e636f6d - assert_eq!( - hex::encode(buffer.clone()), - "00736578616d706c65406578616d706c652e636f6d" - ); - - let mut decoder = Decoder::new(&buffer); - let rdn_decoded = RelativeDistinguishedName::decode(&mut decoder, &mut ()) - .expect("Failed to decode RelativeDistinguishedName"); - assert_eq!(rdn_decoded, rdn); - } - - #[test] - fn encode_decode_rdns() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let mut attr1 = Attribute::new(oid!(1.2.840 .113549 .1 .9 .1)); - attr1.add_value(AttributeValue::Text("example@example.com".to_string())); - let mut attr2 = Attribute::new(oid!(2.5.4 .3)); - attr2.add_value(AttributeValue::Text("example".to_string())); - - let 
mut rdns = RelativeDistinguishedName::new(); - rdns.add_attr(attr1); - rdns.add_attr(attr2); - - rdns.encode(&mut encoder, &mut ()) - .expect("Failed to encode RDN"); - // Array of 2 attributes: 0x84 - // Email Address example@example.com: 0x00736578616d706c65406578616d706c652e636f6d - // Common Name example: 0x01676578616d706c65 - assert_eq!( - hex::encode(buffer.clone()), - "8400736578616d706c65406578616d706c652e636f6d01676578616d706c65" - ); - let mut decoder = Decoder::new(&buffer); - let rdn_decoded = RelativeDistinguishedName::decode(&mut decoder, &mut ()) - .expect("Failed to decode RelativeDistinguishedName"); - assert_eq!(rdn_decoded, rdns); - } - - #[test] - fn empty_rdn() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let rdn = RelativeDistinguishedName::new(); - rdn.encode(&mut encoder, &mut ()) - .expect_err("Failed to encode RDN"); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_oid.rs b/catalyst-gateway-crates/c509-certificate/src/c509_oid.rs deleted file mode 100644 index 646263a14a9..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_oid.rs +++ /dev/null @@ -1,243 +0,0 @@ -//! C509 OID provides an encoding and decoding of C509 Object Identifier (OID). -//! -//! Please refer to [RFC9090](https://datatracker.ietf.org/doc/rfc9090/) for OID encoding -//! Please refer to [CDDL Wrapping](https://datatracker.ietf.org/doc/html/rfc8610#section-3.7) -//! for unwrapped types. - -use std::str::FromStr; - -use anyhow::Result; -use asn1_rs::oid; -use minicbor::{data::Tag, decode, encode::Write, Decode, Decoder, Encode, Encoder}; -use oid_registry::Oid; -use serde::{Deserialize, Deserializer, Serialize}; - -use crate::tables::IntegerToOidTable; - -/// IANA Private Enterprise Number (PEN) OID prefix. -const PEN_PREFIX: Oid<'static> = oid!(1.3.6 .1 .4 .1); - -/// Tag number representing IANA Private Enterprise Number (PEN) OID. -const OID_PEN_TAG: u64 = 112; - -/// A strut of C509 OID with Registered Integer. -#[derive(Debug, Clone, PartialEq)] -pub struct C509oidRegistered { - /// The `C509oid`. - oid: C509oid, - /// The registration table. - registration_table: &'static IntegerToOidTable, -} - -impl C509oidRegistered { - /// Create a new instance of `C509oidRegistered`. - pub(crate) fn new(oid: Oid<'static>, table: &'static IntegerToOidTable) -> Self { - Self { - oid: C509oid::new(oid), - registration_table: table, - } - } - - /// Is PEN Encoding supported for this OID. - /// Depends on each registration table. - pub(crate) fn pen_encoded(mut self) -> Self { - self.oid.pen_supported = true; - self - } - - /// Get the `C509oid`. - pub(crate) fn get_c509_oid(&self) -> C509oid { - self.oid.clone() - } - - /// Get the registration table. - pub(crate) fn get_table(&self) -> &'static IntegerToOidTable { - self.registration_table - } -} - -// ----------------------------------------- - -/// A struct represent an instance of `C509oid`. -#[derive(Debug, PartialEq, Clone, Eq, Hash)] -pub struct C509oid { - /// The OID. - oid: Oid<'static>, - /// The flag to indicate whether PEN encoding is supported. - pen_supported: bool, -} - -/// A helper struct for deserialize and serialize `C509oid`. -#[derive(Debug, Deserialize, Serialize)] -struct Helper { - /// OID value in string. 
-    oid: String,
-}
-
-impl<'de> Deserialize<'de> for C509oid {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where D: Deserializer<'de> {
-        let helper = Helper::deserialize(deserializer)?;
-        let oid =
-            Oid::from_str(&helper.oid).map_err(|e| serde::de::Error::custom(format!("{e:?}")))?;
-        Ok(C509oid::new(oid))
-    }
-}
-
-impl Serialize for C509oid {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where S: serde::Serializer {
-        let helper = Helper {
-            oid: self.oid.to_string(),
-        };
-        helper.serialize(serializer)
-    }
-}
-
-impl C509oid {
-    /// Create a new instance of `C509oid`.
-    /// Default value of PEN flag is false
-    #[must_use]
-    pub fn new(oid: Oid<'static>) -> Self {
-        Self {
-            oid,
-            pen_supported: false,
-        }
-    }
-
-    /// Is PEN Encoding supported for this OID
-    pub(crate) fn pen_encoded(mut self) -> Self {
-        self.pen_supported = true;
-        self
-    }
-
-    /// Get the underlying OID of the `C509oid`
-    #[must_use]
-    pub fn get_oid(self) -> Oid<'static> {
-        self.oid.clone()
-    }
-}
-
-impl Encode<()> for C509oid {
-    /// Encode an OID
-    /// If `pen_supported` flag is set, and OID start with a valid `PEN_PREFIX`,
-    /// it is encoded as PEN (Private Enterprise Number)
-    /// else encode as an unwrapped OID (~oid) - as bytes string without tag.
-    ///
-    /// # Returns
-    ///
-    /// A vector of bytes containing the CBOR encoded OID.
-    /// If the encoding fails, it will return an error.
-    fn encode<W: Write>(
-        &self, e: &mut Encoder<W>, _ctx: &mut (),
-    ) -> Result<(), minicbor::encode::Error<W::Error>> {
-        // Check if PEN encoding is supported and the OID starts with the PEN prefix.
-        if self.pen_supported && self.oid.starts_with(&PEN_PREFIX) {
-            // Set the CBOR tag.
-            e.tag(Tag::new(OID_PEN_TAG))?;
-            // Convert OID originally store as [u8] to [u64]
-            // This process is necessary to get the correct OID
-            // For example given - 1.3.6 .1 .4 .1.4.999
-            // This OID will be stored as [u8] - [43, 6, 1, 4, 1, 4, 135, 103]
-            // The first 2 integers have a special encoding formula where,
-            // the value is computed using X * 40 + Y (See RFC9090 for more info)
-            // The number 999 exceeds the 255 limit (max of u8), so it will be encoded as 2 bytes
-            let raw_oid: Vec<u64> =
-                self.oid
-                    .iter()
-                    .map(Iterator::collect)
-                    .ok_or(minicbor::encode::Error::message(
-                        "Failed to collect OID components from iterator",
-                    ))?;
-            let raw_pen_prefix: Vec<u64> = PEN_PREFIX.iter().map(Iterator::collect).ok_or(
-                minicbor::encode::Error::message("Failed to collect OID components from iterator"),
-            )?;
-            // relative_oid is OID that follows PEN_PREFIX (relative to PEN_PREFIX)
-            // Use the [u64] PEN prefix length to extract the relative OID
-            let oid_slice =
-                raw_oid
-                    .get(raw_pen_prefix.len()..)
-                    .ok_or(minicbor::encode::Error::message(
-                        "Failed to get a OID slice",
-                    ))?;
-            let relative_oid = Oid::from_relative(oid_slice)
-                .map_err(|_| minicbor::encode::Error::message("Failed to get a relative OID"))?;
-            return e.bytes(relative_oid.as_bytes())?.ok();
-        }
-        let oid_bytes = self.oid.as_bytes();
-        e.bytes(oid_bytes)?.ok()
-    }
-}
-
-impl Decode<'_, ()> for C509oid {
-    /// Decode an OID
-    /// If the data to be decoded is a `Tag`, and the tag is an `OID_PEN_TAG`,
-    /// then decode the OID as Private Enterprise Number (PEN) OID.
-    /// else decode the OID as unwrapped OID (~oid) - as bytes string without tag.
-
-    /// # Returns
-    ///
-    /// A C509oid instance.
-    /// If the decoding fails, it will return an error.
-    fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result<Self, minicbor::decode::Error> {
-        if (minicbor::data::Type::Tag == d.datatype()?) && (Tag::new(OID_PEN_TAG) == d.tag()?)
{ - let oid_bytes = d.bytes()?; - // raw_oid contains the whole OID which stored in bytes - let mut raw_oid = Vec::new(); - raw_oid.extend_from_slice(PEN_PREFIX.as_bytes()); - raw_oid.extend_from_slice(oid_bytes); - // Convert the raw_oid to Oid - let oid = Oid::new(raw_oid.into()); - return Ok(C509oid::new(oid).pen_encoded()); - } - // Not a PEN Relative OID, so treat as a normal OID - let oid_bytes = d.bytes()?; - let oid = Oid::new(oid_bytes.to_owned().into()); - Ok(C509oid::new(oid)) - } -} - -// ----------------------------------------- - -#[cfg(test)] -mod test_c509_oid { - - use super::*; - - // Test reference 3.1. Encoding of the SHA-256 OID - // https://datatracker.ietf.org/doc/rfc9090/ - #[test] - fn encode_decode_unwrapped() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let oid = C509oid::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1)); - oid.encode(&mut encoder, &mut ()) - .expect("Failed to encode OID"); - assert_eq!(hex::encode(buffer.clone()), "49608648016503040201"); - - let mut decoder = Decoder::new(&buffer); - let decoded_oid = C509oid::decode(&mut decoder, &mut ()).expect("Failed to decode OID"); - assert_eq!(decoded_oid, oid); - } - - #[test] - fn encode_decode_pen() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - let oid = C509oid::new(oid!(1.3.6 .1 .4 .1 .1 .1 .29)).pen_encoded(); - oid.encode(&mut encoder, &mut ()) - .expect("Failed to encode OID"); - assert_eq!(hex::encode(buffer.clone()), "d8704301011d"); - - let mut decoder = Decoder::new(&buffer); - let decoded_oid = C509oid::decode(&mut decoder, &mut ()).expect("Failed to decode OID"); - assert_eq!(decoded_oid, oid); - } - - #[test] - fn partial_equal() { - let oid1 = C509oid::new(oid_registry::OID_HASH_SHA1); - let oid2 = C509oid::new(oid!(1.3.14 .3 .2 .26)); - assert_eq!(oid1, oid2); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/data.rs b/catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/data.rs deleted file mode 100644 index bb37a2d4bf5..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/data.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! Public key algorithm data provides a necessary information for encoding and decoding -//! of C509 `subjectPublicKeyAlgorithm`. See [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) -//! Section 9.11 C509 Public Key Algorithms Registry for more information. - -// cspell: words Weierstraß secp XMSS brainpool - -use anyhow::Error; -use asn1_rs::{oid, Oid}; -use once_cell::sync::Lazy; - -use crate::tables::IntegerToOidTable; - -/// Type of algorithm data -/// INT | OID | Name -type AlgorithmDataTuple = (i16, Oid<'static>, &'static str); - -/// Public key algorithm data table. 
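Aside: the PEN encode path deleted in `c509_oid.rs` above is easier to follow with a worked example. Below is a minimal, dependency-free sketch written for this review (not code from the patch); it rebuilds the `encode_decode_pen` test vector by hand, with the CBOR byte-string header `0x43` hard-coded for a 3-byte payload.

```rust
// Sketch of the PEN rule: each relative-OID component is base-128 encoded
// (high bit set on every byte except the last); CBOR then wraps the bytes
// with tag 112 (0xd8 0x70).
fn encode_component(mut n: u64, out: &mut Vec<u8>) {
    let mut tmp = vec![(n & 0x7f) as u8];
    n >>= 7;
    while n > 0 {
        tmp.push(((n & 0x7f) as u8) | 0x80);
        n >>= 7;
    }
    tmp.reverse();
    out.extend(tmp);
}

fn main() {
    // 999 needs two bytes: 0x87 0x67 (the [135, 103] tail mentioned above).
    let mut big = Vec::new();
    encode_component(999, &mut big);
    assert_eq!(big, vec![0x87, 0x67]);

    // 1.3.6.1.4.1.1.1.29 with the PEN prefix 1.3.6.1.4.1 stripped => 1.1.29
    let mut relative = Vec::new();
    for c in [1u64, 1, 29] {
        encode_component(c, &mut relative);
    }
    // Tag 112 (0xd8 0x70), then a 3-byte byte string (0x43) of the relative
    // OID, matching the `encode_decode_pen` test vector "d8704301011d".
    let mut cbor = vec![0xd8, 0x70, 0x43];
    cbor.extend(&relative);
    assert_eq!(cbor, vec![0xd8, 0x70, 0x43, 0x01, 0x01, 0x1d]);
}
```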
-#[rustfmt::skip]
-const PUB_KEY_ALGO_DATA: [AlgorithmDataTuple; 9] = [
-    // Int |          OID          |   Name
-    (0, oid!(1.2.840.113549.1.1.1), "RSA"),
-    (1, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with secp256r1"),
-    // (2, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with secp384r1"),
-    // (3, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with secp521r1"),
-    (8, oid!(1.3.101.110), "X25519 (Montgomery)"),
-    (9, oid!(1.3.101.111), "X448 (Montgomery)"),
-    (10, oid!(1.3.101.112), "Ed25519 (Twisted Edwards)"),
-    (11, oid!(1.3.101.113), "Ed448 (Edwards)"),
-    (16, oid!(1.2.840.113549.1.9.16.3.17), "HSS / LMS"),
-    (17, oid!(0.4.0.127.0.15.1.1.13.0), "XMSS"),
-    (18, oid!(0.4.0.127.0.15.1.1.14.0), "XMSS^MT"),
-    // (24, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with brainpoolP256r1"),
-    // (25, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with brainpoolP384r1"),
-    // (26, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with brainpoolP512r1"),
-    // (27, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with FRP256v1"),
-    // (28, oid!(1.2.840.10045.2.1), "EC Public Key (Weierstraß) with sm2p256v1"),
-];
-
-/// A struct of data that contains a lookup table of integer to OID in a
-/// bidirectional way for `SubjectPublicKeyAlgorithm`.
-pub(crate) struct SubjectPubKeyAlgoData(IntegerToOidTable);
-
-impl SubjectPubKeyAlgoData {
-    /// Get the `IntegerToOidTable`.
-    pub(crate) fn get_int_to_oid_table(&self) -> &IntegerToOidTable {
-        &self.0
-    }
-}
-
-/// Define a static lookup for the subject public key table.
-static SUBJECT_PUB_KEY_ALGO_TABLE: Lazy<SubjectPubKeyAlgoData> = Lazy::new(|| {
-    let mut int_to_oid_table = IntegerToOidTable::new();
-
-    for data in PUB_KEY_ALGO_DATA {
-        int_to_oid_table.add(data.0, data.1);
-    }
-
-    SubjectPubKeyAlgoData(int_to_oid_table)
-});
-
-/// Static reference to the `SubjectPubKeyAlgoData` lookup table.
-pub(crate) static SUBJECT_PUB_KEY_ALGO_LOOKUP: &Lazy<SubjectPubKeyAlgoData> =
-    &SUBJECT_PUB_KEY_ALGO_TABLE;
-
-/// Get the OID from the int value.
-pub(crate) fn get_oid_from_int(i: i16) -> Result<Oid<'static>, Error> {
-    SUBJECT_PUB_KEY_ALGO_TABLE
-        .get_int_to_oid_table()
-        .get_map()
-        .get_by_left(&i)
-        .ok_or(Error::msg(format!(
-            "OID not found in the public key algorithms registry table given int {i}"
-        )))
-        .cloned()
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/mod.rs b/catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/mod.rs
deleted file mode 100644
index 3c56175fbda..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/c509_subject_pub_key_algo/mod.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-//! C509 Subject Public Key Algorithm as a part of `TBSCertificate` used in C509
-//! Certificate.
-//!
-//! ```cddl
-//! subjectPublicKeyAlgorithm: AlgorithmIdentifier
-//! ```
-
-// cspell: words spka
-
-mod data;
-
-use std::str::FromStr;
-
-use asn1_rs::Oid;
-use data::{get_oid_from_int, SUBJECT_PUB_KEY_ALGO_LOOKUP};
-use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder};
-use serde::{Deserialize, Deserializer, Serialize};
-
-use crate::{c509_algo_identifier::AlgorithmIdentifier, c509_oid::C509oidRegistered};
-
-/// A struct representing the `SubjectPubKeyAlgorithm`.
-#[derive(Debug, Clone, PartialEq)]
-pub struct SubjectPubKeyAlgorithm {
-    /// The registered OID of the `SubjectPubKeyAlgorithm`.
- registered_oid: C509oidRegistered, - /// An `AlgorithmIdentifier` type - algo_identifier: AlgorithmIdentifier, -} - -impl SubjectPubKeyAlgorithm { - /// Create new instance of `SubjectPubKeyAlgorithm` where it registered with - /// Subject Public Key Algorithm lookup table. - pub fn new(oid: Oid<'static>, param: Option) -> Self { - Self { - registered_oid: C509oidRegistered::new( - oid.clone(), - SUBJECT_PUB_KEY_ALGO_LOOKUP.get_int_to_oid_table(), - ), - algo_identifier: AlgorithmIdentifier::new(oid, param), - } - } -} - -/// Helper struct for deserialize and serialize `SubjectPubKeyAlgorithm`. -#[derive(Debug, Deserialize, Serialize)] -struct Helper { - /// OID as string. - oid: String, - /// Optional parameter. - param: Option, -} - -impl<'de> Deserialize<'de> for SubjectPubKeyAlgorithm { - fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> { - let helper = Helper::deserialize(deserializer)?; - let oid = - Oid::from_str(&helper.oid).map_err(|e| serde::de::Error::custom(format!("{e:?}")))?; - - Ok(SubjectPubKeyAlgorithm::new(oid, helper.param)) - } -} - -impl Serialize for SubjectPubKeyAlgorithm { - fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - let helper = Helper { - oid: self.registered_oid.get_c509_oid().get_oid().to_string(), - param: self.algo_identifier.get_param().clone(), - }; - helper.serialize(serializer) - } -} - -impl Encode<()> for SubjectPubKeyAlgorithm { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - if let Some(&i) = self - .registered_oid - .get_table() - .get_map() - .get_by_right(&self.registered_oid.get_c509_oid().get_oid()) - { - e.i16(i)?; - } else { - AlgorithmIdentifier::encode(&self.algo_identifier, e, ctx)?; - } - Ok(()) - } -} - -impl Decode<'_, ()> for SubjectPubKeyAlgorithm { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - // Check u8 for 0 - 28 - if d.datatype()? 
== minicbor::data::Type::U8 { - let i = d.i16()?; - let oid = get_oid_from_int(i).map_err(minicbor::decode::Error::message)?; - Ok(Self::new(oid, None)) - } else { - let algo_identifier = AlgorithmIdentifier::decode(d, ctx)?; - Ok(SubjectPubKeyAlgorithm::new( - algo_identifier.get_oid(), - algo_identifier.get_param().clone(), - )) - } - } -} - -// ------------------Test---------------------- - -#[cfg(test)] -mod test_subject_public_key_algorithm { - use asn1_rs::oid; - - use super::*; - - #[test] - fn test_registered_oid() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let spka = SubjectPubKeyAlgorithm::new(oid!(1.3.101 .112), None); - spka.encode(&mut encoder, &mut ()) - .expect("Failed to encode SubjectPubKeyAlgorithm"); - - // Ed25519 - int 10: 0x0a - assert_eq!(hex::encode(buffer.clone()), "0a"); - - let mut decoder = Decoder::new(&buffer); - let decoded_spka = SubjectPubKeyAlgorithm::decode(&mut decoder, &mut ()) - .expect("Failed to decode SubjectPubKeyAlgorithm"); - assert_eq!(decoded_spka, spka); - } - - #[test] - fn test_unregistered_oid() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let spka = SubjectPubKeyAlgorithm::new(oid!(2.16.840 .1 .101 .3 .4 .2 .1), None); - spka.encode(&mut encoder, &mut ()) - .expect("Failed to encode SubjectPubKeyAlgorithm"); - - // 2.16.840 .1 .101 .3 .4 .2 .1: 0x49608648016503040201 - assert_eq!(hex::encode(buffer.clone()), "49608648016503040201"); - - let mut decoder = Decoder::new(&buffer); - let decoded_spka = SubjectPubKeyAlgorithm::decode(&mut decoder, &mut ()) - .expect("Failed to decode SubjectPubKeyAlgorithm"); - assert_eq!(decoded_spka, spka); - } - - #[test] - fn test_unregistered_oid_with_param() { - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - - let spka = SubjectPubKeyAlgorithm::new( - oid!(2.16.840 .1 .101 .3 .4 .2 .1), - Some("example".to_string()), - ); - spka.encode(&mut encoder, &mut ()) - .expect("Failed to encode SubjectPubKeyAlgorithm"); - // Array of 2 items: 0x82 - // 2.16.840 .1 .101 .3 .4 .2 .1: 0x49608648016503040201 - // bytes "example": 0x476578616d706c65 - assert_eq!( - hex::encode(buffer.clone()), - "8249608648016503040201476578616d706c65" - ); - - let mut decoder = Decoder::new(&buffer); - let decoded_spka = SubjectPubKeyAlgorithm::decode(&mut decoder, &mut ()) - .expect("Failed to decode SubjectPubKeyAlgorithm"); - assert_eq!(decoded_spka, spka); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/c509_time.rs b/catalyst-gateway-crates/c509-certificate/src/c509_time.rs deleted file mode 100644 index 48c370f22e6..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/c509_time.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! C509 Time - -use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder}; -use serde::{Deserialize, Serialize}; - -/// A struct representing a time where it accept seconds since the Unix epoch. -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct Time(i64); - -/// No expiration date in seconds since the Unix epoch. -const NO_EXP_DATE: i64 = 253_402_300_799; - -impl Time { - /// Create a new instance of `Time`. - #[must_use] - pub fn new(time: i64) -> Self { - Self(time) - } - - /// Get the time in i64. 
-    #[must_use]
-    pub fn to_i64(&self) -> i64 {
-        self.0
-    }
-}
-
-impl Encode<()> for Time {
-    fn encode<W: Write>(
-        &self, e: &mut Encoder<W>, _ctx: &mut (),
-    ) -> Result<(), minicbor::encode::Error<W::Error>> {
-        if self.0 == NO_EXP_DATE {
-            e.null()?;
-        } else {
-            e.i64(self.0)?;
-        }
-        Ok(())
-    }
-}
-
-impl Decode<'_, ()> for Time {
-    fn decode(d: &mut Decoder<'_>, _ctx: &mut ()) -> Result<Self, minicbor::decode::Error> {
-        match d.datatype()? {
-            minicbor::data::Type::U8
-            | minicbor::data::Type::I8
-            | minicbor::data::Type::U16
-            | minicbor::data::Type::I16
-            | minicbor::data::Type::U32
-            | minicbor::data::Type::I32
-            | minicbor::data::Type::U64
-            | minicbor::data::Type::I64 => {
-                let time = d.i64()?;
-                Ok(Time::new(time))
-            },
-            minicbor::data::Type::Null => {
-                d.null()?;
-                Ok(Time::new(NO_EXP_DATE))
-            },
-            _ => Err(minicbor::decode::Error::message("Invalid type for Time")),
-        }
-    }
-}
-
-#[cfg(test)]
-mod test_time {
-
-    use super::*;
-
-    #[test]
-    fn test_encode_decode_no_exp_date() {
-        let mut buffer = Vec::new();
-        let mut encoder = minicbor::Encoder::new(&mut buffer);
-        let time = Time::new(NO_EXP_DATE);
-        time.encode(&mut encoder, &mut ())
-            .expect("Failed to encode Time");
-        // null: 0xf6
-        assert_eq!(hex::encode(buffer.clone()), "f6");
-
-        let mut decoder = minicbor::Decoder::new(&buffer);
-        let decoded_time = Time::decode(&mut decoder, &mut ()).expect("Failed to decode Time");
-
-        assert_eq!(decoded_time, time);
-    }
-
-    // Test reference https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/
-    // A.1. Example RFC 7925 profiled X.509 Certificate
-    #[test]
-    fn test_encode_decode() {
-        let mut buffer = Vec::new();
-        let mut encoder = minicbor::Encoder::new(&mut buffer);
-        // Jan 1 00:00:00 2023 GMT
-        let time = Time::new(1_672_531_200);
-        time.encode(&mut encoder, &mut ())
-            .expect("Failed to encode Time");
-        assert_eq!(hex::encode(buffer.clone()), "1a63b0cd00");
-
-        let mut decoder = minicbor::Decoder::new(&buffer);
-        let decoded_time = Time::decode(&mut decoder, &mut ()).expect("Failed to decode Time");
-
-        assert_eq!(decoded_time, time);
    }
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/lib.rs b/catalyst-gateway-crates/c509-certificate/src/lib.rs
deleted file mode 100644
index 56fea7c16ab..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/lib.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-//! CBOR Encoded X.509 Certificate (C509 Certificate) library
-//!
-//! This crate provides functionality for generating a C509 Certificate.
-//!
-//! ## C509 certificate contains 2 parts
-//! 1. `TBSCertificate`
-//! 2. `issuerSignatureValue`
-//!
-//! In order to generate an unsigned C509 certificate, the TBS Certificate must be
-//! provided. The unsigned C509 certificate is then used to calculate the
-//! issuerSignatureValue.
-//!
-//! # TBS Certificate
-//!
-//! The To Be Signed Certificate contains the following fields:
-//! * c509CertificateType: A certificate type: either 0, a natively signed C509
-//!   certificate following X.509 v3, or 1, a CBOR re-encoded X.509 v3 DER certificate.
-//! * certificateSerialNumber: A unique serial number for the certificate.
-//! * issuer: The entity that issued the certificate.
-//! * validityNotBefore: The duration for which the Certificate Authority (CA)
-//!   guarantees it will retain information regarding the certificate's status on which
-//!   the period begins.
-//! * validityNotAfter: The duration for which the Certificate Authority (CA)
-//!   guarantees it will retain information regarding the certificate's status on which
-//!   the period ends.
-//! * subject: The entity associated with the public key stored in the subject public
-//!   key field.
-//! * subjectPublicKeyAlgorithm: The algorithm that the public key uses.
-//! * subjectPublicKey: The public key of the subject.
-//! * extensions: A list of extensions defined for an X.509 v3 certificate, providing
-//!   additional attributes for users or public keys, and for managing relationships
-//!   between Certificate Authorities (CAs).
-//! * issuerSignatureAlgorithm: The algorithm used to sign the certificate (must be the
-//!   algorithm used to create `IssuerSignatureValue`).
-//!
-//! Please refer to the [C509 Certificate](https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/) for more information.
-
-use anyhow::anyhow;
-use c509::C509;
-use minicbor::{Decode, Encode};
-use signing::{PrivateKey, PublicKey};
-use tbs_cert::TbsCert;
-pub mod c509;
-pub mod c509_algo_identifier;
-pub mod c509_attributes;
-pub mod c509_big_uint;
-pub mod c509_extensions;
-pub mod c509_general_names;
-pub mod c509_issuer_sig_algo;
-pub mod c509_name;
-pub mod c509_oid;
-pub mod c509_subject_pub_key_algo;
-pub mod c509_time;
-pub mod signing;
-mod tables;
-pub mod tbs_cert;
-pub mod wasm_binding;
-
-/// Generate a signed or unsigned C509 certificate.
-///
-/// # Arguments
-/// - `tbs_cert` - A TBS certificate.
-/// - `private_key` - An optional private key; if provided, the certificate is signed.
-///
-/// # Returns
-/// Returns a signed or unsigned C509 certificate.
-///
-/// # Errors
-///
-/// Returns an error if the data cannot be converted to CBOR bytes.
-
-pub fn generate(tbs_cert: &TbsCert, private_key: Option<&PrivateKey>) -> anyhow::Result<Vec<u8>> {
-    // Encode the TbsCert
-    let encoded_tbs = {
-        let mut buffer = Vec::new();
-        let mut encoder = minicbor::Encoder::new(&mut buffer);
-        tbs_cert.encode(&mut encoder, &mut ())?;
-        buffer
-    };
-    let sign_data = private_key.map(|pk| pk.sign(&encoded_tbs));
-
-    // Encode the whole C509 certificate including `TbsCert` and `issuerSignatureValue`
-    let encoded_c509 = {
-        let mut buffer = Vec::new();
-        let mut encoder = minicbor::Encoder::new(&mut buffer);
-        let c509 = C509::new(tbs_cert.clone(), sign_data);
-        c509.encode(&mut encoder, &mut ())?;
-        buffer
-    };
-    Ok(encoded_c509)
-}
-
-/// Verify the signature of a C509 certificate.
-///
-/// # Arguments
-/// - `c509` - The CBOR encoded C509 certificate to verify.
-/// - `public_key` - The public key used to verify the certificate.
-///
-/// # Errors
-/// Returns an error if the `issuer_signature_value` is invalid or the signature cannot be
-/// verified.
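Taken together, `generate` and `verify` (whose body follows directly below) are the crate's whole signing surface. A hedged usage sketch, assuming the crate imports as `c509_certificate` and that a `TbsCert` has already been built:

```rust
// Sign-then-verify round trip using the two functions above; a sketch, not
// code from the patch. `PrivateKey::from_str` parses the PEM fixtures shown
// later in `signing.rs`.
use std::str::FromStr;

use c509_certificate::{generate, signing::PrivateKey, tbs_cert::TbsCert, verify};

fn sign_and_check(tbs: &TbsCert, key_pem: &str) -> anyhow::Result<()> {
    let sk = PrivateKey::from_str(key_pem)?;
    // CBOR-encodes the TBS certificate and appends the issuer signature.
    let cert = generate(tbs, Some(&sk))?;
    // Re-encodes the TBS bytes and checks the signature against them.
    verify(&cert, &sk.public_key())
}
```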
-pub fn verify(c509: &[u8], public_key: &PublicKey) -> anyhow::Result<()> {
-    let mut d = minicbor::Decoder::new(c509);
-    let c509 = C509::decode(&mut d, &mut ())?;
-    let mut encoded_tbs = Vec::new();
-    let mut encoder = minicbor::Encoder::new(&mut encoded_tbs);
-    c509.get_tbs_cert().encode(&mut encoder, &mut ())?;
-    let issuer_sig = c509.get_issuer_signature_value().clone().ok_or(anyhow!(
-        "Signature verification failed, no issuer signature"
-    ))?;
-    public_key.verify(&encoded_tbs, &issuer_sig)
-}
-
-#[cfg(test)]
-mod test {
-    use std::str::FromStr;
-
-    use signing::tests::private_key_str;
-    use tbs_cert::test_tbs_cert::tbs;
-
-    use super::*;
-
-    #[test]
-    fn test_generate_and_verify_signed_c509_cert() {
-        let tbs_cert = tbs();
-
-        let private_key = FromStr::from_str(&private_key_str()).expect("Cannot create private key");
-
-        let signed_c509 = generate(&tbs_cert, Some(&private_key))
-            .expect("Failed to generate signed C509 certificate");
-
-        assert!(verify(&signed_c509, &private_key.public_key()).is_ok());
-    }
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/signing.rs b/catalyst-gateway-crates/c509-certificate/src/signing.rs
deleted file mode 100644
index c118c57875c..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/signing.rs
+++ /dev/null
@@ -1,227 +0,0 @@
-//! ED25519 public and private key implementation.
-
-// cspell: words outpubkey genpkey
-
-use std::{fmt::Display, path::Path, str::FromStr};
-
-use ed25519_dalek::{
-    ed25519::signature::Signer,
-    pkcs8::{DecodePrivateKey, DecodePublicKey},
-    SigningKey, VerifyingKey,
-};
-use wasm_bindgen::prelude::wasm_bindgen;
-
-/// Public or private key decoding from string error.
-#[derive(thiserror::Error, Debug)]
-#[error("Cannot decode key from string. Invalid PEM format.")]
-struct KeyPemDecodingError;
-
-/// Ed25519 private key instance.
-/// Wrapper over `ed25519_dalek::SigningKey`.
-#[allow(dead_code)]
-#[wasm_bindgen]
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct PrivateKey(SigningKey);
-
-/// File open and read error.
-#[derive(thiserror::Error, Debug)]
-struct FileError {
-    /// File location.
-    location: String,
-    /// File open and read error.
-    msg: Option<anyhow::Error>,
-}
-
-#[allow(dead_code)]
-impl FileError {
-    /// Create a new `FileError` instance from a string location.
-    fn from_string(location: String, msg: Option<anyhow::Error>) -> Self {
-        Self { location, msg }
-    }
-
-    /// Create a new `FileError` instance from a path location.
-    fn from_path<P: AsRef<Path>>(path: P, msg: Option<anyhow::Error>) -> Self {
-        Self {
-            location: path.as_ref().display().to_string(),
-            msg,
-        }
-    }
-}
-
-impl Display for FileError {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let msg = format!("Cannot open or read file at {0}", self.location);
-        let err = self
-            .msg
-            .as_ref()
-            .map(|msg| format!(":\n{msg}"))
-            .unwrap_or_default();
-        writeln!(f, "{msg}{err}",)
-    }
-}
-
-#[allow(dead_code)]
-impl PrivateKey {
-    /// Create a new private key from a file decoded in PEM format.
-    ///
-    /// # Errors
-    /// Returns an error if the file cannot be opened or read.
-    pub fn from_file<P: AsRef<Path>>(path: P) -> anyhow::Result<Self> {
-        let str = std::fs::read_to_string(&path).map_err(|_| FileError::from_path(&path, None))?;
-        Ok(Self::from_str(&str).map_err(|err| FileError::from_path(&path, Some(err)))?)
-    }
-
-    /// Get the associated public key.
-    #[must_use]
-    pub fn public_key(&self) -> PublicKey {
-        PublicKey(self.0.verifying_key())
-    }
-
-    /// Sign the message with the current private key.
-    /// Returns the signature bytes.
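For context on these wrappers (the `sign` body continues directly below): they delegate to plain `ed25519-dalek`. A standalone sketch of those primitives, assuming the crate's `rand_core` feature for key generation; a fresh random key is used here rather than the PEM fixtures from the tests:

```rust
// What PrivateKey::sign / PublicKey::verify reduce to, using ed25519-dalek
// directly; a sketch, not code from the patch.
use ed25519_dalek::{ed25519::signature::Signer, SigningKey};
use rand::rngs::OsRng;

fn main() {
    let sk = SigningKey::generate(&mut OsRng);
    let msg = b"test";
    let sig = sk.sign(msg);
    // The wrapper's verify() uses verify_strict(), which applies stricter
    // checks (e.g. rejecting small-order keys) than plain verify().
    assert!(sk.verifying_key().verify_strict(msg, &sig).is_ok());
}
```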
- #[must_use] - pub fn sign(&self, msg: &[u8]) -> Vec { - self.0.sign(msg).to_vec() - } -} - -impl FromStr for PrivateKey { - type Err = anyhow::Error; - - fn from_str(str: &str) -> Result { - let key = SigningKey::from_pkcs8_pem(str).map_err(|_| KeyPemDecodingError)?; - Ok(Self(key)) - } -} - -/// Ed25519 public key instance. -/// Wrapper over `ed25519_dalek::VerifyingKey`. -#[derive(Clone, Debug, PartialEq, Eq)] -#[wasm_bindgen] -pub struct PublicKey(VerifyingKey); - -#[allow(dead_code)] -impl PublicKey { - /// Create new public key from file decoded in PEM format. - /// - /// # Errors - /// Returns an error if the file cannot be opened or read. - pub fn from_file>(path: P) -> anyhow::Result { - let str = std::fs::read_to_string(&path).map_err(|_| FileError::from_path(&path, None))?; - Ok(Self::from_str(&str).map_err(|err| FileError::from_path(&path, Some(err)))?) - } - - /// Create new public key from raw bytes. - /// - /// # Errors - /// Returns an error if the provided bytes are not a valid public key. - pub fn from_bytes(bytes: &[u8]) -> anyhow::Result { - let key = VerifyingKey::from_bytes(bytes.try_into()?)?; - Ok(Self(key)) - } - - /// Convert public key to raw bytes. - #[must_use] - pub fn to_bytes(&self) -> Vec { - self.0.to_bytes().to_vec() - } - - /// Verify signature of the message with the current public key. - /// - /// # Errors - /// Returns an error if the signature is invalid. - pub fn verify(&self, msg: &[u8], signature_bytes: &[u8]) -> anyhow::Result<()> { - let signature_bytes = signature_bytes.try_into().map_err(|_| { - anyhow::anyhow!( - "Invalid signature bytes size: expected {}, provided {}.", - ed25519_dalek::Signature::BYTE_SIZE, - signature_bytes.len() - ) - })?; - let signature = ed25519_dalek::Signature::from_bytes(signature_bytes); - self.0.verify_strict(msg, &signature)?; - Ok(()) - } -} - -impl FromStr for PublicKey { - type Err = anyhow::Error; - - fn from_str(str: &str) -> Result { - let key = VerifyingKey::from_public_key_pem(str).map_err(|_| KeyPemDecodingError)?; - Ok(Self(key)) - } -} - -#[cfg(test)] -pub(crate) mod tests { - use std::env::temp_dir; - - use super::*; - - /// An Ed25519 private key in PEM format. - /// Generated with `openssl` tool: - /// ```shell - /// openssl genpkey -algorithm=ED25519 -out=private.pem -outpubkey=public.pem - /// ``` - pub(crate) fn private_key_str() -> String { - format!( - "{}\n{}\n{}", - "-----BEGIN PRIVATE KEY-----", - "MC4CAQAwBQYDK2VwBCIEIP1iI3LF7h89yY6QZmhDp4Y5FmTQ4oasbz2lEiaqqTzV", - "-----END PRIVATE KEY-----" - ) - } - - /// An Ed25519 public key in PEM format. - /// This public key is corresponding to the `private_key_str()` private key. 
- /// Generated with `openssl` tool: - /// ```shell - /// openssl genpkey -algorithm=ED25519 -out=private.pem -outpubkey=public.pem - /// ``` - pub(crate) fn public_key_str() -> String { - format!( - "{}\n{}\n{}", - "-----BEGIN PUBLIC KEY-----", - "MCowBQYDK2VwAyEAtFuCleJwHS28jUCT+ulLl5c1+MXhehhDz2SimOhmWaI=", - "-----END PUBLIC KEY-----" - ) - } - - #[test] - fn private_key_from_file_test() { - let dir = temp_dir(); - - let private_key_path = dir.as_path().join("private.pem"); - std::fs::write(&private_key_path, private_key_str()) - .expect("Cannot create private.pem file"); - - let _key = - PrivateKey::from_file(private_key_path).expect("Cannot create private key from file"); - } - - #[test] - fn public_private_key_test() { - let private_key = - PrivateKey::from_str(&private_key_str()).expect("Cannot create private key"); - let public_key = PublicKey::from_str(&public_key_str()).expect("Cannot create public key"); - - assert_eq!(private_key.public_key(), public_key); - } - - #[test] - fn sign_test() { - let private_key = - PrivateKey::from_str(&private_key_str()).expect("Cannot create private key"); - let public_key = PublicKey::from_str(&public_key_str()).expect("Cannot create public key"); - - let msg = b"test"; - - let signature = private_key.sign(msg); - assert!(public_key.verify(msg, &signature).is_ok()); - assert!( - public_key.verify(b"corrupted", &signature).is_err(), - "Provided msg is not actually signed." - ); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/tables.rs b/catalyst-gateway-crates/c509-certificate/src/tables.rs deleted file mode 100644 index 67e83fde4fd..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/tables.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! A bimap table for bidirectional lookup. - -use std::hash::Hash; - -use asn1_rs::Oid; -use bimap::BiMap; - -/// A trait that represents a table structure with key-value pairs. -/// -/// # Type Parameters -/// -/// * `K` - The type of the keys in the table. -/// * `V` - The type of the values in the table. -pub(crate) trait TableTrait { - /// Create new instance of the map table. - fn new() -> Self; - /// Add the key-value pair to the map table. - fn add(&mut self, k: K, v: V); - /// Get the bimap of the map table. - fn get_map(&self) -> &BiMap; -} - -// ----------------------------------------- - -/// A struct that represents a table mapping integers to any type that -/// implements `Eq` and `Hash`. -/// i16 is used because the int value in C509 certificate registry can be -256 to 255. -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct IntTable { - /// A bimap table for bidirectional lookup where it map between i16 and other type. - map: BiMap, -} - -impl TableTrait for IntTable { - /// Create new instance of `IntTable`. - fn new() -> Self { - Self { map: BiMap::new() } - } - - /// Add the key-value pair to the map table. - fn add(&mut self, k: i16, v: T) { - self.map.insert(k, v); - } - - /// Get the bimap of the map table. - fn get_map(&self) -> &BiMap { - &self.map - } -} - -// ----------------------------------------- - -/// A struct represents a table of integer to OID. -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct IntegerToOidTable { - /// A table of integer to OID, provide a bidirectional lookup. - table: IntTable>, -} - -#[allow(dead_code)] -impl IntegerToOidTable { - /// Create new instance of `IntegerToOidTable`. - pub(crate) fn new() -> Self { - Self { - table: IntTable::>::new(), - } - } - - /// Add the key-value pair to the map table. 
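The table types in `tables.rs` here reduce to `bimap::BiMap`, which gives constant-time lookups in both directions; the `add` implementation continues below. A minimal sketch of that behaviour, with string OIDs standing in for `Oid` values:

```rust
// The bidirectional lookup IntegerToOidTable is built on; uses the same
// `bimap` crate as the code above. A sketch for this review only.
use bimap::BiMap;

fn main() {
    let mut table: BiMap<i16, &str> = BiMap::new();
    table.insert(10, "1.3.101.112"); // Ed25519 in the registry table above

    // Encoding asks "is this OID registered?" ...
    assert_eq!(table.get_by_right(&"1.3.101.112"), Some(&10));
    // ... while decoding asks "which OID does this int mean?".
    assert_eq!(table.get_by_left(&10), Some(&"1.3.101.112"));
}
```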
-    pub(crate) fn add(&mut self, k: i16, v: Oid<'static>) {
-        self.table.add(k, v);
-    }
-
-    /// Get the bimap of the map table.
-    pub(crate) fn get_map(&self) -> &BiMap<i16, Oid<'static>> {
-        self.table.get_map()
-    }
-}
diff --git a/catalyst-gateway-crates/c509-certificate/src/tbs_cert.rs b/catalyst-gateway-crates/c509-certificate/src/tbs_cert.rs
deleted file mode 100644
index 0154a5d40e1..00000000000
--- a/catalyst-gateway-crates/c509-certificate/src/tbs_cert.rs
+++ /dev/null
@@ -1,477 +0,0 @@
-//! To Be Signed Certificate (TBS Certificate) used to construct a C509 certificate.
-
-use minicbor::{encode::Write, Decode, Decoder, Encode, Encoder};
-use serde::{Deserialize, Serialize};
-
-use crate::{
-    c509_big_uint::UnwrappedBigUint, c509_extensions::Extensions,
-    c509_issuer_sig_algo::IssuerSignatureAlgorithm, c509_name::Name,
-    c509_subject_pub_key_algo::SubjectPubKeyAlgorithm, c509_time::Time,
-};
-
-/// A struct representing a To Be Signed Certificate (TBS Certificate).
-#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
-#[serde(rename_all = "snake_case")]
-pub struct TbsCert {
-    /// Certificate type.
-    c509_certificate_type: u8,
-    /// Serial number of the certificate.
-    certificate_serial_number: UnwrappedBigUint,
-    /// Issuer.
-    issuer: Name,
-    /// Validity not before.
-    validity_not_before: Time,
-    /// Validity not after.
-    validity_not_after: Time,
-    /// Subject.
-    subject: Name,
-    /// Subject Public Key Algorithm.
-    subject_public_key_algorithm: SubjectPubKeyAlgorithm,
-    /// Subject Public Key value.
-    subject_public_key: Vec<u8>,
-    /// Extensions.
-    extensions: Extensions,
-    /// Issuer Signature Algorithm.
-    issuer_signature_algorithm: IssuerSignatureAlgorithm,
-}
-
-impl TbsCert {
-    /// Create a new instance of TBS Certificate.
-    #[must_use]
-    #[allow(clippy::too_many_arguments)]
-    pub fn new(
-        c509_certificate_type: u8, certificate_serial_number: UnwrappedBigUint, issuer: Name,
-        validity_not_before: Time, validity_not_after: Time, subject: Name,
-        subject_public_key_algorithm: SubjectPubKeyAlgorithm, subject_public_key: Vec<u8>,
-        extensions: Extensions, issuer_signature_algorithm: IssuerSignatureAlgorithm,
-    ) -> Self {
-        Self {
-            c509_certificate_type,
-            certificate_serial_number,
-            issuer,
-            validity_not_before,
-            validity_not_after,
-            subject,
-            subject_public_key_algorithm,
-            subject_public_key,
-            extensions,
-            issuer_signature_algorithm,
-        }
-    }
-
-    /// Get the certificate type.
-    #[must_use]
-    pub fn get_c509_certificate_type(&self) -> u8 {
-        self.c509_certificate_type
-    }
-
-    /// Get the certificate serial number.
-    #[must_use]
-    pub fn get_certificate_serial_number(&self) -> &UnwrappedBigUint {
-        &self.certificate_serial_number
-    }
-
-    /// Get the issuer.
-    #[must_use]
-    pub fn get_issuer(&self) -> &Name {
-        &self.issuer
-    }
-
-    /// Get the validity not before.
-    #[must_use]
-    pub fn get_validity_not_before(&self) -> &Time {
-        &self.validity_not_before
-    }
-
-    /// Get the validity not after.
-    #[must_use]
-    pub fn get_validity_not_after(&self) -> &Time {
-        &self.validity_not_after
-    }
-
-    /// Get the subject.
-    #[must_use]
-    pub fn get_subject(&self) -> &Name {
-        &self.subject
-    }
-
-    /// Get the subject public key algorithm.
-    #[must_use]
-    pub fn get_subject_public_key_algorithm(&self) -> &SubjectPubKeyAlgorithm {
-        &self.subject_public_key_algorithm
-    }
-
-    /// Get the subject public key.
-    #[must_use]
-    pub fn get_subject_public_key(&self) -> &[u8] {
-        &self.subject_public_key
-    }
-
-    /// Get the extensions.
- #[must_use] - pub fn get_extensions(&self) -> &Extensions { - &self.extensions - } - - /// Get the issuer signature algorithm. - #[must_use] - pub fn get_issuer_signature_algorithm(&self) -> &IssuerSignatureAlgorithm { - &self.issuer_signature_algorithm - } -} - -impl Encode<()> for TbsCert { - fn encode( - &self, e: &mut Encoder, ctx: &mut (), - ) -> Result<(), minicbor::encode::Error> { - e.u8(self.c509_certificate_type)?; - self.certificate_serial_number.encode(e, ctx)?; - self.issuer.encode(e, ctx)?; - self.validity_not_before.encode(e, ctx)?; - self.validity_not_after.encode(e, ctx)?; - self.subject.encode(e, ctx)?; - self.subject_public_key_algorithm.encode(e, ctx)?; - e.bytes(&self.subject_public_key)?; - self.extensions.encode(e, ctx)?; - self.issuer_signature_algorithm.encode(e, ctx)?; - Ok(()) - } -} - -impl Decode<'_, ()> for TbsCert { - fn decode(d: &mut Decoder<'_>, ctx: &mut ()) -> Result { - let cert_type = d.u8()?; - let serial_number = UnwrappedBigUint::decode(d, ctx)?; - let issuer = Name::decode(d, ctx)?; - let not_before = Time::decode(d, ctx)?; - let not_after = Time::decode(d, ctx)?; - let subject = Name::decode(d, ctx)?; - let subject_public_key_algorithm = SubjectPubKeyAlgorithm::decode(d, ctx)?; - let subject_public_key = d.bytes()?; - let extensions = Extensions::decode(d, ctx)?; - let issuer_signature_algorithm = IssuerSignatureAlgorithm::decode(d, ctx)?; - - Ok(TbsCert::new( - cert_type, - serial_number, - issuer, - not_before, - not_after, - subject, - subject_public_key_algorithm, - subject_public_key.to_vec(), - extensions, - issuer_signature_algorithm, - )) - } -} - -// ------------------Test---------------------- - -// Notes -// - Test is modified to match the current encode and decode where `subject_public_key` -// doesn't support -// special case for rsaEncryption and id-ecPublicKey. -// - Currently support natively signed c509 certificate, so all text strings -// are UTF-8 encoded and all attributeType SHALL be non-negative -// - Some Extension values are not supported yet. - -#[cfg(test)] -pub(crate) mod test_tbs_cert { - use asn1_rs::oid; - - use super::*; - use crate::{ - c509_attributes::attribute::{Attribute, AttributeValue}, - c509_extensions::{ - alt_name::{AlternativeName, GeneralNamesOrText}, - extension::{Extension, ExtensionValue}, - }, - c509_general_names::{ - general_name::{GeneralName, GeneralNameTypeRegistry, GeneralNameValue}, - other_name_hw_module::OtherNameHardwareModuleName, - GeneralNames, - }, - c509_name::{ - rdn::RelativeDistinguishedName, - test_name::{name_cn_eui_mac, name_cn_text, names}, - NameValue, - }, - }; - - // Mnemonic: match mad promote group rival case - const PUBKEY: [u8; 8] = [0x88, 0xD0, 0xB6, 0xB0, 0xB3, 0x7B, 0xAA, 0x46]; - - // Test reference https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/ - // A.1. 
Example RFC 7925 profiled X.509 Certificate - // - // - // Certificate: - // Data: - // Version: 3 (0x2) - // Serial Number: 128269 (0x1f50d) - // Signature Algorithm: ecdsa-with-SHA256 - // Issuer: CN=RFC test CA - // Validity - // Not Before: Jan 1 00:00:00 2023 GMT - // Not After : Jan 1 00:00:00 2026 GMT - // Subject: CN=01-23-45-FF-FE-67-89-AB - // Subject Public Key Info: - // Public Key Algorithm: id-ecPublicKey - // Public-Key: (256 bit) - // pub: - // 04:b1:21:6a:b9:6e:5b:3b:33:40:f5:bd:f0:2e:69: - // 3f:16:21:3a:04:52:5e:d4:44:50:b1:01:9c:2d:fd: - // 38:38:ab:ac:4e:14:d8:6c:09:83:ed:5e:9e:ef:24: - // 48:c6:86:1c:c4:06:54:71:77:e6:02:60:30:d0:51: - // f7:79:2a:c2:06 - // ASN1 OID: prime256v1 - // NIST CURVE: P-256 - // X509v3 extensions: - // X509v3 Key Usage: - // Digital Signature - // Signature Algorithm: ecdsa-with-SHA256 - // 30:46:02:21:00:d4:32:0b:1d:68:49:e3:09:21:9d:30:03:7e: - // 13:81:66:f2:50:82:47:dd:da:e7:6c:ce:ea:55:05:3c:10:8e: - // 90:02:21:00:d5:51:f6:d6:01:06:f1:ab:b4:84:cf:be:62:56: - // c1:78:e4:ac:33:14:ea:19:19:1e:8b:60:7d:a5:ae:3b:da:16 - // - // 01 - // 43 01 F5 0D - // 6B 52 46 43 20 74 65 73 74 20 43 41 - // 1A 63 B0 CD 00 - // 1A 69 55 B9 00 - // 47 01 01 23 45 67 89 AB - // 01 - // 58 21 02 B1 21 6A B9 6E 5B 3B 33 40 F5 BD F0 2E 69 3F 16 21 3A 04 52 - // 5E D4 44 50 B1 01 9C 2D FD 38 38 AB - // 01 - // 00 - // 58 40 D4 32 0B 1D 68 49 E3 09 21 9D 30 03 7E 13 81 66 F2 50 82 47 DD - // DA E7 6C CE EA 55 05 3C 10 8E 90 D5 51 F6 D6 01 06 F1 AB B4 84 CF BE - // 62 56 C1 78 E4 AC 33 14 EA 19 19 1E 8B 60 7D A5 AE 3B DA 16 - - pub(crate) fn tbs() -> TbsCert { - fn extensions() -> Extensions { - let mut exts = Extensions::new(); - exts.add_ext(Extension::new( - oid!(2.5.29 .15), - ExtensionValue::Int(1), - false, - )); - exts - } - - TbsCert::new( - 1, - UnwrappedBigUint::new(128_269), - name_cn_text().0, - Time::new(1_672_531_200), - Time::new(1_767_225_600), - name_cn_eui_mac().0, - SubjectPubKeyAlgorithm::new(oid!(1.2.840 .10045 .2 .1), None), - PUBKEY.to_vec(), - extensions(), - IssuerSignatureAlgorithm::new(oid!(1.2.840 .10045 .4 .3 .2), None), - ) - } - - #[test] - fn encode_decode_tbs_cert() { - let tbs_cert = tbs(); - - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - tbs_cert - .encode(&mut encoder, &mut ()) - .expect("Failed to encode TBS Certificate"); - - // c509_certificate_type: 0x01 - // certificate_serial_number: 0x4301f50d - // issuer: 0x6b5246432074657374204341 - // validity_not_before: 0x1a63b0cd00 - // validity_not_after: 0x1a6955b900 - // subject: 0x47010123456789ab - // subject_public_key_algorithm: 0x01 - // subject_public_key: 0x4888d0b6b0b37baa46 - // extensions: 0x01 - // issuer_signature_algorithm: 0x00 - - assert_eq!( - hex::encode(buffer.clone()), - "014301f50d6b52464320746573742043411a63b0cd001a6955b90047010123456789ab014888d0b6b0b37baa460100" - ); - - let mut decoder = Decoder::new(&buffer); - let decoded_tbs = - TbsCert::decode(&mut decoder, &mut ()).expect("Failed to decode TBS Certificate"); - assert_eq!(decoded_tbs, tbs_cert); - } - - // Test reference https://datatracker.ietf.org/doc/draft-ietf-cose-cbor-encoded-cert/09/ - // A.2. 
Example IEEE 802.1AR profiled X.509 Certificate - // - // Certificate: - // Data: - // Version: 3 (0x2) - // Serial Number: 9112578475118446130 (0x7e7661d7b54e4632) - // Signature Algorithm: ecdsa-with-SHA256 - // Issuer: C=US, ST=CA, O=Example Inc, OU=certification, CN=802.1AR CA - // Validity - // Not Before: Jan 31 11:29:16 2019 GMT - // Not After : Dec 31 23:59:59 9999 GMT - // Subject: C=US, ST=CA, L=LA, O=example Inc, OU=IoT/serialNumber=Wt1234 - // Subject Public Key Info: - // Public Key Algorithm: id-ecPublicKey - // Public-Key: (256 bit) - // pub: - // 04:c8:b4:21:f1:1c:25:e4:7e:3a:c5:71:23:bf:2d: - // 9f:dc:49:4f:02:8b:c3:51:cc:80:c0:3f:15:0b:f5: - // 0c:ff:95:8d:75:41:9d:81:a6:a2:45:df:fa:e7:90: - // be:95:cf:75:f6:02:f9:15:26:18:f8:16:a2:b2:3b: - // 56:38:e5:9f:d9 - // ASN1 OID: prime256v1 - // NIST CURVE: P-256 - // X509v3 extensions: - // X509v3 Basic Constraints: - // CA:FALSE - // X509v3 Subject Key Identifier: - // 96:60:0D:87:16:BF:7F:D0:E7:52:D0:AC:76:07:77:AD:66:5D:02:A0 - // X509v3 Authority Key Identifier: - // 68:D1:65:51:F9:51:BF:C8:2A:43:1D:0D:9F:08:BC:2D:20:5B:11:60 - // X509v3 Key Usage: critical - // Digital Signature, Key Encipherment - // X509v3 Subject Alternative Name: - // otherName: - // type-id: 1.3.6.1.5.5.7.8.4 (id-on-hardwareModuleName) - // value: - // hwType: 1.3.6.1.4.1.6175.10.1 - // hwSerialNum: 01:02:03:04 - // Signature Algorithm: ecdsa-with-SHA256 - // Signature Value: - // 30:46:02:21:00:c0:d8:19:96:d2:50:7d:69:3f:3c:48:ea:a5: - // ee:94:91:bd:a6:db:21:40:99:d9:81:17:c6:3b:36:13:74:cd: - // 86:02:21:00:a7:74:98:9f:4c:32:1a:5c:f2:5d:83:2a:4d:33: - // 6a:08:ad:67:df:20:f1:50:64:21:18:8a:0a:de:6d:34:92:36 - // - // 01 48 7E 76 61 D7 B5 4E 46 32 8A 23 62 55 53 06 62 43 41 08 6B 45 78 - // 61 6D 70 6C 65 20 49 6E 63 09 6D 63 65 72 74 69 66 69 63 61 74 69 6F - // 6E 01 6A 38 30 32 2E 31 41 52 20 43 41 1A 5C 52 DC 0C F6 8C 23 62 55 - // 53 06 62 43 41 05 62 4C 41 08 6B 65 78 61 6D 70 6C 65 20 49 6E 63 09 - // 63 49 6F 54 22 66 57 74 31 32 33 34 01 58 21 03 C8 B4 21 F1 1C 25 E4 - // 7E 3A C5 71 23 BF 2D 9F DC 49 4F 02 8B C3 51 CC 80 C0 3F 15 0B F5 0C - // FF 95 8A 04 21 01 54 96 60 0D 87 16 BF 7F D0 E7 52 D0 AC 76 07 77 AD - // 66 5D 02 A0 07 54 68 D1 65 51 F9 51 BF C8 2A 43 1D 0D 9F 08 BC 2D 20 - // 5B 11 60 21 05 03 82 20 82 49 2B 06 01 04 01 B0 1F 0A 01 44 01 02 03 - // 04 00 58 40 C0 D8 19 96 D2 50 7D 69 3F 3C 48 EA A5 EE 94 91 BD A6 DB - // 21 40 99 D9 81 17 C6 3B 36 13 74 CD 86 A7 74 98 9F 4C 32 1A 5C F2 5D - // 83 2A 4D 33 6A 08 AD 67 DF 20 F1 50 64 21 18 8A 0A DE 6D 34 92 36 - - #[test] - fn tbs_cert2() { - // ---------helper---------- - // C=US, ST=CA, L=LA, O=example Inc, OU=IoT/serialNumber=Wt1234 - fn subject() -> Name { - let mut attr1 = Attribute::new(oid!(2.5.4 .6)); - attr1.add_value(AttributeValue::Text("US".to_string())); - let mut attr2 = Attribute::new(oid!(2.5.4 .8)); - attr2.add_value(AttributeValue::Text("CA".to_string())); - let mut attr3 = Attribute::new(oid!(2.5.4 .7)); - attr3.add_value(AttributeValue::Text("LA".to_string())); - let mut attr4 = Attribute::new(oid!(2.5.4 .10)); - attr4.add_value(AttributeValue::Text("example Inc".to_string())); - let mut attr5 = Attribute::new(oid!(2.5.4 .11)); - attr5.add_value(AttributeValue::Text("IoT".to_string())); - let mut attr6 = Attribute::new(oid!(2.5.4 .5)); - attr6.add_value(AttributeValue::Text("Wt1234".to_string())); - - let mut rdn = RelativeDistinguishedName::new(); - rdn.add_attr(attr1); - rdn.add_attr(attr2); - rdn.add_attr(attr3); - rdn.add_attr(attr4); - 
rdn.add_attr(attr5); - rdn.add_attr(attr6); - - Name::new(NameValue::RelativeDistinguishedName(rdn)) - } - - fn extensions() -> Extensions { - let mut exts = Extensions::new(); - exts.add_ext(Extension::new( - oid!(2.5.29 .19), - ExtensionValue::Int(-2), - false, - )); - exts.add_ext(Extension::new( - oid!(2.5.29 .14), - ExtensionValue::Bytes( - [ - 0x96, 0x60, 0x0D, 0x87, 0x16, 0xBF, 0x7F, 0xD0, 0xE7, 0x52, 0xD0, 0xAC, - 0x76, 0x07, 0x77, 0xAD, 0x66, 0x5D, 0x02, 0xA0, - ] - .to_vec(), - ), - false, - )); - exts.add_ext(Extension::new( - oid!(2.5.29 .15), - ExtensionValue::Int(5), - true, - )); - let mut gns = GeneralNames::new(); - let hw = OtherNameHardwareModuleName::new(oid!(1.3.6 .1 .4 .1 .6175 .10 .1), vec![ - 0x01, 0x02, 0x03, 0x04, - ]); - gns.add_gn(GeneralName::new( - GeneralNameTypeRegistry::OtherNameHardwareModuleName, - GeneralNameValue::OtherNameHWModuleName(hw), - )); - - exts.add_ext(Extension::new( - oid!(2.5.29 .17), - ExtensionValue::AlternativeName(AlternativeName::new( - GeneralNamesOrText::GeneralNames(gns), - )), - false, - )); - - exts - } - - let tbs_cert = TbsCert::new( - 1, - UnwrappedBigUint::new(9_112_578_475_118_446_130), - names().0, - Time::new(1_548_934_156), - Time::new(253_402_300_799), - subject(), - SubjectPubKeyAlgorithm::new(oid!(1.2.840 .10045 .2 .1), None), - PUBKEY.to_vec(), - extensions(), - IssuerSignatureAlgorithm::new(oid!(1.2.840 .10045 .4 .3 .2), None), - ); - - let mut buffer = Vec::new(); - let mut encoder = Encoder::new(&mut buffer); - tbs_cert - .encode(&mut encoder, &mut ()) - .expect("Failed to encode TBS Certificate"); - // c509_certificate_type: 0x01 - // certificate_serial_number: 0x487e7661d7b54e4632 - // issuer: 0x8a0462555306624341086b4578616d706c6520496e63096d63657274696669636174696f6e016a3830322e314152204341 - // validity_not_before: 0x1a5c52dc0c - // validity_not_after: 0xf6 - // subject: 0x8c046255530662434105624c41086b6578616d706c6520496e630963496f540366577431323334 - // subject_public_key_algorithm: 0x01 - // subject_public_key: 0x4888d0b6b0b37baa46 - // extensions: - // 0x840421015496600d8716bf7fd0e752d0ac760777ad665d02a0210503822082492b06010401b01f0a014401020304 - // issuer_signature_algorithm: 0x00 - assert_eq!(hex::encode(buffer.clone()), "01487e7661d7b54e46328a0462555306624341086b4578616d706c6520496e63096d63657274696669636174696f6e016a3830322e3141522043411a5c52dc0cf68c046255530662434105624c41086b6578616d706c6520496e630963496f540366577431323334014888d0b6b0b37baa46840421015496600d8716bf7fd0e752d0ac760777ad665d02a0210503822082492b06010401b01f0a01440102030400"); - let mut decoder = Decoder::new(&buffer); - let decoded_tbs = - TbsCert::decode(&mut decoder, &mut ()).expect("Failed to decode TBS Certificate"); - assert_eq!(decoded_tbs, tbs_cert); - } -} diff --git a/catalyst-gateway-crates/c509-certificate/src/wasm_binding.rs b/catalyst-gateway-crates/c509-certificate/src/wasm_binding.rs deleted file mode 100644 index 29aea24c59a..00000000000 --- a/catalyst-gateway-crates/c509-certificate/src/wasm_binding.rs +++ /dev/null @@ -1,76 +0,0 @@ -//! WASM binding wrapper for the C509 certificate crate. - -use std::str::FromStr; - -use minicbor::Decode; -use wasm_bindgen::{prelude::wasm_bindgen, JsValue}; - -use crate::{ - signing::{PrivateKey, PublicKey}, - tbs_cert::TbsCert, -}; - -/// Wrapper for generate function. -/// -/// # Errors -/// Returns an error if the provided TbsCert JSValue cannot be converted `TbsCert` or C509 -/// cannot be generated. 
-#[wasm_bindgen] -// wasm_bindgen does not allowed ref passing unless it implement `RefFromWasmAbi`. -#[allow(clippy::needless_pass_by_value)] -pub fn generate(tbs_cert: JsValue, private_key: Option) -> Result { - let tbs_cert: TbsCert = serde_wasm_bindgen::from_value(tbs_cert)?; - let c509 = crate::generate(&tbs_cert, private_key.as_ref()) - .map_err(|e| JsValue::from(e.to_string()))?; - Ok(serde_wasm_bindgen::to_value(&c509)?) -} - -/// Wrapper for verify function. -/// -/// # Errors -/// Returns an error if the signature is invalid or the signature cannot be verified. -#[wasm_bindgen] -pub fn verify(c509: &[u8], public_key: &PublicKey) -> Result { - match crate::verify(c509, public_key) { - Ok(()) => Ok(JsValue::from("Signature verified")), - Err(e) => Err(JsValue::from(e.to_string())), - } -} - -/// Wrapper for decoding vector of C509 back to readable object. -/// -/// # Errors -/// Returns an error if the provided vector is not a valid C509 certificate. -#[wasm_bindgen] -pub fn decode(c509: &[u8]) -> Result { - let mut d = minicbor::Decoder::new(c509); - let c509 = crate::C509::decode(&mut d, &mut ()).map_err(|e| JsValue::from(e.to_string()))?; - Ok(serde_wasm_bindgen::to_value(&c509)?) -} - -#[wasm_bindgen] -impl PrivateKey { - /// Convert string to private key. - /// - /// # Errors - /// Returns an error if the provided string is not a valid private key. - #[wasm_bindgen] - pub fn str_to_sk(str: &str) -> Result { - FromStr::from_str(str).map_err(|_| { - JsValue::from("Cannot decode private key from string. Invalid PEM format.") - }) - } -} - -#[wasm_bindgen] -impl PublicKey { - /// Convert string to public key. - /// - /// # Errors - /// Returns an error if the provided string is not a valid public key. - #[wasm_bindgen] - pub fn str_to_pk(str: &str) -> Result { - FromStr::from_str(str) - .map_err(|_| JsValue::from("Cannot decode public key from string. 
Invalid PEM format.")) - } -} From a1f6557a03bf8514a5a0eb7f51e859cc047d5dbb Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 6 Sep 2024 17:18:28 +0700 Subject: [PATCH 25/69] fix(backend): Remove dependencies from Workspace, and move into project --- catalyst-gateway/Cargo.toml | 50 -------------- catalyst-gateway/bin/Cargo.toml | 118 +++++++++++++++++--------------- 2 files changed, 63 insertions(+), 105 deletions(-) diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index ccb8356c2c3..e2fd64ec089 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -2,7 +2,6 @@ resolver = "2" members = [ "bin", - # "crates/", ] [workspace.package] @@ -15,55 +14,6 @@ homepage = "https://input-output-hk.github.io/catalyst-voices" repository = "https://github.com/input-output-hk/catalyst-voices" license = "MIT OR Apache-2.0" -[workspace.dependencies] -clap = "4.5.13" -tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } -serde = "1.0.204" -serde_json = "1.0.121" -poem = "3.0.4" -poem-openapi = "5.0.3" -prometheus = "0.13.4" -cryptoxide = "0.4.4" -uuid = "1.10.0" -panic-message = "0.3" -cpu-time = "1.0" -ulid = "1.1.3" -rust-embed = "8.5.0" -url = "2.5.2" -thiserror = "1.0.63" -chrono = "0.4.38" -async-trait = "0.1.81" -rust_decimal = "1.35.0" -bb8 = "0.8.5" -bb8-postgres = "0.8.1" -tokio-postgres = "0.7.11" -tokio = "1.39.2" -dotenvy = "0.15.7" -local-ip-address = "0.6.1" -gethostname = "0.5.0" -hex = "0.4.3" -handlebars = "6.0.0" -anyhow = "1.0.86" -cddl = "0.9.4" -ciborium = "0.2.2" -pallas = "0.29.0" -cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", branch = "feat/auto-sync-mithril", version="0.2.0" } -stringzilla = "3.8.4" -duration-string = "0.4.0" -build-info = "0.0.38" -build-info-build = "0.0.38" -ed25519-dalek = "2.1.1" -scylla = { version = "0.13.1", features = ["ssl", "full-serialization"]} -strum = { version = "0.26.3", features = ["derive"] } -strum_macros = "0.26.4" -openssl = { version = "0.10.66", features = ["vendored"] } -num-bigint = "0.4.6" -futures = "0.3.30" -rand = "0.8.5" -moka = { version = "0.12.8", features=["future"] } -crossbeam-skiplist = "0.1.3" - [workspace.lints.rust] warnings = "deny" missing_docs = "deny" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 81be5212fc0..8ae77e60d7d 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -4,10 +4,10 @@ description = "The Catalyst Data Gateway" keywords = ["cardano", "catalyst", "gateway"] categories = ["command-line-utilities"] version = "0.1.0" -authors.workspace = true -edition.workspace = true -license.workspace = true -repository.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -15,32 +15,62 @@ repository.workspace = true workspace = true [dependencies] -build-info.workspace = true -bb8 = { workspace = true } -bb8-postgres = { workspace = true } -tokio-postgres = { workspace = true, features = [ +cardano-chain-follower = { version = "0.0.2", git = "https://github.com/input-output-hk/catalyst-libs.git", branch = "feat/expose_witness_map" } + +pallas = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } +pallas-traverse = { version = "0.30.1", git = 
"https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } +#pallas-crypto = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } + +clap = { version = "4.5.17", features = ["derive", "env"] } +tracing = { version = "0.1.40", features = ["log"] } +tracing-subscriber = { version = "0.3.18", features = [ + "fmt", + "json", + "registry", + "std", + "time", + "env-filter", +] } +serde = { version = "1.0.204", features = ["derive"] } +serde_json = "1.0.128" +thiserror = "1.0.63" +chrono = "0.4.38" +async-trait = "0.1.82" +bb8 = "0.8.5" +bb8-postgres = "0.8.1" +tokio-postgres = { version = "0.7.11", features = [ "with-chrono-0_4", "with-serde_json-1", "with-time-0_3", ] } -clap = { workspace = true, features = ["derive", "env"] } -tracing = { workspace = true, features = ["log"] } -tracing-subscriber = { workspace = true, features = ["fmt", "json", "registry", "std", "time"] } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } -thiserror = { workspace = true } -rust_decimal = { workspace = true, features = [ +tokio = { version = "1.39.2", features = ["rt", "macros", "rt-multi-thread"] } +dotenvy = "0.15.7" +local-ip-address = "0.6.2" +gethostname = "0.5.0" +hex = "0.4.3" +handlebars = "6.0.0" +anyhow = "1.0.86" +cddl = "0.9.4" +ciborium = "0.2.2" +stringzilla = "3.9.3" +duration-string = "0.4.0" +build-info = "0.0.38" +ed25519-dalek = "2.1.1" +scylla = { version = "0.14.0", features = ["ssl", "full-serialization"] } +strum = { version = "0.26.3", features = ["derive"] } +strum_macros = "0.26.4" +openssl = { version = "0.10.66", features = ["vendored"] } +num-bigint = "0.4.6" +futures = "0.3.30" +rand = "0.8.5" +moka = { version = "0.12.8", features = ["future"] } +crossbeam-skiplist = "0.1.3" +rust_decimal = { version = "1.36.0", features = [ "serde-with-float", "db-tokio-postgres", ] } -chrono = { workspace = true } -poem = { workspace = true, features = [ - "embed", - "prometheus", - "compression", -] } -poem-openapi = { workspace = true, features = [ +poem = { version = "3.0.4", features = ["embed", "prometheus", "compression"] } +poem-openapi = { version = "5.0.3", features = [ "openapi-explorer", "rapidoc", "redoc", @@ -49,37 +79,15 @@ poem-openapi = { workspace = true, features = [ "url", "chrono", ] } -prometheus = { workspace = true } -cryptoxide = { workspace = true } -uuid = { workspace = true, features = ["v4", "serde"] } -url = { workspace = true } -dotenvy = { workspace = true } -panic-message = { workspace = true } -cpu-time = { workspace = true } -ulid = { workspace = true, features = ["serde", "uuid"] } -rust-embed = { workspace = true } -local-ip-address = { workspace = true } -gethostname = { workspace = true } -hex = { workspace = true } -pallas = { workspace = true } -cardano-chain-follower= { workspace = true } -anyhow = { workspace = true } -handlebars = { workspace = true } -cddl = { workspace = true } -ciborium = { workspace = true } -ed25519-dalek.workspace = true -stringzilla = { workspace = true } -duration-string.workspace = true -scylla.workspace = true -strum.workspace = true -strum_macros.workspace = true -openssl.workspace = true -num-bigint.workspace = true -futures.workspace = true -rand.workspace = true -moka.workspace = true -crossbeam-skiplist.workspace = true - +uuid = { version = "1.10.0", features = ["v4", 
"serde"] } +ulid = { version = "1.1.3", features = ["serde", "uuid"] } +cryptoxide = "0.4.4" # TODO: For blake2b replace with blake2b_simd. +url = "2.5.2" +panic-message = "0.3.0" +cpu-time = "1.0.0" +prometheus = "0.13.4" +rust-embed = "8.5.0" +num-traits = "0.2.19" [build-dependencies] -build-info-build = { workspace = true } +build-info-build = "0.0.38" From 67fe2eb295bc4f4c04a993a2b260aabb4cebb358 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 6 Sep 2024 17:19:05 +0700 Subject: [PATCH 26/69] fix(backend): Use temporary cat-ci branch for rust builders --- catalyst-gateway/Earthfile | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index 2fe8da2e10e..2959fa6aada 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -1,7 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/mithril_snapshot:v3.1.21 AS mithril-snapshot-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/cardano-chain-follower-changes AS rust-ci #cspell: words rustfmt toolsets USERARCH @@ -69,18 +68,6 @@ package-cat-gateway: ENTRYPOINT ./entry.sh SAVE IMAGE cat-gateway:$tag -# package-cat-gateway : Create a deployable container for catalyst-gateway -# And bundle a Mithril snapshot of cardano preprod -nightly-package-cat-gateway-with-preprod: - ARG tag="latest" - - FROM +package-cat-gateway - - # copy preprod mithril snapshot to /tmp/preprod dir - COPY mithril-snapshot-ci+preprod/snapshot /tmp/preprod - - SAVE IMAGE cat-gateway:$tag - # Publish packages if all integration tests have passed. (Failure to pass tests will prevent packages being published.) # publish: # FROM scratch From 8cbe29b0993c543a9f503ee3a8073fe1033092ca Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 6 Sep 2024 17:19:28 +0700 Subject: [PATCH 27/69] fix(backend): Remove obsolete common crates subdirectory --- catalyst-gateway/crates/README.md | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 catalyst-gateway/crates/README.md diff --git a/catalyst-gateway/crates/README.md b/catalyst-gateway/crates/README.md deleted file mode 100644 index 5ee79b849de..00000000000 --- a/catalyst-gateway/crates/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Catalyst Data Gateway - Crates - -These are fully re-usable generalized `rust` crates that the Catalyst Gateway uses and are developed with it. -They are also able to be used stand-alone in other projects and can be published separately. 
From de6e465baa943d2785c908a69f81c6b9251f79b9 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 6 Sep 2024 17:20:08 +0700 Subject: [PATCH 28/69] fix(backend): Don't use pre-packaged mithril snapshots in integration tests --- catalyst-gateway/tests/api_tests/Earthfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile index 4cbe641d7bb..6612c2d4a5c 100644 --- a/catalyst-gateway/tests/api_tests/Earthfile +++ b/catalyst-gateway/tests/api_tests/Earthfile @@ -38,7 +38,7 @@ nightly-test: WITH DOCKER \ --compose docker-compose.yml \ --load event-db:latest=(../../event-db+build) \ - --load cat-gateway:latest=(../../+nightly-package-cat-gateway-with-preprod) \ + --load cat-gateway:latest=(../../+package-cat-gateway) \ --service cat-gateway \ --allow-privileged RUN poetry run pytest -s -m nightly --junitxml=junit-report.xml --cov=api_tests --cov-report lcov From 5df51b1a705a9dd36664960efdc0c3d6edad3f72 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 6 Sep 2024 17:21:02 +0700 Subject: [PATCH 29/69] fix(backend): Fix code so it builds with latest chain follower code. Also eliminates redundant logic now incorporated into chain follower. --- catalyst-gateway/bin/src/cardano/util.rs | 86 +++++------------ catalyst-gateway/bin/src/db/index/block.rs | 6 +- .../bin/src/db/index/index_certs.rs | 64 +++++-------- .../bin/src/db/index/index_txi.rs | 2 +- catalyst-gateway/bin/src/db/index/schema.rs | 4 +- .../bin/src/service/utilities/convert.rs | 94 +++++++++++++++++++ .../bin/src/service/utilities/mod.rs | 1 + 7 files changed, 148 insertions(+), 109 deletions(-) create mode 100644 catalyst-gateway/bin/src/service/utilities/convert.rs diff --git a/catalyst-gateway/bin/src/cardano/util.rs b/catalyst-gateway/bin/src/cardano/util.rs index 75a9f48e289..7a18ac3dce9 100644 --- a/catalyst-gateway/bin/src/cardano/util.rs +++ b/catalyst-gateway/bin/src/cardano/util.rs @@ -1,10 +1,7 @@ //! 
Block stream parsing and filtering utils
-
-use std::collections::HashMap;
-
 use cryptoxide::{blake2b::Blake2b, digest::Digest};
 use pallas::ledger::{
-    primitives::conway::{StakeCredential, VKeyWitness},
+    primitives::conway::StakeCredential,
     traverse::{Era, MultiEraAsset, MultiEraCert, MultiEraPolicyAssets},
 };
 use serde::Serialize;
@@ -58,11 +55,9 @@ pub struct PolicyAsset {
 pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec<PolicyAsset> {
     assets
         .iter()
-        .map(|asset| {
-            PolicyAsset {
-                policy_hash: asset.policy().to_string(),
-                assets: parse_child_assets(&asset.assets()),
-            }
+        .map(|asset| PolicyAsset {
+            policy_hash: asset.policy().to_string(),
+            assets: parse_child_assets(&asset.assets()),
         })
         .collect()
 }
@@ -71,25 +66,21 @@ pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec<P
 pub(crate) fn parse_child_assets(assets: &[MultiEraAsset]) -> Vec<Asset> {
     assets
         .iter()
-        .filter_map(|asset| {
-            match asset {
-                MultiEraAsset::AlonzoCompatibleOutput(id, name, amount) => {
-                    Some(Asset {
-                        policy_id: id.to_string(),
-                        name: name.to_string(),
-                        amount: *amount,
-                    })
-                },
-                MultiEraAsset::AlonzoCompatibleMint(id, name, amount) => {
-                    let amount = u64::try_from(*amount).ok()?;
-                    Some(Asset {
-                        policy_id: id.to_string(),
-                        name: name.to_string(),
-                        amount,
-                    })
-                },
-                _ => Some(Asset::default()),
-            }
+        .filter_map(|asset| match asset {
+            MultiEraAsset::AlonzoCompatibleOutput(id, name, amount) => Some(Asset {
+                policy_id: id.to_string(),
+                name: name.to_string(),
+                amount: *amount,
+            }),
+            MultiEraAsset::AlonzoCompatibleMint(id, name, amount) => {
+                let amount = u64::try_from(*amount).ok()?;
+                Some(Asset {
+                    policy_id: id.to_string(),
+                    name: name.to_string(),
+                    amount,
+                })
+            },
+            _ => Some(Asset::default()),
         })
         .collect()
 }
@@ -113,13 +104,11 @@ pub fn extract_stake_credentials_from_certs(
             pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(
                 stake_credential,
                 _,
-            ) => {
-                match stake_credential {
-                    StakeCredential::AddrKeyhash(stake_credential) => {
-                        stake_credentials.push(hex::encode(stake_credential.as_slice()));
-                    },
-                    StakeCredential::Scripthash(_) => (),
-                }
+            ) => match stake_credential {
+                StakeCredential::AddrKeyhash(stake_credential) => {
+                    stake_credentials.push(hex::encode(stake_credential.as_slice()));
+                },
+                StakeCredential::Scripthash(_) => (),
             },
             _ => continue,
         }
@@ -129,33 +118,6 @@ pub fn extract_stake_credentials_from_certs(
     stake_credentials
 }
 
-/// Get a Blake2b-224 (28 byte) hash of some bytes
-pub(crate) fn blake2b_224(value: &[u8]) -> [u8; 28] {
-    let mut digest = [0u8; 28];
-    let mut context = Blake2b::new(28);
-    context.input(value);
-    context.result(&mut digest);
-    digest
-}
-
-/// A map of hashed witnesses.
-pub(crate) type HashedWitnesses = HashMap<[u8; 28], Vec<u8>>;
-
-/// Extract witness pub keys and pair with blake2b hash of the pub key.
-/// This converts raw Addresses to their hashes as used on Cardano (Blake2b-224).
-/// And allows them to be easily cross referenced.
-pub(crate) fn extract_hashed_witnesses(witnesses: &[VKeyWitness]) -> HashedWitnesses {
-    let mut hashed_witnesses = HashMap::new();
-    for witness in witnesses {
-        let pub_key = witness.vkey.to_vec();
-        let hash = blake2b_224(&pub_key);
-
-        hashed_witnesses.insert(hash, pub_key);
-    }
-
-    hashed_witnesses
-}
-
 /// Match hashed witness pub keys with hashed stake credentials from the TX certificates
 /// to identify the correct stake credential key.
 #[allow(dead_code)]
diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block.rs
index 72b895a658b..69704dd6e49 100644
--- a/catalyst-gateway/bin/src/db/index/block.rs
+++ b/catalyst-gateway/bin/src/db/index/block.rs
@@ -7,7 +7,6 @@ use super::{
     index_certs::CertInsertQuery, index_txi::TxiInsertQuery, index_txo::TxoInsertQuery,
     queries::FallibleQueryTasks, session::CassandraSession,
 };
-use crate::cardano::util::extract_hashed_witnesses;
 
 /// Convert a usize to an i16 and saturate at `i16::MAX`
 pub(crate) fn usize_to_i16(value: usize) -> i16 {
@@ -35,9 +34,6 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {
 
         let txn_hash = txs.hash().to_vec();
 
-        // Hash all the witnesses for easy lookup.
-        let witnesses = extract_hashed_witnesses(txs.vkey_witnesses());
-
         // Index the TXIs.
         txi_index.index(txs, slot_no);
 
@@ -47,7 +43,7 @@
         // TODO: Index Metadata.
 
         // Index Certificates inside the transaction.
-        cert_index.index(txs, slot_no, txn, &witnesses);
+        cert_index.index(txs, slot_no, txn, block);
 
         // Index the TXOs.
         txo_index.index(txs, slot_no, &txn_hash, txn);
diff --git a/catalyst-gateway/bin/src/db/index/index_certs.rs b/catalyst-gateway/bin/src/db/index/index_certs.rs
index b39cf8fdace..1b2f06f821b 100644
--- a/catalyst-gateway/bin/src/db/index/index_certs.rs
+++ b/catalyst-gateway/bin/src/db/index/index_certs.rs
@@ -2,6 +2,7 @@
 
 use std::sync::Arc;
 
+use cardano_chain_follower::MultiEraBlock;
 use pallas::ledger::primitives::{alonzo, conway};
 use scylla::{frame::value::MaybeUnset, SerializeRow, Session};
 use tracing::error;
@@ -10,7 +11,7 @@ use super::{
     queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch},
     session::CassandraSession,
 };
-use crate::{cardano::util::HashedWitnesses, settings::CassandraEnvVars};
+use crate::{service::utilities::convert::u16_from_saturating, settings::CassandraEnvVars};
 
 /// Insert TXI Query and Parameters
 #[derive(SerializeRow)]
@@ -121,12 +122,15 @@ impl CertInsertQuery {
     #[allow(clippy::too_many_arguments)]
     fn stake_address(
         &mut self, cred: &alonzo::StakeCredential, slot_no: u64, txn: i16, register: bool,
-        deregister: bool, delegation: Option<Vec<u8>>, witnesses: &HashedWitnesses,
+        deregister: bool, delegation: Option<Vec<u8>>, block: &MultiEraBlock,
     ) {
         let default_addr = Vec::new();
         let (key_hash, pubkey, script) = match cred {
             pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(cred) => {
-                let addr = witnesses.get(cred.as_ref()).unwrap_or(&default_addr);
+                let addr = block
+                    .witness_for_tx(cred, u16_from_saturating(txn))
+                    .unwrap_or(default_addr);
+                //let addr = witnesses.get(cred.as_ref()).unwrap_or(&default_addr);
                 // Note: it is totally possible for the Registration Certificate to not be
                 // witnessed.
                 (cred.to_vec(), addr.clone(), false)
@@ -158,27 +162,19 @@ impl CertInsertQuery {
 
     /// Index an Alonzo Era certificate into the database.
     fn index_alonzo_cert(
-        &mut self, cert: &alonzo::Certificate, slot_no: u64, txn: i16, witnesses: &HashedWitnesses,
+        &mut self, cert: &alonzo::Certificate, slot_no: u64, txn: i16, block: &MultiEraBlock,
     ) {
         #[allow(clippy::match_same_arms)]
         match cert {
             pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(cred) => {
                 // This may not be witnessed, it's normal but disappointing.
-                self.stake_address(cred, slot_no, txn, true, false, None, witnesses);
+                self.stake_address(cred, slot_no, txn, true, false, None, block);
             },
             pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(cred) => {
-                self.stake_address(cred, slot_no, txn, false, true, None, witnesses);
+                self.stake_address(cred, slot_no, txn, false, true, None, block);
             },
             pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(cred, pool) => {
-                self.stake_address(
-                    cred,
-                    slot_no,
-                    txn,
-                    false,
-                    false,
-                    Some(pool.to_vec()),
-                    witnesses,
-                );
+                self.stake_address(cred, slot_no, txn, false, false, Some(pool.to_vec()), block);
             },
             pallas::ledger::primitives::alonzo::Certificate::PoolRegistration { .. } => {},
             pallas::ledger::primitives::alonzo::Certificate::PoolRetirement(..) => {},
@@ -189,27 +185,19 @@ impl CertInsertQuery {
 
     /// Index a certificate from a conway transaction.
     fn index_conway_cert(
-        &mut self, cert: &conway::Certificate, slot_no: u64, txn: i16, witnesses: &HashedWitnesses,
+        &mut self, cert: &conway::Certificate, slot_no: u64, txn: i16, block: &MultiEraBlock,
    ) {
         #[allow(clippy::match_same_arms)]
         match cert {
             pallas::ledger::primitives::conway::Certificate::StakeRegistration(cred) => {
                 // This may not be witnessed, it's normal but disappointing.
-                self.stake_address(cred, slot_no, txn, true, false, None, witnesses);
+                self.stake_address(cred, slot_no, txn, true, false, None, block);
             },
             pallas::ledger::primitives::conway::Certificate::StakeDeregistration(cred) => {
-                self.stake_address(cred, slot_no, txn, false, true, None, witnesses);
+                self.stake_address(cred, slot_no, txn, false, true, None, block);
             },
             pallas::ledger::primitives::conway::Certificate::StakeDelegation(cred, pool) => {
-                self.stake_address(
-                    cred,
-                    slot_no,
-                    txn,
-                    false,
-                    false,
-                    Some(pool.to_vec()),
-                    witnesses,
-                );
+                self.stake_address(cred, slot_no, txn, false, false, Some(pool.to_vec()), block);
             },
             pallas::ledger::primitives::conway::Certificate::PoolRegistration { .. } => {},
             pallas::ledger::primitives::conway::Certificate::PoolRetirement(..) => {},
@@ -231,20 +219,18 @@ impl CertInsertQuery {
     /// Index the certificates in a transaction.
     pub(crate) fn index(
         &mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn: i16,
-        witnesses: &HashedWitnesses,
+        block: &MultiEraBlock,
     ) {
         #[allow(clippy::match_same_arms)]
-        txs.certs().iter().for_each(|cert| {
-            match cert {
-                pallas::ledger::traverse::MultiEraCert::NotApplicable => {},
-                pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => {
-                    self.index_alonzo_cert(cert, slot_no, txn, witnesses);
-                },
-                pallas::ledger::traverse::MultiEraCert::Conway(cert) => {
-                    self.index_conway_cert(cert, slot_no, txn, witnesses);
-                },
-                _ => {},
-            }
+        txs.certs().iter().for_each(|cert| match cert {
+            pallas::ledger::traverse::MultiEraCert::NotApplicable => {},
+            pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => {
+                self.index_alonzo_cert(cert, slot_no, txn, block);
+            },
+            pallas::ledger::traverse::MultiEraCert::Conway(cert) => {
+                self.index_conway_cert(cert, slot_no, txn, block);
+            },
+            _ => {},
        });
    }

diff --git a/catalyst-gateway/bin/src/db/index/index_txi.rs b/catalyst-gateway/bin/src/db/index/index_txi.rs
index 20d527cfce6..7680dafab5e 100644
--- a/catalyst-gateway/bin/src/db/index/index_txi.rs
+++ b/catalyst-gateway/bin/src/db/index/index_txi.rs
@@ -72,7 +72,7 @@ impl TxiInsertQuery {
     }
 
     /// Index the transaction Inputs.
-    pub(crate) fn index(&mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64) {
+    pub(crate) fn index(&mut self, txs: &pallas_traverse::MultiEraTx<'_>, slot_no: u64) {
         // Index the TXI's.
         for txi in txs.inputs() {
             let txn_hash = txi.hash().to_vec();
diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema.rs
index f854fc7329a..01980d608b3 100644
--- a/catalyst-gateway/bin/src/db/index/schema.rs
+++ b/catalyst-gateway/bin/src/db/index/schema.rs
@@ -72,7 +72,7 @@ async fn create_namespace(
 
     // Create the Keyspace if it doesn't exist already.
     let stmt = session.prepare(query).await?;
-    session.execute(&stmt, ()).await?;
+    session.execute_unpaged(&stmt, ()).await?;
 
     // Wait for the Schema to be ready.
     session.await_schema_agreement().await?;
@@ -98,7 +98,7 @@ pub(crate) async fn create_schema(
             .context(format!("{} : Prepared", schema.1))?;
 
         session
-            .execute(&stmt, ())
+            .execute_unpaged(&stmt, ())
             .await
             .context(format!("{} : Executed", schema.1))?;
     }
diff --git a/catalyst-gateway/bin/src/service/utilities/convert.rs b/catalyst-gateway/bin/src/service/utilities/convert.rs
new file mode 100644
index 00000000000..f5733f1360e
--- /dev/null
+++ b/catalyst-gateway/bin/src/service/utilities/convert.rs
@@ -0,0 +1,94 @@
+//! Simple general purpose utility functions.
+
+/// Convert T to an i16. (saturate if out of range.)
+#[allow(dead_code)] // It's OK if we don't use this general utility function.
+pub(crate) fn i16_from_saturating<T: TryInto<i16>>(value: T) -> i16 {
+    match value.try_into() {
+        Ok(value) => value,
+        Err(_) => i16::MAX,
+    }
+}
+
+/// Convert an `<T>` to `u16`. (saturate if out of range.)
+#[allow(dead_code)] // It's OK if we don't use this general utility function.
+pub(crate) fn u16_from_saturating<
+    T: Copy
+        + TryInto<u16>
+        + std::ops::Sub<Output = T>
+        + std::cmp::PartialOrd<T>
+        + num_traits::identities::Zero,
+>(
+    value: T,
+) -> u16 {
+    if value < T::zero() {
+        u16::MIN
+    } else {
+        match value.try_into() {
+            Ok(value) => value,
+            Err(_) => u16::MAX,
+        }
+    }
+}
+
+/// Convert an `<T>` to `usize`. (saturate if out of range.)
+#[allow(dead_code)] // It's OK if we don't use this general utility function.
+pub(crate) fn usize_from_saturating<
+    T: Copy
+        + TryInto<usize>
+        + std::ops::Sub<Output = T>
+        + std::cmp::PartialOrd<T>
+        + num_traits::identities::Zero,
+>(
+    value: T,
+) -> usize {
+    if value < T::zero() {
+        usize::MIN
+    } else {
+        match value.try_into() {
+            Ok(value) => value,
+            Err(_) => usize::MAX,
+        }
+    }
+}
+
+/// Convert an `<T>` to `u32`. (saturate if out of range.)
+#[allow(dead_code)] // It's OK if we don't use this general utility function.
+pub(crate) fn u32_from_saturating<
+    T: Copy
+        + TryInto<u32>
+        + std::ops::Sub<Output = T>
+        + std::cmp::PartialOrd<T>
+        + num_traits::identities::Zero,
+>(
+    value: T,
+) -> u32 {
+    if value < T::zero() {
+        u32::MIN
+    } else {
+        match value.try_into() {
+            Ok(converted) => converted,
+            Err(_) => u32::MAX,
+        }
+    }
+}
+
+/// Convert an `<T>` to `u64`. (saturate if out of range.)
+#[allow(dead_code)] // It's OK if we don't use this general utility function.
+pub(crate) fn u64_from_saturating<
+    T: Copy
+        + TryInto<u64>
+        + std::ops::Sub<Output = T>
+        + std::cmp::PartialOrd<T>
+        + num_traits::identities::Zero,
+>(
+    value: T,
+) -> u64 {
+    if value < T::zero() {
+        u64::MIN
+    } else {
+        match value.try_into() {
+            Ok(converted) => converted,
+            Err(_) => u64::MAX,
+        }
+    }
+}
diff --git a/catalyst-gateway/bin/src/service/utilities/mod.rs b/catalyst-gateway/bin/src/service/utilities/mod.rs
index c19d7daccfc..796aca69224 100644
--- a/catalyst-gateway/bin/src/service/utilities/mod.rs
+++ b/catalyst-gateway/bin/src/service/utilities/mod.rs
@@ -1,5 +1,6 @@
 //! `API` Utility operations
 pub(crate) mod catch_panic;
+pub(crate) mod convert;
 pub(crate) mod middleware;
 pub(crate) mod net;
 
From 36634b9e12bcb274b0cdd852f134d48bb8f8c188 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Tue, 10 Sep 2024 14:21:52 +0700
Subject: [PATCH 30/69] fix(backend): Fix broken reference to catalyst libs

---
 catalyst-gateway/bin/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml
index 8ae77e60d7d..0b8eb4e78aa 100644
--- a/catalyst-gateway/bin/Cargo.toml
+++ b/catalyst-gateway/bin/Cargo.toml
@@ -15,7 +15,7 @@ repository.workspace = true
 workspace = true
 
 [dependencies]
-cardano-chain-follower = { version = "0.0.2", git = "https://github.com/input-output-hk/catalyst-libs.git", branch = "feat/expose_witness_map" }
+cardano-chain-follower = { version = "0.0.2", git = "https://github.com/input-output-hk/catalyst-libs.git", branch = "main" }
 
 pallas = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" }
 pallas-traverse = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" }
 
From a57ac80debde0d39e70237c1f9bce7640405be94 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 11 Sep 2024 16:34:16 +0700
Subject: [PATCH 31/69] ci(ci): Bump all earthfiles to latest WIP cat-ci branch

---
 Earthfile                                  | 10 +++++++---
 catalyst-gateway/event-db/Earthfile        |  2 +-
 catalyst-gateway/tests/Earthfile           |  2 +-
 catalyst-gateway/tests/api_tests/Earthfile |  2 +-
 catalyst_voices/Earthfile                  |  2 +-
 catalyst_voices/uikit_example/Earthfile    |  2 +-
 6 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/Earthfile b/Earthfile
index be95b3c69e3..5f714c2c506 100644
--- a/Earthfile
+++ b/Earthfile
@@ -1,8 +1,8 @@
 VERSION 0.8

-IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:v3.1.21 AS mdlint-ci
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:v3.1.21 AS cspell-ci
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.1.21 AS postgresql-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:feat/cat-gateway-changes AS mdlint-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:feat/cat-gateway-changes AS cspell-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:feat/cat-gateway-changes AS postgresql-ci
 
 FROM debian:stable-slim
 
@@ -18,6 +18,10 @@ markdown-check-fix:
 
     DO mdlint-ci+MDLINT_LOCALLY --src=$(echo ${PWD}) --fix=--fix
 
+# Make sure the project dictionary is properly sorted.
+clean-spelling-list:
+    DO cspell-ci+CLEAN
+
 # check-spelling Check spelling in this repo inside a container.
check-spelling: DO cspell-ci+CHECK diff --git a/catalyst-gateway/event-db/Earthfile b/catalyst-gateway/event-db/Earthfile index c85600ef886..5398ab0239e 100644 --- a/catalyst-gateway/event-db/Earthfile +++ b/catalyst-gateway/event-db/Earthfile @@ -3,7 +3,7 @@ # the database and its associated software. VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.1.21 AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:feat/cat-gateway-changes AS postgresql-ci # cspell: words diff --git a/catalyst-gateway/tests/Earthfile b/catalyst-gateway/tests/Earthfile index 16aed63c4a5..11758f7766f 100644 --- a/catalyst-gateway/tests/Earthfile +++ b/catalyst-gateway/tests/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.1.21 AS spectral-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:feat/cat-gateway-changes AS spectral-ci # test-lint-openapi - OpenAPI linting from an artifact # testing whether the OpenAPI generated during build stage follows good practice. diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile index 6612c2d4a5c..bcab3ccc733 100644 --- a/catalyst-gateway/tests/api_tests/Earthfile +++ b/catalyst-gateway/tests/api_tests/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.1.21 AS python-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:feat/cat-gateway-changes AS python-ci builder: FROM python-ci+python-base diff --git a/catalyst_voices/Earthfile b/catalyst_voices/Earthfile index bb57578f266..52c076994c7 100644 --- a/catalyst_voices/Earthfile +++ b/catalyst_voices/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 IMPORT ../catalyst-gateway AS catalyst-gateway -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.1.26 AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:feat/cat-gateway-changes AS flutter-ci # Copy all the necessary files and running bootstrap builder: diff --git a/catalyst_voices/uikit_example/Earthfile b/catalyst_voices/uikit_example/Earthfile index 66c8fdeb305..7515712484c 100644 --- a/catalyst_voices/uikit_example/Earthfile +++ b/catalyst_voices/uikit_example/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 IMPORT ../ AS catalyst-voices -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.1.26 AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:feat/cat-gateway-changes AS flutter-ci # local-build-web - build web version of UIKit example. 
# Prefixed by "local" to make sure it's not auto triggered, the target was From 3c15ae23cfc6888f5eae62a0a27a1409e115eea9 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:34:59 +0700 Subject: [PATCH 32/69] fix(frontend-pkg): Ignore .dart_tool directory in frontend files checking markdown --- .markdownlint-cli2.jsonc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index df0bd0d0c96..2278004089e 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -13,7 +13,8 @@ "CHANGELOG.md", "catalyst_voices_packages/**/CHANGELOG.md", "catalyst_voices/macos/Pods/**", - "**/node_modules/**" + "**/node_modules/**", + "**/.dart_tool/**" ], // Set standard config options in `/.markdownlint.jsonc` "config": { From 004ff17c0d2cd0145e6397a3902cea3832c7bf8e Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:35:40 +0700 Subject: [PATCH 33/69] fix(ci): Fix spelling --- .markdownlint.jsonc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.markdownlint.jsonc b/.markdownlint.jsonc index a83548c0262..be69f34a424 100644 --- a/.markdownlint.jsonc +++ b/.markdownlint.jsonc @@ -1,7 +1,7 @@ { // markdownlint JSON(C) configuration for Catalyst Standards // Do not individually set markdown lint rules in documents. - // It is permissable to disable a rule in a document if it is a false positive. + // It is permissible to disable a rule in a document if it is a false positive. // Keep the scope of the lint disable to as small as possible. // See: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md // Default state for all rules From 98d53e7b3a801cd2c9bd6341a8f6a86fef19eba0 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:36:06 +0700 Subject: [PATCH 34/69] fix(spelling): Add more project words and properly sort list --- .config/dictionaries/project.dic | 43 +++++++++++++++++++------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 97be7e896d2..5cb392b9c57 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -42,16 +42,20 @@ CIPs COCOAPODS codegen codepoints +commitlog coti coverallsapp +CQLSH cryptoxide Cunego Cupertino dalek +damian-molinski DAPPLICATION dbeaver dbschema dbsync +Deleg delegators DIND dockerhub @@ -64,19 +68,21 @@ dreps dtscalac earthfile Easterling +eddsa Edgedriver emurgo encryptor endfunction Eternl +EUTXO fetchval fluttericon fmtchk fmtfix fontawesome fontello -formz Formz +formz fuzzer gapless gcloud @@ -88,6 +94,7 @@ gmtime gradlew headlessui HIDPI +hotspots icudtl ideascale idents @@ -95,6 +102,7 @@ ilap Instantitation integ Intellij +interps iohk iphoneos jdbc @@ -122,6 +130,7 @@ localizable loguru lovelace lovelaces +LTRB mdlint metadatum metadatums @@ -132,23 +141,26 @@ minicbor mithril mitigations moderations +moka msedgedriver multiasset multidex -multiplatform Multiplatform +multiplatform myproject nanos NDEBUG netifas netkey nextest +Nodetool OCSP Oleksandr onboarded oneshot openapi opentelemetry +overprovisioned pbxproj Pdart permissionless @@ -177,8 +189,8 @@ reqwest rfwtxt rgloader ripgrep -rngs rlib +rngs RPATH rustc rustdoc @@ -188,8 +200,8 @@ rustfmt rustls rxdart saibatizoku -schemathesis Schemathesis +schemathesis Scripthash ScyllaDB seckey @@ -198,13 +210,16 @@ sendfile slotno sqlfluff sslmode +sstableinfo Stefano stevenj stringzilla +subchain Subkey submiting subosito SYSROOT 
+tablestats tacho testcov testdocs @@ -213,8 +228,10 @@ testunit thiserror thollander timelike -toastify Toastify +toastify +todos +toggleable tojunit Traceback traefik @@ -223,13 +240,14 @@ TXNZD Typer unawaited unchunk +unchunk Unlogged unmanaged Unstaked -utxo UTXO -utxos +utxo Utxos +utxos varint vite vitss @@ -238,6 +256,7 @@ vkeys vkeywitness voteplan voteplans +vsync wallclock wasmtime Wconditional @@ -254,13 +273,3 @@ xctest xctestrun xcworkspace yoroi -unchunk -EUTXO -eddsa -toggleable -interps -todos -vsync -damian-molinski -LTRB -hotspots \ No newline at end of file From 52a4a6766d3e209f881415a7142416e77b03011f Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:37:09 +0700 Subject: [PATCH 35/69] fix(backend): Sync rust configs and add target to make it easier in future --- catalyst-gateway/.cargo/config.toml | 2 +- catalyst-gateway/.config/nextest.toml | 2 +- catalyst-gateway/Earthfile | 9 +++++++-- catalyst-gateway/clippy.toml | 1 + catalyst-gateway/deny.toml | 17 ++++++++++++----- catalyst-gateway/rust-toolchain.toml | 6 ++---- 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/catalyst-gateway/.cargo/config.toml b/catalyst-gateway/.cargo/config.toml index 2764f1df4e2..02c23140754 100644 --- a/catalyst-gateway/.cargo/config.toml +++ b/catalyst-gateway/.cargo/config.toml @@ -90,4 +90,4 @@ quiet = false # whether cargo output is quiet verbose = false # whether cargo provides verbose output color = "auto" # whether cargo colorizes output use `CARGO_TERM_COLOR="off"` to disable. progress.when = "never" # whether cargo shows progress bar -progress.width = 80 # width of progress bar \ No newline at end of file +progress.width = 80 # width of progress bar diff --git a/catalyst-gateway/.config/nextest.toml b/catalyst-gateway/.config/nextest.toml index de5cf9b1ef9..be3673830bb 100644 --- a/catalyst-gateway/.config/nextest.toml +++ b/catalyst-gateway/.config/nextest.toml @@ -46,4 +46,4 @@ store-success-output = true # # Note that if a description can be extracted from the output, it is always stored in the # element. -store-failure-output = true \ No newline at end of file +store-failure-output = true diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index 2959fa6aada..47acbb2983b 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -1,8 +1,13 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/cardano-chain-follower-changes AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/cat-gateway-changes AS rust-ci -#cspell: words rustfmt toolsets USERARCH +#cspell: words rustfmt toolsets USERARCH stdcfgs + +# sync-cfg: Synchronize local config with CI version. +# Must be run by the developer manually. +sync-cfg: + DO rust-ci+SYNC_STD_CFG # builder : Set up our target toolchains, and copy our files. builder: diff --git a/catalyst-gateway/clippy.toml b/catalyst-gateway/clippy.toml index 6933b816419..0358cdb508c 100644 --- a/catalyst-gateway/clippy.toml +++ b/catalyst-gateway/clippy.toml @@ -1 +1,2 @@ +allow-unwrap-in-tests = true allow-expect-in-tests = true diff --git a/catalyst-gateway/deny.toml b/catalyst-gateway/deny.toml index 54089796af8..26ec8794bbf 100644 --- a/catalyst-gateway/deny.toml +++ b/catalyst-gateway/deny.toml @@ -19,16 +19,20 @@ version = 2 ignore = [ { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." 
},
    { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don't use a custom global allocator." },
+    { id = "RUSTSEC-2024-0370", reason = "`proc-macro-error` is used by crates we rely on, we can't control what they use."},
 ]
 
 [bans]
 multiple-versions = "warn"
 wildcards = 'deny'
 deny = [
-    # { crate = "git2", use-instead = "gix" },
-    { crate = "openssl", use-instead = "rustls" },
-    { crate = "openssl-sys", use-instead = "rustls" },
+    # Scylla DB Drivers currently require OpenSSL. It's unavoidable.
+    # However, there is movement to enable support for Rustls.
+    # So, for now, allow open-ssl but it needs to be disabled as soon as Scylla DB enables Rustls.
+    #{ crate = "openssl", use-instead = "rustls" },
+    #{ crate = "openssl-sys", use-instead = "rustls" },
     "libssh2-sys",
+    # { crate = "git2", use-instead = "gix" },
     # { crate = "cmake", use-instead = "cc" },
     # { crate = "windows", reason = "bloated and unnecessary", use-instead = "ideally inline bindings, practically, windows-sys" },
 ]
@@ -49,8 +53,9 @@ unknown-git = "deny"
 
 # List of URLs for allowed Git repositories
 allow-git = [
-    "https://github.com/input-output-hk/hermes.git",
+    "https://github.com/input-output-hk/catalyst-libs.git",
     "https://github.com/input-output-hk/catalyst-pallas.git",
+    "https://github.com/input-output-hk/catalyst-mithril.git",
     "https://github.com/bytecodealliance/wasmtime",
     "https://github.com/aldanor/hdf5-rust",
 ]
@@ -73,6 +78,7 @@ allow = [
     "ISC",
     "Unicode-3.0",
     "MPL-2.0",
+    "Zlib",
 ]
 exceptions = [
     #{ allow = ["Zlib"], crate = "tinyvec" },
@@ -94,6 +100,7 @@ license-files = [{ path = "../LICENSE-MIT", hash = 0x001c7e6c }]
 crate = "ring"
 expression = "MIT"
 license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }]
+
 # SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses
 # https://spdx.org/licenses/OpenSSL.html
 # ISC - Both BoringSSL and ring use this for their new files
@@ -113,4 +120,4 @@ license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }]
 #[[licenses.clarify]]
 #crate = "rustls-webpki"
 #expression = "ISC"
-#license-files = [{ path = "LICENSE", hash = 0x001c7e6c }]
\ No newline at end of file
+#license-files = [{ path = "LICENSE", hash = 0x001c7e6c }]
diff --git a/catalyst-gateway/rust-toolchain.toml b/catalyst-gateway/rust-toolchain.toml
index 3b61003ab09..20a42f2a9f7 100644
--- a/catalyst-gateway/rust-toolchain.toml
+++ b/catalyst-gateway/rust-toolchain.toml
@@ -1,5 +1,3 @@
 [toolchain]
-channel = "1.80.0"
-profile = "default"
-components = []
-targets = ["x86_64-unknown-linux-musl"]
\ No newline at end of file
+channel = "1.80"
+profile = "default"
\ No newline at end of file

From 841d8ce16346f6c29df71eb3abe3a65781c4ba23 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 11 Sep 2024 16:37:33 +0700
Subject: [PATCH 36/69] fix(backend): Enable all features of Scylla for now.
--- catalyst-gateway/bin/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 0b8eb4e78aa..6da2f9a2580 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -56,7 +56,7 @@ stringzilla = "3.9.3" duration-string = "0.4.0" build-info = "0.0.38" ed25519-dalek = "2.1.1" -scylla = { version = "0.14.0", features = ["ssl", "full-serialization"] } +scylla = { version = "0.14.0", features = ["cloud", "full-serialization"] } strum = { version = "0.26.3", features = ["derive"] } strum_macros = "0.26.4" openssl = { version = "0.10.66", features = ["vendored"] } From 12b93c67cde385d1f40c02b66afaa16db1408106 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:38:05 +0700 Subject: [PATCH 37/69] fix(frontend-pkg): Fix markdown table having too many columns --- catalyst_voices_packages/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/catalyst_voices_packages/README.md b/catalyst_voices_packages/README.md index 1e7c5ac0c25..41f0d73fe19 100644 --- a/catalyst_voices_packages/README.md +++ b/catalyst_voices_packages/README.md @@ -14,15 +14,15 @@ A collection of Catalyst packages and plugins for Flutter and Dart. | Name | Pub | Documentation | Android | iOS | Web | macOS | Windows | Linux | |--------|-----|---------------| ------- |-----|-------|-----|---------|-------| -| [`catalyst_cardano_serialization`](catalyst_cardano_serialization) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_serialization.svg) | [📖](https://pub.dev/documentation/catalyst_cardano_serialization/latest/catalyst_cardano_serialization/catalyst_cardano_serialization-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | -| [`catalyst_analysis`](catalyst_analysis) | ![pub package](https://img.shields.io/pub/v/catalyst_analysis.svg) | [📖](https://pub.dev/documentation/catalyst_analysis/latest/) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | -| [`catalyst_cardano`](catalyst_cardano/catalyst_cardano) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano.svg) | [📖](https://pub.dev/documentation/catalyst_cardano/latest/catalyst_cardano/catalyst_cardano-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | -| [`catalyst_cardano_platform_interface`](catalyst_cardano/catalyst_cardano_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_platform_interface.svg) | [📖](https://pub.dev/documentation/catalyst_cardano_platform_interface/latest/catalyst_cardano_platform_interface/catalyst_cardano_platform_interface-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | -| [`catalyst_cardano_web`](catalyst_cardano/catalyst_cardano_web) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_web.svg) | [📖](https://pub.dev/documentation/catalyst_cardano_web/latest/catalyst_cardano_web/catalyst_cardano_web-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | -| [`catalyst_compression`](catalyst_compression/catalyst_compression) | ![pub package](https://img.shields.io/pub/v/catalyst_compression.svg) | [📖](https://pub.dev/documentation/catalyst_compression/latest/catalyst_compression/catalyst_compression-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | -| [`catalyst_compression_platform_interface`](catalyst_compression/catalyst_compression_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_platform_interface.svg) | 
[📖](https://pub.dev/documentation/catalyst_compression_platform_interface/latest/catalyst_compression_platform_interface/catalyst_compression_platform_interface-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | -| [`catalyst_compression_web`](catalyst_compression/catalyst_compression_web) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_web.svg) | [📖](https://pub.dev/documentation/catalyst_compression_web/latest/catalyst_compression_web/catalyst_compression_web-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | -| [`catalyst_cose`](catalyst_cose) | ![pub package](https://img.shields.io/pub/v/catalyst_cose.svg) | [📖](https://pub.dev/documentation/catalyst_cose/latest/catalyst_cose/catalyst_cose-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| [`catalyst_cardano_serialization`](catalyst_cardano_serialization) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_serialization.svg) | [📖](https://pub.dev/documentation/catalyst_cardano_serialization/latest/catalyst_cardano_serialization/catalyst_cardano_serialization-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| [`catalyst_analysis`](catalyst_analysis) | ![pub package](https://img.shields.io/pub/v/catalyst_analysis.svg) | [📖](https://pub.dev/documentation/catalyst_analysis/latest/) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| [`catalyst_cardano`](catalyst_cardano/catalyst_cardano) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano.svg) | [📖](https://pub.dev/documentation/catalyst_cardano/latest/catalyst_cardano/catalyst_cardano-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | +| [`catalyst_cardano_platform_interface`](catalyst_cardano/catalyst_cardano_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_platform_interface.svg) | [📖](https://pub.dev/documentation/catalyst_cardano_platform_interface/latest/catalyst_cardano_platform_interface/catalyst_cardano_platform_interface-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| [`catalyst_cardano_web`](catalyst_cardano/catalyst_cardano_web) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_web.svg) | [📖](https://pub.dev/documentation/catalyst_cardano_web/latest/catalyst_cardano_web/catalyst_cardano_web-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | +| [`catalyst_compression`](catalyst_compression/catalyst_compression) | ![pub package](https://img.shields.io/pub/v/catalyst_compression.svg) | [📖](https://pub.dev/documentation/catalyst_compression/latest/catalyst_compression/catalyst_compression-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | +| [`catalyst_compression_platform_interface`](catalyst_compression/catalyst_compression_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_platform_interface.svg) | [📖](https://pub.dev/documentation/catalyst_compression_platform_interface/latest/catalyst_compression_platform_interface/catalyst_compression_platform_interface-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| [`catalyst_compression_web`](catalyst_compression/catalyst_compression_web) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_web.svg) | [📖](https://pub.dev/documentation/catalyst_compression_web/latest/catalyst_compression_web/catalyst_compression_web-library.html) | N/A | N/A | ✔️ | N/A | N/A | N/A | +| [`catalyst_cose`](catalyst_cose) | ![pub package](https://img.shields.io/pub/v/catalyst_cose.svg) | [📖](https://pub.dev/documentation/catalyst_cose/latest/catalyst_cose/catalyst_cose-library.html) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ## Requirements 
From 6895162c9ed2cd3377e2942b55af7b344070b2a5 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:39:14 +0700 Subject: [PATCH 38/69] ci(spelling): Fix spelling issues --- utilities/local-scylla/node1-scylla.yaml | 2 ++ utilities/local-scylla/node2-scylla.yaml | 2 ++ utilities/local-scylla/node3-scylla.yaml | 2 ++ utilities/local-scylla/node4-scylla.yaml | 2 ++ .../wallet-tester/src/common/components/TxBuilder.tsx | 10 +++++----- .../src/common/helpers/buildUnsignedTx.ts | 4 ++-- 6 files changed, 15 insertions(+), 7 deletions(-) diff --git a/utilities/local-scylla/node1-scylla.yaml b/utilities/local-scylla/node1-scylla.yaml index 96a78e0cbaa..63267c2bcad 100644 --- a/utilities/local-scylla/node1-scylla.yaml +++ b/utilities/local-scylla/node1-scylla.yaml @@ -1,5 +1,7 @@ # Scylla storage config YAML +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + ####################################### # This file is split to two sections: # 1. Supported parameters diff --git a/utilities/local-scylla/node2-scylla.yaml b/utilities/local-scylla/node2-scylla.yaml index 98e75fb478c..d0f15402524 100644 --- a/utilities/local-scylla/node2-scylla.yaml +++ b/utilities/local-scylla/node2-scylla.yaml @@ -1,5 +1,7 @@ # Scylla storage config YAML +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + ####################################### # This file is split to two sections: # 1. Supported parameters diff --git a/utilities/local-scylla/node3-scylla.yaml b/utilities/local-scylla/node3-scylla.yaml index d5742ec8542..aec5e3d224f 100644 --- a/utilities/local-scylla/node3-scylla.yaml +++ b/utilities/local-scylla/node3-scylla.yaml @@ -1,5 +1,7 @@ # Scylla storage config YAML +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + ####################################### # This file is split to two sections: # 1. Supported parameters diff --git a/utilities/local-scylla/node4-scylla.yaml b/utilities/local-scylla/node4-scylla.yaml index 9e502036a9f..4bf06b2f620 100644 --- a/utilities/local-scylla/node4-scylla.yaml +++ b/utilities/local-scylla/node4-scylla.yaml @@ -1,5 +1,7 @@ # Scylla storage config YAML +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + ####################################### # This file is split to two sections: # 1. Supported parameters diff --git a/utilities/wallet-tester/src/common/components/TxBuilder.tsx b/utilities/wallet-tester/src/common/components/TxBuilder.tsx index fe29b8f31bd..94617a45d52 100644 --- a/utilities/wallet-tester/src/common/components/TxBuilder.tsx +++ b/utilities/wallet-tester/src/common/components/TxBuilder.tsx @@ -200,8 +200,8 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { certificateFields.fields[i]?.type === value ? null : certificateFields.replace({ - type: value as any /* TODO: support default values for each type */, - }) + type: value as any /* TODO: support default values for each type */, + }) } /> {certificateFields.fields[i]?.type === CertificateType.StakeDelegation ? 
( @@ -212,7 +212,7 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { className={twMerge( "w-full rounded px-1 border border-solid border-black", (certificateFields.fields[i] as any)?.hashType === "addr_keyhash" && - "bg-black text-white" + "bg-black text-white" )} onClick={() => certificateFields.update(i, { @@ -228,7 +228,7 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { className={twMerge( "w-full rounded px-1 border border-solid border-black", (certificateFields.fields[i] as any)?.hashType === "scripthash" && - "bg-black text-white" + "bg-black text-white" )} onClick={() => certificateFields.update(i, { @@ -409,7 +409,7 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { {({ open }) => ( <>
{open ? : }
-                      Auxillary Metadata
+                      Auxiliary Metadata
)}
diff --git a/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts b/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts
index 59f981467be..a863556d4b0 100644
--- a/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts
+++ b/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts
@@ -149,7 +149,7 @@ export default async function buildUnsignedTx(
 
   // #7 add auxiliary data hash
   if (builder.auxiliaryDataHash) {
-    // note: the hash will be set after building auxillary data
+    // note: the hash will be set after building auxiliary data
   }
 
   // #8 add validity interval start
@@ -219,7 +219,7 @@ export default async function buildUnsignedTx(
 
   // build a full transaction, passing in empty witness set
   const txBody = txBuilder.build();
-  
+
   // #15 add network id
   if (builder.networkId && [0, 1].includes(Number(builder.networkId))) {
     const networkId = Number(builder.networkId) === 0 ? NetworkId.testnet() : NetworkId.mainnet()

From 8564ba6fdfa3bc48443c6e529ebff90eacb8d63f Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 11 Sep 2024 16:39:43 +0700
Subject: [PATCH 39/69] fix(docs): Bump docs to latest WIP cat-ci version

---
 docs/Earthfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/Earthfile b/docs/Earthfile
index 61fd0e8b2b8..901843b2b61 100644
--- a/docs/Earthfile
+++ b/docs/Earthfile
@@ -1,6 +1,6 @@
 VERSION 0.8
 
-IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.1.21 AS docs-ci
+IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:feat/cat-gateway-changes AS docs-ci
 IMPORT .. AS repo
 IMPORT ../catalyst-gateway AS catalyst-gateway
 
From 4073cd9860e38a2a3d2e3f9491954de06b13212c Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 11 Sep 2024 16:40:19 +0700
Subject: [PATCH 40/69] feat(gateway): Add low resource scylla db instance for
 local testing

---
 utilities/local-scylla/justfile | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/utilities/local-scylla/justfile b/utilities/local-scylla/justfile
index 3abf7ccc131..2cbf82e207f 100644
--- a/utilities/local-scylla/justfile
+++ b/utilities/local-scylla/justfile
@@ -6,19 +6,34 @@ host_ip := `hostname -i | cut -d " " -f 1`
 default:
     @just --list --unsorted
 
-# Local scylla dev DB - Starts with pre-existing data.
+# Local scylla dev DB (Developer Mode) - Starts with pre-existing data.
 scylla-dev-db:
+    docker run --name scylla-node1 --volume /var/lib/scylla/1:/var/lib/scylla -d scylladb/scylla --developer-mode=1 --smp 1
+
+# Local scylla dev DB Logs (Developer Mode) - Follow the running scylla DB logs.
+scylla-dev-db-logs:
+    docker logs scylla-node1 -f
+
+# Local scylla dev DB (Developer Mode) - Stop the running scylla DB.
+scylla-dev-db-stop:
+    docker stop scylla-node1
+
+# Local scylla dev DB CLUSTER - Starts with pre-existing data.
+scylla-dev-db-cluster:
     HOST_IP="{{host_ip}}" \
     docker compose up
 
-# Stop the scylla development DB
-scylla-dev-db-stop:
+# Stop the scylla development DB CLUSTER
+scylla-dev-db-stop-cluster:
     HOST_IP="{{host_ip}}" \
     docker compose down
 
-# Reset the cluster storage and start a new dev scylla cluster
+# Reset the dev mode scylla instance and start a new scylla dev instance
 scylla-dev-db-reset: scylla-dev-db-purge scylla-dev-db
 
+# Reset the cluster storage and start a new dev scylla cluster
+scylla-dev-db-reset-cluster: scylla-dev-db-purge scylla-dev-db-cluster
+
 # Run CQLSH on the dev Scylla cluster
 scylla-dev-db-cqlsh:
     docker run --rm -it scylladb/scylla-cqlsh `hostname` 9043

From 8242f61a346817cef54648aeeb99b87793aea21a Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 11 Sep 2024 16:41:00 +0700
Subject: [PATCH 41/69] feat(gateway): Add and update developer convenience
 functions for backend

---
 catalyst-gateway/Justfile | 51 +++++++++++++++++++++++++++++++++++++++
 justfile                  | 31 +++++++++++++-----------
 2 files changed, 68 insertions(+), 14 deletions(-)
 create mode 100644 catalyst-gateway/Justfile

diff --git a/catalyst-gateway/Justfile b/catalyst-gateway/Justfile
new file mode 100644
index 00000000000..b59846da969
--- /dev/null
+++ b/catalyst-gateway/Justfile
@@ -0,0 +1,51 @@
+# use with https://github.com/casey/just
+#
+# Catalyst Gateway developer convenience functions
+
+# cspell: words prereqs, commitlog, rustls, nocapture
+
+default:
+    @just --list --unsorted
+
+# Show the dependency tree and all enabled feature flags of every crate.
+cargo-tree:
+    cargo tree -e features,normal,build -f "{p}[{f}]" --workspace --frozen
+
+# Check Dependency licenses and CVE's
+license-check:
+    cargo deny check --exclude-dev
+
+# Format the rust code
+code-format:
+    cargo +nightly fmtfix
+    cargo +nightly fmtchk
+
+# Lint the rust code
+code-lint:
+    cargo lintfix
+    cargo lint
+
+# Synchronize Rust Configs
+sync-cfg:
+    earthly +sync-cfg
+
+# Pre Push Checks
+pre-push: sync-cfg code-format code-lint license-check
+
+# Build Local release build of catalyst gateway
+build-cat-gateway: code-format code-lint
+    cargo update
+    cargo build -r
+
+# Run cat-gateway natively on preprod
+run-cat-gateway: build-cat-gateway
+    CHAIN_FOLLOWER_SYNC_TASKS="16" \
+    RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \
+    CHAIN_NETWORK="Preprod" \
+    ./catalyst-gateway/target/release/cat-gateway run --log-level debug
+
+# Run cat-gateway natively on mainnet
+run-cat-gateway-mainnet: build-cat-gateway
+    CHAIN_FOLLOWER_SYNC_TASKS="1" \
+    RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \
+    ./catalyst-gateway/target/release/cat-gateway run --log-level debug
diff --git a/justfile b/justfile
index fa9c72f7c25..ea364f38e08 100644
--- a/justfile
+++ b/justfile
@@ -6,21 +6,24 @@
 default:
     @just --list --unsorted
 
-# Format the rust code
-code_format:
-    cd catalyst-gateway && cargo +nightly fmtfix
+
+# Fix and Check Markdown files
+check-markdown:
+    earthly +markdown-check-fix
+
+# Check Spelling
+check-spelling:
+    earthly +clean-spelling-list
+    earthly +check-spelling
+
+# Pre Push Checks - intended to be run by a git pre-push hook.
+pre-push: check-markdown check-spelling
+    just catalyst-gateway/pre-push
 
 # Run cat-gateway natively on preprod
-run-cat-gateway: code_format
-    cd catalyst-gateway && cargo update && cargo build -r
-    CHAIN_FOLLOWER_SYNC_TASKS="16" \
-    RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \
-    CHAIN_NETWORK="Preprod" \
-    ./catalyst-gateway/target/release/cat-gateway run --log-level debug
+run-cat-gateway:
+    just catalyst-gateway/run-cat-gateway
 
 # Run cat-gateway natively on mainnet
-run-cat-gateway-mainnet: code_format
-    cd catalyst-gateway && cargo update && cargo build -r
-    CHAIN_FOLLOWER_SYNC_TASKS="1" \
-    RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \
-    ./catalyst-gateway/target/release/cat-gateway run --log-level debug
+run-cat-gateway-mainnet:
+    just catalyst-gateway/run-cat-gateway-mainnet

From 6de60840f3e98cbd4029b16b16d7e095e6451bba Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 11 Sep 2024 16:42:18 +0700
Subject: [PATCH 42/69] fix(backend): Fix code format

---
 catalyst-gateway/bin/src/cardano/util.rs      | 54 +++++++++++--------
 .../bin/src/db/index/index_certs.rs           | 21 ++++----
 2 files changed, 42 insertions(+), 33 deletions(-)

diff --git a/catalyst-gateway/bin/src/cardano/util.rs b/catalyst-gateway/bin/src/cardano/util.rs
index 7a18ac3dce9..12e510488ef 100644
--- a/catalyst-gateway/bin/src/cardano/util.rs
+++ b/catalyst-gateway/bin/src/cardano/util.rs
@@ -55,9 +55,11 @@ pub struct PolicyAsset {
 pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec<PolicyAsset> {
     assets
         .iter()
-        .map(|asset| PolicyAsset {
-            policy_hash: asset.policy().to_string(),
-            assets: parse_child_assets(&asset.assets()),
+        .map(|asset| {
+            PolicyAsset {
+                policy_hash: asset.policy().to_string(),
+                assets: parse_child_assets(&asset.assets()),
+            }
         })
         .collect()
 }
@@ -66,21 +68,25 @@ pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec<P
 pub(crate) fn parse_child_assets(assets: &[MultiEraAsset]) -> Vec<Asset> {
     assets
         .iter()
-        .filter_map(|asset| match asset {
-            MultiEraAsset::AlonzoCompatibleOutput(id, name, amount) => Some(Asset {
-                policy_id: id.to_string(),
-                name: name.to_string(),
-                amount: *amount,
-            }),
-            MultiEraAsset::AlonzoCompatibleMint(id, name, amount) => {
-                let amount = u64::try_from(*amount).ok()?;
-                Some(Asset {
-                    policy_id: id.to_string(),
-                    name: name.to_string(),
-                    amount,
-                })
-            },
-            _ => Some(Asset::default()),
+        .filter_map(|asset| {
+            match asset {
+                MultiEraAsset::AlonzoCompatibleOutput(id, name, amount) => {
+                    Some(Asset {
+                        policy_id: id.to_string(),
+                        name: name.to_string(),
+                        amount: *amount,
+                    })
+                },
+                MultiEraAsset::AlonzoCompatibleMint(id, name, amount) => {
+                    let amount = u64::try_from(*amount).ok()?;
+                    Some(Asset {
+                        policy_id: id.to_string(),
+                        name: name.to_string(),
+                        amount,
+                    })
+                },
+                _ => Some(Asset::default()),
+            }
        })
        .collect()
 }
@@ -104,11 +110,13 @@ pub fn extract_stake_credentials_from_certs(
             pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(
                 stake_credential,
                 _,
-            ) => match stake_credential {
-                StakeCredential::AddrKeyhash(stake_credential) => {
-                    stake_credentials.push(hex::encode(stake_credential.as_slice()));
-                },
-                StakeCredential::Scripthash(_) => (),
+            ) => {
+                match stake_credential {
+                    StakeCredential::AddrKeyhash(stake_credential) => {
+                        stake_credentials.push(hex::encode(stake_credential.as_slice()));
+                    },
+                    StakeCredential::Scripthash(_) => (),
+                }
             },
             _ => continue,
         }
diff --git a/catalyst-gateway/bin/src/db/index/index_certs.rs b/catalyst-gateway/bin/src/db/index/index_certs.rs index
1b2f06f821b..ef238557101 100644 --- a/catalyst-gateway/bin/src/db/index/index_certs.rs +++ b/catalyst-gateway/bin/src/db/index/index_certs.rs @@ -130,7 +130,6 @@ impl CertInsertQuery { let addr = block .witness_for_tx(cred, u16_from_saturating(txn)) .unwrap_or(default_addr); - //let addr = witnesses.get(cred.as_ref()).unwrap_or(&default_addr); // Note: it is totally possible for the Registration Certificate to not be // witnessed. (cred.to_vec(), addr.clone(), false) @@ -222,15 +221,17 @@ impl CertInsertQuery { block: &MultiEraBlock, ) { #[allow(clippy::match_same_arms)] - txs.certs().iter().for_each(|cert| match cert { - pallas::ledger::traverse::MultiEraCert::NotApplicable => {}, - pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { - self.index_alonzo_cert(cert, slot_no, txn, block); - }, - pallas::ledger::traverse::MultiEraCert::Conway(cert) => { - self.index_conway_cert(cert, slot_no, txn, block); - }, - _ => {}, + txs.certs().iter().for_each(|cert| { + match cert { + pallas::ledger::traverse::MultiEraCert::NotApplicable => {}, + pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { + self.index_alonzo_cert(cert, slot_no, txn, block); + }, + pallas::ledger::traverse::MultiEraCert::Conway(cert) => { + self.index_conway_cert(cert, slot_no, txn, block); + }, + _ => {}, + } }); } From 9d5dbb75eea40e31bb064a22a4819ec20778a2cb Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:42:40 +0700 Subject: [PATCH 43/69] fix(backend): Fix spelling issues in CQL files --- .../bin/src/db/index/schema/stake_registration.cql | 4 ++-- .../bin/src/db/index/schema/txi_by_txn_hash_table.cql | 2 +- .../bin/src/db/index/schema/txo_assets_by_stake_table.cql | 2 +- .../bin/src/db/index/schema/txo_by_stake_table.cql | 4 ++-- .../src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql | 2 +- .../bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql b/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql index 65915fa1d91..71afa7d5b8d 100644 --- a/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql +++ b/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql @@ -9,10 +9,10 @@ CREATE TABLE IF NOT EXISTS stake_registration ( -- Non-Key Data stake_address blob, -- 32 Bytes Stake address - not present for scripts and may not be present for `register`. - -- Stake key licecycle data, shows what happened with the stake key at this slot#. + -- Stake key lifecycle data, shows what happened with the stake key at this slot#. script boolean, -- Is the address a script address. register boolean, -- True if the stake was registered in this transaction. - deregister boolean, -- True if the stake key was deregisterd in this transaction. + deregister boolean, -- True if the stake key was deregistered in this transaction. pool_delegation blob, -- Stake was delegated to this Pool address. -- Not present if delegation did not change. 
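As an aside, the lifecycle columns documented above are designed to be replayed in slot order to derive a stake key's current state. A minimal CQL read sketch follows; the `stake_hash` partition-key column is an assumption here, since the table's key declaration sits above the hunk shown:

    -- Illustrative sketch only: the key column name is assumed, not shown in this hunk.
    SELECT slot_no, script, register, deregister, pool_delegation
      FROM stake_registration
     WHERE stake_hash = ?;  -- assumed partition key (28-byte stake key hash)
    -- Walk rows in ascending slot_no: the latest register/deregister flag wins,
    -- and pool_delegation only changes when a non-null value appears.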
diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql index 5f67ffcfba9..a954bdd3c4c 100644 --- a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql +++ b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql @@ -5,7 +5,7 @@ CREATE TABLE IF NOT EXISTS txi_by_txn_hash ( txo smallint, -- Index of the TXO which was spent -- Non key data, we can only spend a transaction hash/txo once, so this should be unique in any event. - slot_no varint, -- slot number when the spend occured. + slot_no varint, -- slot number when the spend occurred. PRIMARY KEY (txn_hash, txo) ); diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql index 3ab1bcad003..f575fffc757 100644 --- a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql +++ b/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql @@ -1,7 +1,7 @@ -- Transaction Outputs (Native Assets) per stake address. -- Unstaked Assets are not present in this table. CREATE TABLE IF NOT EXISTS txo_assets_by_stake ( - -- Priamry Key Fields + -- Primary Key Fields stake_address blob, -- stake address hash (28 bytes stake address hash, zero length if not staked.) slot_no varint, -- slot number the txo was created in. txn smallint, -- Which Transaction in the Slot is the TXO. diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql index fc331e14de6..b9ad080358a 100644 --- a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql +++ b/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql @@ -1,7 +1,7 @@ -- Transaction Outputs (ADA) per stake address. -- Unstaked ADA is not present in this table. CREATE TABLE IF NOT EXISTS txo_by_stake ( - -- Priamry Key Fields + -- Primary Key Fields stake_address blob, -- stake address hash (28 bytes stake address hash, zero length if not staked.) slot_no varint, -- slot number the txo was created in. txn smallint, -- Which Transaction in the Slot is the TXO. @@ -17,7 +17,7 @@ CREATE TABLE IF NOT EXISTS txo_by_stake ( spent_slot varint, -- Slot this TXO was spent in. -- This is ONLY calculated/stored -- when first detected in a query lookup. - -- It serves as an optimization on subsequnt queries. + -- It serves as an optimization on subsequent queries. PRIMARY KEY (stake_address, slot_no, txn, txo) ); diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql index 723db9350c1..047567d895d 100644 --- a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql +++ b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql @@ -1,6 +1,6 @@ -- Transaction Outputs (Native Assets) per stake address. CREATE TABLE IF NOT EXISTS unstaked_txo_assets_by_txn_hash ( - -- Priamry Key Fields + -- Primary Key Fields txn_hash blob, -- 32 byte hash of this transaction. txo smallint, -- offset in the txo list of the transaction the txo is in. 
policy_id blob, -- asset policy hash (id) (28 byte binary hash) diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql index 1ff2ab2a96f..b6627cbbe38 100644 --- a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql +++ b/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql @@ -1,6 +1,6 @@ -- Transaction Outputs (ADA) that are not staked, by their transaction hash. CREATE TABLE IF NOT EXISTS unstaked_txo_by_txn_hash ( - -- Priamry Key Fields + -- Primary Key Fields txn_hash blob, -- 32 byte hash of this transaction. txo smallint, -- offset in the txo list of the transaction the txo is in. @@ -16,7 +16,7 @@ CREATE TABLE IF NOT EXISTS unstaked_txo_by_txn_hash ( spent_slot varint, -- Slot this TXO was spent in. -- This is ONLY calculated/stored -- when first detected in a query lookup. - -- It serves as an optimization on subsequnt queries. + -- It serves as an optimization on subsequent queries. PRIMARY KEY (txn_hash, txo) ); From 0eef251cea89f560d45c5553697c22da20f2507a Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 16:59:26 +0700 Subject: [PATCH 44/69] fix(spelling): Remove duplicates from the project words dictionary --- .config/dictionaries/project.dic | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 08e05f420ed..8c3c63229e3 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -82,7 +82,6 @@ fmtfix fontawesome fontello Formz -formz fuzzer gapless gcloud @@ -146,7 +145,6 @@ msedgedriver multiasset multidex Multiplatform -multiplatform myproject nanos NDEBUG @@ -170,6 +168,7 @@ plpgsql podfile podhelper postcss +Precache Precertificate preprod projectcatalyst @@ -201,7 +200,6 @@ rustls rxdart saibatizoku Schemathesis -schemathesis Scripthash ScyllaDB seckey @@ -218,6 +216,7 @@ subchain Subkey submiting subosito +svgs SYSROOT tablestats tacho @@ -229,7 +228,6 @@ thiserror thollander timelike Toastify -toastify todos toggleable tojunit @@ -240,14 +238,11 @@ TXNZD Typer unawaited unchunk -unchunk Unlogged unmanaged Unstaked UTXO -utxo Utxos -utxos varint vite vitss @@ -273,16 +268,3 @@ xctest xctestrun xcworkspace yoroi -unchunk -EUTXO -eddsa -toggleable -interps -todos -vsync -damian-molinski -LTRB -hotspots -precache -Precache -svgs From 50741775061098d9cd3ff6bee1583e45324a39c6 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Wed, 11 Sep 2024 17:36:40 +0700 Subject: [PATCH 45/69] fix(backend): Get the backend building properly with earthly. 
--- catalyst-gateway/Cargo.toml | 2 ++ catalyst-gateway/Earthfile | 2 +- catalyst-gateway/Justfile | 4 ++++ catalyst-gateway/bin/Cargo.toml | 8 ++++---- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index e2fd64ec089..b819a6f6ca0 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -35,6 +35,8 @@ unescaped_backticks = "deny" pedantic = { level = "deny", priority = -1 } unwrap_used = "deny" expect_used = "deny" +todo = "deny" +unimplemented = "deny" exit = "deny" get_unwrap = "deny" index_refutable_slice = "deny" diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index 47acbb2983b..2e0ebc8e0bc 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -13,7 +13,7 @@ sync-cfg: builder: DO rust-ci+SETUP - COPY --dir .cargo .config Cargo.* clippy.toml deny.toml rustfmt.toml bin crates . + COPY --dir .cargo .config Cargo.* clippy.toml deny.toml rustfmt.toml bin . ## ----------------------------------------------------------------------------- ## diff --git a/catalyst-gateway/Justfile b/catalyst-gateway/Justfile index b59846da969..e742d379102 100644 --- a/catalyst-gateway/Justfile +++ b/catalyst-gateway/Justfile @@ -31,6 +31,10 @@ sync-cfg: # Pre Push Checks pre-push: sync-cfg code-format code-lint license-check + # Make sure we can actually build inside Earthly which needs to happen in CI. + earthly +check + earthly +build + earthly +package-cat-gateway # Build Local release build of catalyst gateway build-cat-gateway: code-format code-lint diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 6da2f9a2580..99421fbc4c6 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -35,7 +35,7 @@ serde = { version = "1.0.204", features = ["derive"] } serde_json = "1.0.128" thiserror = "1.0.63" chrono = "0.4.38" -async-trait = "0.1.82" +# async-trait = "0.1.82" bb8 = "0.8.5" bb8-postgres = "0.8.1" tokio-postgres = { version = "0.7.11", features = [ @@ -52,18 +52,18 @@ handlebars = "6.0.0" anyhow = "1.0.86" cddl = "0.9.4" ciborium = "0.2.2" -stringzilla = "3.9.3" +# stringzilla = "3.9.3" duration-string = "0.4.0" build-info = "0.0.38" ed25519-dalek = "2.1.1" scylla = { version = "0.14.0", features = ["cloud", "full-serialization"] } strum = { version = "0.26.3", features = ["derive"] } -strum_macros = "0.26.4" +# strum_macros = "0.26.4" openssl = { version = "0.10.66", features = ["vendored"] } num-bigint = "0.4.6" futures = "0.3.30" rand = "0.8.5" -moka = { version = "0.12.8", features = ["future"] } +# moka = { version = "0.12.8", features = ["future"] } crossbeam-skiplist = "0.1.3" rust_decimal = { version = "1.36.0", features = [ "serde-with-float", From 412a7b09dfce3c531a8e53d9a497726f37e0380e Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 12 Sep 2024 09:45:14 +0700 Subject: [PATCH 46/69] feat(backend): remove obsoleted postgres logic for chain indexing --- .../bin/src/cardano/cip36_registration/mod.rs | 7 ++ catalyst-gateway/bin/src/cardano/util.rs | 3 + .../insert_update_state.sql.obsolete} | 0 .../chain_state/mod.rs | 0 .../select_slot_info_by_datetime.sql.hbs | 0 .../chain_state/select_update_state.sql | 0 .../cip36_registration/mod.rs | 0 .../select_cip36_registration.sql.obsolete} | 0 .../config/mod.rs | 0 .../config/select_config.sql.obsolete} | 0 .../mod.rs.obsolete} | 0 .../{cardano => cardano.obsolete}/utxo/mod.rs | 0 .../select_total_utxo_amount.sql.obsolete} | 0 
catalyst-gateway/bin/src/db/event/mod.rs | 1 - .../cardano/date_time_to_slot_number_get.rs | 110 +++++++++--------- .../bin/src/service/api/cardano/mod.rs | 15 ++- .../service/api/cardano/registration_get.rs | 28 +++-- .../src/service/api/cardano/staked_ada_get.rs | 40 +++---- .../src/service/api/cardano/sync_state_get.rs | 18 +-- .../bin/src/service/api/cardano/types.rs | 31 +++++ catalyst-gateway/bin/src/service/api/mod.rs | 2 +- .../objects/cardano/registration_info.rs | 8 +- .../common/objects/cardano/slot_info.rs | 6 +- .../common/objects/cardano/stake_info.rs | 2 +- .../common/objects/cardano/sync_state.rs | 6 +- 25 files changed, 162 insertions(+), 115 deletions(-) rename catalyst-gateway/bin/src/db/event/{cardano/chain_state/insert_update_state.sql => cardano.obsolete/chain_state/insert_update_state.sql.obsolete} (100%) rename catalyst-gateway/bin/src/db/event/{cardano => cardano.obsolete}/chain_state/mod.rs (100%) rename catalyst-gateway/bin/src/db/event/{cardano => cardano.obsolete}/chain_state/select_slot_info_by_datetime.sql.hbs (100%) rename catalyst-gateway/bin/src/db/event/{cardano => cardano.obsolete}/chain_state/select_update_state.sql (100%) rename catalyst-gateway/bin/src/db/event/{cardano => cardano.obsolete}/cip36_registration/mod.rs (100%) rename catalyst-gateway/bin/src/db/event/{cardano/cip36_registration/select_cip36_registration.sql => cardano.obsolete/cip36_registration/select_cip36_registration.sql.obsolete} (100%) rename catalyst-gateway/bin/src/db/event/{cardano => cardano.obsolete}/config/mod.rs (100%) rename catalyst-gateway/bin/src/db/event/{cardano/config/select_config.sql => cardano.obsolete/config/select_config.sql.obsolete} (100%) rename catalyst-gateway/bin/src/db/event/{cardano/mod.rs => cardano.obsolete/mod.rs.obsolete} (100%) rename catalyst-gateway/bin/src/db/event/{cardano => cardano.obsolete}/utxo/mod.rs (100%) rename catalyst-gateway/bin/src/db/event/{cardano/utxo/select_total_utxo_amount.sql => cardano.obsolete/utxo/select_total_utxo_amount.sql.obsolete} (100%) create mode 100644 catalyst-gateway/bin/src/service/api/cardano/types.rs diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs index 29d7a07b781..a1dae967a59 100644 --- a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs @@ -30,10 +30,12 @@ pub(crate) struct VotingPurpose(u64); pub(crate) struct RewardsAddress(pub Vec); /// Error report for serializing +#[allow(dead_code)] pub(crate) type ErrorReport = Vec; impl PubKey { /// Get credentials, a blake2b 28 bytes hash of the pub key + #[allow(dead_code)] pub(crate) fn get_credentials(&self) -> [u8; 28] { let mut digest = [0u8; 28]; let mut context = Blake2b::new(28); @@ -71,6 +73,8 @@ pub(crate) enum VotingInfo { /// CIP-36 registration info part #[derive(Debug, Clone, PartialEq)] +#[allow(dead_code)] + pub(crate) struct Registration { /// Voting info pub(crate) voting_info: VotingInfo, @@ -86,6 +90,8 @@ pub(crate) struct Registration { /// A catalyst CIP-36 registration on Cardano #[derive(Debug, Clone, PartialEq)] +#[allow(dead_code)] + pub(crate) struct Cip36Metadata { /// CIP-36 registration 61284 pub(crate) registration: Option, @@ -98,6 +104,7 @@ pub(crate) struct Cip36Metadata { impl Cip36Metadata { /// Create new `Cip36Registration` from tx metadata /// Collect secondary errors for granular json error report + #[allow(dead_code)] pub(crate) fn generate_from_tx_metadata( tx_metadata: 
&MultiEraMeta, network: Network, ) -> Option { diff --git a/catalyst-gateway/bin/src/cardano/util.rs b/catalyst-gateway/bin/src/cardano/util.rs index 12e510488ef..26f8a11f210 100644 --- a/catalyst-gateway/bin/src/cardano/util.rs +++ b/catalyst-gateway/bin/src/cardano/util.rs @@ -52,6 +52,7 @@ pub struct PolicyAsset { } /// Extract assets +#[allow(dead_code)] pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec { assets .iter() @@ -65,6 +66,7 @@ pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec Vec { assets .iter() @@ -92,6 +94,7 @@ fn parse_child_assets(assets: &[MultiEraAsset]) -> Vec { } /// Eras before staking should be ignored +#[allow(dead_code)] pub fn valid_era(era: Era) -> bool { !matches!(era, Era::Byron) } diff --git a/catalyst-gateway/bin/src/db/event/cardano/chain_state/insert_update_state.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql.obsolete similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/chain_state/insert_update_state.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql.obsolete diff --git a/catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/chain_state/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/mod.rs diff --git a/catalyst-gateway/bin/src/db/event/cardano/chain_state/select_slot_info_by_datetime.sql.hbs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_slot_info_by_datetime.sql.hbs similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/chain_state/select_slot_info_by_datetime.sql.hbs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_slot_info_by_datetime.sql.hbs diff --git a/catalyst-gateway/bin/src/db/event/cardano/chain_state/select_update_state.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_update_state.sql similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/chain_state/select_update_state.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_update_state.sql diff --git a/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/cip36_registration/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs diff --git a/catalyst-gateway/bin/src/db/event/cardano/cip36_registration/select_cip36_registration.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql.obsolete similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/cip36_registration/select_cip36_registration.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql.obsolete diff --git a/catalyst-gateway/bin/src/db/event/cardano/config/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/config/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/config/mod.rs diff --git a/catalyst-gateway/bin/src/db/event/cardano/config/select_config.sql 
b/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql.obsolete similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/config/select_config.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql.obsolete diff --git a/catalyst-gateway/bin/src/db/event/cardano/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/mod.rs.obsolete similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/mod.rs.obsolete diff --git a/catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/utxo/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs diff --git a/catalyst-gateway/bin/src/db/event/cardano/utxo/select_total_utxo_amount.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql.obsolete similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano/utxo/select_total_utxo_amount.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql.obsolete diff --git a/catalyst-gateway/bin/src/db/event/mod.rs b/catalyst-gateway/bin/src/db/event/mod.rs index 87256ad8706..3efd6aa6983 100644 --- a/catalyst-gateway/bin/src/db/event/mod.rs +++ b/catalyst-gateway/bin/src/db/event/mod.rs @@ -14,7 +14,6 @@ use tracing::{debug, debug_span, error, Instrument}; use crate::settings::Settings; -pub(crate) mod cardano; pub(crate) mod error; pub(crate) mod legacy; pub(crate) mod schema_check; diff --git a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs index 7f7142026f0..b459460b9ae 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs @@ -1,20 +1,14 @@ //! Implementation of the GET `/date_time_to_slot_number` endpoint -use poem_openapi::{payload::Json, ApiResponse}; +use poem_openapi::{payload::Json, types::Example, ApiResponse}; -use crate::{ - db::event::{ - cardano::chain_state::{BlockHash, DateTime, SlotInfoQueryType, SlotNumber}, - error::NotFoundError, - EventDB, - }, - service::common::{ - objects::cardano::{ - network::Network, - slot_info::{Slot, SlotInfo}, - }, - responses::WithErrorResponses, +use super::types::DateTime; +use crate::service::common::{ + objects::cardano::{ + network::Network, + slot_info::{Slot, SlotInfo}, }, + responses::WithErrorResponses, }; /// Endpoint responses. 
@@ -29,54 +23,60 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/date_time_to_slot_number` -#[allow(clippy::unused_async)] +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( date_time: Option, network: Option, ) -> AllResponses { - let date_time = date_time.unwrap_or_else(chrono::Utc::now); - let network = network.unwrap_or(Network::Mainnet); + let _date_time = date_time.unwrap_or_else(chrono::Utc::now); + let _network = network.unwrap_or(Network::Mainnet); - let (previous, current, next) = tokio::join!( - EventDB::get_slot_info( - date_time, - network.clone().into(), - SlotInfoQueryType::Previous - ), - EventDB::get_slot_info( - date_time, - network.clone().into(), - SlotInfoQueryType::Current - ), - EventDB::get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) - ); + let previous = Some(Slot::example()); + let current = Some(Slot::example()); + let next = Some(Slot::example()); - let process_slot_info_result = - |slot_info_result: anyhow::Result<(SlotNumber, BlockHash, DateTime)>| { - match slot_info_result { - Ok((slot_number, block_hash, block_time)) => { - Ok(Some(Slot { - slot_number, - block_hash: From::from(block_hash), - block_time, - })) - }, - Err(err) if err.is::() => Ok(None), - Err(err) => Err(err), - } - }; + let _unused = " + let (previous, current, next) = tokio::join!( + EventDB::get_slot_info( + date_time, + network.clone().into(), + SlotInfoQueryType::Previous + ), + EventDB::get_slot_info( + date_time, + network.clone().into(), + SlotInfoQueryType::Current + ), + EventDB::get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) + ); + + let process_slot_info_result = + |slot_info_result: anyhow::Result<(SlotNumber, BlockHash, DateTime)>| { + match slot_info_result { + Ok((slot_number, block_hash, block_time)) => { + Ok(Some(Slot { + slot_number, + block_hash: From::from(block_hash), + block_time, + })) + }, + Err(err) if err.is::() => Ok(None), + Err(err) => Err(err), + } + }; - let current = match process_slot_info_result(current) { - Ok(current) => current, - Err(err) => return AllResponses::handle_error(&err), - }; - let previous = match process_slot_info_result(previous) { - Ok(current) => current, - Err(err) => return AllResponses::handle_error(&err), - }; - let next = match process_slot_info_result(next) { - Ok(current) => current, - Err(err) => return AllResponses::handle_error(&err), - }; + let current = match process_slot_info_result(current) { + Ok(current) => current, + Err(err) => return AllResponses::handle_error(&err), + }; + let previous = match process_slot_info_result(previous) { + Ok(current) => current, + Err(err) => return AllResponses::handle_error(&err), + }; + let next = match process_slot_info_result(next) { + Ok(current) => current, + Err(err) => return AllResponses::handle_error(&err), + }; + "; Responses::Ok(Json(SlotInfo { previous, diff --git a/catalyst-gateway/bin/src/service/api/cardano/mod.rs b/catalyst-gateway/bin/src/service/api/cardano/mod.rs index 3038a15d939..9e83da3c475 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/mod.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/mod.rs @@ -3,22 +3,21 @@ use poem_openapi::{ param::{Path, Query}, OpenApi, }; +use types::{DateTime, SlotNumber}; -use crate::{ - db::event::cardano::chain_state::{DateTime, SlotNumber}, - service::{ - common::{ - objects::cardano::{network::Network, stake_address::StakeAddress}, - tags::ApiTags, - }, - 
utilities::middleware::schema_validation::schema_version_validation, +use crate::service::{ + common::{ + objects::cardano::{network::Network, stake_address::StakeAddress}, + tags::ApiTags, }, + utilities::middleware::schema_validation::schema_version_validation, }; mod date_time_to_slot_number_get; mod registration_get; mod staked_ada_get; mod sync_state_get; +pub(crate) mod types; /// Cardano Follower API Endpoints pub(crate) struct CardanoApi; diff --git a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs index 5b9f89cfed0..00c4e89d1bd 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs @@ -2,21 +2,20 @@ use poem_openapi::{payload::Json, ApiResponse}; -use crate::{ - db::event::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, - service::{ - common::{ - objects::cardano::{ - network::Network, registration_info::RegistrationInfo, stake_address::StakeAddress, - }, - responses::WithErrorResponses, +use super::types::SlotNumber; +use crate::service::{ + common::{ + objects::cardano::{ + network::Network, registration_info::RegistrationInfo, stake_address::StakeAddress, }, - utilities::check_network, + responses::WithErrorResponses, }, + utilities::check_network, }; /// Endpoint responses #[derive(ApiResponse)] +#[allow(dead_code)] pub(crate) enum Responses { /// The registration information for the stake address queried. #[oai(status = 200)] @@ -31,16 +30,18 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/registration` +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( stake_address: StakeAddress, provided_network: Option, slot_num: Option, ) -> AllResponses { - let date_time = slot_num.unwrap_or(SlotNumber::MAX); - let stake_credential = stake_address.payload().as_hash().to_vec(); - let network = match check_network(stake_address.network(), provided_network) { + let _date_time = slot_num.unwrap_or(SlotNumber::MAX); + let _stake_credential = stake_address.payload().as_hash().to_vec(); + let _network = match check_network(stake_address.network(), provided_network) { Ok(network) => network, Err(err) => return AllResponses::handle_error(&err), }; + let _unused = " // get the total utxo amount from the database match EventDB::get_registration_info(stake_credential, network.into(), date_time).await { Ok((tx_id, payment_address, voting_info, nonce)) => { @@ -55,4 +56,7 @@ pub(crate) async fn endpoint( Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } + "; + + Responses::NotFound.into() } diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs index 61b32381ba3..976c8ae7e1b 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs @@ -2,21 +2,18 @@ use poem_openapi::{payload::Json, ApiResponse}; -use crate::{ - db::event::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, - service::{ - common::{ - objects::cardano::{ - network::Network, stake_address::StakeAddress, stake_info::StakeInfo, - }, - responses::WithErrorResponses, - }, - utilities::check_network, +use super::types::SlotNumber; +use crate::service::{ + common::{ + objects::cardano::{network::Network, 
stake_address::StakeAddress, stake_info::StakeInfo}, + responses::WithErrorResponses, }, + utilities::check_network, }; /// Endpoint responses. #[derive(ApiResponse)] +#[allow(dead_code)] pub(crate) enum Responses { /// The amount of ADA staked by the queried stake address, as at the indicated slot. #[oai(status = 200)] @@ -30,27 +27,30 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/staked_ada` +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( stake_address: StakeAddress, provided_network: Option, slot_num: Option, ) -> AllResponses { - let date_time = slot_num.unwrap_or(SlotNumber::MAX); - let stake_credential = stake_address.payload().as_hash().to_vec(); + let _date_time = slot_num.unwrap_or(SlotNumber::MAX); + let _stake_credential = stake_address.payload().as_hash().to_vec(); - let network = match check_network(stake_address.network(), provided_network) { + let _network = match check_network(stake_address.network(), provided_network) { Ok(network) => network, Err(err) => return AllResponses::handle_error(&err), }; + let _unused = " // get the total utxo amount from the database match EventDB::total_utxo_amount(stake_credential, network.into(), date_time).await { - Ok((amount, slot_number)) => { - Responses::Ok(Json(StakeInfo { - amount, - slot_number, - })) - .into() - }, + Ok((amount, slot_number)) => Responses::Ok(Json(StakeInfo { + amount, + slot_number, + })) + .into(), Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } + "; + + Responses::NotFound.into() } diff --git a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs index db71beb44f1..9b93a3c005b 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs @@ -2,16 +2,14 @@ use poem_openapi::{payload::Json, ApiResponse}; -use crate::{ - db::event::{error::NotFoundError, EventDB}, - service::common::{ - objects::cardano::{network::Network, sync_state::SyncState}, - responses::WithErrorResponses, - }, +use crate::service::common::{ + objects::cardano::{network::Network, sync_state::SyncState}, + responses::WithErrorResponses, }; /// Endpoint responses. #[derive(ApiResponse)] +#[allow(dead_code)] pub(crate) enum Responses { /// The synchronisation state of the blockchain with the catalyst gateway service. 
 #[oai(status = 200)]
@@ -25,10 +23,11 @@ pub(crate) type AllResponses = WithErrorResponses<Responses>;
 /// # GET `/sync_state`
-#[allow(clippy::unused_async)]
+#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)]
 pub(crate) async fn endpoint(network: Option<Network>) -> AllResponses {
-    let network = network.unwrap_or(Network::Mainnet);
+    let _network = network.unwrap_or(Network::Mainnet);
 
+    let _unused = "
     match EventDB::last_updated_state(network.into()).await {
         Ok((slot_number, block_hash, last_updated)) => {
             Responses::Ok(Json(SyncState {
@@ -41,4 +40,7 @@ pub(crate) async fn endpoint(network: Option<Network>) -> AllResponses {
         Err(err) if err.is::<NotFoundError>() => Responses::NotFound.into(),
         Err(err) => AllResponses::handle_error(&err),
     }
+    ";
+
+    Responses::NotFound.into()
 }
diff --git a/catalyst-gateway/bin/src/service/api/cardano/types.rs b/catalyst-gateway/bin/src/service/api/cardano/types.rs
new file mode 100644
index 00000000000..af15c5f8622
--- /dev/null
+++ b/catalyst-gateway/bin/src/service/api/cardano/types.rs
@@ -0,0 +1,31 @@
+//! Cardano Specific Types
+//!
+//! These are temporary types needed to prevent breakage due to the removal of the
+//! Event DB logic for chain-sync. They should be replaced with proper types in a better
+//! place.
+
+use crate::cardano::cip36_registration::VotingInfo;
+
+/// Block time
+pub(crate) type DateTime = chrono::DateTime<chrono::Utc>;
+/// Slot
+pub(crate) type SlotNumber = i64;
+/// Transaction id
+#[allow(dead_code)]
+pub(crate) type TxId = Vec<u8>;
+/// Stake credential
+#[allow(dead_code)]
+pub(crate) type StakeCredential = Vec<u8>;
+/// Public voting key
+#[allow(dead_code)]
+pub(crate) type PublicVotingInfo = VotingInfo;
+/// Payment address
+#[allow(dead_code)]
+pub(crate) type PaymentAddress = Vec<u8>;
+/// Nonce
+pub(crate) type Nonce = i64;
+/// Metadata 61284
+#[allow(dead_code)]
+pub(crate) type MetadataCip36 = Vec<u8>;
+/// Stake amount.
+pub(crate) type StakeAmount = i64; diff --git a/catalyst-gateway/bin/src/service/api/mod.rs b/catalyst-gateway/bin/src/service/api/mod.rs index 6f680854d13..1608edfed3e 100644 --- a/catalyst-gateway/bin/src/service/api/mod.rs +++ b/catalyst-gateway/bin/src/service/api/mod.rs @@ -13,7 +13,7 @@ use poem_openapi::{ContactObject, LicenseObject, OpenApiService, ServerObject}; use self::cardano::CardanoApi; use crate::settings::Settings; -mod cardano; +pub(crate) mod cardano; mod health; mod legacy; diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs index 45dda1cf752..6a855cc7998 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs @@ -2,9 +2,10 @@ use poem_openapi::{types::Example, Object, Union}; -use crate::{ - db::event::cardano::cip36_registration::{Nonce, PaymentAddress, PublicVotingInfo, TxId}, - service::{common::objects::cardano::hash::Hash, utilities::to_hex_with_prefix}, +use crate::service::{ + api::cardano::types::{Nonce, PaymentAddress, PublicVotingInfo, TxId}, + common::objects::cardano::hash::Hash, + utilities::to_hex_with_prefix, }; /// Delegation type @@ -68,6 +69,7 @@ pub(crate) struct RegistrationInfo { impl RegistrationInfo { /// Creates a new `RegistrationInfo` + #[allow(dead_code)] pub(crate) fn new( tx_hash: TxId, rewards_address: &PaymentAddress, voting_info: PublicVotingInfo, nonce: Nonce, diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs index f4ea1662211..5a741887fc1 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs @@ -2,9 +2,9 @@ use poem_openapi::{types::Example, Object}; -use crate::{ - db::event::cardano::chain_state::{DateTime, SlotNumber}, - service::common::objects::cardano::hash::Hash, +use crate::service::{ + api::cardano::types::{DateTime, SlotNumber}, + common::objects::cardano::hash::Hash, }; /// Cardano block's slot data. diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs index fda6ee0f7c9..6793085d7a8 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs @@ -2,7 +2,7 @@ use poem_openapi::{types::Example, Object}; -use crate::db::event::cardano::{chain_state::SlotNumber, utxo::StakeAmount}; +use crate::service::api::cardano::types::{SlotNumber, StakeAmount}; /// User's cardano stake info. #[derive(Object)] diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs index c4b7dde539a..326d1633406 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs @@ -2,9 +2,9 @@ use poem_openapi::{types::Example, Object}; -use crate::{ - db::event::cardano::chain_state::{DateTime, SlotNumber}, - service::common::objects::cardano::hash::Hash, +use crate::service::{ + api::cardano::types::{DateTime, SlotNumber}, + common::objects::cardano::hash::Hash, }; /// Cardano follower's sync state info. 
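
The pattern used throughout this patch, parking removed logic in a string literal so it stays visible in the file without compiling, looks like this in isolation. A minimal sketch with simplified types, not project code (`EventDB` inside the parked string is the removed Postgres interface):

```rust
/// The shape of a stubbed endpoint from this patch, in isolation.
#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)]
async fn staked_ada(slot_num: Option<i64>) -> Option<i64> {
    // Inputs are re-bound with a `_` prefix: still type-checked, but clearly
    // unused until the ScyllaDB-backed query lands.
    let _slot_num = slot_num.unwrap_or(i64::MAX);

    // The old body is parked in a no-op string literal, so it remains in the
    // file (and in diffs) without compiling; the `allow`s above silence clippy.
    let _unused = "
    match EventDB::total_utxo_amount(_slot_num).await {
        Ok((amount, _slot_number)) => Some(amount),
        Err(_) => None,
    }
    ";

    // Fixed placeholder response.
    None
}

fn main() {
    // Poll the stub to completion without a full runtime; it never awaits.
    let _ = futures::executor::block_on(staked_ada(None));
}
```
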
From 704f5a2e97868a18a648b70e9ba47146aef75dfb Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 12 Sep 2024 12:18:42 +0700 Subject: [PATCH 47/69] revert(event-db): Revert extension changes to sql files after fixing sqlfluff version --- .../{insert_update_state.sql.obsolete => insert_update_state.sql} | 0 ...36_registration.sql.obsolete => select_cip36_registration.sql} | 0 .../config/{select_config.sql.obsolete => select_config.sql} | 0 ...otal_utxo_amount.sql.obsolete => select_total_utxo_amount.sql} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/{insert_update_state.sql.obsolete => insert_update_state.sql} (100%) rename catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/{select_cip36_registration.sql.obsolete => select_cip36_registration.sql} (100%) rename catalyst-gateway/bin/src/db/event/cardano.obsolete/config/{select_config.sql.obsolete => select_config.sql} (100%) rename catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/{select_total_utxo_amount.sql.obsolete => select_total_utxo_amount.sql} (100%) diff --git a/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql.obsolete b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql.obsolete rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql diff --git a/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql.obsolete b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql.obsolete rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql diff --git a/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql.obsolete b/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql.obsolete rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql diff --git a/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql.obsolete b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql similarity index 100% rename from catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql.obsolete rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql From 273e15d3ad829f08e7fdf96850befe7e41cf29dc Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 12 Sep 2024 15:58:07 +0700 Subject: [PATCH 48/69] fix(frontend): Regenerate the dart api interface file, and add doing that to the pre-push just command --- .../catalyst_gateway/cat_gateway_api.enums.swagger.dart | 2 -- justfile | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart index bfe9f1faa12..a88cc123f58 100644 --- 
a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart @@ -39,8 +39,6 @@ enum Network { @JsonValue('mainnet') mainnet('mainnet'), - @JsonValue('testnet') - testnet('testnet'), @JsonValue('preprod') preprod('preprod'), @JsonValue('preview') diff --git a/justfile b/justfile index ea364f38e08..88cd8c7737e 100644 --- a/justfile +++ b/justfile @@ -19,6 +19,7 @@ check-spelling: # Pre Push Checks - intended to be run by a git pre-push hook. pre-push: check-markdown check-spelling just catalyst-gateway/pre-push + earthly ./catalyst_voices+code-generator --platform=linux/amd64 --save_locally=true # Run cat-gateway natively on preprod run-cat-gateway: From 45848ebc78367d274dc23db5aa1384540f0aa868 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 12 Sep 2024 18:58:59 +0700 Subject: [PATCH 49/69] fix(backend): temporarily disable API tests --- catalyst-gateway/tests/api_tests/Earthfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile index bcab3ccc733..c5a96943e59 100644 --- a/catalyst-gateway/tests/api_tests/Earthfile +++ b/catalyst-gateway/tests/api_tests/Earthfile @@ -10,7 +10,7 @@ builder: COPY ./snapshot_tool-56364174.json . DO python-ci+BUILDER -test: +disabled-test: FROM +builder RUN apk update && apk add iptables-legacy # workaround for https://github.com/earthly/earthly/issues/3784 @@ -29,7 +29,7 @@ test: SAVE ARTIFACT coverage.lcov AS LOCAL api-tests.coverage.info END -nightly-test: +disabled-nightly-test: FROM +builder RUN apk update && apk add iptables-legacy # workaround for https://github.com/earthly/earthly/issues/3784 From c80008e50ec3859b3fd5399effab0870db8869f3 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 12 Sep 2024 19:02:56 +0700 Subject: [PATCH 50/69] fix(backend): Also temporarily stop workflow consuming test reports that are disabled --- .github/workflows/generate-allure-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/generate-allure-report.yml b/.github/workflows/generate-allure-report.yml index 91ae87570af..dbf4a774a96 100644 --- a/.github/workflows/generate-allure-report.yml +++ b/.github/workflows/generate-allure-report.yml @@ -173,7 +173,7 @@ jobs: - name: Normalize coverage report paths run: | sed -i -e 's/SF:\/root/SF:catalyst-gateway/g' ${{ env.COVERAGE_REPORT_PATH }}/cat-gateway.coverage.info - sed -i -e 's/SF:/SF:catalyst-gateway\/tests\/api_tests\//g' ${{ env.COVERAGE_REPORT_PATH }}/api-tests.coverage.info + # sed -i -e 's/SF:/SF:catalyst-gateway\/tests\/api_tests\//g' ${{ env.COVERAGE_REPORT_PATH }}/api-tests.coverage.info - name: Coveralls env: From 13b4968df6654c490a61784cead27b1e7dc0859a Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 12 Sep 2024 19:07:12 +0700 Subject: [PATCH 51/69] fix(ci): Try and stop coveralls running for api-tests --- .github/workflows/generate-allure-report.yml | 118 +++++++++---------- 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/.github/workflows/generate-allure-report.yml b/.github/workflows/generate-allure-report.yml index dbf4a774a96..8da10eed64b 100644 --- a/.github/workflows/generate-allure-report.yml +++ b/.github/workflows/generate-allure-report.yml @@ -3,26 +3,26 @@ name: Allure Report Generation on: pull_request: push: - branches: 'main' + branches: 
"main" permissions: - contents: write - pull-requests: write - id-token: write + contents: write + pull-requests: write + id-token: write concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} cancel-in-progress: true env: - AWS_REGION: eu-central-1 - AWS_ROLE_ARN: arn:aws:iam::332405224602:role/ci - EARTHLY_TARGET: docker - ECR_REGISTRY: 332405224602.dkr.ecr.eu-central-1.amazonaws.com - ALLURE_REPORT_PATH: allure-report - COVERAGE_REPORT_PATH: coverage-report - REPORT_EXT: .junit-report.xml - COVERAGE_EXT: .info + AWS_REGION: eu-central-1 + AWS_ROLE_ARN: arn:aws:iam::332405224602:role/ci + EARTHLY_TARGET: docker + ECR_REGISTRY: 332405224602.dkr.ecr.eu-central-1.amazonaws.com + ALLURE_REPORT_PATH: allure-report + COVERAGE_REPORT_PATH: coverage-report + REPORT_EXT: .junit-report.xml + COVERAGE_EXT: .info jobs: generate-test-reports: @@ -34,21 +34,21 @@ jobs: - name: Setup CI uses: input-output-hk/catalyst-ci/actions/setup@master with: - aws_role_arn: ${{ env.AWS_ROLE_ARN }} - aws_region: ${{ env.AWS_REGION }} - earthly_runner_secret: ${{ secrets.EARTHLY_RUNNER_SECRET }} + aws_role_arn: ${{ env.AWS_ROLE_ARN }} + aws_region: ${{ env.AWS_REGION }} + earthly_runner_secret: ${{ secrets.EARTHLY_RUNNER_SECRET }} - name: Get catalyst gateway unit test report uses: input-output-hk/catalyst-ci/actions/run@master if: always() continue-on-error: true with: - earthfile: ./catalyst-gateway/ - flags: - targets: build - target_flags: - runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} - artifact: "false" + earthfile: ./catalyst-gateway/ + flags: + targets: build + target_flags: + runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} + artifact: "false" - name: Get schemathesis test report uses: input-output-hk/catalyst-ci/actions/run@master @@ -67,42 +67,42 @@ jobs: if: always() continue-on-error: true with: - earthfile: ./catalyst_voices/ - flags: - targets: test-unit - target_flags: - runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} - artifact: "false" + earthfile: ./catalyst_voices/ + flags: + targets: test-unit + target_flags: + runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} + artifact: "false" - name: Get python api test report uses: input-output-hk/catalyst-ci/actions/run@master if: always() continue-on-error: true with: - earthfile: ./catalyst-gateway/tests/api_tests/ - flags: --allow-privileged - targets: test - target_flags: - runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} - artifact: "false" + earthfile: ./catalyst-gateway/tests/api_tests/ + flags: --allow-privileged + targets: test + target_flags: + runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} + artifact: "false" - name: Collect and upload test reports uses: actions/upload-artifact@v4 if: always() with: - name: test-reports - path: '**/*${{ env.REPORT_EXT }}' - if-no-files-found: error - retention-days: 1 + name: test-reports + path: "**/*${{ env.REPORT_EXT }}" + if-no-files-found: error + retention-days: 1 - name: Collect and upload test coverage uses: actions/upload-artifact@v4 if: always() with: - name: coverage-reports - path: '**/*${{ env.COVERAGE_EXT }}' - if-no-files-found: error - retention-days: 1 + name: coverage-reports + path: "**/*${{ env.COVERAGE_EXT }}" + if-no-files-found: error + retention-days: 1 generate-allure-report: name: Generate allure report @@ -117,10 +117,10 @@ jobs: - name: Setup Allure report run: | - mkdir -p ${{ env.ALLURE_REPORT_PATH }} - shopt -s globstar - cp **/*${{ env.REPORT_EXT }} ${{ env.ALLURE_REPORT_PATH }} - ls ${{ 
env.ALLURE_REPORT_PATH }} + mkdir -p ${{ env.ALLURE_REPORT_PATH }} + shopt -s globstar + cp **/*${{ env.REPORT_EXT }} ${{ env.ALLURE_REPORT_PATH }} + ls ${{ env.ALLURE_REPORT_PATH }} - name: Checkout gh-pages uses: actions/checkout@v4 @@ -132,8 +132,8 @@ jobs: uses: mgrybyk/allure-report-branch-action@v1 id: allure with: - report_id: 'test-report' - gh_pages: 'gh-pages-dir' + report_id: "test-report" + gh_pages: "gh-pages-dir" report_dir: ${{ env.ALLURE_REPORT_PATH }} - name: Git push to gh-pages @@ -165,15 +165,15 @@ jobs: - name: Collect coverage report run: | - mkdir -p ${{ env.COVERAGE_REPORT_PATH }} - shopt -s globstar - cp **/*${{ env.COVERAGE_EXT }} ${{ env.COVERAGE_REPORT_PATH }} - ls ${{ env.COVERAGE_REPORT_PATH }} + mkdir -p ${{ env.COVERAGE_REPORT_PATH }} + shopt -s globstar + cp **/*${{ env.COVERAGE_EXT }} ${{ env.COVERAGE_REPORT_PATH }} + ls ${{ env.COVERAGE_REPORT_PATH }} - name: Normalize coverage report paths run: | - sed -i -e 's/SF:\/root/SF:catalyst-gateway/g' ${{ env.COVERAGE_REPORT_PATH }}/cat-gateway.coverage.info - # sed -i -e 's/SF:/SF:catalyst-gateway\/tests\/api_tests\//g' ${{ env.COVERAGE_REPORT_PATH }}/api-tests.coverage.info + sed -i -e 's/SF:\/root/SF:catalyst-gateway/g' ${{ env.COVERAGE_REPORT_PATH }}/cat-gateway.coverage.info + # sed -i -e 's/SF:/SF:catalyst-gateway\/tests\/api_tests\//g' ${{ env.COVERAGE_REPORT_PATH }}/api-tests.coverage.info - name: Coveralls env: @@ -188,6 +188,7 @@ jobs: parallel: true - name: Coveralls + if: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} uses: coverallsapp/github-action@v2 @@ -211,15 +212,14 @@ jobs: base-path: "/home/runner/work/catalyst-voices/catalyst-voices/" parallel: true - upload-coverage-report: name: Upload coverage report needs: [generate-coverage-report] if: ${{ always() }} runs-on: ubuntu-latest steps: - - name: Coveralls Finished - uses: coverallsapp/github-action@v2 - with: - parallel-finished: true - carryforward: "rust-unit-test,flutter-test" + - name: Coveralls Finished + uses: coverallsapp/github-action@v2 + with: + parallel-finished: true + carryforward: "rust-unit-test,flutter-test" From e4a6939203022d1cd24688b3ef6f238689b2c4b1 Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Fri, 13 Sep 2024 16:13:48 +0700 Subject: [PATCH 52/69] ci(general): Replace temp CI branch with tagged release --- Earthfile | 8 ++++---- catalyst-gateway/Earthfile | 2 +- catalyst-gateway/event-db/Earthfile | 2 +- catalyst-gateway/tests/Earthfile | 2 +- catalyst-gateway/tests/api_tests/Earthfile | 2 +- catalyst_voices/Earthfile | 2 +- catalyst_voices/uikit_example/Earthfile | 2 +- docs/Earthfile | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Earthfile b/Earthfile index 5f714c2c506..95f6e0ee2c3 100644 --- a/Earthfile +++ b/Earthfile @@ -1,8 +1,8 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:feat/cat-gateway-changes AS mdlint-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:feat/cat-gateway-changes AS cspell-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:feat/cat-gateway-changes AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:v3.2.03 AS mdlint-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:v3.2.03 AS cspell-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.03 AS postgresql-ci FROM debian:stable-slim @@ -18,7 +18,7 @@ markdown-check-fix: DO mdlint-ci+MDLINT_LOCALLY --src=$(echo ${PWD}) --fix=--fix -# Make sure the project dictionary is properly sorted. 
+# clean-spelling-list : Make sure the project dictionary is properly sorted. clean-spelling-list: DO cspell-ci+CLEAN diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index 2e0ebc8e0bc..0795a93f6f2 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/cat-gateway-changes AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.2.03 AS rust-ci #cspell: words rustfmt toolsets USERARCH stdcfgs diff --git a/catalyst-gateway/event-db/Earthfile b/catalyst-gateway/event-db/Earthfile index 5398ab0239e..823d60a0c81 100644 --- a/catalyst-gateway/event-db/Earthfile +++ b/catalyst-gateway/event-db/Earthfile @@ -3,7 +3,7 @@ # the database and its associated software. VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:feat/cat-gateway-changes AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.03 AS postgresql-ci # cspell: words diff --git a/catalyst-gateway/tests/Earthfile b/catalyst-gateway/tests/Earthfile index 11758f7766f..41d344b0241 100644 --- a/catalyst-gateway/tests/Earthfile +++ b/catalyst-gateway/tests/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:feat/cat-gateway-changes AS spectral-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.2.03 AS spectral-ci # test-lint-openapi - OpenAPI linting from an artifact # testing whether the OpenAPI generated during build stage follows good practice. diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile index c5a96943e59..0aac1a2e786 100644 --- a/catalyst-gateway/tests/api_tests/Earthfile +++ b/catalyst-gateway/tests/api_tests/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:feat/cat-gateway-changes AS python-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.2.03 AS python-ci builder: FROM python-ci+python-base diff --git a/catalyst_voices/Earthfile b/catalyst_voices/Earthfile index 52c076994c7..100cf06b38d 100644 --- a/catalyst_voices/Earthfile +++ b/catalyst_voices/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 IMPORT ../catalyst-gateway AS catalyst-gateway -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:feat/cat-gateway-changes AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.03 AS flutter-ci # Copy all the necessary files and running bootstrap builder: diff --git a/catalyst_voices/uikit_example/Earthfile b/catalyst_voices/uikit_example/Earthfile index 7515712484c..d4ec00ed9e8 100644 --- a/catalyst_voices/uikit_example/Earthfile +++ b/catalyst_voices/uikit_example/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 IMPORT ../ AS catalyst-voices -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:feat/cat-gateway-changes AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.03 AS flutter-ci # local-build-web - build web version of UIKit example. # Prefixed by "local" to make sure it's not auto triggered, the target was diff --git a/docs/Earthfile b/docs/Earthfile index 901843b2b61..79eb3c40d2d 100644 --- a/docs/Earthfile +++ b/docs/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:feat/cat-gateway-changes AS docs-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.2.03 AS docs-ci IMPORT .. 
AS repo IMPORT ../catalyst-gateway AS catalyst-gateway From 0789214f84d2546a5f0dfc0208d6d276da43b3bc Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Tue, 17 Sep 2024 14:54:12 +0700 Subject: [PATCH 53/69] feat: Add Handler for Permissionless Auth (#825) * docs(cips): Add Formal Defintion of auth token * fix(docs): Fix comments in cddl file * fix(docs): sig size * fix(docs): Rename CDDL for the auth token * docs(docs): Add auth-header documentation * docs(docs): Fix markdown line length error * docs(general): Fix spelling * fix(backend-lib): Bump to catalyst-libs tagged version * fix(backend): stub out obsolete code (to be removed in follow up PR). * fix(backend-lib): code format * fix(backend): remove unused crate dependencies --- .config/dictionaries/project.dic | 1 + .markdownlint.jsonc | 2 +- catalyst-gateway/bin/Cargo.toml | 8 +- .../cip36.cddl | 0 .../cip36_registration.cddl | 0 .../cip36_witness.cddl | 0 .../mod.rs.obsolete} | 1 - catalyst-gateway/bin/src/cardano/mod.rs | 2 +- catalyst-gateway/bin/src/cardano/util.rs | 2 + .../cip36_registration/mod.rs | 33 +++--- .../bin/src/service/api/cardano/types.rs | 45 +++++++- .../permissionless-auth/auth-header.md | 106 ++++++++++++++++++ .../permissionless-auth/auth-token.cddl | 34 ++++++ 13 files changed, 210 insertions(+), 24 deletions(-) rename catalyst-gateway/bin/src/cardano/{cip36_registration => cip36_registration_obsolete}/cip36.cddl (100%) rename catalyst-gateway/bin/src/cardano/{cip36_registration => cip36_registration_obsolete}/cip36_registration.cddl (100%) rename catalyst-gateway/bin/src/cardano/{cip36_registration => cip36_registration_obsolete}/cip36_witness.cddl (100%) rename catalyst-gateway/bin/src/cardano/{cip36_registration/mod.rs => cip36_registration_obsolete/mod.rs.obsolete} (99%) create mode 100644 docs/src/catalyst-standards/permissionless-auth/auth-header.md create mode 100644 docs/src/catalyst-standards/permissionless-auth/auth-token.cddl diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index da99b300cf2..cf5bb61534b 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -29,6 +29,7 @@ carryforward Catalyst CBOR cborg +cborseq cdrs cdylib certdir diff --git a/.markdownlint.jsonc b/.markdownlint.jsonc index be69f34a424..bee3d794e0c 100644 --- a/.markdownlint.jsonc +++ b/.markdownlint.jsonc @@ -211,7 +211,7 @@ "MD045": true, // MD046/code-block-style - Code block style // Code Blocks are used by Admonitions and need to be indented. - // Actual code should be fenced, this can;t be enforced by mdlint. + // Actual code should be fenced, this can't be enforced by mdlint. 
"MD046": { // Block style "style": "consistent" diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 99421fbc4c6..5d507bdf354 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -15,7 +15,7 @@ repository.workspace = true workspace = true [dependencies] -cardano-chain-follower = { version = "0.0.2", git = "https://github.com/input-output-hk/catalyst-libs.git", branch = "main" } +cardano-chain-follower = { version = "0.0.2", git = "https://github.com/input-output-hk/catalyst-libs.git", tag = "v0.0.2" } pallas = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } pallas-traverse = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } @@ -50,12 +50,12 @@ gethostname = "0.5.0" hex = "0.4.3" handlebars = "6.0.0" anyhow = "1.0.86" -cddl = "0.9.4" -ciborium = "0.2.2" +#cddl = "0.9.4" +#ciborium = "0.2.2" # stringzilla = "3.9.3" duration-string = "0.4.0" build-info = "0.0.38" -ed25519-dalek = "2.1.1" +#ed25519-dalek = "2.1.1" scylla = { version = "0.14.0", features = ["cloud", "full-serialization"] } strum = { version = "0.26.3", features = ["derive"] } # strum_macros = "0.26.4" diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/cip36.cddl b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36.cddl similarity index 100% rename from catalyst-gateway/bin/src/cardano/cip36_registration/cip36.cddl rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36.cddl diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/cip36_registration.cddl b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_registration.cddl similarity index 100% rename from catalyst-gateway/bin/src/cardano/cip36_registration/cip36_registration.cddl rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_registration.cddl diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/cip36_witness.cddl b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_witness.cddl similarity index 100% rename from catalyst-gateway/bin/src/cardano/cip36_registration/cip36_witness.cddl rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_witness.cddl diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/mod.rs.obsolete similarity index 99% rename from catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/mod.rs.obsolete index a1dae967a59..ea3ab2b1a28 100644 --- a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/mod.rs.obsolete @@ -104,7 +104,6 @@ pub(crate) struct Cip36Metadata { impl Cip36Metadata { /// Create new `Cip36Registration` from tx metadata /// Collect secondary errors for granular json error report - #[allow(dead_code)] pub(crate) fn generate_from_tx_metadata( tx_metadata: &MultiEraMeta, network: Network, ) -> Option { diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index 166994e9cab..74002dad48e 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -15,7 +15,7 @@ use crate::{ settings::Settings, }; -pub(crate) mod cip36_registration; +// pub(crate) mod 
cip36_registration_obsolete; pub(crate) mod util; /// Blocks batch length that will trigger the blocks buffer to be written to the database. diff --git a/catalyst-gateway/bin/src/cardano/util.rs b/catalyst-gateway/bin/src/cardano/util.rs index 26f8a11f210..9916797b7f5 100644 --- a/catalyst-gateway/bin/src/cardano/util.rs +++ b/catalyst-gateway/bin/src/cardano/util.rs @@ -19,9 +19,11 @@ pub type StakeCredentialHash = String; pub type StakeCredentialKey = String; /// Hash size +#[allow(dead_code)] pub(crate) const BLAKE_2B_256_HASH_SIZE: usize = 256 / 8; /// Helper function to generate the `blake2b_256` hash of a byte slice +#[allow(dead_code)] pub(crate) fn hash(bytes: &[u8]) -> [u8; BLAKE_2B_256_HASH_SIZE] { let mut digest = [0u8; BLAKE_2B_256_HASH_SIZE]; let mut context = Blake2b::new(BLAKE_2B_256_HASH_SIZE); diff --git a/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs index 1c487f31cf2..2624934aea0 100644 --- a/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs @@ -63,7 +63,6 @@ pub(crate) struct IndexedVoterRegistrationParams { impl IndexedVoterRegistrationParams { /// Creates voter registration indexing data from block data. - #[allow(dead_code)] pub(crate) fn from_block_data( block: &MultiEraBlock, network: Network, ) -> Option> { @@ -159,16 +158,19 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_voter_registration (tx_id, stake_credential, public_voting_key, payment_address, nonce, metadata_cip36, stats, valid) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new(sink, &[ - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::INT8, - Type::BYTEA, - Type::JSONB, - Type::BOOL, - ]); + let writer = BinaryCopyInWriter::new( + sink, + &[ + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::INT8, + Type::BYTEA, + Type::JSONB, + Type::BOOL, + ], + ); tokio::pin!(writer); for params in values { @@ -204,11 +206,10 @@ impl EventDB { pub(crate) async fn get_registration_info( stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, ) -> anyhow::Result<(TxId, PaymentAddress, PublicVotingInfo, Nonce)> { - let rows = Self::query(SELECT_VOTER_REGISTRATION_SQL, &[ - &stake_credential, - &network.to_string(), - &slot_num, - ]) + let rows = Self::query( + SELECT_VOTER_REGISTRATION_SQL, + &[&stake_credential, &network.to_string(), &slot_num], + ) .await?; let row = rows.first().ok_or(NotFoundError)?; diff --git a/catalyst-gateway/bin/src/service/api/cardano/types.rs b/catalyst-gateway/bin/src/service/api/cardano/types.rs index af15c5f8622..f12c4c649c9 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/types.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/types.rs @@ -4,7 +4,50 @@ //! Event DB logic for chain-sync. They should be replaced with proper types in a better //! place. 
-use crate::cardano::cip36_registration::VotingInfo;
+use cryptoxide::{blake2b::Blake2b, digest::Digest};
+use serde::{Deserialize, Serialize};
+
+/// Pub key
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+pub(crate) struct PubKey(Vec<u8>);
+
+impl PubKey {
+    /// Get credentials, a blake2b 28 bytes hash of the pub key
+    #[allow(dead_code)]
+    pub(crate) fn get_credentials(&self) -> [u8; 28] {
+        let mut digest = [0u8; 28];
+        let mut context = Blake2b::new(28);
+        context.input(&self.0);
+        context.result(&mut digest);
+        digest
+    }
+
+    /// Get bytes
+    pub(crate) fn bytes(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+/// The source of voting power for a given registration
+///
+/// The voting power can either come from:
+/// - a single wallet, OR
+/// - a set of delegations
+#[derive(Serialize, Deserialize)]
+#[serde(untagged)]
+#[derive(Debug, Clone, PartialEq)]
+pub(crate) enum VotingInfo {
+    /// Direct voting
+    ///
+    /// Voting power is based on the staked ada of the given key
+    Direct(PubKey),
+
+    /// Delegated voting
+    ///
+    /// Voting power is based on the staked ada of the delegated keys
+    /// order of elements is important and must be preserved.
+    Delegated(Vec<(PubKey, i64)>),
+}
 
 /// Block time
 pub(crate) type DateTime = chrono::DateTime<chrono::Utc>;
diff --git a/docs/src/catalyst-standards/permissionless-auth/auth-header.md b/docs/src/catalyst-standards/permissionless-auth/auth-header.md
new file mode 100644
index 00000000000..a32ccbfcfc1
--- /dev/null
+++ b/docs/src/catalyst-standards/permissionless-auth/auth-header.md
@@ -0,0 +1,106 @@
+# Permission-less Authentication for Catalyst
+
+## Overview
+
+There is a requirement to establish identity with the catalyst backend to provide secure and
+contextual access to resources managed by project Catalyst.
+
+For example, a query of a voter's current voting power should derive that information from the voter's identity.
+
+This provides better security and also simplifies APIs because they can have implicit parameters based on
+the verified identity of the user.
+
+This document defines the format of the Authentication Token, and how it should be used.
+
+## Token Format
+
+The Authentication Token is based loosely on JWT.
+It consists of an Authentication Header attached to every authenticated request, and an encoded, signed token.
+
+This token can be attached either to individual HTTP requests or to the beginning of a web socket connection.
+
+The authentication header is in the format:
+
+```http
+Authorization: Bearer catv1.<encoded-token>
+```
+
+The `<encoded-token>` is a [base64-url] encoded binary token whose format is defined in
+[auth-token.cddl](./auth-token.cddl).
+
+### Encoded Binary Token Format
+
+The Encoded Binary Token is a [CBOR sequence] that consists of 3 fields.
+
+* `kid` : The key identifier.
+* `ulid` : A ULID which defines when the token was issued, and a random nonce.
+* `signature` : The signature over the `kid` and `ulid` fields.
+
+#### kid
+
+The Key ID is used to identify the Public Key Certificate, which identifies the Public Key used to sign the token.
+Because this certificate is the Role 0 Certificate from the on-chain Role-Based Access Control specification,
+it can also be used to provide identifying information about the user.
+Such as:
+
+* Stake Address
+* Registered Rewards Address
+* The Identity of the issuer of the Certificate (Self Signed, or issued by an Authority).
+* Other Roles keys they have registered.
+* Or any other data attached to the registration.
+
+The `kid` is simply the Blake2b-128 hash of the Role 0 Certificate.
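+
+As a non-normative sketch, computing that hash with the `cryptoxide` Blake2b
+primitives this codebase already uses (the certificate bytes here are a
+placeholder) could look like:
+
+```rust
+use cryptoxide::{blake2b::Blake2b, digest::Digest};
+
+/// Blake2b-128 (16 byte) hash of the encoded Role 0 Certificate.
+fn kid_from_role0_cert(cert_bytes: &[u8]) -> [u8; 16] {
+    let mut kid = [0u8; 16];
+    let mut context = Blake2b::new(16);
+    context.input(cert_bytes);
+    context.result(&mut kid);
+    kid
+}
+```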
+
+The backend will use this hash to identify the certificate from the on-chain registration and use
+that information both to authenticate the user and to provide identifying information about them to the
+backend.
+
+#### ulid
+
+A standard [ULID] will be created when the token is first issued.
+The [ULID] contains a timestamp of when it was created, and a random nonce.
+The timestamp is used to protect against replay attacks by allowing the backend to reject
+authentication if the timestamp is too old (or too far into the future).
+
+#### signature
+
+Initially, the only supported signature algorithm is ED25519.
+However, the KID could in future refer to a certificate which uses different cryptography.
+Accordingly, the formal specification of the signature is that it is as many bytes as required to
+embed a signature of the type defined by the certificate identified by the `kid`.
+
+For ED25519, the signature will be 64 bytes.
+
+## Example Token
+
+The [CDDL Specification](./auth-token.cddl) contains an example token.
+This is binary.
+
+The binary of that example is:
+
+```hex
+50 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF
+50 01 91 2C EC 71 CF 2C 4C 14 A5 5D 55 85 D9 4D 7B
+58 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+```
+
+[base64-url] encoded it becomes:
+
+```base64
+UAARIjNEVWZ3iJmqu8zd7v9QAZEs7HHPLEwUpV1VhdlNe1hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+```
+
+The full token header would then be:
+
+```http
+Authorization: Bearer catv1.UAARIjNEVWZ3iJmqu8zd7v9QAZEs7HHPLEwUpV1VhdlNe1hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+```
+
+* [base64-url]
+* [CBOR sequence]
+* [ULID]
diff --git a/docs/src/catalyst-standards/permissionless-auth/auth-token.cddl b/docs/src/catalyst-standards/permissionless-auth/auth-token.cddl
new file mode 100644
index 00000000000..bd12a907b78
--- /dev/null
+++ b/docs/src/catalyst-standards/permissionless-auth/auth-token.cddl
@@ -0,0 +1,34 @@
+; Permissionless Authorization using RBAC Certificates for Catalyst.
+;
+; Token Data Definition
+
+auth-token-v1 = bytes .cborseq auth-token-v1-fields
+
+; Note: This is NOT an array; it is a set of fields in a CBOR sequence.
+auth-token-v1-fields = [ kid, ulid, signature ]
+
+; Key ID - Blake2b-128 hash of the Role 0 Certificate defining the Session public key.
+; This Certificate defines the cryptography used to sign the token.
+; Currently, ONLY ed25519 is supported, but other signature cryptography may be allowed in future.
+kid = (bstr .size 16)
+
+; ULID - Identifier for this token, encodes both the time the token was issued and a random nonce.
+ulid = (bstr .size 16)
+
+; Signature - ED25519 Signature over the preceding two fields.
+; Must be signed using the Private Key of the Role 0 Certificate identified by the Kid field.
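; Worked size arithmetic (illustrative, added for clarity): a `bstr .size 16`
; field encodes as 1 CBOR header byte (0x50) + 16 payload bytes = 17 bytes, so
; the signed message cbor(kid) + cbor(ulid) is exactly 34 bytes. The signature
; field encodes as 2 header bytes (0x58 0x40) + 64 payload bytes.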
+signature = (bstr .size 64) + +; Example Signed Token +; # CBOR sequence with 3 elements +; 50 # bytes(16) +; 00112233445566778899AABBCCDDEEFF +; 50 # bytes(16) +; 01912CEC71CF2C4C14A55D5585D94D7B +; 58 40 # bytes(64) +; 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +; +; Where: +; kid = 0x00112233445566778899aabbccddeeff +; ulid = 0x01912cec71cf2c4c14a55d5585d94d7b +; signature = 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 From f2dee3e10a167aff4a898194273ea9e01f3ea078 Mon Sep 17 00:00:00 2001 From: cong-or <60357579+cong-or@users.noreply.github.com> Date: Tue, 17 Sep 2024 09:50:12 +0100 Subject: [PATCH 54/69] feat: auth token (#723) * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * iron out tests * iron out tests * refactor(auth token encode and decode): ed25519 Signature cbor fields Sig over the preceding two fields - sig(cbor(kid), cbor(ulid)) * refactor(auth token encode and decode): ed25519 Signature cbor fields Sig over the preceding two fields - sig(cbor(kid), cbor(ulid)) * feat(cat security scheme): open api * feat(cat security scheme): open api * feat(mock cert state): given kid from bearer return pub key * feat(auth token): cache TTL * feat(auth token): cache TTL * feat(auth token): cache TT * ci(spell check): fix * ci(spell check): fix * ci(spell check): fix * refactor(clippy): housekeeping tidy * refactor(clippy): housekeeping tidy * refactor(clippy): housekeeping tidy * refactor(clippy): housekeeping tidy * fix(backend): Re-enable dependent crates used by this code * fix(backend): clippy lints * fix(backend): spelling --------- Co-authored-by: Steven Johnson Co-authored-by: Steven Johnson --- .config/dictionaries/project.dic | 1 + catalyst-gateway/bin/Cargo.toml | 8 +- .../bin/src/service/api/auth/endpoint.rs | 128 ++++++++++++++++ .../bin/src/service/api/auth/mod.rs | 4 + .../bin/src/service/api/auth/token.rs | 145 ++++++++++++++++++ .../bin/src/service/api/health/mod.rs | 1 - catalyst-gateway/bin/src/service/api/mod.rs | 3 +- .../service/docs/stoplight_elements/mod.rs | 2 +- catalyst-gateway/bin/src/service/mod.rs | 1 + 9 files changed, 287 insertions(+), 6 deletions(-) create mode 100644 catalyst-gateway/bin/src/service/api/auth/endpoint.rs create mode 100644 catalyst-gateway/bin/src/service/api/auth/mod.rs create mode 100644 catalyst-gateway/bin/src/service/api/auth/token.rs diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index cf5bb61534b..30195ba66e4 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -54,6 +54,7 @@ Cupertino dalek damian-molinski DAPPLICATION +dashmap dbeaver dbschema dbsync diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 5d507bdf354..8d614a96b80 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -55,7 +55,7 @@ anyhow = "1.0.86" # stringzilla = "3.9.3" duration-string = "0.4.0" build-info = "0.0.38" -#ed25519-dalek = "2.1.1" +ed25519-dalek = "2.1.1" scylla = { version = "0.14.0", features = ["cloud", "full-serialization"] } strum = { version = "0.26.3", features = ["derive"] } # strum_macros = "0.26.4" @@ 
-63,7 +63,7 @@ openssl = { version = "0.10.66", features = ["vendored"] } num-bigint = "0.4.6" futures = "0.3.30" rand = "0.8.5" -# moka = { version = "0.12.8", features = ["future"] } +moka = { version = "0.12.8", features = ["future"] } crossbeam-skiplist = "0.1.3" rust_decimal = { version = "1.36.0", features = [ "serde-with-float", @@ -88,6 +88,8 @@ cpu-time = "1.0.0" prometheus = "0.13.4" rust-embed = "8.5.0" num-traits = "0.2.19" +base64 = "0.22.1" +dashmap = "6.0.1" [build-dependencies] -build-info-build = "0.0.38" +build-info-build = "0.0.38" \ No newline at end of file diff --git a/catalyst-gateway/bin/src/service/api/auth/endpoint.rs b/catalyst-gateway/bin/src/service/api/auth/endpoint.rs new file mode 100644 index 00000000000..54bf7e46224 --- /dev/null +++ b/catalyst-gateway/bin/src/service/api/auth/endpoint.rs @@ -0,0 +1,128 @@ +use std::{sync::LazyLock, time::Duration}; + +use dashmap::DashMap; +use ed25519_dalek::{Signature, VerifyingKey, PUBLIC_KEY_LENGTH}; +use moka::future::Cache; +use poem::{error::ResponseError, http::StatusCode, Request}; +use poem_openapi::{auth::Bearer, SecurityScheme}; +use tracing::error; + +use super::token::{Kid, SignatureEd25519, UlidBytes}; +use crate::service::api::auth::token::decode_auth_token_ed25519; + +/// Decoded token consists of a Kid, Ulid and Signature +pub type DecodedAuthToken = (Kid, UlidBytes, SignatureEd25519); + +/// Auth token in the form of catv1.. +pub type EncodedAuthToken = String; + +/// Cached auth tokens +static CACHE: LazyLock> = LazyLock::new(|| { + Cache::builder() + // Time to live (TTL): 30 minutes + .time_to_live(Duration::from_secs(30 * 60)) + // Time to idle (TTI): 5 minutes + .time_to_idle(Duration::from_secs(5 * 60)) + // Create the cache. + .build() +}); + +/// Mocked Valid certificates +/// TODO: the following is temporary state for POC until RBAC database is complete. +static CERTS: LazyLock> = LazyLock::new(|| { + /// Mock KID + const KID: &str = "0467de6bd945b9207bfa09d846b77ef5"; + + let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [ + 180, 91, 130, 149, 226, 112, 29, 45, 188, 141, 64, 147, 250, 233, 75, 151, 151, 53, 248, + 197, 225, 122, 24, 67, 207, 100, 162, 152, 232, 102, 89, 162, + ]; + + let cert_map = DashMap::new(); + cert_map.insert(KID.to_string(), public_key_bytes); + cert_map +}); + +#[derive(SecurityScheme)] +#[oai( + rename = "CatalystSecurityScheme", + ty = "bearer", + key_in = "header", + key_name = "Bearer", + checker = "checker_api_catalyst_auth" +)] +#[allow(dead_code)] +/// Auth token security scheme +/// Add to endpoint params e.g async fn endpoint(&self, auth: `CatalystSecurityScheme`) +pub struct CatalystSecurityScheme(pub DecodedAuthToken); + +#[derive(Debug, thiserror::Error)] +#[error("Corrupt Auth Token")] +pub struct AuthTokenError; + +impl ResponseError for AuthTokenError { + fn status(&self) -> StatusCode { + StatusCode::FORBIDDEN + } +} + +/// When added to an endpoint, this hook is called per request to verify the bearer token +/// is valid. +async fn checker_api_catalyst_auth( + _req: &Request, bearer: Bearer, +) -> poem::Result { + if CACHE.contains_key(&bearer.token) { + // This get() will extend the entry life for another 5 minutes. + // Even though we keep calling get(), the entry will expire + // after 30 minutes (TTL) from the origin insert(). + if let Some((kid, ulid, sig)) = CACHE.get(&bearer.token).await { + Ok((kid, ulid, sig)) + } else { + error!("Auth token is not in the cache: {:?}", bearer.token); + Err(AuthTokenError)? 
+        }
+    } else {
+        // Decode bearer token
+        let (kid, ulid, sig, msg) = match decode_auth_token_ed25519(&bearer.token.clone()) {
+            Ok((kid, ulid, sig, msg)) => (kid, ulid, sig, msg),
+            Err(err) => {
+                error!("Corrupt auth token: {:?}", err);
+                Err(AuthTokenError)?
+            },
+        };
+
+        // Get pub key from CERTS state given decoded KID from decoded bearer token
+        let pub_key_bytes = if let Some(cert) = CERTS.get(&hex::encode(kid.0)) {
+            *cert
+        } else {
+            error!("Invalid KID {:?}", kid);
+            Err(AuthTokenError)?
+        };
+
+        let public_key = match VerifyingKey::from_bytes(&pub_key_bytes) {
+            Ok(pub_key) => pub_key,
+            Err(err) => {
+                error!("Invalid public key: {:?}", err);
+                Err(AuthTokenError)?
+            },
+        };
+
+        // Strictly verify a signature on a message with this key-pair's public key.
+        if public_key
+            .verify_strict(&msg, &Signature::from_bytes(&sig.0))
+            .is_err()
+        {
+            error!(
+                "Message {:?} was not signed by this key-pair {:?}",
+                hex::encode(msg),
+                public_key,
+            );
+            Err(AuthTokenError)?;
+        }
+
+        // This entry will expire after 5 minutes (TTI) if there is no get().
+        CACHE.insert(bearer.token, (kid, ulid, sig.clone())).await;
+
+        Ok((kid, ulid, sig))
+    }
+}
diff --git a/catalyst-gateway/bin/src/service/api/auth/mod.rs b/catalyst-gateway/bin/src/service/api/auth/mod.rs
new file mode 100644
index 00000000000..d54d3fd16ce
--- /dev/null
+++ b/catalyst-gateway/bin/src/service/api/auth/mod.rs
@@ -0,0 +1,4 @@
+/// Cat security scheme
+pub mod endpoint;
+/// Token encoding/decoding logic
+mod token;
diff --git a/catalyst-gateway/bin/src/service/api/auth/token.rs b/catalyst-gateway/bin/src/service/api/auth/token.rs
new file mode 100644
index 00000000000..7684f3fc3b1
--- /dev/null
+++ b/catalyst-gateway/bin/src/service/api/auth/token.rs
@@ -0,0 +1,145 @@
+use anyhow::Ok;
+use base64::{prelude::BASE64_STANDARD, Engine};
+use ed25519_dalek::{Signer, SigningKey, SECRET_KEY_LENGTH, SIGNATURE_LENGTH};
+use pallas::codec::minicbor;
+
+/// Key ID - Blake2b-128 hash of the Role 0 Certificate defining the Session public key.
+/// BLAKE2b-128 produces a digest size of 16 bytes.
+#[derive(Debug, Clone, Copy)]
+pub struct Kid(pub [u8; 16]);
+
+/// Identifier for this token, encodes both the time the token was issued and a random
+/// nonce.
+#[derive(Debug, Clone, Copy)]
+pub struct UlidBytes(pub [u8; 16]);
+
+/// Ed25519 signatures are 64 bytes.
+#[derive(Debug, Clone)]
+pub struct SignatureEd25519(pub [u8; 64]);
+
+/// The Encoded Binary Auth Token is a [CBOR sequence] that consists of 3 fields [ kid,
+/// ulid, signature ]. The ED25519 Signature is over the preceding two fields - sig(cbor(kid),
+/// cbor(ulid))
+#[allow(dead_code)]
+pub fn encode_auth_token_ed25519(
+    kid: &Kid, ulid: &UlidBytes, secret_key_bytes: [u8; SECRET_KEY_LENGTH],
+) -> anyhow::Result<String> {
+    /// Auth token prefix as per spec
+    const AUTH_TOKEN_PREFIX: &str = "catv1";
+
+    let sk: SigningKey = SigningKey::from_bytes(&secret_key_bytes);
+
+    let out: Vec<u8> = Vec::new();
+    let mut encoder = minicbor::Encoder::new(out);
+
+    encoder.bytes(&kid.0)?;
+    encoder.bytes(&ulid.0)?;
+
+    let signature: [u8; SIGNATURE_LENGTH] = sk.sign(encoder.writer()).to_bytes();
+
+    encoder.bytes(&signature)?;
+
+    Ok(format!(
+        "{}.{}",
+        AUTH_TOKEN_PREFIX,
+        BASE64_STANDARD.encode(encoder.writer())
+    ))
+}
+
+/// Decode a base64, CBOR-encoded auth token into its constituent parts of (kid, ulid, signature),
+/// e.g. catv1.UAARIjNEVWZ3iJmqu8zd7v9QAZEs7HHPLEwUpV1VhdlNe1hAAAAAAAAAAAAA...
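///
/// Illustrative round trip (hypothetical `sk_bytes`; see the test module below):
/// ```ignore
/// let token = encode_auth_token_ed25519(&Kid([0u8; 16]), &UlidBytes([1u8; 16]), sk_bytes)?;
/// let (kid, ulid, sig, msg) = decode_auth_token_ed25519(&token)?;
/// ```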
+#[allow(dead_code)]
+pub fn decode_auth_token_ed25519(
+    auth_token: &str,
+) -> anyhow::Result<(Kid, UlidBytes, SignatureEd25519, Vec<u8>)> {
+    /// The message is a CBOR sequence (cbor(kid) + cbor(ulid)):
+    /// kid and ulid are 16 bytes apiece, and CBOR encoding adds 1 byte of overhead
+    /// to each field, so the two encoded fields are 17 bytes each (34 in total).
+    const KID_ULID_CBOR_ENCODED_BYTES: u8 = 34;
+    /// Auth token prefix
+    const AUTH_TOKEN_PREFIX: &str = "catv1";
+
+    let token = auth_token.split('.').collect::<Vec<&str>>();
+
+    let prefix = token.first().ok_or(anyhow::anyhow!("No valid prefix"))?;
+    if *prefix != AUTH_TOKEN_PREFIX {
+        return Err(anyhow::anyhow!("Corrupt token, invalid prefix"));
+    }
+    let token_base64 = token.get(1).ok_or(anyhow::anyhow!("No valid token"))?;
+    let token_cbor_encoded = BASE64_STANDARD.decode(token_base64)?;
+
+    // We verify the signature on the message which corresponds to a CBOR sequence (cbor(kid)
+    // + cbor(ulid)):
+    let message_cbor_encoded = &token_cbor_encoded
+        .get(0..KID_ULID_CBOR_ENCODED_BYTES.into())
+        .ok_or(anyhow::anyhow!("No valid token"))?;
+
+    // Decode cbor to bytes
+    let mut cbor_decoder = minicbor::Decoder::new(&token_cbor_encoded);
+
+    // Raw kid bytes
+    let kid = Kid(cbor_decoder
+        .bytes()
+        .map_err(|e| anyhow::anyhow!(format!("Invalid cbor for kid : {e}")))?
+        .try_into()?);
+
+    // Raw ulid bytes
+    let ulid = UlidBytes(
+        cbor_decoder
+            .bytes()
+            .map_err(|e| anyhow::anyhow!(format!("Invalid cbor for ulid : {e}")))?
+            .try_into()?,
+    );
+
+    // Raw signature
+    let signature = SignatureEd25519(
+        cbor_decoder
+            .bytes()
+            .map_err(|e| anyhow::anyhow!(format!("Invalid cbor for sig : {e}")))?
+            .try_into()?,
+    );
+
+    Ok((kid, ulid, signature, message_cbor_encoded.to_vec()))
+}
+
+#[cfg(test)]
+mod tests {
+
+    use ed25519_dalek::{Signature, SigningKey, Verifier, SECRET_KEY_LENGTH};
+    use rand::rngs::OsRng;
+
+    use super::{encode_auth_token_ed25519, Kid, UlidBytes};
+    use crate::service::api::auth::token::decode_auth_token_ed25519;
+
+    #[test]
+    fn test_token_generation_and_decoding() {
+        let kid: [u8; 16] = hex::decode("00112233445566778899aabbccddeeff")
+            .unwrap()
+            .try_into()
+            .unwrap();
+        let ulid: [u8; 16] = hex::decode("01912cec71cf2c4c14a55d5585d94d7b")
+            .unwrap()
+            .try_into()
+            .unwrap();
+
+        let mut random_seed = OsRng;
+        let signing_key: SigningKey = SigningKey::generate(&mut random_seed);
+
+        let verifying_key = signing_key.verifying_key();
+
+        let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = *signing_key.as_bytes();
+
+        let auth_token =
+            encode_auth_token_ed25519(&Kid(kid), &UlidBytes(ulid), secret_key_bytes).unwrap();
+
+        let (decoded_kid, decoded_ulid, decoded_sig, message) =
+            decode_auth_token_ed25519(&auth_token).unwrap();
+
+        assert_eq!(decoded_kid.0, kid);
+        assert_eq!(decoded_ulid.0, ulid);
+
+        verifying_key
+            .verify(&message, &Signature::from(&decoded_sig.0))
+            .unwrap();
+    }
+}
diff --git a/catalyst-gateway/bin/src/service/api/health/mod.rs b/catalyst-gateway/bin/src/service/api/health/mod.rs
index 037d1019351..24c9b20d96c 100644
--- a/catalyst-gateway/bin/src/service/api/health/mod.rs
+++ b/catalyst-gateway/bin/src/service/api/health/mod.rs
@@ -7,7 +7,6 @@ mod inspection_get;
 mod live_get;
 mod ready_get;
 mod started_get;
-
 pub(crate) use started_get::started;
 
 /// Health API Endpoints
diff --git a/catalyst-gateway/bin/src/service/api/mod.rs b/catalyst-gateway/bin/src/service/api/mod.rs
index 1608edfed3e..2ab54ffb2ff 100644
--- a/catalyst-gateway/bin/src/service/api/mod.rs
+++
b/catalyst-gateway/bin/src/service/api/mod.rs @@ -12,7 +12,8 @@ use poem_openapi::{ContactObject, LicenseObject, OpenApiService, ServerObject}; use self::cardano::CardanoApi; use crate::settings::Settings; - +/// Auth +mod auth; pub(crate) mod cardano; mod health; mod legacy; diff --git a/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs b/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs index 315e4b3100f..6a3874e5622 100644 --- a/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs +++ b/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs @@ -43,7 +43,7 @@ fn create_html(document: &str) -> String { .replace("{:spec}", document) } -/// Create an endpoint to return teh Stoplight documentation for our API. +/// Create an endpoint to return the Stoplight documentation for our API. pub(crate) fn create_endpoint(document: &str) -> impl Endpoint { let ui_html = create_html(document); poem::Route::new().at("/", make_sync(move |_| Html(ui_html.clone()))) diff --git a/catalyst-gateway/bin/src/service/mod.rs b/catalyst-gateway/bin/src/service/mod.rs index 99f1fd4efd6..3a46f96aabe 100644 --- a/catalyst-gateway/bin/src/service/mod.rs +++ b/catalyst-gateway/bin/src/service/mod.rs @@ -2,6 +2,7 @@ // These Modules contain endpoints mod api; + mod docs; // These modules are utility or common types/functions mod common; From 5213f64e1bd21e3720b15eb7c97bcc469fcd728b Mon Sep 17 00:00:00 2001 From: Felipe Rosa Date: Tue, 17 Sep 2024 12:47:23 -0300 Subject: [PATCH 55/69] feat: Update GET staked_ada endpoint to fetch from ScyllaDB (#728) * feat: get staked ada from scylladb * chore: revert justfile changes * chore: filter TXOs in rust instead of filtering in ScyllaDB query * fix(backend): spelling * fix(backend): Eliminate lint errors from Derived function * fix(backend): code format * fix(backend): Udate autogenerated dart code * chore(cat-voices): fix tests --------- Co-authored-by: Steven Johnson Co-authored-by: Steven Johnson Co-authored-by: Dominik Toton --- .config/dictionaries/project.dic | 1 + .../src/db/event/cardano.obsolete/utxo/mod.rs | 30 +-- catalyst-gateway/bin/src/db/index/mod.rs | 1 + catalyst-gateway/bin/src/db/index/queries.rs | 76 ++++++- .../index/queries/get_txi_by_txn_hashes.cql | 6 + .../queries/get_txo_by_stake_address.cql | 10 + .../bin/src/db/index/queries/insert_txo.cql | 4 +- .../src/db/index/queries/update_txo_spent.cql | 6 + .../db/index/schema/txi_by_txn_hash_table.cql | 4 +- catalyst-gateway/bin/src/db/index/session.rs | 19 +- .../index/staked_ada/get_txi_by_txn_hash.rs | 83 +++++++ .../staked_ada/get_txo_by_stake_address.rs | 95 ++++++++ .../bin/src/db/index/staked_ada/mod.rs | 69 ++++++ .../src/service/api/cardano/staked_ada_get.rs | 214 ++++++++++++++++-- .../common/objects/cardano/stake_address.rs | 2 +- .../common/objects/cardano/stake_info.rs | 21 +- .../src/catalyst_data_gateway_repository.dart | 2 +- ...catalyst_data_gateway_repository_test.dart | 23 +- .../cat_gateway_api.models.swagger.dart | 56 +++++ .../cat_gateway_api.models.swagger.g.dart | 13 ++ .../cat_gateway_api.swagger.chopper.dart | 4 +- .../cat_gateway_api.swagger.dart | 7 +- 22 files changed, 666 insertions(+), 80 deletions(-) create mode 100644 catalyst-gateway/bin/src/db/index/queries/get_txi_by_txn_hashes.cql create mode 100644 catalyst-gateway/bin/src/db/index/queries/get_txo_by_stake_address.cql create mode 100644 catalyst-gateway/bin/src/db/index/queries/update_txo_spent.cql create mode 100644 
catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs create mode 100644 catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs create mode 100644 catalyst-gateway/bin/src/db/index/staked_ada/mod.rs diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 30195ba66e4..70280dde3e5 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -250,6 +250,7 @@ Traceback traefik trailings TXNZD +txos Typer unawaited unchunk diff --git a/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs index bf3a9fa5e17..999771620c7 100644 --- a/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs @@ -1,22 +1,17 @@ //! Utxo Queries -use cardano_chain_follower::Network; use pallas::ledger::{addresses::Address, traverse::MultiEraTx}; use tokio_postgres::{binary_copy::BinaryCopyInWriter, types::Type}; use tracing::error; -use super::{chain_state::SlotNumber, cip36_registration::StakeCredential}; use crate::{ cardano::util::parse_policy_assets, - db::event::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}, + db::event::{Error, EventDB, EVENT_DB_POOL}, }; /// Stake amount. pub(crate) type StakeAmount = i64; -/// `select_total_utxo_amount.sql` -const SELECT_TOTAL_UTXO_AMOUNT_SQL: &str = include_str!("select_total_utxo_amount.sql"); - /// Data required to index transactions. pub(crate) struct IndexedTxnParams<'a> { /// Transaction id @@ -273,27 +268,4 @@ impl EventDB { Ok(()) } - - /// Get total utxo amount - pub(crate) async fn total_utxo_amount( - stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, - ) -> anyhow::Result<(StakeAmount, SlotNumber)> { - let row = Self::query_one(SELECT_TOTAL_UTXO_AMOUNT_SQL, &[ - &stake_credential, - &network.to_string(), - &slot_num, - ]) - .await?; - - // Aggregate functions as SUM and MAX return NULL if there are no rows, so we need to - // check for it. - // https://www.postgresql.org/docs/8.2/functions-aggregate.html - if let Some(amount) = row.try_get("total_utxo_amount")? 
{ - let slot_number = row.try_get("slot_no")?; - - Ok((amount, slot_number)) - } else { - Err(NotFoundError.into()) - } - } } diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs index 9bbaf16066a..15e04cd1388 100644 --- a/catalyst-gateway/bin/src/db/index/mod.rs +++ b/catalyst-gateway/bin/src/db/index/mod.rs @@ -7,3 +7,4 @@ pub(crate) mod index_txo; pub(crate) mod queries; pub(crate) mod schema; pub(crate) mod session; +pub(crate) mod staked_ada; diff --git a/catalyst-gateway/bin/src/db/index/queries.rs b/catalyst-gateway/bin/src/db/index/queries.rs index 879cfbd5aa2..374f71645d5 100644 --- a/catalyst-gateway/bin/src/db/index/queries.rs +++ b/catalyst-gateway/bin/src/db/index/queries.rs @@ -6,9 +6,20 @@ use std::sync::Arc; use anyhow::bail; use crossbeam_skiplist::SkipMap; -use scylla::{batch::Batch, serialize::row::SerializeRow, QueryResult, Session}; - -use super::{index_certs::CertInsertQuery, index_txi::TxiInsertQuery, index_txo::TxoInsertQuery}; +use scylla::{ + batch::Batch, prepared_statement::PreparedStatement, serialize::row::SerializeRow, + transport::iterator::RowIterator, QueryResult, Session, +}; + +use super::{ + index_certs::CertInsertQuery, + index_txi::TxiInsertQuery, + index_txo::TxoInsertQuery, + staked_ada::{ + get_txi_by_txn_hash::GetTxiByTxnHashesQuery, + get_txo_by_stake_address::GetTxoByStakeAddressQuery, UpdateTxoSpentQuery, + }, +}; use crate::settings::{CassandraEnvVars, CASSANDRA_MIN_BATCH_SIZE}; /// Batches of different sizes, prepared and ready for use. @@ -29,6 +40,16 @@ pub(crate) enum PreparedQuery { TxiInsertQuery, /// Stake Registration Insert query. StakeRegistrationInsertQuery, + /// TXO spent Update query. + TxoSpentUpdateQuery, +} + +/// All prepared SELECT query statements. +pub(crate) enum PreparedSelectQuery { + /// Get TXO by stake address query. + GetTxoByStakeAddress, + /// Get TXI by transaction hash query. + GetTxiByTransactionHash, } /// All prepared queries for a session. @@ -46,6 +67,12 @@ pub(crate) struct PreparedQueries { txi_insert_queries: SizedBatch, /// TXI Insert query. stake_registration_insert_queries: SizedBatch, + /// Update TXO spent query. + txo_spent_update_queries: SizedBatch, + /// Get TXO by stake address query. + txo_by_stake_address_query: PreparedStatement, + /// Get TXI by transaction hash. + txi_by_txn_hash_query: PreparedStatement, } /// An individual query response that can fail @@ -63,6 +90,10 @@ impl PreparedQueries { let txi_insert_queries = TxiInsertQuery::prepare_batch(&session, cfg).await; let all_txo_queries = TxoInsertQuery::prepare_batch(&session, cfg).await; let stake_registration_insert_queries = CertInsertQuery::prepare_batch(&session, cfg).await; + let txo_spent_update_queries = + UpdateTxoSpentQuery::prepare_batch(session.clone(), cfg).await; + let txo_by_stake_address_query = GetTxoByStakeAddressQuery::prepare(session.clone()).await; + let txi_by_txn_hash_query = GetTxiByTxnHashesQuery::prepare(session.clone()).await; let ( txo_insert_queries, @@ -78,9 +109,24 @@ impl PreparedQueries { unstaked_txo_asset_insert_queries, txi_insert_queries: txi_insert_queries?, stake_registration_insert_queries: stake_registration_insert_queries?, + txo_spent_update_queries: txo_spent_update_queries?, + txo_by_stake_address_query: txo_by_stake_address_query?, + txi_by_txn_hash_query: txi_by_txn_hash_query?, }) } + /// Prepares a statement. 
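    /// (Consistency and idempotency are set once here, so single statements and the
    /// pre-sized batches below share one preparation path.)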
+    pub(crate) async fn prepare(
+        session: Arc<Session>, query: &str, consistency: scylla::statement::Consistency,
+        idempotent: bool,
+    ) -> anyhow::Result<PreparedStatement> {
+        let mut prepared = session.prepare(query).await?;
+        prepared.set_consistency(consistency);
+        prepared.set_is_idempotent(idempotent);
+
+        Ok(prepared)
+    }
+
     /// Prepares all permutations of the batch from 1 to max.
     /// It is necessary to do this because batches are pre-sized, they cannot be dynamic.
     /// Preparing the batches in advance is a very large performance increase.
@@ -92,9 +138,7 @@ impl PreparedQueries {
         // First prepare the query. Only needs to be done once, all queries on a batch are the
         // same.
-        let mut prepared = session.prepare(query).await?;
-        prepared.set_consistency(consistency);
-        prepared.set_is_idempotent(idempotent);
+        let prepared = Self::prepare(session, query, consistency, idempotent).await?;
 
         for batch_size in CASSANDRA_MIN_BATCH_SIZE..=cfg.max_batch_size {
             let mut batch: Batch = Batch::new(if logged {
@@ -114,6 +158,25 @@ impl PreparedQueries {
         Ok(sized_batches)
     }
 
+    /// Executes a select query with the given parameters.
+    ///
+    /// Returns an iterator that iterates over all the result pages that the query
+    /// returns.
+    pub(crate) async fn execute_iter<P>
( + &self, session: Arc, select_query: PreparedSelectQuery, params: P, + ) -> anyhow::Result + where P: SerializeRow { + let prepared_stmt = match select_query { + PreparedSelectQuery::GetTxoByStakeAddress => &self.txo_by_stake_address_query, + PreparedSelectQuery::GetTxiByTransactionHash => &self.txi_by_txn_hash_query, + }; + + session + .execute_iter(prepared_stmt.clone(), params) + .await + .map_err(|e| anyhow::anyhow!(e)) + } + /// Execute a Batch query with the given parameters. /// /// Values should be a Vec of values which implement `SerializeRow` and they MUST be @@ -132,6 +195,7 @@ impl PreparedQueries { PreparedQuery::UnstakedTxoAssetInsertQuery => &self.unstaked_txo_asset_insert_queries, PreparedQuery::TxiInsertQuery => &self.txi_insert_queries, PreparedQuery::StakeRegistrationInsertQuery => &self.stake_registration_insert_queries, + PreparedQuery::TxoSpentUpdateQuery => &self.txo_spent_update_queries, }; let mut results: Vec = Vec::new(); diff --git a/catalyst-gateway/bin/src/db/index/queries/get_txi_by_txn_hashes.cql b/catalyst-gateway/bin/src/db/index/queries/get_txi_by_txn_hashes.cql new file mode 100644 index 00000000000..b2ed6ee3e94 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/get_txi_by_txn_hashes.cql @@ -0,0 +1,6 @@ +SELECT + txn_hash, + txo, + slot_no +FROM txi_by_txn_hash +WHERE txn_hash IN :txn_hashes diff --git a/catalyst-gateway/bin/src/db/index/queries/get_txo_by_stake_address.cql b/catalyst-gateway/bin/src/db/index/queries/get_txo_by_stake_address.cql new file mode 100644 index 00000000000..002e0532177 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/get_txo_by_stake_address.cql @@ -0,0 +1,10 @@ +SELECT + txn_hash, + txn, + txo, + slot_no, + value, + spent_slot +FROM txo_by_stake +WHERE stake_address = :stake_address +AND slot_no <= :slot_no diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql index 630f431c975..22293ae6acb 100644 --- a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql +++ b/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql @@ -1,4 +1,4 @@ --- Create the TXO Record for a stake address, +-- Create the TXO Record for a stake address, INSERT INTO txo_by_stake ( stake_address, slot_no, @@ -15,4 +15,4 @@ INSERT INTO txo_by_stake ( :address, :value, :txn_hash -); \ No newline at end of file +); diff --git a/catalyst-gateway/bin/src/db/index/queries/update_txo_spent.cql b/catalyst-gateway/bin/src/db/index/queries/update_txo_spent.cql new file mode 100644 index 00000000000..e74704815c8 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/update_txo_spent.cql @@ -0,0 +1,6 @@ +UPDATE txo_by_stake +SET spent_slot = :spent_slot +WHERE stake_address = :stake_address +AND txn = :txn +AND txo = :txo +AND slot_no = :slot_no diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql index a954bdd3c4c..4ed7bd12cd9 100644 --- a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql +++ b/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql @@ -5,7 +5,7 @@ CREATE TABLE IF NOT EXISTS txi_by_txn_hash ( txo smallint, -- Index of the TXO which was spent -- Non key data, we can only spend a transaction hash/txo once, so this should be unique in any event. - slot_no varint, -- slot number when the spend occurred. - + slot_no varint, -- slot number when the spend occurred. 
+ PRIMARY KEY (txn_hash, txo) ); diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs index 4e50e3a5392..884f5a7a7ac 100644 --- a/catalyst-gateway/bin/src/db/index/session.rs +++ b/catalyst-gateway/bin/src/db/index/session.rs @@ -8,13 +8,14 @@ use std::{ use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode}; use scylla::{ - frame::Compression, serialize::row::SerializeRow, ExecutionProfile, Session, SessionBuilder, + frame::Compression, serialize::row::SerializeRow, transport::iterator::RowIterator, + ExecutionProfile, Session, SessionBuilder, }; use tokio::fs; use tracing::{error, info}; use super::{ - queries::{FallibleQueryResults, PreparedQueries, PreparedQuery}, + queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, PreparedSelectQuery}, schema::create_schema, }; use crate::{ @@ -100,6 +101,20 @@ impl CassandraSession { } } + /// Executes a select query with the given parameters. + /// + /// Returns an iterator that iterates over all the result pages that the query + /// returns. + pub(crate) async fn execute_iter
<P>
( + &self, select_query: PreparedSelectQuery, params: P, + ) -> anyhow::Result + where P: SerializeRow { + let session = self.session.clone(); + let queries = self.queries.clone(); + + queries.execute_iter(session, select_query, params).await + } + /// Execute a Batch query with the given parameters. /// /// Values should be a Vec of values which implement `SerializeRow` and they MUST be diff --git a/catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs b/catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs new file mode 100644 index 00000000000..e1589ec6b2e --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs @@ -0,0 +1,83 @@ +//! Get TXI by Transaction hash query + +use std::sync::Arc; + +use scylla::{ + prepared_statement::PreparedStatement, transport::iterator::TypedRowIterator, SerializeRow, + Session, +}; +use tracing::error; + +use crate::db::index::{ + queries::{PreparedQueries, PreparedSelectQuery}, + session::CassandraSession, +}; + +/// Get TXI query string. +const GET_TXI_BY_TXN_HASHES_QUERY: &str = include_str!("../queries/get_txi_by_txn_hashes.cql"); + +/// Get TXI query parameters. +#[derive(SerializeRow)] +pub(crate) struct GetTxiByTxnHashesQueryParams { + /// Transaction hashes. + txn_hashes: Vec>, +} + +impl GetTxiByTxnHashesQueryParams { + /// Create a new instance of [`GetTxiByTxnHashesQueryParams`] + pub(crate) fn new(txn_hashes: Vec>) -> Self { + Self { txn_hashes } + } +} + +/// Get TXI Query Result +// TODO: https://github.com/input-output-hk/catalyst-voices/issues/828 +// The macro uses expect to signal an error in deserializing values. +#[allow(clippy::expect_used)] +mod result { + use scylla::FromRow; + + /// Get TXI query result. + #[derive(FromRow)] + pub(crate) struct GetTxiByTxnHashesQuery { + /// TXI transaction hash. + pub txn_hash: Vec, + /// TXI original TXO index. + pub txo: i16, + /// TXI slot number. + pub slot_no: num_bigint::BigInt, + } +} +/// Get TXI query. +pub(crate) struct GetTxiByTxnHashesQuery; + +impl GetTxiByTxnHashesQuery { + /// Prepares a get txi query. + pub(crate) async fn prepare(session: Arc) -> anyhow::Result { + let get_txi_by_txn_hashes_query = PreparedQueries::prepare( + session, + GET_TXI_BY_TXN_HASHES_QUERY, + scylla::statement::Consistency::All, + true, + ) + .await; + + if let Err(ref error) = get_txi_by_txn_hashes_query { + error!(error=%error, "Failed to prepare get TXI by txn hashes query."); + }; + + get_txi_by_txn_hashes_query + } + + /// Executes a get txi by transaction hashes query. + pub(crate) async fn execute( + session: &CassandraSession, params: GetTxiByTxnHashesQueryParams, + ) -> anyhow::Result> { + let iter = session + .execute_iter(PreparedSelectQuery::GetTxiByTransactionHash, params) + .await? + .into_typed::(); + + Ok(iter) + } +} diff --git a/catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs b/catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs new file mode 100644 index 00000000000..861a1e2ce46 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs @@ -0,0 +1,95 @@ +//! Get the TXO by Stake Address +use std::sync::Arc; + +use scylla::{ + prepared_statement::PreparedStatement, transport::iterator::TypedRowIterator, SerializeRow, + Session, +}; +use tracing::error; + +use crate::db::index::{ + queries::{PreparedQueries, PreparedSelectQuery}, + session::CassandraSession, +}; + +/// Get txo by stake address query string. 
+const GET_TXO_BY_STAKE_ADDRESS_QUERY: &str = + include_str!("../queries/get_txo_by_stake_address.cql"); + +/// Get txo by stake address query parameters. +#[derive(SerializeRow)] +pub(crate) struct GetTxoByStakeAddressQueryParams { + /// Stake address. + stake_address: Vec, + /// Max slot num. + slot_no: num_bigint::BigInt, +} + +impl GetTxoByStakeAddressQueryParams { + /// Creates a new [`GetTxoByStakeAddressQueryParams`]. + pub(crate) fn new(stake_address: Vec, slot_no: num_bigint::BigInt) -> Self { + Self { + stake_address, + slot_no, + } + } +} + +/// Get TXO by stake address query row result +// TODO: https://github.com/input-output-hk/catalyst-voices/issues/828 +// The macro uses expect to signal an error in deserializing values. +#[allow(clippy::expect_used)] +mod result { + use scylla::FromRow; + + /// Get txo by stake address query result. + #[derive(FromRow)] + pub(crate) struct GetTxoByStakeAddressQuery { + /// TXO transaction hash. + pub txn_hash: Vec, + /// TXO transaction index within the slot. + pub txn: i16, + /// TXO index. + pub txo: i16, + /// TXO transaction slot number. + pub slot_no: num_bigint::BigInt, + /// TXO value. + pub value: num_bigint::BigInt, + /// TXO spent slot. + pub spent_slot: Option, + } +} + +/// Get staked ADA query. +pub(crate) struct GetTxoByStakeAddressQuery; + +impl GetTxoByStakeAddressQuery { + /// Prepares a get txo by stake address query. + pub(crate) async fn prepare(session: Arc) -> anyhow::Result { + let get_txo_by_stake_address_query = PreparedQueries::prepare( + session, + GET_TXO_BY_STAKE_ADDRESS_QUERY, + scylla::statement::Consistency::All, + true, + ) + .await; + + if let Err(ref error) = get_txo_by_stake_address_query { + error!(error=%error, "Failed to prepare get TXO by stake address"); + }; + + get_txo_by_stake_address_query + } + + /// Executes a get txo by stake address query. + pub(crate) async fn execute( + session: &CassandraSession, params: GetTxoByStakeAddressQueryParams, + ) -> anyhow::Result> { + let iter = session + .execute_iter(PreparedSelectQuery::GetTxoByStakeAddress, params) + .await? + .into_typed::(); + + Ok(iter) + } +} diff --git a/catalyst-gateway/bin/src/db/index/staked_ada/mod.rs b/catalyst-gateway/bin/src/db/index/staked_ada/mod.rs new file mode 100644 index 00000000000..ee682a77fa3 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/staked_ada/mod.rs @@ -0,0 +1,69 @@ +//! Staked ADA related queries. +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use super::{ + queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, +}; +use crate::settings::CassandraEnvVars; + +pub(crate) mod get_txi_by_txn_hash; +pub(crate) mod get_txo_by_stake_address; + +/// Update TXO spent query string. +const UPDATE_TXO_SPENT_QUERY: &str = include_str!("../queries/update_txo_spent.cql"); + +/// Update TXO spent query params. +#[derive(SerializeRow)] +pub(crate) struct UpdateTxoSpentQueryParams { + /// TXO stake address. + pub stake_address: Vec, + /// TXO transaction index within the slot. + pub txn: i16, + /// TXO index. + pub txo: i16, + /// TXO slot number. + pub slot_no: num_bigint::BigInt, + /// TXO spent slot number. + pub spent_slot: num_bigint::BigInt, +} + +/// Update TXO spent query. +pub(crate) struct UpdateTxoSpentQuery; + +impl UpdateTxoSpentQuery { + /// Prepare a batch of update TXO spent queries. 
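    /// (As with the insert queries, a prepared batch is built for every size from
    /// `CASSANDRA_MIN_BATCH_SIZE` up to the configured maximum, since batches are pre-sized.)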
+ pub(crate) async fn prepare_batch( + session: Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let update_txo_spent_queries = PreparedQueries::prepare_batch( + session.clone(), + UPDATE_TXO_SPENT_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = update_txo_spent_queries { + error!(error=%error,"Failed to prepare update TXO spent query."); + }; + + update_txo_spent_queries + } + + /// Executes a update txo spent query. + pub(crate) async fn execute( + session: &CassandraSession, params: Vec, + ) -> FallibleQueryResults { + let results = session + .execute_batch(PreparedQuery::TxoSpentUpdateQuery, params) + .await?; + + Ok(results) + } +} diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs index 976c8ae7e1b..7d974dbdee9 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs @@ -1,14 +1,30 @@ //! Implementation of the GET `/staked_ada` endpoint +use std::collections::HashMap; +use anyhow::anyhow; +use futures::StreamExt; use poem_openapi::{payload::Json, ApiResponse}; use super::types::SlotNumber; -use crate::service::{ - common::{ - objects::cardano::{network::Network, stake_address::StakeAddress, stake_info::StakeInfo}, +use crate::{ + db::index::{ + session::CassandraSession, + staked_ada::{ + get_txi_by_txn_hash::{GetTxiByTxnHashesQuery, GetTxiByTxnHashesQueryParams}, + get_txo_by_stake_address::{ + GetTxoByStakeAddressQuery, GetTxoByStakeAddressQueryParams, + }, + UpdateTxoSpentQuery, UpdateTxoSpentQueryParams, + }, + }, + service::common::{ + objects::cardano::{ + network::Network, + stake_address::StakeAddress, + stake_info::{FullStakeInfo, StakeInfo}, + }, responses::WithErrorResponses, }, - utilities::check_network, }; /// Endpoint responses. @@ -17,7 +33,7 @@ use crate::service::{ pub(crate) enum Responses { /// The amount of ADA staked by the queried stake address, as at the indicated slot. #[oai(status = 200)] - Ok(Json), + Ok(Json), /// The queried stake address was not found at the requested slot number. 
#[oai(status = 404)] NotFound, @@ -29,28 +45,182 @@ pub(crate) type AllResponses = WithErrorResponses; /// # GET `/staked_ada` #[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( - stake_address: StakeAddress, provided_network: Option, slot_num: Option, + stake_address: StakeAddress, _provided_network: Option, slot_num: Option, ) -> AllResponses { - let _date_time = slot_num.unwrap_or(SlotNumber::MAX); - let _stake_credential = stake_address.payload().as_hash().to_vec(); + let persistent_res = calculate_stake_info(true, stake_address.clone(), slot_num).await; + let persistent_stake_info = match persistent_res { + Ok(stake_info) => stake_info, + Err(err) => return AllResponses::handle_error(&err), + }; - let _network = match check_network(stake_address.network(), provided_network) { - Ok(network) => network, + let volatile_res = calculate_stake_info(false, stake_address, slot_num).await; + let volatile_stake_info = match volatile_res { + Ok(stake_info) => stake_info, Err(err) => return AllResponses::handle_error(&err), }; - let _unused = " - // get the total utxo amount from the database - match EventDB::total_utxo_amount(stake_credential, network.into(), date_time).await { - Ok((amount, slot_number)) => Responses::Ok(Json(StakeInfo { - amount, - slot_number, - })) - .into(), - Err(err) if err.is::() => Responses::NotFound.into(), - Err(err) => AllResponses::handle_error(&err), + if persistent_stake_info.is_none() && volatile_stake_info.is_none() { + return Responses::NotFound.into(); + } + + Responses::Ok(Json(FullStakeInfo { + volatile: volatile_stake_info.unwrap_or_default(), + persistent: persistent_stake_info.unwrap_or_default(), + })) + .into() +} + +/// TXO information used when calculating a user's stake info. +struct TxoInfo { + /// TXO value. + value: num_bigint::BigInt, + /// TXO transaction index within the slot. + txn: i16, + /// TXO index. + txo: i16, + /// TXO transaction slot number. + slot_no: num_bigint::BigInt, + /// Whether the TXO was spent. + spent_slot_no: Option, +} + +/// Calculate the stake info for a given stake address. +/// +/// This function also updates the spent column if it detects that a TXO was spent +/// between lookups. +async fn calculate_stake_info( + persistent: bool, stake_address: StakeAddress, slot_num: Option, +) -> anyhow::Result> { + let Some(session) = CassandraSession::get(persistent) else { + anyhow::bail!("Failed to acquire db session"); + }; + + let stake_address_bytes = stake_address.payload().as_hash().to_vec(); + + let mut txos_by_txn = get_txo_by_txn(&session, stake_address_bytes.clone(), slot_num).await?; + if txos_by_txn.is_empty() { + return Ok(None); + } + + check_and_set_spent(&session, &mut txos_by_txn).await?; + update_spent(&session, stake_address_bytes, &txos_by_txn).await?; + + let stake_info = build_stake_info(txos_by_txn)?; + + Ok(Some(stake_info)) +} + +/// Returns a map of TXO infos by transaction hash for the given stake address. +async fn get_txo_by_txn( + session: &CassandraSession, stake_address: Vec, slot_num: Option, +) -> anyhow::Result, HashMap>> { + let mut txos_iter = GetTxoByStakeAddressQuery::execute( + session, + GetTxoByStakeAddressQueryParams::new( + stake_address, + num_bigint::BigInt::from(slot_num.unwrap_or(i64::MAX)), + ), + ) + .await?; + + let mut txos_by_txn = HashMap::new(); + while let Some(row_res) = txos_iter.next().await { + let row = row_res?; + + // Filter out already known spent TXOs. 
+ if row.spent_slot.is_some() { + continue; + } + + let txn_map = txos_by_txn.entry(row.txn_hash).or_insert(HashMap::new()); + txn_map.insert(row.txo, TxoInfo { + value: row.value, + txn: row.txn, + txo: row.txo, + slot_no: row.slot_no, + spent_slot_no: None, + }); + } + + Ok(txos_by_txn) +} + +/// Checks if the given TXOs were spent and mark then as such. +async fn check_and_set_spent( + session: &CassandraSession, txos_by_txn: &mut HashMap, HashMap>, +) -> anyhow::Result<()> { + let txn_hashes = txos_by_txn.keys().cloned().collect::>(); + + for chunk in txn_hashes.chunks(100) { + let mut txi_iter = GetTxiByTxnHashesQuery::execute( + session, + GetTxiByTxnHashesQueryParams::new(chunk.to_vec()), + ) + .await?; + + while let Some(row_res) = txi_iter.next().await { + let row = row_res?; + + if let Some(txn_map) = txos_by_txn.get_mut(&row.txn_hash) { + if let Some(txo_info) = txn_map.get_mut(&row.txo) { + if row.slot_no >= num_bigint::BigInt::ZERO { + txo_info.spent_slot_no = Some(row.slot_no); + } + } + } + } + } + + Ok(()) +} + +/// Sets TXOs as spent in the database if they are marked as spent in the map. +async fn update_spent( + session: &CassandraSession, stake_address: Vec, + txos_by_txn: &HashMap, HashMap>, +) -> anyhow::Result<()> { + let mut params = Vec::new(); + for txn_map in txos_by_txn.values() { + for txo_info in txn_map.values() { + if txo_info.spent_slot_no.is_none() { + continue; + } + + if let Some(spent_slot) = &txo_info.spent_slot_no { + params.push(UpdateTxoSpentQueryParams { + stake_address: stake_address.clone(), + txn: txo_info.txn, + txo: txo_info.txo, + slot_no: txo_info.slot_no.clone(), + spent_slot: spent_slot.clone(), + }); + } + } + } + + UpdateTxoSpentQuery::execute(session, params).await?; + + Ok(()) +} + +/// Builds an instance of [`StakeInfo`] based on the TXOs given. +fn build_stake_info( + txos_by_txn: HashMap, HashMap>, +) -> anyhow::Result { + let mut stake_info = StakeInfo::default(); + for txn_map in txos_by_txn.into_values() { + for txo_info in txn_map.into_values() { + if txo_info.spent_slot_no.is_none() { + stake_info.amount += i64::try_from(txo_info.value).map_err(|err| anyhow!(err))?; + + let slot_no = i64::try_from(txo_info.slot_no).map_err(|err| anyhow!(err))?; + + if stake_info.slot_number < slot_no { + stake_info.slot_number = slot_no; + } + } + } } - "; - Responses::NotFound.into() + Ok(stake_info) } diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs index 52ac82d61c4..e1428c13fa8 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs @@ -10,7 +10,7 @@ use poem_openapi::{ /// Cardano stake address of the user. 
/// Should a valid Bech32 encoded stake address followed by the `https://cips.cardano.org/cip/CIP-19/#stake-addresses.` -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct StakeAddress(StakeAddressPallas); impl StakeAddress { diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs index 6793085d7a8..eb718f899df 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs @@ -5,7 +5,7 @@ use poem_openapi::{types::Example, Object}; use crate::service::api::cardano::types::{SlotNumber, StakeAmount}; /// User's cardano stake info. -#[derive(Object)] +#[derive(Object, Default)] #[oai(example = true)] pub(crate) struct StakeInfo { /// Total stake amount. @@ -27,3 +27,22 @@ impl Example for StakeInfo { } } } + +/// Full user's cardano stake info. +#[derive(Object, Default)] +#[oai(example = true)] +pub(crate) struct FullStakeInfo { + /// Volatile stake information. + pub(crate) volatile: StakeInfo, + /// Persistent stake information. + pub(crate) persistent: StakeInfo, +} + +impl Example for FullStakeInfo { + fn example() -> Self { + Self { + volatile: StakeInfo::example(), + persistent: StakeInfo::example(), + } + } +} diff --git a/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart b/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart index 394d7989131..d22ae32e641 100644 --- a/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart +++ b/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart @@ -81,7 +81,7 @@ final class CatalystDataGatewayRepository { } } - Future> getCardanoStakedAdaStakeAddress({ + Future> getCardanoStakedAdaStakeAddress({ required String stakeAddress, enums.Network network = enums.Network.mainnet, int? slotNumber, diff --git a/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart b/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart index 746020be8f8..d215bcfbd5b 100644 --- a/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart +++ b/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart @@ -30,12 +30,12 @@ class FakeCatGatewayApi extends Fake implements CatGatewayApi { Future> apiHealthLiveGet() async => response; @override - Future> apiCardanoStakedAdaStakeAddressGet({ + Future> apiCardanoStakedAdaStakeAddressGet({ required String? stakeAddress, enums.Network? network, int? 
slotNumber, }) async => - response as chopper.Response; + response as chopper.Response; @override Future> apiCardanoSyncStateGet({ @@ -155,17 +155,22 @@ void main() { amount: 1, slotNumber: 5, ); - final repository = setupRepository( - chopper.Response(http.Response('', HttpStatus.ok), stakeInfo), + const fullStakeInfo = FullStakeInfo( + volatile: stakeInfo, + persistent: stakeInfo, + ); + + final repository = setupRepository( + chopper.Response(http.Response('', HttpStatus.ok), fullStakeInfo), ); final result = await repository.getCardanoStakedAdaStakeAddress( stakeAddress: validStakeAddress, ); expect(result.isSuccess, true); - expect(result.success, equals(stakeInfo)); + expect(result.success, equals(fullStakeInfo)); }); test('getCardanoStakedAdaStakeAddress Bad request', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response(http.Response('', HttpStatus.badRequest), null), ); final result = await repository.getCardanoStakedAdaStakeAddress( @@ -176,7 +181,7 @@ void main() { }); test('getCardanoStakedAdaStakeAddress Not found', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response(http.Response('', HttpStatus.notFound), null), ); final result = await repository.getCardanoStakedAdaStakeAddress( @@ -186,7 +191,7 @@ void main() { expect(result.failure, equals(NetworkErrors.notFound)); }); test('getCardanoStakedAdaStakeAddress Server Error', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response( http.Response('', HttpStatus.internalServerError), null, @@ -200,7 +205,7 @@ void main() { }); test('getCardanoStakedAdaStakeAddress Service Unavailable', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response( http.Response('', HttpStatus.serviceUnavailable), null, diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart index 9b06413c74a..3385365eaf1 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart @@ -432,6 +432,62 @@ extension $FragmentsProcessingSummaryExtension on FragmentsProcessingSummary { } } +@JsonSerializable(explicitToJson: true) +class FullStakeInfo { + const FullStakeInfo({ + required this.volatile, + required this.persistent, + }); + + factory FullStakeInfo.fromJson(Map json) => + _$FullStakeInfoFromJson(json); + + static const toJsonFactory = _$FullStakeInfoToJson; + Map toJson() => _$FullStakeInfoToJson(this); + + @JsonKey(name: 'volatile') + final StakeInfo volatile; + @JsonKey(name: 'persistent') + final StakeInfo persistent; + static const fromJsonFactory = _$FullStakeInfoFromJson; + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other is FullStakeInfo && + (identical(other.volatile, volatile) || + const DeepCollectionEquality() + .equals(other.volatile, volatile)) && + (identical(other.persistent, persistent) || + const DeepCollectionEquality() + .equals(other.persistent, persistent))); + } + + @override + String toString() => jsonEncode(this); + + @override + int get hashCode => + const DeepCollectionEquality().hash(volatile) ^ + const 
DeepCollectionEquality().hash(persistent) ^ + runtimeType.hashCode; +} + +extension $FullStakeInfoExtension on FullStakeInfo { + FullStakeInfo copyWith({StakeInfo? volatile, StakeInfo? persistent}) { + return FullStakeInfo( + volatile: volatile ?? this.volatile, + persistent: persistent ?? this.persistent); + } + + FullStakeInfo copyWithWrapped( + {Wrapped? volatile, Wrapped? persistent}) { + return FullStakeInfo( + volatile: (volatile != null ? volatile.value : this.volatile), + persistent: (persistent != null ? persistent.value : this.persistent)); + } +} + @JsonSerializable(explicitToJson: true) class Hash { const Hash({ diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart index 9a106a7e118..4dba49dd5a9 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart @@ -113,6 +113,19 @@ Map _$FragmentsProcessingSummaryToJson( 'rejected': instance.rejected.map((e) => e.toJson()).toList(), }; +FullStakeInfo _$FullStakeInfoFromJson(Map json) => + FullStakeInfo( + volatile: StakeInfo.fromJson(json['volatile'] as Map), + persistent: + StakeInfo.fromJson(json['persistent'] as Map), + ); + +Map _$FullStakeInfoToJson(FullStakeInfo instance) => + { + 'volatile': instance.volatile.toJson(), + 'persistent': instance.persistent.toJson(), + }; + Hash _$HashFromJson(Map json) => Hash( hash: json['hash'] as String, ); diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart index cbd9f774aa5..88a535d0873 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart @@ -70,7 +70,7 @@ final class _$CatGatewayApi extends CatGatewayApi { } @override - Future> _apiCardanoStakedAdaStakeAddressGet({ + Future> _apiCardanoStakedAdaStakeAddressGet({ required String? stakeAddress, String? network, int? slotNumber, @@ -86,7 +86,7 @@ final class _$CatGatewayApi extends CatGatewayApi { client.baseUrl, parameters: $params, ); - return client.send($request); + return client.send($request); } @override diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart index f8c79a33455..121209f0e74 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart @@ -100,12 +100,13 @@ abstract class CatGatewayApi extends ChopperService { ///@param stake_address The stake address of the user. Should a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses. ///@param network Cardano network type. If omitted network type is identified from the stake address. 
If specified, it must correspond to the network type encoded in the stake address. Since the `preprod` and `preview` network types are encoded in the stake address as `testnet`, use this query parameter to specify `preprod` or `preview`.
  ///@param slot_number Slot number at which the staked ada amount should be calculated. If omitted, the latest slot number is used.
-  Future<chopper.Response<StakeInfo>> apiCardanoStakedAdaStakeAddressGet({
+  Future<chopper.Response<FullStakeInfo>> apiCardanoStakedAdaStakeAddressGet({
     required String? stakeAddress,
     enums.Network? network,
     int? slotNumber,
   }) {
-    generatedMapping.putIfAbsent(StakeInfo, () => StakeInfo.fromJsonFactory);
+    generatedMapping.putIfAbsent(
+        FullStakeInfo, () => FullStakeInfo.fromJsonFactory);
 
     return _apiCardanoStakedAdaStakeAddressGet(
       stakeAddress: stakeAddress,
@@ -118,7 +119,7 @@ abstract class CatGatewayApi extends ChopperService {
  ///@param network Cardano network type. If omitted, the network type is identified from the stake address. If specified, it must correspond to the network type encoded in the stake address. Since the `preprod` and `preview` network types are encoded in the stake address as `testnet`, use this query parameter to specify `preprod` or `preview`.
  ///@param slot_number Slot number at which the staked ada amount should be calculated. If omitted, the latest slot number is used.
   @Get(path: '/api/cardano/staked_ada/{stake_address}')
-  Future<chopper.Response<StakeInfo>> _apiCardanoStakedAdaStakeAddressGet({
+  Future<chopper.Response<FullStakeInfo>> _apiCardanoStakedAdaStakeAddressGet({
     @Path('stake_address') required String? stakeAddress,
     @Query('network') String? network,
     @Query('slot_number') int? slotNumber,

From 138172d575905eb2aa4f5636de8423628625ad1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joaqu=C3=ADn=20Rosales?=
Date: Wed, 18 Sep 2024 06:10:40 -0600
Subject: [PATCH 56/69] feat: DB Indexing for CIP-36 registrations (#788)

* feat: add schema for cip-36 registration tables
* feat: index cip-36 by stake address
* feat: index cip-36 registrations by vote key
* fix: use TxiInsertParams::new when adding txi data
* fix: remove unused cfg attributes
* fix: refactor Cip36RegistrationInsertQuery::new
* fix(backend): Refactor queries and add multiple tables for cip36 registration indexes
* fix(backend): Cip36 Primary key is stake key.
Stake Key N->1 Vote Key * fix(backend): code format --------- Co-authored-by: Steven Johnson Co-authored-by: Steven Johnson --- .../index/{index_certs.rs => block/certs.rs} | 14 +- .../db/index/block/cip36/cql/insert_cip36.cql | 22 ++ .../cip36/cql/insert_cip36_for_vote_key.cql | 14 + .../block/cip36/cql/insert_cip36_invalid.cql | 26 ++ .../src/db/index/block/cip36/insert_cip36.rs | 83 +++++ .../block/cip36/insert_cip36_for_vote_key.rs | 76 +++++ .../index/block/cip36/insert_cip36_invalid.rs | 98 ++++++ .../bin/src/db/index/block/cip36/mod.rs | 159 ++++++++++ .../cql}/insert_stake_registration.cql | 0 .../{queries => block/cql}/insert_txi.cql | 0 .../src/db/index/{block.rs => block/mod.rs} | 27 +- .../db/index/{index_txi.rs => block/txi.rs} | 19 +- .../{queries => block/txo/cql}/insert_txo.cql | 0 .../txo/cql}/insert_txo_asset.cql | 0 .../txo/cql}/insert_unstaked_txo.cql | 0 .../txo/cql}/insert_unstaked_txo_asset.cql | 0 .../bin/src/db/index/block/txo/insert_txo.rs | 75 +++++ .../db/index/block/txo/insert_txo_asset.rs | 77 +++++ .../db/index/block/txo/insert_unstaked_txo.rs | 68 ++++ .../block/txo/insert_unstaked_txo_asset.rs | 77 +++++ .../index/{index_txo.rs => block/txo/mod.rs} | 300 ++---------------- catalyst-gateway/bin/src/db/index/mod.rs | 4 - .../{ => cql}/get_txi_by_txn_hashes.cql | 0 .../{ => cql}/get_txo_by_stake_address.cql | 0 .../queries/{ => cql}/update_txo_spent.cql | 0 .../db/index/{queries.rs => queries/mod.rs} | 45 ++- .../staked_ada/get_txi_by_txn_hash.rs | 2 +- .../staked_ada/get_txo_by_stake_address.rs | 3 +- .../src/db/index/queries/staked_ada/mod.rs | 4 + .../staked_ada/update_txo_spent.rs} | 18 +- .../index/schema/cql/cip36_registration.cql | 18 ++ .../cql/cip36_registration_for_vote_key.cql | 14 + .../schema/cql/cip36_registration_invalid.cql | 20 ++ .../db/index/schema/{ => cql}/namespace.cql | 0 .../schema/{ => cql}/stake_registration.cql | 0 .../{ => cql}/txi_by_txn_hash_table.cql | 0 .../{ => cql}/txo_assets_by_stake_table.cql | 0 .../schema/{ => cql}/txo_by_stake_table.cql | 0 .../unstaked_txo_assets_by_txn_hash.cql | 0 .../{ => cql}/unstaked_txo_by_txn_hash.cql | 0 .../src/db/index/{schema.rs => schema/mod.rs} | 29 +- .../src/service/api/cardano/staked_ada_get.rs | 6 +- 42 files changed, 966 insertions(+), 332 deletions(-) rename catalyst-gateway/bin/src/db/index/{index_certs.rs => block/certs.rs} (96%) create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/mod.rs rename catalyst-gateway/bin/src/db/index/{queries => block/cql}/insert_stake_registration.cql (100%) rename catalyst-gateway/bin/src/db/index/{queries => block/cql}/insert_txi.cql (100%) rename catalyst-gateway/bin/src/db/index/{block.rs => block/mod.rs} (81%) rename catalyst-gateway/bin/src/db/index/{index_txi.rs => block/txi.rs} (86%) rename catalyst-gateway/bin/src/db/index/{queries => block/txo/cql}/insert_txo.cql (100%) rename catalyst-gateway/bin/src/db/index/{queries => block/txo/cql}/insert_txo_asset.cql (100%) rename 
catalyst-gateway/bin/src/db/index/{queries => block/txo/cql}/insert_unstaked_txo.cql (100%) rename catalyst-gateway/bin/src/db/index/{queries => block/txo/cql}/insert_unstaked_txo_asset.cql (100%) create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs rename catalyst-gateway/bin/src/db/index/{index_txo.rs => block/txo/mod.rs} (50%) rename catalyst-gateway/bin/src/db/index/queries/{ => cql}/get_txi_by_txn_hashes.cql (100%) rename catalyst-gateway/bin/src/db/index/queries/{ => cql}/get_txo_by_stake_address.cql (100%) rename catalyst-gateway/bin/src/db/index/queries/{ => cql}/update_txo_spent.cql (100%) rename catalyst-gateway/bin/src/db/index/{queries.rs => queries/mod.rs} (82%) rename catalyst-gateway/bin/src/db/index/{ => queries}/staked_ada/get_txi_by_txn_hash.rs (96%) rename catalyst-gateway/bin/src/db/index/{ => queries}/staked_ada/get_txo_by_stake_address.rs (96%) create mode 100644 catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs rename catalyst-gateway/bin/src/db/index/{staked_ada/mod.rs => queries/staked_ada/update_txo_spent.rs} (81%) create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/namespace.cql (100%) rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/stake_registration.cql (100%) rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/txi_by_txn_hash_table.cql (100%) rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/txo_assets_by_stake_table.cql (100%) rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/txo_by_stake_table.cql (100%) rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/unstaked_txo_assets_by_txn_hash.cql (100%) rename catalyst-gateway/bin/src/db/index/schema/{ => cql}/unstaked_txo_by_txn_hash.cql (100%) rename catalyst-gateway/bin/src/db/index/{schema.rs => schema/mod.rs} (75%) diff --git a/catalyst-gateway/bin/src/db/index/index_certs.rs b/catalyst-gateway/bin/src/db/index/block/certs.rs similarity index 96% rename from catalyst-gateway/bin/src/db/index/index_certs.rs rename to catalyst-gateway/bin/src/db/index/block/certs.rs index ef238557101..3c7ec9bcca9 100644 --- a/catalyst-gateway/bin/src/db/index/index_certs.rs +++ b/catalyst-gateway/bin/src/db/index/block/certs.rs @@ -7,11 +7,14 @@ use pallas::ledger::primitives::{alonzo, conway}; use scylla::{frame::value::MaybeUnset, SerializeRow, Session}; use tracing::error; -use super::{ - queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, - session::CassandraSession, +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + service::utilities::convert::u16_from_saturating, + settings::CassandraEnvVars, }; -use crate::{service::utilities::convert::u16_from_saturating, settings::CassandraEnvVars}; /// Insert TXI Query and Parameters #[derive(SerializeRow)] @@ -35,8 +38,7 @@ pub(crate) struct StakeRegistrationInsertQuery { } /// TXI by Txn hash Index -const INSERT_STAKE_REGISTRATION_QUERY: &str = - 
include_str!("./queries/insert_stake_registration.cql"); +const INSERT_STAKE_REGISTRATION_QUERY: &str = include_str!("./cql/insert_stake_registration.cql"); impl StakeRegistrationInsertQuery { /// Create a new Insert Query. diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql new file mode 100644 index 00000000000..220954045c8 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql @@ -0,0 +1,22 @@ +-- Index CIP-36 Registrations (Valid) +INSERT INTO cip36_registration ( + stake_address, + nonce, + slot_no, + txn, + vote_key, + payment_address, + is_payable, + raw_nonce, + cip36, +) VALUES ( + :stake_address, + :nonce, + :slot_no, + :txn, + :vote_key, + :payment_address, + :is_payable, + :raw_nonce, + :cip36, +); diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql new file mode 100644 index 00000000000..a09d36d3f55 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql @@ -0,0 +1,14 @@ +-- Index CIP-36 Registration (Valid) +INSERT INTO cip36_registration_for_stake_addr ( + vote_key, + stake_address, + slot_no, + txn, + valid, +) VALUES ( + :vote_key, + :stake_address, + :slot_no, + :txn, + :valid, +); diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql new file mode 100644 index 00000000000..06162661fd0 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql @@ -0,0 +1,26 @@ +-- Index CIP-36 Registrations by Vote Key +INSERT INTO cip36_registration_invalid ( + stake_address, + slot_no, + txn, + vote_key, + payment_address, + is_payable, + raw_nonce, + nonce, + cip36, + signed, + error_report, +) VALUES ( + :stake_address, + :slot_no, + :txn, + :vote_key, + :payment_address, + :is_payable, + :raw_nonce, + :nonce, + :cip36, + :signed, + :error_report +); diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs new file mode 100644 index 00000000000..dbbb4061819 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs @@ -0,0 +1,83 @@ +//! Insert CIP36 Registration Query + +use std::sync::Arc; + +use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey}; +use scylla::{frame::value::MaybeUnset, SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Index Registration by Stake Address +const INSERT_CIP36_REGISTRATION_QUERY: &str = include_str!("./cql/insert_cip36.cql"); + +/// Insert CIP-36 Registration Query Parameters +#[derive(SerializeRow, Clone)] +pub(super) struct Params { + /// Stake key hash + stake_address: Vec, + /// Nonce value after normalization. + nonce: num_bigint::BigInt, + /// Slot Number the cert is in. + slot_no: num_bigint::BigInt, + /// Transaction Index. + txn: i16, + /// Voting Public Key + vote_key: Vec, + /// Full Payment Address (not hashed, 32 byte ED25519 Public key). + payment_address: MaybeUnset>, + /// Is the stake address a script or not. + is_payable: bool, + /// Raw nonce value. 
+    raw_nonce: num_bigint::BigInt,
+    /// Is the Registration CIP36 format, or CIP15
+    cip36: bool,
+}
+
+impl Params {
+    /// Create a new Insert Query.
+    pub fn new(vote_key: &VotingPubKey, slot_no: u64, txn: i16, cip36: &Cip36) -> Self {
+        Params {
+            stake_address: cip36
+                .stake_pk
+                .map(|s| s.to_bytes().to_vec())
+                .unwrap_or_default(),
+            nonce: cip36.nonce.into(),
+            slot_no: slot_no.into(),
+            txn,
+            vote_key: vote_key.voting_pk.to_bytes().to_vec(),
+            payment_address: if cip36.payment_addr.is_empty() {
+                MaybeUnset::Unset
+            } else {
+                MaybeUnset::Set(cip36.payment_addr.clone())
+            },
+            is_payable: cip36.payable,
+            raw_nonce: cip36.raw_nonce.into(),
+            cip36: cip36.cip36.unwrap_or_default(),
+        }
+    }
+
+    /// Prepare Batch of Insert CIP-36 Registration Index Data Queries
+    pub(super) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_CIP36_REGISTRATION_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = insert_queries {
+            error!(error=%error,"Failed to prepare Insert CIP-36 Registration Query.");
+        };
+
+        insert_queries
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
new file mode 100644
index 00000000000..bf964f01487
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
@@ -0,0 +1,76 @@
+//! Insert CIP36 Registration Query (by Vote Key)
+
+use std::sync::Arc;
+
+use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey};
+use scylla::{SerializeRow, Session};
+use tracing::error;
+
+use crate::{
+    db::index::queries::{PreparedQueries, SizedBatch},
+    settings::CassandraEnvVars,
+};
+
+/// Index Registration by Vote Key
+const INSERT_CIP36_REGISTRATION_FOR_VOTE_KEY_QUERY: &str =
+    include_str!("./cql/insert_cip36_for_vote_key.cql");
+
+/// Insert CIP-36 Registration for Vote Key Query Parameters
+#[derive(SerializeRow, Clone)]
+pub(super) struct Params {
+    /// Stake key hash
+    stake_address: Vec<u8>,
+    /// Voting Public Key
+    vote_key: Vec<u8>,
+    /// Slot Number the cert is in.
+    slot_no: num_bigint::BigInt,
+    /// Transaction Index.
+    txn: i16,
+    /// Is the registration Valid or not.
+    valid: bool,
+}
+
+impl Params {
+    /// Create a new Insert Query.
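
Worth a short aside here: the `payment_address: MaybeUnset<Vec<u8>>` field in `insert_cip36.rs` above leans on Scylla's distinction between binding a null (which writes a tombstone) and leaving a column unset (which skips it entirely). A minimal sketch of the idiom, using a stand-in enum rather than the real `scylla::frame::value::MaybeUnset`:

```rust
/// Stand-in for `scylla::frame::value::MaybeUnset` (illustration only).
enum MaybeUnset<T> {
    /// Skip the column entirely; no tombstone is written.
    Unset,
    /// Bind this value for the column.
    Set(T),
}

/// Mirrors the constructor logic above: an empty payment address is
/// treated as "not registered", so the column is left unset.
fn payment_address_binding(addr: Vec<u8>) -> MaybeUnset<Vec<u8>> {
    if addr.is_empty() {
        MaybeUnset::Unset
    } else {
        MaybeUnset::Set(addr)
    }
}

fn main() {
    assert!(matches!(payment_address_binding(vec![]), MaybeUnset::Unset));
    assert!(matches!(payment_address_binding(vec![0x61]), MaybeUnset::Set(_)));
}
```
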
+    pub fn new(
+        vote_key: Option<&VotingPubKey>, slot_no: u64, txn: i16, cip36: &Cip36, valid: bool,
+    ) -> Self {
+        let vote_key = if let Some(vote_key) = vote_key {
+            vote_key.voting_pk.to_bytes().to_vec()
+        } else {
+            Vec::new()
+        };
+
+        Params {
+            stake_address: cip36
+                .stake_pk
+                .map(|s| s.to_bytes().to_vec())
+                .unwrap_or_default(),
+            vote_key,
+            slot_no: slot_no.into(),
+            txn,
+            valid,
+        }
+    }
+
+    /// Prepare Batch of Insert CIP-36 Registration Index Data Queries
+    pub(super) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_CIP36_REGISTRATION_FOR_VOTE_KEY_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = insert_queries {
+            error!(error=%error,"Failed to prepare Insert CIP-36 Registration Query.");
+        };
+
+        insert_queries
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
new file mode 100644
index 00000000000..35d24bdf3b1
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
@@ -0,0 +1,98 @@
+//! Insert CIP36 Registration Query (Invalid Records)
+
+use std::sync::Arc;
+
+use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey};
+use scylla::{frame::value::MaybeUnset, SerializeRow, Session};
+use tracing::error;
+
+use crate::{
+    db::index::queries::{PreparedQueries, SizedBatch},
+    settings::CassandraEnvVars,
+};
+
+/// Index Registration by Stake Address (Invalid Registrations)
+const INSERT_CIP36_REGISTRATION_INVALID_QUERY: &str =
+    include_str!("./cql/insert_cip36_invalid.cql");
+
+/// Insert CIP-36 Registration Invalid Query Parameters
+#[derive(SerializeRow, Clone)]
+pub(super) struct Params {
+    /// Stake key hash
+    stake_address: Vec<u8>,
+    /// Slot Number the cert is in.
+    slot_no: num_bigint::BigInt,
+    /// Transaction Index.
+    txn: i16,
+    /// Voting Public Key
+    vote_key: Vec<u8>,
+    /// Full Payment Address (not hashed, 32 byte ED25519 Public key).
+    payment_address: Vec<u8>,
+    /// Is the stake address a script or not.
+    is_payable: bool,
+    /// Raw nonce value.
+    raw_nonce: num_bigint::BigInt,
+    /// Nonce value after normalization.
+    nonce: num_bigint::BigInt,
+    /// Strict Catalyst validated.
+    cip36: MaybeUnset<bool>,
+    /// Signature validates.
+    signed: bool,
+    /// List of serialization errors.
+    error_report: Vec<String>,
+}
+
+impl Params {
+    /// Create a new Insert Query.
+    pub fn new(
+        vote_key: Option<&VotingPubKey>, slot_no: u64, txn: i16, cip36: &Cip36,
+        error_report: Vec<String>,
+    ) -> Self {
+        let vote_key = if let Some(vote_key) = vote_key {
+            vote_key.voting_pk.to_bytes().to_vec()
+        } else {
+            Vec::new()
+        };
+        Params {
+            stake_address: cip36
+                .stake_pk
+                .map(|s| s.to_bytes().to_vec())
+                .unwrap_or_default(),
+            slot_no: slot_no.into(),
+            txn,
+            vote_key,
+            payment_address: cip36.payment_addr.clone(),
+            is_payable: cip36.payable,
+            raw_nonce: cip36.raw_nonce.into(),
+            nonce: cip36.nonce.into(),
+            cip36: if let Some(cip36) = cip36.cip36 {
+                MaybeUnset::Set(cip36)
+            } else {
+                MaybeUnset::Unset
+            },
+            signed: cip36.signed,
+            error_report,
+        }
+    }
+
+    /// Prepare Batch of Insert CIP-36 Registration Index Data Queries
+    pub(super) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<SizedBatch> {
+        let insert_queries = PreparedQueries::prepare_batch(
+            session.clone(),
+            INSERT_CIP36_REGISTRATION_INVALID_QUERY,
+            cfg,
+            scylla::statement::Consistency::Any,
+            true,
+            false,
+        )
+        .await;
+
+        if let Err(ref error) = insert_queries {
+            error!(error=%error,"Failed to prepare Insert CIP-36 Registration Invalid Query.");
+        };
+
+        insert_queries
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
new file mode 100644
index 00000000000..a713ba756b9
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
@@ -0,0 +1,159 @@
+//! Index CIP-36 Registrations.
+
+mod insert_cip36;
+mod insert_cip36_for_vote_key;
+mod insert_cip36_invalid;
+
+use std::sync::Arc;
+
+use cardano_chain_follower::{Metadata, MultiEraBlock};
+use scylla::Session;
+
+use crate::{
+    db::index::{
+        queries::{FallibleQueryTasks, PreparedQuery, SizedBatch},
+        session::CassandraSession,
+    },
+    settings::CassandraEnvVars,
+};
+
+/// Insert CIP-36 Registration Queries
+pub(crate) struct Cip36InsertQuery {
+    /// Valid CIP-36 Registration data captured during indexing.
+    registrations: Vec<insert_cip36::Params>,
+    /// Invalid CIP-36 Registration data captured during indexing.
+    invalid: Vec<insert_cip36_invalid::Params>,
+    /// CIP-36 Registration by Vote Key data captured during indexing.
+    for_stake: Vec<insert_cip36_for_vote_key::Params>,
+}
+
+impl Cip36InsertQuery {
+    /// Create new data set for CIP-36 Registrations Insert Query Batch.
+    pub(crate) fn new() -> Self {
+        Cip36InsertQuery {
+            registrations: Vec::new(),
+            invalid: Vec::new(),
+            for_stake: Vec::new(),
+        }
+    }
+
+    /// Prepare Batch of Insert Cip36 Registration Data Queries
+    pub(crate) async fn prepare_batch(
+        session: &Arc<Session>, cfg: &CassandraEnvVars,
+    ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch)> {
+        let insert_cip36_batch = insert_cip36::Params::prepare_batch(session, cfg).await;
+        let insert_cip36_invalid_batch =
+            insert_cip36_invalid::Params::prepare_batch(session, cfg).await;
+        let insert_cip36_for_stake_addr_batch =
+            insert_cip36_for_vote_key::Params::prepare_batch(session, cfg).await;
+
+        Ok((
+            insert_cip36_batch?,
+            insert_cip36_invalid_batch?,
+            insert_cip36_for_stake_addr_batch?,
+        ))
+    }
+
+    /// Index the CIP-36 registrations in a transaction.
+    pub(crate) fn index(
+        &mut self, txn: usize, txn_index: i16, slot_no: u64, block: &MultiEraBlock,
+    ) {
+        if let Some(decoded_metadata) = block.txn_metadata(txn, Metadata::cip36::LABEL) {
+            #[allow(irrefutable_let_patterns)]
+            if let Metadata::DecodedMetadataValues::Cip36(cip36) = &decoded_metadata.value {
+                // Check if we are indexing a valid or invalid registration.
+                // Note, we ONLY care about Catalyst, so we should only have 1 voting key;
+                // if not, call it an error.
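
A stand-alone sketch of the rule the comment above describes, using stand-in types rather than the real `cardano_chain_follower` metadata structs (assumption: only the empty-report, single-voting-key case counts as valid, exactly as the branch that follows encodes):

```rust
/// Stand-in for the decoded CIP-36 metadata (not the real crate type).
struct DecodedCip36 {
    /// Validation problem report; empty means no problems were found.
    report: Vec<String>,
    /// Decoded voting public keys.
    voting_keys: Vec<[u8; 32]>,
}

/// A registration is indexed as valid only with a clean report and
/// exactly one voting key; everything else goes to the invalid tables.
fn is_valid_catalyst_registration(reg: &DecodedCip36) -> bool {
    reg.report.is_empty() && reg.voting_keys.len() == 1
}

fn main() {
    let ok = DecodedCip36 { report: vec![], voting_keys: vec![[0u8; 32]] };
    let multi = DecodedCip36 { report: vec![], voting_keys: vec![[0u8; 32]; 2] };
    assert!(is_valid_catalyst_registration(&ok));
    assert!(!is_valid_catalyst_registration(&multi)); // multiple keys: invalid
}
```
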
+ if decoded_metadata.report.is_empty() && cip36.voting_keys.len() == 1 { + // Always true, because we already checked if the array has only one entry. + if let Some(vote_key) = cip36.voting_keys.first() { + self.registrations.push(insert_cip36::Params::new( + vote_key, slot_no, txn_index, cip36, + )); + self.for_stake.push(insert_cip36_for_vote_key::Params::new( + Some(vote_key), + slot_no, + txn_index, + cip36, + true, + )); + } + } else { + if cip36.voting_keys.is_empty() { + self.invalid.push(insert_cip36_invalid::Params::new( + None, + slot_no, + txn_index, + cip36, + decoded_metadata.report.clone(), + )); + self.for_stake.push(insert_cip36_for_vote_key::Params::new( + None, slot_no, txn_index, cip36, false, + )); + } + for vote_key in &cip36.voting_keys { + self.invalid.push(insert_cip36_invalid::Params::new( + Some(vote_key), + slot_no, + txn_index, + cip36, + decoded_metadata.report.clone(), + )); + self.for_stake.push(insert_cip36_for_vote_key::Params::new( + Some(vote_key), + slot_no, + txn_index, + cip36, + false, + )); + } + } + } + } + } + + /// Execute the CIP-36 Registration Indexing Queries. + /// + /// Consumes the `self` and returns a vector of futures. + pub(crate) fn execute(self, session: &Arc) -> FallibleQueryTasks { + let mut query_handles: FallibleQueryTasks = Vec::new(); + + if !self.registrations.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::Cip36RegistrationInsertQuery, + self.registrations, + ) + .await + })); + } + + if !self.invalid.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::Cip36RegistrationInsertErrorQuery, + self.invalid, + ) + .await + })); + } + + if !self.for_stake.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::Cip36RegistrationForStakeAddrInsertQuery, + self.for_stake, + ) + .await + })); + } + + query_handles + } +} diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_stake_registration.cql b/catalyst-gateway/bin/src/db/index/block/cql/insert_stake_registration.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/insert_stake_registration.cql rename to catalyst-gateway/bin/src/db/index/block/cql/insert_stake_registration.cql diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txi.cql b/catalyst-gateway/bin/src/db/index/block/cql/insert_txi.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/insert_txi.cql rename to catalyst-gateway/bin/src/db/index/block/cql/insert_txi.cql diff --git a/catalyst-gateway/bin/src/db/index/block.rs b/catalyst-gateway/bin/src/db/index/block/mod.rs similarity index 81% rename from catalyst-gateway/bin/src/db/index/block.rs rename to catalyst-gateway/bin/src/db/index/block/mod.rs index 69704dd6e49..e2c010c5ed8 100644 --- a/catalyst-gateway/bin/src/db/index/block.rs +++ b/catalyst-gateway/bin/src/db/index/block/mod.rs @@ -1,17 +1,20 @@ //! Index a block +//! 
Primary Data Indexing - Upsert operations + +pub(crate) mod certs; +pub(crate) mod cip36; +pub(crate) mod txi; +pub(crate) mod txo; use cardano_chain_follower::MultiEraBlock; +use certs::CertInsertQuery; +use cip36::Cip36InsertQuery; use tracing::{debug, error}; +use txi::TxiInsertQuery; +use txo::TxoInsertQuery; -use super::{ - index_certs::CertInsertQuery, index_txi::TxiInsertQuery, index_txo::TxoInsertQuery, - queries::FallibleQueryTasks, session::CassandraSession, -}; - -/// Convert a usize to an i16 and saturate at `i16::MAX` -pub(crate) fn usize_to_i16(value: usize) -> i16 { - value.try_into().unwrap_or(i16::MAX) -} +use super::{queries::FallibleQueryTasks, session::CassandraSession}; +use crate::service::utilities::convert::i16_from_saturating; /// Add all data needed from the block into the indexes. #[allow(clippy::similar_names)] @@ -22,6 +25,8 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { }; let mut cert_index = CertInsertQuery::new(); + let mut cip36_index = Cip36InsertQuery::new(); + let mut txi_index = TxiInsertQuery::new(); let mut txo_index = TxoInsertQuery::new(); @@ -30,7 +35,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // We add all transactions in the block to their respective index data sets. for (txn_index, txs) in block_data.txs().iter().enumerate() { - let txn = usize_to_i16(txn_index); + let txn = i16_from_saturating(txn_index); let txn_hash = txs.hash().to_vec(); @@ -41,6 +46,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // let mint = txs.mints().iter() {}; // TODO: Index Metadata. + cip36_index.index(txn_index, txn, slot_no, block); // Index Certificates inside the transaction. cert_index.index(txs, slot_no, txn, block); @@ -56,6 +62,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { query_handles.extend(txo_index.execute(&session)); query_handles.extend(txi_index.execute(&session)); query_handles.extend(cert_index.execute(&session)); + query_handles.extend(cip36_index.execute(&session)); let mut result: anyhow::Result<()> = Ok(()); diff --git a/catalyst-gateway/bin/src/db/index/index_txi.rs b/catalyst-gateway/bin/src/db/index/block/txi.rs similarity index 86% rename from catalyst-gateway/bin/src/db/index/index_txi.rs rename to catalyst-gateway/bin/src/db/index/block/txi.rs index 7680dafab5e..d3a37b3055f 100644 --- a/catalyst-gateway/bin/src/db/index/index_txi.rs +++ b/catalyst-gateway/bin/src/db/index/block/txi.rs @@ -5,11 +5,13 @@ use std::sync::Arc; use scylla::{SerializeRow, Session}; use tracing::error; -use super::{ - queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, - session::CassandraSession, +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + settings::CassandraEnvVars, }; -use crate::settings::CassandraEnvVars; /// Insert TXI Query and Parameters #[derive(SerializeRow)] @@ -40,7 +42,7 @@ pub(crate) struct TxiInsertQuery { } /// TXI by Txn hash Index -const INSERT_TXI_QUERY: &str = include_str!("./queries/insert_txi.cql"); +const INSERT_TXI_QUERY: &str = include_str!("./cql/insert_txi.cql"); impl TxiInsertQuery { /// Create a new record for this transaction. 
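
For reference, a sketch of the saturating conversion this hunk switches to. The body is an assumption based on the removed `usize_to_i16` helper, which behaved the same way; the refactor simply centralizes it as `service::utilities::convert::i16_from_saturating`:

```rust
/// Convert a `usize` to an `i16`, saturating at `i16::MAX` instead of
/// panicking or wrapping (sketch of the assumed helper behaviour).
fn i16_from_saturating(value: usize) -> i16 {
    value.try_into().unwrap_or(i16::MAX)
}

fn main() {
    assert_eq!(i16_from_saturating(7), 7);
    // A transaction index beyond i16::MAX saturates rather than failing.
    assert_eq!(i16_from_saturating(100_000), i16::MAX);
}
```
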
@@ -78,11 +80,8 @@ impl TxiInsertQuery { let txn_hash = txi.hash().to_vec(); let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX); - self.txi_data.push(TxiInsertParams { - txn_hash, - txo, - slot_no: slot_no.into(), - }); + self.txi_data + .push(TxiInsertParams::new(&txn_hash, txo, slot_no)); } } diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/insert_txo.cql rename to catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo.cql diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo_asset.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/insert_txo_asset.cql rename to catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo_asset.cql diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo.cql rename to catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo.cql diff --git a/catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo_asset.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/insert_unstaked_txo_asset.cql rename to catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo_asset.cql diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs new file mode 100644 index 00000000000..7d9c0b67216 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs @@ -0,0 +1,75 @@ +//! Insert TXO Indexed Data Queries. +//! +//! Note, there are multiple ways TXO Data is indexed and they all happen in here. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// TXO by Stake Address Indexing query +const INSERT_TXO_QUERY: &str = include_str!("./cql/insert_txo.cql"); + +/// Insert TXO Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Stake Address - Binary 28 bytes. 0 bytes = not staked. + stake_address: Vec, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Actual full TXO Address + address: String, + /// Actual TXO Value in lovelace + value: num_bigint::BigInt, + /// Transactions hash. + txn_hash: Vec, +} + +impl Params { + /// Create a new record for this transaction. 
+ pub(super) fn new( + stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, address: &str, value: u64, + txn_hash: &[u8], + ) -> Self { + Self { + stake_address: stake_address.to_vec(), + slot_no: slot_no.into(), + txn, + txo, + address: address.to_string(), + value: value.into(), + txn_hash: txn_hash.to_vec(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_TXO_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs new file mode 100644 index 00000000000..9fa349237b4 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs @@ -0,0 +1,77 @@ +//! Insert TXO Native Assets into the DB. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// TXO Asset by Stake Address Indexing Query +const INSERT_TXO_ASSET_QUERY: &str = include_str!("./cql/insert_txo_asset.cql"); + +/// Insert TXO Asset Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Stake Address - Binary 28 bytes. 0 bytes = not staked. + stake_address: Vec, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Policy hash of the asset + policy_id: Vec, + /// Policy name of the asset + policy_name: String, + /// Value of the asset + value: num_bigint::BigInt, +} + +impl Params { + /// Create a new record for this transaction. + /// + /// Note Value can be either a u64 or an i64, so use a i128 to represent all possible + /// values. + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, policy_id: &[u8], + policy_name: &str, value: i128, + ) -> Self { + Self { + stake_address: stake_address.to_vec(), + slot_no: slot_no.into(), + txn, + txo, + policy_id: policy_id.to_vec(), + policy_name: policy_name.to_owned(), + value: value.into(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_TXO_ASSET_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs new file mode 100644 index 00000000000..e27c7651c23 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs @@ -0,0 +1,68 @@ +//! Insert Unstaked TXOs into the DB. 
+use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Unstaked TXO by Stake Address Indexing query +const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./cql/insert_unstaked_txo.cql"); + +/// Insert TXO Unstaked Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Transactions hash. + txn_hash: Vec, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Actual full TXO Address + address: String, + /// Actual TXO Value in lovelace + value: num_bigint::BigInt, +} + +impl Params { + /// Create a new record for this transaction. + pub(super) fn new( + txn_hash: &[u8], txo: i16, slot_no: u64, txn: i16, address: &str, value: u64, + ) -> Self { + Self { + txn_hash: txn_hash.to_vec(), + txo, + slot_no: slot_no.into(), + txn, + address: address.to_string(), + value: value.into(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_UNSTAKED_TXO_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs new file mode 100644 index 00000000000..8ac33aa129d --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs @@ -0,0 +1,77 @@ +//! Insert Unstaked TXO Native Assets into the DB. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Unstaked TXO Asset by Stake Address Indexing Query +const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str = include_str!("./cql/insert_unstaked_txo_asset.cql"); + +/// Insert TXO Asset Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Transactions hash. + txn_hash: Vec, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Policy hash of the asset + policy_id: Vec, + /// Policy name of the asset + policy_name: String, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Value of the asset + value: num_bigint::BigInt, +} + +impl Params { + /// Create a new record for this transaction. + /// + /// Note Value can be either a u64 or an i64, so use a i128 to represent all possible + /// values. 
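
The u64/i64 note above is the whole reason asset values travel as `i128`: it is the smallest standard integer type that losslessly holds every `u64` and every `i64`. A tiny self-contained illustration:

```rust
fn main() {
    // u64::MAX overflows i64, and i64::MIN cannot be a u64;
    // both convert into i128 without loss.
    assert_eq!(i128::from(u64::MAX), 18_446_744_073_709_551_615_i128);
    assert_eq!(i128::from(i64::MIN), -9_223_372_036_854_775_808_i128);
}
```
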
+ #[allow(clippy::too_many_arguments)] + pub(super) fn new( + txn_hash: &[u8], txo: i16, policy_id: &[u8], policy_name: &str, slot_no: u64, txn: i16, + value: i128, + ) -> Self { + Self { + txn_hash: txn_hash.to_vec(), + txo, + policy_id: policy_id.to_vec(), + policy_name: policy_name.to_owned(), + slot_no: slot_no.into(), + txn, + value: value.into(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_UNSTAKED_TXO_ASSET_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert Unstaked TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/index_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs similarity index 50% rename from catalyst-gateway/bin/src/db/index/index_txo.rs rename to catalyst-gateway/bin/src/db/index/block/txo/mod.rs index 411a4467d47..fc1ea2f306e 100644 --- a/catalyst-gateway/bin/src/db/index/index_txo.rs +++ b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs @@ -2,287 +2,41 @@ //! //! Note, there are multiple ways TXO Data is indexed and they all happen in here. +mod insert_txo; +mod insert_txo_asset; +mod insert_unstaked_txo; +mod insert_unstaked_txo_asset; + use std::sync::Arc; -use scylla::{SerializeRow, Session}; +use scylla::Session; use tracing::{error, warn}; -use super::{ - block::usize_to_i16, - queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, - session::CassandraSession, +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + service::utilities::convert::i16_from_saturating, + settings::CassandraEnvVars, }; -use crate::settings::CassandraEnvVars; /// This is used to indicate that there is no stake address. const NO_STAKE_ADDRESS: &[u8] = &[]; -/// TXO by Stake Address Indexing query -const INSERT_TXO_QUERY: &str = include_str!("./queries/insert_txo.cql"); - -/// Insert TXO Query Parameters -/// (Superset of data to support both Staked and Unstaked TXO records.) -#[derive(SerializeRow)] -struct TxoInsertParams { - /// Stake Address - Binary 28 bytes. 0 bytes = not staked. - stake_address: Vec, - /// Block Slot Number - slot_no: num_bigint::BigInt, - /// Transaction Offset inside the block. - txn: i16, - /// Transaction Output Offset inside the transaction. - txo: i16, - /// Actual full TXO Address - address: String, - /// Actual TXO Value in lovelace - value: num_bigint::BigInt, - /// Transactions hash. - txn_hash: Vec, -} - -impl TxoInsertParams { - /// Create a new record for this transaction. 
- pub(crate) fn new( - stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, address: &str, value: u64, - txn_hash: &[u8], - ) -> Self { - Self { - stake_address: stake_address.to_vec(), - slot_no: slot_no.into(), - txn, - txo, - address: address.to_string(), - value: value.into(), - txn_hash: txn_hash.to_vec(), - } - } - - /// Prepare Batch of Staked Insert TXO Asset Index Data Queries - async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, - ) -> anyhow::Result { - let txo_insert_queries = PreparedQueries::prepare_batch( - session.clone(), - INSERT_TXO_QUERY, - cfg, - scylla::statement::Consistency::Any, - true, - false, - ) - .await; - - if let Err(ref error) = txo_insert_queries { - error!(error=%error,"Failed to prepare Insert TXO Asset Query."); - }; - - txo_insert_queries - } -} - -/// Unstaked TXO by Stake Address Indexing query -const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./queries/insert_unstaked_txo.cql"); - -/// Insert TXO Unstaked Query Parameters -/// (Superset of data to support both Staked and Unstaked TXO records.) -#[derive(SerializeRow)] -struct TxoUnstakedInsertParams { - /// Transactions hash. - txn_hash: Vec, - /// Transaction Output Offset inside the transaction. - txo: i16, - /// Block Slot Number - slot_no: num_bigint::BigInt, - /// Transaction Offset inside the block. - txn: i16, - /// Actual full TXO Address - address: String, - /// Actual TXO Value in lovelace - value: num_bigint::BigInt, -} - -impl TxoUnstakedInsertParams { - /// Create a new record for this transaction. - pub(crate) fn new( - txn_hash: &[u8], txo: i16, slot_no: u64, txn: i16, address: &str, value: u64, - ) -> Self { - Self { - txn_hash: txn_hash.to_vec(), - txo, - slot_no: slot_no.into(), - txn, - address: address.to_string(), - value: value.into(), - } - } - - /// Prepare Batch of Staked Insert TXO Asset Index Data Queries - async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, - ) -> anyhow::Result { - let txo_insert_queries = PreparedQueries::prepare_batch( - session.clone(), - INSERT_UNSTAKED_TXO_QUERY, - cfg, - scylla::statement::Consistency::Any, - true, - false, - ) - .await; - - if let Err(ref error) = txo_insert_queries { - error!(error=%error,"Failed to prepare Insert TXO Asset Query."); - }; - - txo_insert_queries - } -} - -/// TXO Asset by Stake Address Indexing Query -const INSERT_TXO_ASSET_QUERY: &str = include_str!("./queries/insert_txo_asset.cql"); - -/// Insert TXO Asset Query Parameters -/// (Superset of data to support both Staked and Unstaked TXO records.) -#[derive(SerializeRow)] -struct TxoAssetInsertParams { - /// Stake Address - Binary 28 bytes. 0 bytes = not staked. - stake_address: Vec, - /// Block Slot Number - slot_no: num_bigint::BigInt, - /// Transaction Offset inside the block. - txn: i16, - /// Transaction Output Offset inside the transaction. - txo: i16, - /// Policy hash of the asset - policy_id: Vec, - /// Policy name of the asset - policy_name: String, - /// Value of the asset - value: num_bigint::BigInt, -} - -impl TxoAssetInsertParams { - /// Create a new record for this transaction. - /// - /// Note Value can be either a u64 or an i64, so use a i128 to represent all possible - /// values. 
- #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, policy_id: &[u8], - policy_name: &str, value: i128, - ) -> Self { - Self { - stake_address: stake_address.to_vec(), - slot_no: slot_no.into(), - txn, - txo, - policy_id: policy_id.to_vec(), - policy_name: policy_name.to_owned(), - value: value.into(), - } - } - - /// Prepare Batch of Staked Insert TXO Asset Index Data Queries - async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, - ) -> anyhow::Result { - let txo_insert_queries = PreparedQueries::prepare_batch( - session.clone(), - INSERT_TXO_ASSET_QUERY, - cfg, - scylla::statement::Consistency::Any, - true, - false, - ) - .await; - - if let Err(ref error) = txo_insert_queries { - error!(error=%error,"Failed to prepare Insert TXO Asset Query."); - }; - - txo_insert_queries - } -} - -/// Unstaked TXO Asset by Stake Address Indexing Query -const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str = - include_str!("./queries/insert_unstaked_txo_asset.cql"); - -/// Insert TXO Asset Query Parameters -/// (Superset of data to support both Staked and Unstaked TXO records.) -#[derive(SerializeRow)] -struct TxoUnstakedAssetInsertParams { - /// Transactions hash. - txn_hash: Vec, - /// Transaction Output Offset inside the transaction. - txo: i16, - /// Policy hash of the asset - policy_id: Vec, - /// Policy name of the asset - policy_name: String, - /// Block Slot Number - slot_no: num_bigint::BigInt, - /// Transaction Offset inside the block. - txn: i16, - /// Value of the asset - value: num_bigint::BigInt, -} - -impl TxoUnstakedAssetInsertParams { - /// Create a new record for this transaction. - /// - /// Note Value can be either a u64 or an i64, so use a i128 to represent all possible - /// values. - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - txn_hash: &[u8], txo: i16, policy_id: &[u8], policy_name: &str, slot_no: u64, txn: i16, - value: i128, - ) -> Self { - Self { - txn_hash: txn_hash.to_vec(), - txo, - policy_id: policy_id.to_vec(), - policy_name: policy_name.to_owned(), - slot_no: slot_no.into(), - txn, - value: value.into(), - } - } - - /// Prepare Batch of Staked Insert TXO Asset Index Data Queries - async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, - ) -> anyhow::Result { - let txo_insert_queries = PreparedQueries::prepare_batch( - session.clone(), - INSERT_UNSTAKED_TXO_ASSET_QUERY, - cfg, - scylla::statement::Consistency::Any, - true, - false, - ) - .await; - - if let Err(ref error) = txo_insert_queries { - error!(error=%error,"Failed to prepare Insert Unstaked TXO Asset Query."); - }; - - txo_insert_queries - } -} - /// Insert TXO Query and Parameters /// /// There are multiple possible parameters to a query, which are represented separately. 
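
A minimal sketch of the staked/unstaked routing decision built on the `NO_STAKE_ADDRESS` sentinel defined above: an empty stake-address byte string marks a TXO as unstaked, and that single check selects which family of tables the record is written to:

```rust
/// Sentinel: an empty byte string means the TXO has no stake address.
const NO_STAKE_ADDRESS: &[u8] = &[];

/// True if the TXO should be written to the staked TXO tables.
fn is_staked(stake_address: &[u8]) -> bool {
    stake_address != NO_STAKE_ADDRESS
}

fn main() {
    assert!(!is_staked(&[]));        // routed to the unstaked TXO tables
    assert!(is_staked(&[0x01; 28])); // routed to the staked TXO tables
}
```
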
#[allow(dead_code)]
 pub(crate) struct TxoInsertQuery {
     /// Staked TXO Data Parameters
-    staked_txo: Vec<TxoInsertParams>,
+    staked_txo: Vec<insert_txo::Params>,
     /// Unstaked TXO Data Parameters
-    unstaked_txo: Vec<TxoUnstakedInsertParams>,
+    unstaked_txo: Vec<insert_unstaked_txo::Params>,
     /// Staked TXO Asset Data Parameters
-    staked_txo_asset: Vec<TxoAssetInsertParams>,
+    staked_txo_asset: Vec<insert_txo_asset::Params>,
     /// Unstaked TXO Asset Data Parameters
-    unstaked_txo_asset: Vec<TxoUnstakedAssetInsertParams>,
+    unstaked_txo_asset: Vec<insert_unstaked_txo_asset::Params>,
 }
 
 impl TxoInsertQuery {
@@ -300,11 +54,13 @@ impl TxoInsertQuery {
     pub(crate) async fn prepare_batch(
         session: &Arc<Session>, cfg: &CassandraEnvVars,
     ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch, SizedBatch)> {
-        let txo_staked_insert_batch = TxoInsertParams::prepare_batch(session, cfg).await;
-        let txo_unstaked_insert_batch = TxoUnstakedInsertParams::prepare_batch(session, cfg).await;
-        let txo_staked_asset_insert_batch = TxoAssetInsertParams::prepare_batch(session, cfg).await;
+        let txo_staked_insert_batch = insert_txo::Params::prepare_batch(session, cfg).await;
+        let txo_unstaked_insert_batch =
+            insert_unstaked_txo::Params::prepare_batch(session, cfg).await;
+        let txo_staked_asset_insert_batch =
+            insert_txo_asset::Params::prepare_batch(session, cfg).await;
         let txo_unstaked_asset_insert_batch =
-            TxoUnstakedAssetInsertParams::prepare_batch(session, cfg).await;
+            insert_unstaked_txo_asset::Params::prepare_batch(session, cfg).await;
 
         Ok((
             txo_staked_insert_batch?,
@@ -394,10 +150,10 @@ impl TxoInsertQuery {
         };
 
         let staked = stake_address != NO_STAKE_ADDRESS;
-        let txo_index = usize_to_i16(txo_index);
+        let txo_index = i16_from_saturating(txo_index);
 
         if staked {
-            let params = TxoInsertParams::new(
+            let params = insert_txo::Params::new(
                 &stake_address,
                 slot_no,
                 txn,
@@ -409,7 +165,7 @@ impl TxoInsertQuery {
 
             self.staked_txo.push(params);
         } else {
-            let params = TxoUnstakedInsertParams::new(
+            let params = insert_unstaked_txo::Params::new(
                 txn_hash,
                 txo_index,
                 slot_no,
@@ -429,7 +185,7 @@ impl TxoInsertQuery {
                 let value = policy_asset.any_coin();
 
                 if staked {
-                    let params = TxoAssetInsertParams::new(
+                    let params = insert_txo_asset::Params::new(
                         &stake_address,
                         slot_no,
                         txn,
@@ -440,7 +196,7 @@ impl TxoInsertQuery {
                     );
                     self.staked_txo_asset.push(params);
                 } else {
-                    let params = TxoUnstakedAssetInsertParams::new(
+                    let params = insert_unstaked_txo_asset::Params::new(
                         txn_hash,
                         txo_index,
                         &policy_id,
diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs
index 15e04cd1388..f4157be8550 100644
--- a/catalyst-gateway/bin/src/db/index/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/mod.rs
@@ -1,10 +1,6 @@
 //!
Blockchain Index Database pub(crate) mod block; -pub(crate) mod index_certs; -pub(crate) mod index_txi; -pub(crate) mod index_txo; pub(crate) mod queries; pub(crate) mod schema; pub(crate) mod session; -pub(crate) mod staked_ada; diff --git a/catalyst-gateway/bin/src/db/index/queries/get_txi_by_txn_hashes.cql b/catalyst-gateway/bin/src/db/index/queries/cql/get_txi_by_txn_hashes.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/get_txi_by_txn_hashes.cql rename to catalyst-gateway/bin/src/db/index/queries/cql/get_txi_by_txn_hashes.cql diff --git a/catalyst-gateway/bin/src/db/index/queries/get_txo_by_stake_address.cql b/catalyst-gateway/bin/src/db/index/queries/cql/get_txo_by_stake_address.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/get_txo_by_stake_address.cql rename to catalyst-gateway/bin/src/db/index/queries/cql/get_txo_by_stake_address.cql diff --git a/catalyst-gateway/bin/src/db/index/queries/update_txo_spent.cql b/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/queries/update_txo_spent.cql rename to catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql diff --git a/catalyst-gateway/bin/src/db/index/queries.rs b/catalyst-gateway/bin/src/db/index/queries/mod.rs similarity index 82% rename from catalyst-gateway/bin/src/db/index/queries.rs rename to catalyst-gateway/bin/src/db/index/queries/mod.rs index 374f71645d5..89129f8d84c 100644 --- a/catalyst-gateway/bin/src/db/index/queries.rs +++ b/catalyst-gateway/bin/src/db/index/queries/mod.rs @@ -2,6 +2,8 @@ //! //! This improves query execution time. +pub(crate) mod staked_ada; + use std::sync::Arc; use anyhow::bail; @@ -10,15 +12,13 @@ use scylla::{ batch::Batch, prepared_statement::PreparedStatement, serialize::row::SerializeRow, transport::iterator::RowIterator, QueryResult, Session, }; +use staked_ada::{ + get_txi_by_txn_hash::GetTxiByTxnHashesQuery, + get_txo_by_stake_address::GetTxoByStakeAddressQuery, update_txo_spent::UpdateTxoSpentQuery, +}; -use super::{ - index_certs::CertInsertQuery, - index_txi::TxiInsertQuery, - index_txo::TxoInsertQuery, - staked_ada::{ - get_txi_by_txn_hash::GetTxiByTxnHashesQuery, - get_txo_by_stake_address::GetTxoByStakeAddressQuery, UpdateTxoSpentQuery, - }, +use super::block::{ + certs::CertInsertQuery, cip36::Cip36InsertQuery, txi::TxiInsertQuery, txo::TxoInsertQuery, }; use crate::settings::{CassandraEnvVars, CASSANDRA_MIN_BATCH_SIZE}; @@ -40,6 +40,12 @@ pub(crate) enum PreparedQuery { TxiInsertQuery, /// Stake Registration Insert query. StakeRegistrationInsertQuery, + /// CIP 36 Registration Insert Query. + Cip36RegistrationInsertQuery, + /// CIP 36 Registration Error Insert query. + Cip36RegistrationInsertErrorQuery, + /// CIP 36 Registration for stake address Insert query. + Cip36RegistrationForStakeAddrInsertQuery, /// TXO spent Update query. TxoSpentUpdateQuery, } @@ -67,6 +73,12 @@ pub(crate) struct PreparedQueries { txi_insert_queries: SizedBatch, /// TXI Insert query. stake_registration_insert_queries: SizedBatch, + /// CIP36 Registrations. + cip36_registration_insert_queries: SizedBatch, + /// CIP36 Registration errors. + cip36_registration_error_insert_queries: SizedBatch, + /// CIP36 Registration for Stake Address Insert query. + cip36_registration_for_stake_address_insert_queries: SizedBatch, /// Update TXO spent query. txo_spent_update_queries: SizedBatch, /// Get TXO by stake address query. 
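
A stripped-down sketch of the dispatch pattern the new CIP-36 variants slot into: each `PreparedQuery` variant names exactly one pre-prepared batch held by `PreparedQueries`, so callers pick a query by name and never handle raw CQL (here a `String` stands in for `SizedBatch`, purely for illustration):

```rust
/// Two representative variants (the real enum has many more).
enum PreparedQuery {
    TxoInsertQuery,
    Cip36RegistrationInsertQuery,
}

/// Holds one pre-prepared batch per query variant.
struct PreparedQueries {
    txo_insert_queries: String,
    cip36_registration_insert_queries: String,
}

impl PreparedQueries {
    /// Select the batch matching the requested query.
    fn get_batch(&self, query: &PreparedQuery) -> &str {
        match query {
            PreparedQuery::TxoInsertQuery => &self.txo_insert_queries,
            PreparedQuery::Cip36RegistrationInsertQuery => {
                &self.cip36_registration_insert_queries
            },
        }
    }
}

fn main() {
    let queries = PreparedQueries {
        txo_insert_queries: "prepared TXO insert batch".to_string(),
        cip36_registration_insert_queries: "prepared CIP-36 insert batch".to_string(),
    };
    let batch = queries.get_batch(&PreparedQuery::Cip36RegistrationInsertQuery);
    assert_eq!(batch, "prepared CIP-36 insert batch");
}
```
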
@@ -90,6 +102,7 @@ impl PreparedQueries { let txi_insert_queries = TxiInsertQuery::prepare_batch(&session, cfg).await; let all_txo_queries = TxoInsertQuery::prepare_batch(&session, cfg).await; let stake_registration_insert_queries = CertInsertQuery::prepare_batch(&session, cfg).await; + let all_cip36_queries = Cip36InsertQuery::prepare_batch(&session, cfg).await; let txo_spent_update_queries = UpdateTxoSpentQuery::prepare_batch(session.clone(), cfg).await; let txo_by_stake_address_query = GetTxoByStakeAddressQuery::prepare(session.clone()).await; @@ -102,6 +115,12 @@ impl PreparedQueries { unstaked_txo_asset_insert_queries, ) = all_txo_queries?; + let ( + cip36_registration_insert_queries, + cip36_registration_error_insert_queries, + cip36_registration_for_stake_address_insert_queries, + ) = all_cip36_queries?; + Ok(Self { txo_insert_queries, txo_asset_insert_queries, @@ -109,6 +128,9 @@ impl PreparedQueries { unstaked_txo_asset_insert_queries, txi_insert_queries: txi_insert_queries?, stake_registration_insert_queries: stake_registration_insert_queries?, + cip36_registration_insert_queries, + cip36_registration_error_insert_queries, + cip36_registration_for_stake_address_insert_queries, txo_spent_update_queries: txo_spent_update_queries?, txo_by_stake_address_query: txo_by_stake_address_query?, txi_by_txn_hash_query: txi_by_txn_hash_query?, @@ -195,6 +217,13 @@ impl PreparedQueries { PreparedQuery::UnstakedTxoAssetInsertQuery => &self.unstaked_txo_asset_insert_queries, PreparedQuery::TxiInsertQuery => &self.txi_insert_queries, PreparedQuery::StakeRegistrationInsertQuery => &self.stake_registration_insert_queries, + PreparedQuery::Cip36RegistrationInsertQuery => &self.cip36_registration_insert_queries, + PreparedQuery::Cip36RegistrationInsertErrorQuery => { + &self.cip36_registration_error_insert_queries + }, + PreparedQuery::Cip36RegistrationForStakeAddrInsertQuery => { + &self.cip36_registration_for_stake_address_insert_queries + }, PreparedQuery::TxoSpentUpdateQuery => &self.txo_spent_update_queries, }; diff --git a/catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs similarity index 96% rename from catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs rename to catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs index e1589ec6b2e..7c3cc6af048 100644 --- a/catalyst-gateway/bin/src/db/index/staked_ada/get_txi_by_txn_hash.rs +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs @@ -14,7 +14,7 @@ use crate::db::index::{ }; /// Get TXI query string. -const GET_TXI_BY_TXN_HASHES_QUERY: &str = include_str!("../queries/get_txi_by_txn_hashes.cql"); +const GET_TXI_BY_TXN_HASHES_QUERY: &str = include_str!("../cql/get_txi_by_txn_hashes.cql"); /// Get TXI query parameters. #[derive(SerializeRow)] diff --git a/catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs similarity index 96% rename from catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs rename to catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs index 861a1e2ce46..2beee7e6467 100644 --- a/catalyst-gateway/bin/src/db/index/staked_ada/get_txo_by_stake_address.rs +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs @@ -13,8 +13,7 @@ use crate::db::index::{ }; /// Get txo by stake address query string. 
-const GET_TXO_BY_STAKE_ADDRESS_QUERY: &str = - include_str!("../queries/get_txo_by_stake_address.cql"); +const GET_TXO_BY_STAKE_ADDRESS_QUERY: &str = include_str!("../cql/get_txo_by_stake_address.cql"); /// Get txo by stake address query parameters. #[derive(SerializeRow)] diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs new file mode 100644 index 00000000000..e7114f1d68a --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs @@ -0,0 +1,4 @@ +//! Staked ADA related queries. +pub(crate) mod get_txi_by_txn_hash; +pub(crate) mod get_txo_by_stake_address; +pub(crate) mod update_txo_spent; diff --git a/catalyst-gateway/bin/src/db/index/staked_ada/mod.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs similarity index 81% rename from catalyst-gateway/bin/src/db/index/staked_ada/mod.rs rename to catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs index ee682a77fa3..21658d74e29 100644 --- a/catalyst-gateway/bin/src/db/index/staked_ada/mod.rs +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs @@ -1,20 +1,20 @@ -//! Staked ADA related queries. +//! Update the TXO Spent column to optimize future queries. + use std::sync::Arc; use scylla::{SerializeRow, Session}; use tracing::error; -use super::{ - queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, SizedBatch}, - session::CassandraSession, +use crate::{ + db::index::{ + queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + settings::CassandraEnvVars, }; -use crate::settings::CassandraEnvVars; - -pub(crate) mod get_txi_by_txn_hash; -pub(crate) mod get_txo_by_stake_address; /// Update TXO spent query string. -const UPDATE_TXO_SPENT_QUERY: &str = include_str!("../queries/update_txo_spent.cql"); +const UPDATE_TXO_SPENT_QUERY: &str = include_str!("../cql/update_txo_spent.cql"); /// Update TXO spent query params. #[derive(SerializeRow)] diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql new file mode 100644 index 00000000000..17c6886e3b7 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql @@ -0,0 +1,18 @@ +-- Index of CIP-36 registrations. Valid. +CREATE TABLE IF NOT EXISTS cip36_registration ( + -- Primary Key Data + stake_address blob, -- 32 Bytes of Stake Address. + nonce varint, -- Nonce that has been slot corrected. + slot_no varint, -- slot number when the key_was_registered/re-registered. + txn smallint, -- Index of the TX which holds the registration data. + + -- Non-Key Data + vote_key blob, -- 32 Bytes of Vote Key. + payment_address blob, -- Bytes of address for payment of rewards. + is_payable boolean, -- True if payment to the address is possible. + raw_nonce varint, -- Nonce that has not been slot corrected. + cip36 boolean, -- True if the registration is CIP-36 format, Cip-15=False. 
+
+    PRIMARY KEY (stake_address, nonce, slot_no, txn)
+)
+WITH CLUSTERING ORDER BY (nonce, DESC, slot_no DESC, txn DESC);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
new file mode 100644
index 00000000000..3ab03c8f1ef
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
@@ -0,0 +1,14 @@
+-- Index of CIP-36 registrations searchable by Stake Address.
+-- Full registration data needs to be queried from the main cip36 registration tables.
+-- Includes both Valid and Invalid registrations.
+CREATE TABLE IF NOT EXISTS cip36_registration_for_stake_addr (
+    -- Primary Key Data
+    vote_key blob,         -- 32 Bytes of Vote Key.
+    stake_address blob,    -- 32 Bytes of Stake Address.
+    slot_no varint,        -- Slot number when the key was registered/re-registered.
+    txn smallint,          -- Index of the TX which holds the registration data.
+    valid boolean,         -- True if the registration is valid.
+
+    PRIMARY KEY ((vote_key, stake_address), slot_no, txn, valid)
+)
+WITH CLUSTERING ORDER BY (slot_no DESC, txn DESC);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
new file mode 100644
index 00000000000..e72eaf304ea
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
@@ -0,0 +1,20 @@
+-- Index of CIP-36 registrations that are invalid.
+CREATE TABLE IF NOT EXISTS cip36_registration_invalid (
+    -- Primary Key Data
+    stake_address blob,    -- 32 Bytes of Stake Address.
+    slot_no varint,        -- Slot number when the key was registered/re-registered.
+    txn smallint,          -- Index of the TX which holds the registration data.
+
+    -- Non-Key Data
+    vote_key blob,         -- 32 Bytes of Vote Key.
+    payment_address blob,  -- Bytes of address for payment of rewards.
+    is_payable boolean,    -- True if payment to the address is possible.
+    raw_nonce varint,      -- Nonce that has not been slot corrected.
+    nonce varint,          -- Nonce that has been slot corrected.
+    cip36 boolean,         -- True if CIP-36 Registration format used. CIP-15 = False.
+    signed boolean,        -- Signature validates.
+    error_report list<text>,  -- List of serialization errors in the registration.
+ + PRIMARY KEY (vote_key, slot_no, txn) +) +WITH CLUSTERING ORDER BY (slot_no DESC, txn DESC); diff --git a/catalyst-gateway/bin/src/db/index/schema/namespace.cql b/catalyst-gateway/bin/src/db/index/schema/cql/namespace.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/namespace.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/namespace.cql diff --git a/catalyst-gateway/bin/src/db/index/schema/stake_registration.cql b/catalyst-gateway/bin/src/db/index/schema/cql/stake_registration.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/stake_registration.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/stake_registration.cql diff --git a/catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/cql/txi_by_txn_hash_table.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/txi_by_txn_hash_table.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/txi_by_txn_hash_table.cql diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/cql/txo_assets_by_stake_table.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/txo_assets_by_stake_table.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/txo_assets_by_stake_table.cql diff --git a/catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/cql/txo_by_stake_table.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/txo_by_stake_table.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/txo_by_stake_table.cql diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_assets_by_txn_hash.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/unstaked_txo_assets_by_txn_hash.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_assets_by_txn_hash.cql diff --git a/catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_by_txn_hash.cql similarity index 100% rename from catalyst-gateway/bin/src/db/index/schema/unstaked_txo_by_txn_hash.cql rename to catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_by_txn_hash.cql diff --git a/catalyst-gateway/bin/src/db/index/schema.rs b/catalyst-gateway/bin/src/db/index/schema/mod.rs similarity index 75% rename from catalyst-gateway/bin/src/db/index/schema.rs rename to catalyst-gateway/bin/src/db/index/schema/mod.rs index 01980d608b3..4bfd4725db9 100644 --- a/catalyst-gateway/bin/src/db/index/schema.rs +++ b/catalyst-gateway/bin/src/db/index/schema/mod.rs @@ -11,7 +11,7 @@ use tracing::error; use crate::settings::CassandraEnvVars; /// Keyspace Create (Templated) -const CREATE_NAMESPACE_CQL: &str = include_str!("./schema/namespace.cql"); +const CREATE_NAMESPACE_CQL: &str = include_str!("./cql/namespace.cql"); /// The version of the Schema we are using. /// Must be incremented if there is a breaking change in any schema tables below. 
@@ -21,34 +21,49 @@ pub(crate) const SCHEMA_VERSION: u64 = 1;
 const SCHEMAS: &[(&str, &str)] = &[
     (
         // TXO by Stake Address Table Schema
-        include_str!("./schema/txo_by_stake_table.cql"),
+        include_str!("./cql/txo_by_stake_table.cql"),
         "Create Table TXO By Stake Address",
     ),
     (
        // TXO Assets by Stake Address Table Schema
-        include_str!("./schema/txo_assets_by_stake_table.cql"),
+        include_str!("./cql/txo_assets_by_stake_table.cql"),
         "Create Table TXO Assets By Stake Address",
     ),
     (
         // TXO Unstaked Table Schema
-        include_str!("./schema/unstaked_txo_by_txn_hash.cql"),
+        include_str!("./cql/unstaked_txo_by_txn_hash.cql"),
         "Create Table Unstaked TXO By Txn Hash",
     ),
     (
         // TXO Unstaked Assets Table Schema
-        include_str!("./schema/unstaked_txo_assets_by_txn_hash.cql"),
+        include_str!("./cql/unstaked_txo_assets_by_txn_hash.cql"),
         "Create Table Unstaked TXO Assets By Txn Hash",
     ),
     (
         // TXI by Stake Address Table Schema
-        include_str!("./schema/txi_by_txn_hash_table.cql"),
+        include_str!("./cql/txi_by_txn_hash_table.cql"),
         "Create Table TXI By Stake Address",
     ),
     (
         // Stake Address/Registration Table Schema
-        include_str!("./schema/stake_registration.cql"),
+        include_str!("./cql/stake_registration.cql"),
         "Create Table Stake Registration",
     ),
+    (
+        // CIP-36 Registration Table Schema
+        include_str!("./cql/cip36_registration.cql"),
+        "Create Table CIP-36 Registration",
+    ),
+    (
+        // CIP-36 Registration Invalid Table Schema
+        include_str!("./cql/cip36_registration_invalid.cql"),
+        "Create Table CIP-36 Registration Invalid",
+    ),
+    (
+        // CIP-36 Registration For Vote Key Table Schema
+        include_str!("./cql/cip36_registration_for_vote_key.cql"),
+        "Create Table CIP-36 Registration For a stake address",
+    ),
 ];
 
 /// Get the namespace for a particular db configuration
diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
index 7d974dbdee9..5ad8f1fef27 100644
--- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
+++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs
@@ -8,14 +8,14 @@ use poem_openapi::{payload::Json, ApiResponse};
 use super::types::SlotNumber;
 use crate::{
     db::index::{
-        session::CassandraSession,
-        staked_ada::{
+        queries::staked_ada::{
             get_txi_by_txn_hash::{GetTxiByTxnHashesQuery, GetTxiByTxnHashesQueryParams},
             get_txo_by_stake_address::{
                 GetTxoByStakeAddressQuery, GetTxoByStakeAddressQueryParams,
             },
-            UpdateTxoSpentQuery, UpdateTxoSpentQueryParams,
+            update_txo_spent::{UpdateTxoSpentQuery, UpdateTxoSpentQueryParams},
         },
+        session::CassandraSession,
     },
     service::common::{
         objects::cardano::{

From 092fa4b8dac87f5545740d5a9f12aa21c9e7e257 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 18 Sep 2024 19:15:01 +0700
Subject: [PATCH 57/69] docs(general): Cleanup project dictionary

---
 .config/dictionaries/project.dic | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic
index 288e074d8b0..9b2de4028e3 100644
--- a/.config/dictionaries/project.dic
+++ b/.config/dictionaries/project.dic
@@ -182,8 +182,8 @@ plpgsql
 podfile
 podhelper
 postcss
-Precache
 Pozhylenkov
+Precache
 Precertificate
 preprod
 projectcatalyst
@@ -289,11 +289,3 @@ xctestrun
 xcworkspace
 xvfb
 yoroi
-vsync
-damian-molinski
-LTRB
-hotspots
-precache
-Precache
-svgs
-Dreps
\ No newline at end of file

From d2c65ba00dc329d11b067d583195b55b838d3092 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 18 Sep 2024 19:19:03 +0700
Subject: [PATCH 58/69]
docs(spelling): Fix spelling

---
 .config/dictionaries/project.dic | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic
index 9b2de4028e3..2a20967c22f 100644
--- a/.config/dictionaries/project.dic
+++ b/.config/dictionaries/project.dic
@@ -191,6 +191,7 @@ Prokhorenko
 psql
 Ptarget
 pubkey
+PUBLICKEY
 pubspec
 pytest
 qrcode

From dbd6f57afaf623855f3838375bfff98f20e20419 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 18 Sep 2024 20:55:01 +0700
Subject: [PATCH 59/69] fix(backend): remove obsolete clippy lint cfg

---
 catalyst-gateway/bin/src/db/index/block/mod.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/catalyst-gateway/bin/src/db/index/block/mod.rs b/catalyst-gateway/bin/src/db/index/block/mod.rs
index e2c010c5ed8..dd586ed6f81 100644
--- a/catalyst-gateway/bin/src/db/index/block/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/block/mod.rs
@@ -17,7 +17,6 @@ use super::{queries::FallibleQueryTasks, session::CassandraSession};
 use crate::service::utilities::convert::i16_from_saturating;
 
 /// Add all data needed from the block into the indexes.
-#[allow(clippy::similar_names)]
 pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> {
     // Get the session. This should never fail.
     let Some(session) = CassandraSession::get(block.immutable()) else {

From 8eee5906e232c0594a6d29f96b86511d0a908870 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 18 Sep 2024 21:05:33 +0700
Subject: [PATCH 60/69] docs(backend): Improve field documentation so it's not ambiguous.

---
 catalyst-gateway/bin/src/cardano/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs
index 74002dad48e..08e1c9015b7 100644
--- a/catalyst-gateway/bin/src/cardano/mod.rs
+++ b/catalyst-gateway/bin/src/cardano/mod.rs
@@ -50,9 +50,9 @@ struct SyncParams {
     first_indexed_block: Option,
     /// The last block we successfully synced.
     last_indexed_block: Option,
-    /// The number of blocks we successfully synced.
+    /// The number of blocks we successfully synced overall.
     total_blocks_synced: u64,
-    /// The number of blocks we successfully synced.
+    /// The number of blocks we successfully synced, in the last attempt.
     last_blocks_synced: u64,
     /// The number of retries so far on this sync task.
     retries: u64,

From 1246b78595de0f51b51d401d1d18978af02f0802 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Wed, 18 Sep 2024 21:09:04 +0700
Subject: [PATCH 61/69] docs(backend): Fix comment

---
 catalyst-gateway/bin/src/db/event/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalyst-gateway/bin/src/db/event/mod.rs b/catalyst-gateway/bin/src/db/event/mod.rs
index 3efd6aa6983..3a8fb81f91e 100644
--- a/catalyst-gateway/bin/src/db/event/mod.rs
+++ b/catalyst-gateway/bin/src/db/event/mod.rs
@@ -52,7 +52,7 @@ impl EventDB {
     ///
     /// # Arguments
     ///
-    /// * `deep_query` - `DeepQueryInspection` setting.
+    /// * `enable` - Set the `DeepQueryInspection` setting to this value.
     pub(crate) fn modify_deep_query(enable: bool) {
         DEEP_QUERY_INSPECT.store(enable, Ordering::SeqCst);
     }

From 3f069b3c6d0c7a57a3575a0a13121d1838a17632 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Thu, 19 Sep 2024 11:16:32 +0700
Subject: [PATCH 62/69] docs(backend): Improve comment

---
 catalyst-gateway/bin/src/db/event/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalyst-gateway/bin/src/db/event/mod.rs b/catalyst-gateway/bin/src/db/event/mod.rs
index 3a8fb81f91e..39e5ed3b869 100644
--- a/catalyst-gateway/bin/src/db/event/mod.rs
+++ b/catalyst-gateway/bin/src/db/event/mod.rs
@@ -19,7 +19,7 @@ pub(crate) mod legacy;
 pub(crate) mod schema_check;
 
 /// Database version this crate matches.
-/// Must equal the last Migrations Version Number.
+/// Must equal the last Migrations Version Number from `event-db/migrations`.
 pub(crate) const DATABASE_SCHEMA_VERSION: i32 = 9;
 
 /// Postgres Connection Manager DB Pool

From 8be3334400909f261c2af84f41e02fc665b73910 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Thu, 19 Sep 2024 11:27:10 +0700
Subject: [PATCH 63/69] fix(backend): Vote Key index logic, and update comments

---
 .../src/db/index/block/cip36/insert_cip36.rs  |  2 +-
 .../block/cip36/insert_cip36_for_vote_key.rs  | 16 +++-----
 .../index/block/cip36/insert_cip36_invalid.rs |  4 +-
 .../bin/src/db/index/block/cip36/mod.rs       | 37 +++++++------------
 4 files changed, 22 insertions(+), 37 deletions(-)

diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs
index dbbb4061819..d346124998d 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs
@@ -17,7 +17,7 @@ const INSERT_CIP36_REGISTRATION_QUERY: &str = include_str!("./cql/insert_cip36.c
 /// Insert CIP-36 Registration Query Parameters
 #[derive(SerializeRow, Clone)]
 pub(super) struct Params {
-    /// Stake key hash
+    /// Full Stake Address (not hashed, 32 byte ED25519 Public key).
     stake_address: Vec<u8>,
     /// Nonce value after normalization.
     nonce: num_bigint::BigInt,
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
index bf964f01487..67a892d4f86 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
@@ -18,10 +18,10 @@ const INSERT_CIP36_REGISTRATION_FOR_VOTE_KEY_QUERY: &str =
 /// Insert CIP-36 Registration Invalid Query Parameters
 #[derive(SerializeRow, Clone)]
 pub(super) struct Params {
-    /// Stake key hash
-    stake_address: Vec<u8>,
-    /// Stake key hash
+    /// Voting Public Key
     vote_key: Vec<u8>,
+    /// Full Stake Address (not hashed, 32 byte ED25519 Public key).
+    stake_address: Vec<u8>,
     /// Slot Number the cert is in.
     slot_no: num_bigint::BigInt,
     /// Transaction Index.
@@ -33,20 +33,14 @@ impl Params {
     /// Create a new Insert Query.
     pub fn new(
-        vote_key: Option<&VotingPubKey>, slot_no: u64, txn: i16, cip36: &Cip36, valid: bool,
+        vote_key: &VotingPubKey, slot_no: u64, txn: i16, cip36: &Cip36, valid: bool,
     ) -> Self {
-        let vote_key = if let Some(vote_key) = vote_key {
-            vote_key.voting_pk.to_bytes().to_vec()
-        } else {
-            Vec::new()
-        };
-
         Params {
+            vote_key: vote_key.voting_pk.to_bytes().to_vec(),
             stake_address: cip36
                 .stake_pk
                 .map(|s| s.to_bytes().to_vec())
                 .unwrap_or_default(),
-            vote_key,
             slot_no: slot_no.into(),
             txn,
             valid,
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
index 35d24bdf3b1..0ee5a4e5b19 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
@@ -18,13 +18,13 @@ const INSERT_CIP36_REGISTRATION_INVALID_QUERY: &str =
 /// Insert CIP-36 Registration Invalid Query Parameters
 #[derive(SerializeRow, Clone)]
 pub(super) struct Params {
-    /// Stake key hash
+    /// Full Stake Address (not hashed, 32 byte ED25519 Public key).
     stake_address: Vec<u8>,
     /// Slot Number the cert is in.
     slot_no: num_bigint::BigInt,
     /// Transaction Index.
     txn: i16,
-    /// Stake key hash
+    /// Voting Public Key
     vote_key: Vec<u8>,
     /// Full Payment Address (not hashed, 32 byte ED25519 Public key).
     payment_address: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
index a713ba756b9..aa7efe29b8a 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
@@ -24,7 +24,7 @@ pub(crate) struct Cip36InsertQuery {
     /// Stake Registration Data captured during indexing.
     invalid: Vec<insert_cip36_invalid::Params>,
     /// Stake Registration Data captured during indexing.
-    for_stake: Vec<insert_cip36_for_vote_key::Params>,
+    for_vote_key: Vec<insert_cip36_for_vote_key::Params>,
 }
 
 impl Cip36InsertQuery {
@@ -33,7 +33,7 @@ impl Cip36InsertQuery {
         Cip36InsertQuery {
             registrations: Vec::new(),
             invalid: Vec::new(),
-            for_stake: Vec::new(),
+            for_vote_key: Vec::new(),
         }
     }
 
@@ -44,13 +44,13 @@ impl Cip36InsertQuery {
         let insert_cip36_batch = insert_cip36::Params::prepare_batch(session, cfg).await;
         let insert_cip36_invalid_batch =
             insert_cip36_invalid::Params::prepare_batch(session, cfg).await;
-        let insert_cip36_for_stake_addr_batch =
+        let insert_cip36_for_vote_key_addr_batch =
             insert_cip36_for_vote_key::Params::prepare_batch(session, cfg).await;
 
         Ok((
             insert_cip36_batch?,
             insert_cip36_invalid_batch?,
-            insert_cip36_for_stake_addr_batch?,
+            insert_cip36_for_vote_key_addr_batch?,
         ))
     }
 
@@ -70,13 +70,10 @@ impl Cip36InsertQuery {
             self.registrations.push(insert_cip36::Params::new(
                 vote_key, slot_no, txn_index, cip36,
             ));
-            self.for_stake.push(insert_cip36_for_vote_key::Params::new(
-                Some(vote_key),
-                slot_no,
-                txn_index,
-                cip36,
-                true,
-            ));
+            self.for_vote_key
+                .push(insert_cip36_for_vote_key::Params::new(
+                    vote_key, slot_no, txn_index, cip36, true,
+                ));
         }
     } else {
         if cip36.voting_keys.is_empty() {
@@ -87,9 +84,6 @@ impl Cip36InsertQuery {
                 cip36,
                 decoded_metadata.report.clone(),
             ));
-            self.for_stake.push(insert_cip36_for_vote_key::Params::new(
-                None, slot_no, txn_index, cip36, false,
-            ));
         }
         for vote_key in &cip36.voting_keys {
             self.invalid.push(insert_cip36_invalid::Params::new(
@@ -99,13 +93,10 @@ impl Cip36InsertQuery {
                 cip36,
                 decoded_metadata.report.clone(),
             ));
-            self.for_stake.push(insert_cip36_for_vote_key::Params::new(
-                Some(vote_key),
-                slot_no,
-                txn_index,
-                cip36,
-                false,
-            ));
+            self.for_vote_key
+                .push(insert_cip36_for_vote_key::Params::new(
+                    vote_key, slot_no, txn_index, cip36, false,
+                ));
         }
     }
 }
@@ -142,13 +133,13 @@ impl Cip36InsertQuery {
             }));
         }
 
-        if !self.for_stake.is_empty() {
+        if !self.for_vote_key.is_empty() {
             let inner_session = session.clone();
             query_handles.push(tokio::spawn(async move {
                 inner_session
                     .execute_batch(
                         PreparedQuery::Cip36RegistrationForStakeAddrInsertQuery,
-                        self.for_stake,
+                        self.for_vote_key,
                     )
                     .await
             }));

From 82676d1ee8dec212fce5705e9112ef7a97a2d4a9 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Thu, 19 Sep 2024 16:12:59 +0700
Subject: [PATCH 64/69] fix(backend): Earthfile needs to be executed from the
 root of the repo to properly pick up secrets

---
 catalyst-gateway/Justfile | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/catalyst-gateway/Justfile b/catalyst-gateway/Justfile
index e742d379102..87c2cc7be7b 100644
--- a/catalyst-gateway/Justfile
+++ b/catalyst-gateway/Justfile
@@ -27,14 +27,14 @@ code-lint:
 
 # Synchronize Rust Configs
 sync-cfg:
-    earthly +sync-cfg
+    cd .. && earthly ./catalyst-gateway+sync-cfg
 
 # Pre Push Checks
 pre-push: sync-cfg code-format code-lint license-check
     # Make sure we can actually build inside Earthly which needs to happen in CI.
-    earthly +check
-    earthly +build
-    earthly +package-cat-gateway
+    cd .. && earthly ./catalyst-gateway+check
+    cd .. && earthly ./catalyst-gateway+build
+    cd ..
&& earthly ./catalyst-gateway+package-cat-gateway # Build Local release build of catalyst gateway build-cat-gateway: code-format code-lint From 3e8c76fd93f1867ff5516a5628854857627f060b Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 19 Sep 2024 16:14:08 +0700 Subject: [PATCH 65/69] fix(backend): make generic saturating value converter and use it instead of type specific ones --- .../bin/src/db/index/block/certs.rs | 4 +- .../bin/src/db/index/block/mod.rs | 4 +- .../bin/src/db/index/block/txo/mod.rs | 4 +- .../bin/src/service/utilities/convert.rs | 114 ++++++------------ 4 files changed, 40 insertions(+), 86 deletions(-) diff --git a/catalyst-gateway/bin/src/db/index/block/certs.rs b/catalyst-gateway/bin/src/db/index/block/certs.rs index 3c7ec9bcca9..7aca9450ec6 100644 --- a/catalyst-gateway/bin/src/db/index/block/certs.rs +++ b/catalyst-gateway/bin/src/db/index/block/certs.rs @@ -12,7 +12,7 @@ use crate::{ queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, session::CassandraSession, }, - service::utilities::convert::u16_from_saturating, + service::utilities::convert::from_saturating, settings::CassandraEnvVars, }; @@ -130,7 +130,7 @@ impl CertInsertQuery { let (key_hash, pubkey, script) = match cred { pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(cred) => { let addr = block - .witness_for_tx(cred, u16_from_saturating(txn)) + .witness_for_tx(cred, from_saturating(txn)) .unwrap_or(default_addr); // Note: it is totally possible for the Registration Certificate to not be // witnessed. diff --git a/catalyst-gateway/bin/src/db/index/block/mod.rs b/catalyst-gateway/bin/src/db/index/block/mod.rs index dd586ed6f81..775b55d502c 100644 --- a/catalyst-gateway/bin/src/db/index/block/mod.rs +++ b/catalyst-gateway/bin/src/db/index/block/mod.rs @@ -14,7 +14,7 @@ use txi::TxiInsertQuery; use txo::TxoInsertQuery; use super::{queries::FallibleQueryTasks, session::CassandraSession}; -use crate::service::utilities::convert::i16_from_saturating; +use crate::service::utilities::convert::from_saturating; /// Add all data needed from the block into the indexes. pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { @@ -34,7 +34,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { // We add all transactions in the block to their respective index data sets. 
     for (txn_index, txs) in block_data.txs().iter().enumerate() {
-        let txn = i16_from_saturating(txn_index);
+        let txn = from_saturating(txn_index);
 
         let txn_hash = txs.hash().to_vec();
 
diff --git a/catalyst-gateway/bin/src/db/index/block/txo/mod.rs b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs
index fc1ea2f306e..9b4029fc3bb 100644
--- a/catalyst-gateway/bin/src/db/index/block/txo/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs
@@ -17,7 +17,7 @@ use crate::{
         queries::{FallibleQueryTasks, PreparedQuery, SizedBatch},
         session::CassandraSession,
     },
-    service::utilities::convert::i16_from_saturating,
+    service::utilities::convert::from_saturating,
     settings::CassandraEnvVars,
 };
 
@@ -150,7 +150,7 @@ impl TxoInsertQuery {
         };
 
         let staked = stake_address != NO_STAKE_ADDRESS;
-        let txo_index = i16_from_saturating(txo_index);
+        let txo_index = from_saturating(txo_index);
 
         if staked {
             let params = insert_txo::Params::new(
diff --git a/catalyst-gateway/bin/src/service/utilities/convert.rs b/catalyst-gateway/bin/src/service/utilities/convert.rs
index f5733f1360e..4bc0d1b369d 100644
--- a/catalyst-gateway/bin/src/service/utilities/convert.rs
+++ b/catalyst-gateway/bin/src/service/utilities/convert.rs
@@ -1,94 +1,48 @@
 //! Simple general purpose utility functions.
 
-/// Convert T to an i16. (saturate if out of range.)
-#[allow(dead_code)] // Its OK if we don't use this general utility function.
-pub(crate) fn i16_from_saturating<T: TryInto<i16>>(value: T) -> i16 {
-    match value.try_into() {
-        Ok(value) => value,
-        Err(_) => i16::MAX,
-    }
-}
-
-/// Convert an `<T>` to `u16`. (saturate if out of range.)
-#[allow(dead_code)] // Its OK if we don't use this general utility function.
-pub(crate) fn u16_from_saturating<
+/// Convert an `<T>` to `<R>`. (saturate if out of range.)
+pub(crate) fn from_saturating<
+    R: Copy + num_traits::identities::Zero + num_traits::Bounded,
     T: Copy
-        + TryInto<u16>
+        + TryInto<R>
         + std::ops::Sub
         + std::cmp::PartialOrd
         + num_traits::identities::Zero,
 >(
     value: T,
-) -> u16 {
-    if value < T::zero() {
-        u16::MIN
-    } else {
-        match value.try_into() {
-            Ok(value) => value,
-            Err(_) => u16::MAX,
-        }
-    }
-}
-
-/// Convert an `<T>` to `usize`. (saturate if out of range.)
-#[allow(dead_code)] // Its OK if we don't use this general utility function.
-pub(crate) fn usize_from_saturating<
-    T: Copy
-        + TryInto<usize>
-        + std::ops::Sub
-        + std::cmp::PartialOrd
-        + num_traits::identities::Zero,
->(
-    value: T,
-) -> usize {
-    if value < T::zero() {
-        usize::MIN
-    } else {
-        match value.try_into() {
-            Ok(value) => value,
-            Err(_) => usize::MAX,
-        }
+) -> R {
+    match value.try_into() {
+        Ok(value) => value,
+        Err(_) => {
+            // If we couldn't convert, it's out of range for the destination type.
+            if value > T::zero() {
+                // If the number is positive, it's out of range in the positive direction.
+                R::max_value()
+            } else {
+                // Otherwise it's out of range in the negative direction.
+                R::min_value()
+            }
+        },
     }
 }
 
-/// Convert an `<T>` to `u32`. (saturate if out of range.)
-#[allow(dead_code)] // Its OK if we don't use this general utility function.
-pub(crate) fn u32_from_saturating<
-    T: Copy
-        + TryInto<u32>
-        + std::ops::Sub
-        + std::cmp::PartialOrd
-        + num_traits::identities::Zero,
->(
-    value: T,
-) -> u32 {
-    if value < T::zero() {
-        u32::MIN
-    } else {
-        match value.try_into() {
-            Ok(converted) => converted,
-            Err(_) => u32::MAX,
-        }
-    }
-}
+#[cfg(test)]
+mod tests {
+    use super::*;
 
-/// Convert an `<T>` to `u64`. (saturate if out of range.)
-#[allow(dead_code)] // Its OK if we don't use this general utility function.
-pub(crate) fn u64_from_saturating<
-    T: Copy
-        + TryInto<u64>
-        + std::ops::Sub
-        + std::cmp::PartialOrd
-        + num_traits::identities::Zero,
->(
-    value: T,
-) -> u64 {
-    if value < T::zero() {
-        u64::MIN
-    } else {
-        match value.try_into() {
-            Ok(converted) => converted,
-            Err(_) => u64::MAX,
-        }
+    #[test]
+    fn from_saturating_tests() {
+        let x: u32 = from_saturating(0_u8);
+        assert!(x == 0);
+        let x: u32 = from_saturating(255_u8);
+        assert!(x == 255);
+        let x: i8 = from_saturating(0_u32);
+        assert!(x == 0);
+        let x: i8 = from_saturating(512_u32);
+        assert!(x == 127);
+        let x: i8 = from_saturating(-512_i32);
+        assert!(x == -128);
+        let x: u16 = from_saturating(-512_i32);
+        assert!(x == 0);
     }
 }

From 552508ada3d0ed50c619a855feb7a00abeead8e1 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Fri, 20 Sep 2024 13:47:39 +0700
Subject: [PATCH 66/69] test(cat-gateway): Add tests for float conversion and
 better docs about the function's limitations.

---
 .../bin/src/service/utilities/convert.rs | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/catalyst-gateway/bin/src/service/utilities/convert.rs b/catalyst-gateway/bin/src/service/utilities/convert.rs
index 4bc0d1b369d..04f5424dc59 100644
--- a/catalyst-gateway/bin/src/service/utilities/convert.rs
+++ b/catalyst-gateway/bin/src/service/utilities/convert.rs
@@ -1,6 +1,8 @@
 //! Simple general purpose utility functions.
 
 /// Convert an `<T>` to `<R>`. (saturate if out of range.)
+/// Note: this can also convert any int to float, or an f32 to an f64.
+/// It can not convert from float to int, or from f64 to f32.
 pub(crate) fn from_saturating<
     R: Copy + num_traits::identities::Zero + num_traits::Bounded,
     T: Copy
@@ -28,9 +30,11 @@ pub(crate) fn from_saturating<
 
 #[cfg(test)]
 mod tests {
+
     use super::*;
 
     #[test]
+    #[allow(clippy::float_cmp)]
     fn from_saturating_tests() {
@@ -44,5 +48,13 @@ mod tests {
         assert!(x == -128);
         let x: u16 = from_saturating(-512_i32);
         assert!(x == 0);
+        let x: f64 = from_saturating(0.0_f32);
+        assert!(x == 0.0);
+        let x: f64 = from_saturating(0_u32);
+        assert!(x == 0.0);
+        let x: f64 = from_saturating(65536_u32);
+        assert!(x == 65536.0_f64);
+        let x: f64 = from_saturating(i32::MIN);
+        assert!(x == -2_147_483_648.0_f64);
     }
 }

From b7d25b44656fdcb4ccd75125c2107a701ed8489a Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Fri, 20 Sep 2024 13:59:14 +0700
Subject: [PATCH 67/69] fix(cat-gateway): Developer lints in release mode, and
 also refer to the correct local release binary

---
 catalyst-gateway/Justfile | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/catalyst-gateway/Justfile b/catalyst-gateway/Justfile
index 87c2cc7be7b..a07d9b0c0e6 100644
--- a/catalyst-gateway/Justfile
+++ b/catalyst-gateway/Justfile
@@ -22,8 +22,8 @@ code-format:
 
 # Lint the rust code
 code-lint:
-    cargo lintfix
-    cargo lint
+    cargo lintfix -r
+    cargo lint -r
 
 # Synchronize Rust Configs
 sync-cfg:
@@ -46,10 +46,10 @@ run-cat-gateway: build-cat-gateway
     CHAIN_FOLLOWER_SYNC_TASKS="16" \
     RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \
     CHAIN_NETWORK="Preprod" \
-    ./catalyst-gateway/target/release/cat-gateway run --log-level debug
+    ./target/release/cat-gateway run --log-level debug
 
 # Run cat-gateway natively on mainnet
 run-cat-gateway-mainnet: build-cat-gateway
     CHAIN_FOLLOWER_SYNC_TASKS="1" \
     RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \
-    ./catalyst-gateway/target/release/cat-gateway run --log-level debug
+    ./target/release/cat-gateway
run --log-level debug

From be0d3fc2fb6ee5e57d621e67a1b6c5bc4b8910f8 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Fri, 20 Sep 2024 14:00:08 +0700
Subject: [PATCH 68/69] fix(cat-gateway): CIP36 index schema error

---
 .../bin/src/db/index/schema/cql/cip36_registration.cql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql
index 17c6886e3b7..f9303e0e6d1 100644
--- a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql
@@ -15,4 +15,4 @@ CREATE TABLE IF NOT EXISTS cip36_registration (
 
     PRIMARY KEY (stake_address, nonce, slot_no, txn)
 )
-WITH CLUSTERING ORDER BY (nonce, DESC, slot_no DESC, txn DESC);
+WITH CLUSTERING ORDER BY (nonce DESC, slot_no DESC, txn DESC);

From 43152e448911d59fa72e0cf2c781158708f2ff51 Mon Sep 17 00:00:00 2001
From: Steven Johnson
Date: Fri, 20 Sep 2024 15:45:20 +0700
Subject: [PATCH 69/69] fix(cat-gateway): CIP36 indexing working, improve bad
 Cassandra query reporting.

---
 catalyst-gateway/bin/Cargo.toml               |  2 +-
 .../bin/src/db/index/block/certs.rs           | 34 ++++++++++++++++++-
 .../db/index/block/cip36/cql/insert_cip36.cql |  4 +--
 .../cip36/cql/insert_cip36_for_vote_key.cql   |  8 ++---
 .../block/cip36/cql/insert_cip36_invalid.cql  |  2 +-
 .../src/db/index/block/cip36/insert_cip36.rs  | 22 +++++++++++-
 .../block/cip36/insert_cip36_for_vote_key.rs  |  2 +-
 .../index/block/cip36/insert_cip36_invalid.rs | 24 ++++++++++++-
 .../bin/src/db/index/block/cip36/mod.rs       |  3 +-
 .../bin/src/db/index/block/txi.rs             |  2 +-
 .../bin/src/db/index/block/txo/insert_txo.rs  |  2 +-
 .../db/index/block/txo/insert_txo_asset.rs    |  2 +-
 .../db/index/block/txo/insert_unstaked_txo.rs |  2 +-
 .../block/txo/insert_unstaked_txo_asset.rs    |  2 +-
 .../bin/src/db/index/queries/mod.rs           | 16 ++++++---
 .../queries/staked_ada/update_txo_spent.rs    |  2 +-
 .../cql/cip36_registration_for_vote_key.cql   |  2 +-
 .../schema/cql/cip36_registration_invalid.cql |  2 +-
 catalyst-gateway/bin/src/db/index/session.rs  |  3 +-
 19 files changed, 109 insertions(+), 27 deletions(-)

diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml
index 8d614a96b80..aed66bbb86f 100644
--- a/catalyst-gateway/bin/Cargo.toml
+++ b/catalyst-gateway/bin/Cargo.toml
@@ -58,7 +58,7 @@ build-info = "0.0.38"
 ed25519-dalek = "2.1.1"
 scylla = { version = "0.14.0", features = ["cloud", "full-serialization"] }
 strum = { version = "0.26.3", features = ["derive"] }
-# strum_macros = "0.26.4"
+strum_macros = "0.26.4"
 openssl = { version = "0.10.66", features = ["vendored"] }
 num-bigint = "0.4.6"
 futures = "0.3.30"
diff --git a/catalyst-gateway/bin/src/db/index/block/certs.rs b/catalyst-gateway/bin/src/db/index/block/certs.rs
index 7aca9450ec6..86240df12bb 100644
--- a/catalyst-gateway/bin/src/db/index/block/certs.rs
+++ b/catalyst-gateway/bin/src/db/index/block/certs.rs
@@ -1,6 +1,6 @@
 //! Index certs found in a transaction.
-use std::sync::Arc;
+use std::{fmt::Debug, sync::Arc};
 
 use cardano_chain_follower::MultiEraBlock;
 use pallas::ledger::primitives::{alonzo, conway};
@@ -37,6 +37,38 @@ pub(crate) struct StakeRegistrationInsertQuery {
     pool_delegation: MaybeUnset<Vec<u8>>,
 }
 
+impl Debug for StakeRegistrationInsertQuery {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
+        let stake_address = match self.stake_address {
+            MaybeUnset::Unset => "UNSET",
+            MaybeUnset::Set(ref v) => &hex::encode(v),
+        };
+        let register = match self.register {
+            MaybeUnset::Unset => "UNSET",
+            MaybeUnset::Set(v) => &format!("{v:?}"),
+        };
+        let deregister = match self.deregister {
+            MaybeUnset::Unset => "UNSET",
+            MaybeUnset::Set(v) => &format!("{v:?}"),
+        };
+        let pool_delegation = match self.pool_delegation {
+            MaybeUnset::Unset => "UNSET",
+            MaybeUnset::Set(ref v) => &hex::encode(v),
+        };
+
+        f.debug_struct("StakeRegistrationInsertQuery")
+            .field("stake_hash", &hex::encode(&self.stake_hash))
+            .field("slot_no", &self.slot_no)
+            .field("txn", &self.txn)
+            .field("stake_address", &stake_address)
+            .field("script", &self.script)
+            .field("register", &register)
+            .field("deregister", &deregister)
+            .field("pool_delegation", &pool_delegation)
+            .finish()
+    }
+}
+
 /// TXI by Txn hash Index
 const INSERT_STAKE_REGISTRATION_QUERY: &str = include_str!("./cql/insert_stake_registration.cql");
 
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql
index 220954045c8..1ecacb34937 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql
@@ -8,7 +8,7 @@ INSERT INTO cip36_registration (
     payment_address,
     is_payable,
     raw_nonce,
-    cip36,
+    cip36
 ) VALUES (
     :stake_address,
     :nonce,
@@ -18,5 +18,5 @@ INSERT INTO cip36_registration (
     :payment_address,
     :is_payable,
     :raw_nonce,
-    :cip36,
+    :cip36
 );
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql
index a09d36d3f55..b6d257f9c84 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql
@@ -1,14 +1,14 @@
--- Index CIP-36 Registration (Valid)
-INSERT INTO cip36_registration_for_stake_addr (
+-- Index CIP-36 Registration (For each Vote Key)
+INSERT INTO cip36_registration_for_vote_key (
     vote_key,
     stake_address,
     slot_no,
     txn,
-    valid,
+    valid
 ) VALUES (
     :vote_key,
     :stake_address,
     :slot_no,
     :txn,
-    :valid,
+    :valid
 );
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql
index 06162661fd0..fac9b51d1ac 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql
@@ -10,7 +10,7 @@ INSERT INTO cip36_registration_invalid (
     nonce,
     cip36,
     signed,
-    error_report,
+    error_report
 ) VALUES (
     :stake_address,
     :slot_no,
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs
index d346124998d..771cb9b5d2a 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs
@@ 
-1,6 +1,6 @@
 //! Insert CIP36 Registration Query
 
-use std::sync::Arc;
+use std::{fmt::Debug, sync::Arc};
 
 use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey};
 use scylla::{frame::value::MaybeUnset, SerializeRow, Session};
@@ -37,6 +37,26 @@ pub(super) struct Params {
     cip36: bool,
 }
 
+impl Debug for Params {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let payment_address = match self.payment_address {
+            MaybeUnset::Unset => "UNSET",
+            MaybeUnset::Set(ref v) => &hex::encode(v),
+        };
+        f.debug_struct("Params")
+            .field("stake_address", &self.stake_address)
+            .field("nonce", &self.nonce)
+            .field("slot_no", &self.slot_no)
+            .field("txn", &self.txn)
+            .field("vote_key", &self.vote_key)
+            .field("payment_address", &payment_address)
+            .field("is_payable", &self.is_payable)
+            .field("raw_nonce", &self.raw_nonce)
+            .field("cip36", &self.cip36)
+            .finish()
+    }
+}
+
 impl Params {
     /// Create a new Insert Query.
     pub fn new(vote_key: &VotingPubKey, slot_no: u64, txn: i16, cip36: &Cip36) -> Self {
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
index 67a892d4f86..b7f0d48d83f 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs
@@ -16,7 +16,7 @@ const INSERT_CIP36_REGISTRATION_FOR_VOTE_KEY_QUERY: &str =
     include_str!("./cql/insert_cip36_for_vote_key.cql");
 
 /// Insert CIP-36 Registration Invalid Query Parameters
-#[derive(SerializeRow, Clone)]
+#[derive(SerializeRow, Debug)]
 pub(super) struct Params {
     /// Voting Public Key
     vote_key: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
index 0ee5a4e5b19..0ab3fd81225 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs
@@ -1,6 +1,6 @@
 //! Insert CIP36 Registration Query (Invalid Records)
 
-use std::sync::Arc;
+use std::{fmt::Debug, sync::Arc};
 
 use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey};
 use scylla::{frame::value::MaybeUnset, SerializeRow, Session};
@@ -42,6 +42,28 @@ pub(super) struct Params {
     error_report: Vec<String>,
 }
 
+impl Debug for Params {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let cip36 = match self.cip36 {
+            MaybeUnset::Unset => "UNSET",
+            MaybeUnset::Set(v) => &format!("{v:?}"),
+        };
+        f.debug_struct("Params")
+            .field("stake_address", &self.stake_address)
+            .field("slot_no", &self.slot_no)
+            .field("txn", &self.txn)
+            .field("vote_key", &self.vote_key)
+            .field("payment_address", &self.payment_address)
+            .field("is_payable", &self.is_payable)
+            .field("raw_nonce", &self.raw_nonce)
+            .field("nonce", &self.nonce)
+            .field("cip36", &cip36)
+            .field("signed", &self.signed)
+            .field("error_report", &self.error_report)
+            .finish()
+    }
+}
+
 impl Params {
     /// Create a new Insert Query.
     pub fn new(
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
index aa7efe29b8a..13d4c70b46f 100644
--- a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs
@@ -75,7 +75,8 @@ impl Cip36InsertQuery {
                 vote_key, slot_no, txn_index, cip36, true,
             ));
         }
-    } else {
+    } else if cip36.stake_pk.is_some() {
+        // We can't index an error if there is no stake public key.
         if cip36.voting_keys.is_empty() {
diff --git a/catalyst-gateway/bin/src/db/index/block/txi.rs b/catalyst-gateway/bin/src/db/index/block/txi.rs
index d3a37b3055f..9dd4e0c8f9b 100644
--- a/catalyst-gateway/bin/src/db/index/block/txi.rs
+++ b/catalyst-gateway/bin/src/db/index/block/txi.rs
@@ -14,7 +14,7 @@ use crate::{
 };
 
 /// Insert TXI Query and Parameters
-#[derive(SerializeRow)]
+#[derive(SerializeRow, Debug)]
 pub(crate) struct TxiInsertParams {
     /// Spent Transactions Hash
     txn_hash: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs
index 7d9c0b67216..94837b50936 100644
--- a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs
+++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs
@@ -17,7 +17,7 @@ const INSERT_TXO_QUERY: &str = include_str!("./cql/insert_txo.cql");
 
 /// Insert TXO Query Parameters
 /// (Superset of data to support both Staked and Unstaked TXO records.)
-#[derive(SerializeRow)]
+#[derive(SerializeRow, Debug)]
 pub(super) struct Params {
     /// Stake Address - Binary 28 bytes. 0 bytes = not staked.
     stake_address: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs
index 9fa349237b4..a42ea5b61ec 100644
--- a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs
+++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs
@@ -15,7 +15,7 @@ const INSERT_TXO_ASSET_QUERY: &str = include_str!("./cql/insert_txo_asset.cql");
 
 /// Insert TXO Asset Query Parameters
 /// (Superset of data to support both Staked and Unstaked TXO records.)
-#[derive(SerializeRow)]
+#[derive(SerializeRow, Debug)]
 pub(super) struct Params {
     /// Stake Address - Binary 28 bytes. 0 bytes = not staked.
     stake_address: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs
index e27c7651c23..24957e92b30 100644
--- a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs
+++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs
@@ -14,7 +14,7 @@ const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./cql/insert_unstaked_txo.
 
 /// Insert TXO Unstaked Query Parameters
 /// (Superset of data to support both Staked and Unstaked TXO records.)
-#[derive(SerializeRow)]
+#[derive(SerializeRow, Debug)]
 pub(super) struct Params {
     /// Transactions hash.
     txn_hash: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs
index 8ac33aa129d..78605f92ae6 100644
--- a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs
+++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs
@@ -15,7 +15,7 @@ const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str = include_str!("./cql/insert_unstake
 
 /// Insert TXO Asset Query Parameters
 /// (Superset of data to support both Staked and Unstaked TXO records.)
-#[derive(SerializeRow)]
+#[derive(SerializeRow, Debug)]
 pub(super) struct Params {
     /// Transactions hash.
     txn_hash: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/queries/mod.rs b/catalyst-gateway/bin/src/db/index/queries/mod.rs
index 89129f8d84c..505918b58fb 100644
--- a/catalyst-gateway/bin/src/db/index/queries/mod.rs
+++ b/catalyst-gateway/bin/src/db/index/queries/mod.rs
@@ -4,9 +4,9 @@
 
 pub(crate) mod staked_ada;
 
-use std::sync::Arc;
+use std::{fmt::Debug, sync::Arc};
 
-use anyhow::bail;
+use anyhow::{bail, Context};
 use crossbeam_skiplist::SkipMap;
 use scylla::{
     batch::Batch, prepared_statement::PreparedStatement, serialize::row::SerializeRow,
@@ -26,7 +26,8 @@ use crate::settings::{CassandraEnvVars, CASSANDRA_MIN_BATCH_SIZE};
 pub(crate) type SizedBatch = SkipMap>;
 
 /// All Prepared Queries that we know about.
-#[allow(clippy::enum_variant_names, dead_code)]
+#[derive(strum_macros::Display)]
+#[allow(clippy::enum_variant_names)]
 pub(crate) enum PreparedQuery {
     /// TXO Insert query.
     TxoAdaInsertQuery,
@@ -206,7 +207,7 @@ impl PreparedQueries {
     ///
     /// This will divide the batch into optimal sized chunks and execute them until all
     /// values have been executed or the first error is encountered.
-    pub(crate) async fn execute_batch<T: SerializeRow>(
+    pub(crate) async fn execute_batch<T: SerializeRow + Debug>(
         &self, session: Arc<Session>, cfg: Arc<CassandraEnvVars>, query: PreparedQuery, values: Vec<T>,
     ) -> FallibleQueryResults {
@@ -238,7 +239,12 @@ impl PreparedQueries {
             bail!("No batch query found for size {}", chunk_size);
         };
         let batch_query_statements = batch_query.value().clone();
-        results.push(session.batch(&batch_query_statements, chunk).await?);
+        results.push(
+            session
+                .batch(&batch_query_statements, chunk)
+                .await
+                .context(format!("query={query}, chunk={chunk:?}"))?,
+        );
     }
 
     Ok(results)
diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs
index 21658d74e29..0fe0a60bcfc 100644
--- a/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs
+++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs
@@ -17,7 +17,7 @@ use crate::{
 const UPDATE_TXO_SPENT_QUERY: &str = include_str!("../cql/update_txo_spent.cql");
 
 /// Update TXO spent query params.
-#[derive(SerializeRow)]
+#[derive(SerializeRow, Debug)]
 pub(crate) struct UpdateTxoSpentQueryParams {
     /// TXO stake address.
     pub stake_address: Vec<u8>,
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
index 3ab03c8f1ef..c3ba5f6dfce 100644
--- a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
@@ -1,7 +1,7 @@
 -- Index of CIP-36 registrations searchable by Stake Address.
 -- Full registration data needs to be queried from the main cip36 registration tables.
 -- Includes both Valid and Invalid registrations.
-CREATE TABLE IF NOT EXISTS cip36_registration_for_stake_addr (
+CREATE TABLE IF NOT EXISTS cip36_registration_for_vote_key (
     -- Primary Key Data
     vote_key blob,         -- 32 Bytes of Vote Key.
     stake_address blob,    -- 32 Bytes of Stake Address.
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
index e72eaf304ea..626b9d90ac9 100644
--- a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
@@ -15,6 +15,6 @@ CREATE TABLE IF NOT EXISTS cip36_registration_invalid (
     signed boolean,        -- Signature validates.
     error_report list<text>,  -- List of serialization errors in the registration.
 
-    PRIMARY KEY (vote_key, slot_no, txn)
+    PRIMARY KEY (stake_address, slot_no, txn)
 )
 WITH CLUSTERING ORDER BY (slot_no DESC, txn DESC);
diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs
index 884f5a7a7ac..300bc9d92a7 100644
--- a/catalyst-gateway/bin/src/db/index/session.rs
+++ b/catalyst-gateway/bin/src/db/index/session.rs
@@ -1,6 +1,7 @@
 //! Session creation and storage
 
 use std::{
+    fmt::Debug,
     path::PathBuf,
     sync::{Arc, OnceLock},
     time::Duration,
@@ -122,7 +123,7 @@ impl CassandraSession {
     ///
     /// This will divide the batch into optimal sized chunks and execute them until all
     /// values have been executed or the first error is encountered.
-    pub(crate) async fn execute_batch<T: SerializeRow>(
+    pub(crate) async fn execute_batch<T: SerializeRow + Debug>(
         &self, query: PreparedQuery, values: Vec<T>,
     ) -> FallibleQueryResults {
         let session = self.session.clone();