From 80f5a4920a28e9a6b2885a16459678e36f46cdbe Mon Sep 17 00:00:00 2001 From: Steven Johnson Date: Thu, 19 Sep 2024 14:29:52 +0700 Subject: [PATCH 1/2] feat(cat-gateway): Chain Sync V2 (#781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(docs): Fix up docs issues * fix(backend): Huge refactor to prep for scylladb config management * fix(backend): Clean up logging a little, and add build info logs as required for production. * Refactor and setup cassandra config/session * feat(backend): Index DB schema setup seems to work * WIP * fix(rust): Format fixes * fix(rust): Build fixes * fix(rust): Adjust index DB so we can index without querying, and can optimize on first detected spend. * fix(rust): add more docs * fix(rust): basic new follower integration * fix(rust): wip * fix(ci): Bump rust compiler version to match CI * ci(backend): Bump rust version to match CI * fix(backend): Fix code format and lints * feat(backend): simple new block indexer just to test the logic works * feat(gateway): Simple indexing with cassandra seems to work * refactor(backend): Remove lazy and once_cell in favor of new standard library replacements * fix(backend): WIP indexing for stake addresses and unstaked ada * fix(backend): indexing WIP * fix(backend): Add support for log control with env vars, default to mainnet, adjust `justfile` to properly select preprod and also refresh git dependencies. * feat(backend): Make local test scylla db run with 4 nodes, not 1 * fix(backend-lib): Add stop for cassandra db cluster * refactor(backend-lib): Remove c509-certificate because its moved to catalyst-libs * fix(backend): Remove dependencies from Workspace, and move into project * fix(backend): Use temporary cat-ci branch for rust builders * fix(backend): Remove obsolete common crates subdirectory * fix(backend): Don't use pre-packaged mithril snapshots in integration tests * fix(backend): Fix code so it builds with latest chain follower code. Also eliminates redundant logic now incorporated into chain follower. * fix(backend): Fix broken reference to catalyst libs * ci(ci): Bump all earthfiles to latest WIP cat-ci branch * fix(frontend-pkg): Ignore .dart_tool directory in frontend files checking markdown * fix(ci): Fix spelling * fix(spelling): Add more project words and properly sort list * fix(backend): Sync rust configs and add target to make it easier in future * fix(backend): Enable all features of Scylla for now. * fix(frontend-pkg): Fix markdown table having too many columns * ci(spelling): Fix spelling issues * fix(docs): Bump docs to latest WIP cat-ci version * feat(gateway): Add low resource scylla db instance for local testing * feat(gateway): Add and update developer convenience functions for backend * fix(backend): Fix code format * fix(backend): Fix spelling issues in CQL files * fix(spelling): Remove duplicates from the project words dictionary * fix(backend): Get the backend building properly with earthly. 
* feat(backend): remove obsoleted postgres logic for chain indexing * revert(event-db): Revert extension changes to sql files after fixing sqlfluff version * fix(frontend): Regenerate the dart api interface file, and add doing that to the pre-push just command * fix(backend): temporarily disable API tests * fix(backend): Also temporarily stop workflow consuming test reports that are disabled * fix(ci): Try and stop coveralls running for api-tests * ci(general): Replace temp CI branch with tagged release * feat: Add Handler for Permissionless Auth (#825) * docs(cips): Add Formal Defintion of auth token * fix(docs): Fix comments in cddl file * fix(docs): sig size * fix(docs): Rename CDDL for the auth token * docs(docs): Add auth-header documentation * docs(docs): Fix markdown line length error * docs(general): Fix spelling * fix(backend-lib): Bump to catalyst-libs tagged version * fix(backend): stub out obsolete code (to be removed in follow up PR). * fix(backend-lib): code format * fix(backend): remove unused crate dependencies * feat: auth token (#723) * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * feat(auth token encode and decode): permissionless auth * iron out tests * iron out tests * refactor(auth token encode and decode): ed25519 Signature cbor fields Sig over the preceding two fields - sig(cbor(kid), cbor(ulid)) * refactor(auth token encode and decode): ed25519 Signature cbor fields Sig over the preceding two fields - sig(cbor(kid), cbor(ulid)) * feat(cat security scheme): open api * feat(cat security scheme): open api * feat(mock cert state): given kid from bearer return pub key * feat(auth token): cache TTL * feat(auth token): cache TTL * feat(auth token): cache TT * ci(spell check): fix * ci(spell check): fix * ci(spell check): fix * refactor(clippy): housekeeping tidy * refactor(clippy): housekeeping tidy * refactor(clippy): housekeeping tidy * refactor(clippy): housekeeping tidy * fix(backend): Re-enable dependent crates used by this code * fix(backend): clippy lints * fix(backend): spelling --------- Co-authored-by: Steven Johnson Co-authored-by: Steven Johnson * feat: Update GET staked_ada endpoint to fetch from ScyllaDB (#728) * feat: get staked ada from scylladb * chore: revert justfile changes * chore: filter TXOs in rust instead of filtering in ScyllaDB query * fix(backend): spelling * fix(backend): Eliminate lint errors from Derived function * fix(backend): code format * fix(backend): Udate autogenerated dart code * chore(cat-voices): fix tests --------- Co-authored-by: Steven Johnson Co-authored-by: Steven Johnson Co-authored-by: Dominik Toton * feat: DB Indexing for CIP-36 registrations (#788) * feat: add schema for cip-36 registration tables * feat: index cip-36 by stake address * feat: index cip-36 registrations by vote key * fix: use TxiInserParams::new when adding txi data * fix: remove unused cfg attributes * fix: refactor Cip36RegistrationInsertQuery::new * fix(backend): Refactor queries and add multiple tables for cip36 registration indexes * fix(backend): Cip36 Primary key is stake key. 
Stake Key N->1 Vote Key * fix(backend): code format --------- Co-authored-by: Steven Johnson Co-authored-by: Steven Johnson * docs(general): Cleanup project dictionary * docs(spelling): Fix spelling * fix(backend): remove obsolete clippy lint cfg * docs(backend): Improve field documentation so its not ambiguous. * docs(backend): Fix comment * docs(backend): Improve comment * fix(backend): Vote Key index logic, and update comments --------- Co-authored-by: cong-or <60357579+cong-or@users.noreply.github.com> Co-authored-by: Felipe Rosa Co-authored-by: Dominik Toton Co-authored-by: JoaquĆ­n Rosales --- .config/dictionaries/project.dic | 38 +- .github/workflows/generate-allure-report.yml | 118 +-- .gitignore | 3 + .markdownlint-cli2.jsonc | 3 +- .markdownlint.jsonc | 4 +- .secret.template | 1 + .vscode/extensions.json | 5 +- Earthfile | 10 +- catalyst-gateway/.cargo/config.toml | 2 +- catalyst-gateway/.config/nextest.toml | 2 +- catalyst-gateway/Cargo.toml | 40 +- catalyst-gateway/Earthfile | 24 +- catalyst-gateway/Justfile | 55 ++ catalyst-gateway/bin/Cargo.toml | 112 ++- catalyst-gateway/bin/build.rs | 4 + catalyst-gateway/bin/src/build_info.rs | 111 +++ .../cip36.cddl | 0 .../cip36_registration.cddl | 0 .../cip36_witness.cddl | 0 .../mod.rs.obsolete} | 17 +- catalyst-gateway/bin/src/cardano/mod.rs | 499 ++++++++--- catalyst-gateway/bin/src/cardano/util.rs | 31 +- catalyst-gateway/bin/src/cli.rs | 48 +- .../chain_state/insert_update_state.sql | 0 .../cardano.obsolete}/chain_state/mod.rs | 35 +- .../select_slot_info_by_datetime.sql.hbs | 0 .../chain_state/select_update_state.sql | 0 .../cip36_registration/mod.rs | 47 +- .../select_cip36_registration.sql | 0 .../event/cardano.obsolete}/config/mod.rs | 7 +- .../config/select_config.sql | 0 .../event/cardano.obsolete/mod.rs.obsolete} | 0 .../event/cardano.obsolete}/utxo/mod.rs | 53 +- .../utxo/select_total_utxo_amount.sql | 0 .../bin/src/{event_db => db/event}/error.rs | 0 .../src/{event_db => db/event}/legacy/mod.rs | 0 .../event}/legacy/queries/event/ballot.rs | 71 +- .../event}/legacy/queries/event/mod.rs | 12 +- .../event}/legacy/queries/event/objective.rs | 19 +- .../event}/legacy/queries/event/proposal.rs | 24 +- .../event}/legacy/queries/event/review.rs | 40 +- .../event}/legacy/queries/mod.rs | 0 .../event}/legacy/queries/registration.rs | 38 +- .../event}/legacy/queries/search.rs | 53 +- .../event}/legacy/queries/vit_ss/fund.rs | 14 +- .../event}/legacy/queries/vit_ss/mod.rs | 0 .../event}/legacy/types/ballot.rs | 2 +- .../event}/legacy/types/event.rs | 0 .../event}/legacy/types/mod.rs | 0 .../event}/legacy/types/objective.rs | 2 +- .../event}/legacy/types/proposal.rs | 0 .../event}/legacy/types/registration.rs | 0 .../event}/legacy/types/review.rs | 0 .../event}/legacy/types/search.rs | 0 .../event}/legacy/types/vit_ss/challenge.rs | 0 .../event}/legacy/types/vit_ss/fund.rs | 0 .../event}/legacy/types/vit_ss/goal.rs | 0 .../event}/legacy/types/vit_ss/group.rs | 0 .../event}/legacy/types/vit_ss/mod.rs | 0 .../event}/legacy/types/vit_ss/vote_plan.rs | 0 .../event}/legacy/types/voting_status.rs | 0 catalyst-gateway/bin/src/db/event/mod.rs | 237 ++++++ .../event}/schema_check/mod.rs | 6 +- .../schema_check/select_max_version.sql | 0 .../bin/src/db/index/block/certs.rs | 259 ++++++ .../db/index/block/cip36/cql/insert_cip36.cql | 22 + .../cip36/cql/insert_cip36_for_vote_key.cql | 14 + .../block/cip36/cql/insert_cip36_invalid.cql | 26 + .../src/db/index/block/cip36/insert_cip36.rs | 83 ++ .../block/cip36/insert_cip36_for_vote_key.rs | 70 ++ 
.../index/block/cip36/insert_cip36_invalid.rs | 98 +++ .../bin/src/db/index/block/cip36/mod.rs | 150 ++++ .../block/cql/insert_stake_registration.cql | 20 + .../bin/src/db/index/block/cql/insert_txi.cql | 10 + .../bin/src/db/index/block/mod.rs | 94 +++ .../bin/src/db/index/block/txi.rs | 104 +++ .../src/db/index/block/txo/cql/insert_txo.cql | 18 + .../index/block/txo/cql/insert_txo_asset.cql | 19 + .../block/txo/cql/insert_unstaked_txo.cql | 16 + .../txo/cql/insert_unstaked_txo_asset.cql | 18 + .../bin/src/db/index/block/txo/insert_txo.rs | 75 ++ .../db/index/block/txo/insert_txo_asset.rs | 77 ++ .../db/index/block/txo/insert_unstaked_txo.rs | 68 ++ .../block/txo/insert_unstaked_txo_asset.rs | 77 ++ .../bin/src/db/index/block/txo/mod.rs | 264 ++++++ catalyst-gateway/bin/src/db/index/mod.rs | 6 + .../queries/cql/get_txi_by_txn_hashes.cql | 6 + .../queries/cql/get_txo_by_stake_address.cql | 10 + .../db/index/queries/cql/update_txo_spent.cql | 6 + .../bin/src/db/index/queries/mod.rs | 246 ++++++ .../queries/staked_ada/get_txi_by_txn_hash.rs | 83 ++ .../staked_ada/get_txo_by_stake_address.rs | 94 +++ .../src/db/index/queries/staked_ada/mod.rs | 4 + .../queries/staked_ada/update_txo_spent.rs | 69 ++ .../index/schema/cql/cip36_registration.cql | 18 + .../cql/cip36_registration_for_vote_key.cql | 14 + .../schema/cql/cip36_registration_invalid.cql | 20 + .../bin/src/db/index/schema/cql/namespace.cql | 4 + .../index/schema/cql/stake_registration.cql | 20 + .../schema/cql/txi_by_txn_hash_table.cql | 11 + .../schema/cql/txo_assets_by_stake_table.cql | 16 + .../index/schema/cql/txo_by_stake_table.cql | 23 + .../cql/unstaked_txo_assets_by_txn_hash.cql | 17 + .../schema/cql/unstaked_txo_by_txn_hash.cql | 22 + .../bin/src/db/index/schema/mod.rs | 125 +++ catalyst-gateway/bin/src/db/index/session.rs | 285 +++++++ catalyst-gateway/bin/src/db/mod.rs | 4 + catalyst-gateway/bin/src/event_db/mod.rs | 306 ------- catalyst-gateway/bin/src/logger.rs | 71 +- catalyst-gateway/bin/src/main.rs | 5 +- .../bin/src/service/api/auth/endpoint.rs | 128 +++ .../bin/src/service/api/auth/mod.rs | 4 + .../bin/src/service/api/auth/token.rs | 145 ++++ .../cardano/date_time_to_slot_number_get.rs | 112 ++- .../bin/src/service/api/cardano/mod.rs | 36 +- .../service/api/cardano/registration_get.rs | 39 +- .../src/service/api/cardano/staked_ada_get.rs | 225 ++++- .../src/service/api/cardano/sync_state_get.rs | 25 +- .../bin/src/service/api/cardano/types.rs | 74 ++ .../src/service/api/health/inspection_get.rs | 24 +- .../bin/src/service/api/health/live_get.rs | 4 +- .../bin/src/service/api/health/mod.rs | 14 +- .../bin/src/service/api/health/ready_get.rs | 12 +- .../service/api/legacy/registration/mod.rs | 24 +- .../bin/src/service/api/legacy/v0/mod.rs | 15 +- .../src/service/api/legacy/v0/plans_get.rs | 11 +- .../api/legacy/v1/account_votes_get.rs | 17 +- .../bin/src/service/api/legacy/v1/mod.rs | 26 +- catalyst-gateway/bin/src/service/api/mod.rs | 24 +- .../service/common/objects/cardano/network.rs | 4 - .../objects/cardano/registration_info.rs | 8 +- .../common/objects/cardano/slot_info.rs | 6 +- .../common/objects/cardano/stake_address.rs | 2 +- .../common/objects/cardano/stake_info.rs | 23 +- .../common/objects/cardano/sync_state.rs | 6 +- .../service/common/objects/legacy/event_id.rs | 4 +- .../common/objects/legacy/voter_group_id.rs | 4 +- .../common/objects/legacy/voter_info.rs | 4 +- .../objects/legacy/voter_registration.rs | 4 +- .../service/common/objects/server_error.rs | 4 +- .../service/docs/stoplight_elements/mod.rs | 2 
+- catalyst-gateway/bin/src/service/mod.rs | 10 +- .../bin/src/service/poem_service.rs | 37 +- .../bin/src/service/utilities/convert.rs | 94 +++ .../utilities/middleware/schema_validation.rs | 16 +- .../utilities/middleware/tracing_mw.rs | 141 ++-- .../bin/src/service/utilities/mod.rs | 9 +- .../bin/src/service/utilities/net.rs | 42 + catalyst-gateway/bin/src/settings.rs | 780 +++++++++++++++--- catalyst-gateway/bin/src/state/mod.rs | 66 -- catalyst-gateway/clippy.toml | 1 + catalyst-gateway/crates/README.md | 4 - catalyst-gateway/deny.toml | 19 +- catalyst-gateway/event-db/Earthfile | 2 +- catalyst-gateway/rust-toolchain.toml | 6 +- catalyst-gateway/tests/Earthfile | 2 +- catalyst-gateway/tests/api_tests/Earthfile | 8 +- .../src/catalyst_data_gateway_repository.dart | 2 +- ...catalyst_data_gateway_repository_test.dart | 23 +- .../cat_gateway_api.enums.swagger.dart | 2 - .../cat_gateway_api.models.swagger.dart | 56 ++ .../cat_gateway_api.models.swagger.g.dart | 13 + .../cat_gateway_api.swagger.chopper.dart | 4 +- .../cat_gateway_api.swagger.dart | 7 +- catalyst_voices_packages/README.md | 18 +- docs/Earthfile | 1 - .../permissionless-auth/auth-header.md | 106 +++ .../permissionless-auth/auth-token.cddl | 34 + justfile | 30 + utilities/local-cluster/Readme.md | 18 +- utilities/local-cluster/Vagrantfile | 2 +- utilities/local-cluster/justfile | 16 +- utilities/local-scylla/Readme.md | 165 ++++ utilities/local-scylla/docker-compose.yml | 56 ++ utilities/local-scylla/justfile | 58 ++ utilities/local-scylla/node1-scylla.yaml | 627 ++++++++++++++ utilities/local-scylla/node2-scylla.yaml | 624 ++++++++++++++ utilities/local-scylla/node3-scylla.yaml | 624 ++++++++++++++ utilities/local-scylla/node4-scylla.yaml | 624 ++++++++++++++ .../src/common/components/TxBuilder.tsx | 10 +- .../src/common/helpers/buildUnsignedTx.ts | 4 +- 181 files changed, 8716 insertions(+), 1529 deletions(-) create mode 100644 .secret.template create mode 100644 catalyst-gateway/Justfile create mode 100644 catalyst-gateway/bin/build.rs create mode 100644 catalyst-gateway/bin/src/build_info.rs rename catalyst-gateway/bin/src/cardano/{cip36_registration => cip36_registration_obsolete}/cip36.cddl (100%) rename catalyst-gateway/bin/src/cardano/{cip36_registration => cip36_registration_obsolete}/cip36_registration.cddl (100%) rename catalyst-gateway/bin/src/cardano/{cip36_registration => cip36_registration_obsolete}/cip36_witness.cddl (100%) rename catalyst-gateway/bin/src/cardano/{cip36_registration/mod.rs => cip36_registration_obsolete/mod.rs.obsolete} (97%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/chain_state/insert_update_state.sql (100%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/chain_state/mod.rs (90%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/chain_state/select_slot_info_by_datetime.sql.hbs (100%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/chain_state/select_update_state.sql (100%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/cip36_registration/mod.rs (88%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/cip36_registration/select_cip36_registration.sql (100%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/config/mod.rs (91%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/config/select_config.sql (100%) rename 
catalyst-gateway/bin/src/{event_db/cardano/mod.rs => db/event/cardano.obsolete/mod.rs.obsolete} (100%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/utxo/mod.rs (84%) rename catalyst-gateway/bin/src/{event_db/cardano => db/event/cardano.obsolete}/utxo/select_total_utxo_amount.sql (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/error.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/ballot.rs (84%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/mod.rs (94%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/objective.rs (88%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/proposal.rs (85%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/event/review.rs (81%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/registration.rs (89%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/search.rs (86%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/vit_ss/fund.rs (96%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/queries/vit_ss/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/ballot.rs (95%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/event.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/objective.rs (96%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/proposal.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/registration.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/review.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/search.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/challenge.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/fund.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/goal.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/group.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/mod.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/vit_ss/vote_plan.rs (100%) rename catalyst-gateway/bin/src/{event_db => db/event}/legacy/types/voting_status.rs (100%) create mode 100644 catalyst-gateway/bin/src/db/event/mod.rs rename catalyst-gateway/bin/src/{event_db => db/event}/schema_check/mod.rs (83%) rename catalyst-gateway/bin/src/{event_db => db/event}/schema_check/select_max_version.sql (100%) create mode 100644 catalyst-gateway/bin/src/db/index/block/certs.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs 
create mode 100644 catalyst-gateway/bin/src/db/index/block/cip36/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/cql/insert_stake_registration.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/cql/insert_txi.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txi.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo_asset.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo_asset.cql create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs create mode 100644 catalyst-gateway/bin/src/db/index/block/txo/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/cql/get_txi_by_txn_hashes.cql create mode 100644 catalyst-gateway/bin/src/db/index/queries/cql/get_txo_by_stake_address.cql create mode 100644 catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql create mode 100644 catalyst-gateway/bin/src/db/index/queries/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/namespace.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/stake_registration.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/txi_by_txn_hash_table.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/txo_assets_by_stake_table.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/txo_by_stake_table.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_assets_by_txn_hash.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_by_txn_hash.cql create mode 100644 catalyst-gateway/bin/src/db/index/schema/mod.rs create mode 100644 catalyst-gateway/bin/src/db/index/session.rs create mode 100644 catalyst-gateway/bin/src/db/mod.rs delete mode 100644 catalyst-gateway/bin/src/event_db/mod.rs create mode 100644 catalyst-gateway/bin/src/service/api/auth/endpoint.rs create mode 100644 catalyst-gateway/bin/src/service/api/auth/mod.rs create mode 100644 catalyst-gateway/bin/src/service/api/auth/token.rs create mode 100644 catalyst-gateway/bin/src/service/api/cardano/types.rs create mode 100644 catalyst-gateway/bin/src/service/utilities/convert.rs create mode 100644 catalyst-gateway/bin/src/service/utilities/net.rs delete mode 100644 catalyst-gateway/bin/src/state/mod.rs delete 
mode 100644 catalyst-gateway/crates/README.md create mode 100644 docs/src/catalyst-standards/permissionless-auth/auth-header.md create mode 100644 docs/src/catalyst-standards/permissionless-auth/auth-token.cddl create mode 100644 justfile create mode 100644 utilities/local-scylla/Readme.md create mode 100644 utilities/local-scylla/docker-compose.yml create mode 100644 utilities/local-scylla/justfile create mode 100644 utilities/local-scylla/node1-scylla.yaml create mode 100644 utilities/local-scylla/node2-scylla.yaml create mode 100644 utilities/local-scylla/node3-scylla.yaml create mode 100644 utilities/local-scylla/node4-scylla.yaml diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 8632ae2e70a..2a20967c22f 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -30,7 +30,10 @@ carryforward Catalyst CBOR cborg +cborseq +cdrs cdylib +certdir CEST cfbundle changeme @@ -42,17 +45,21 @@ CIPs COCOAPODS codegen codepoints +commitlog coti coverallsapp +CQLSH cryptoxide Cunego Cupertino dalek damian-molinski DAPPLICATION +dashmap dbeaver dbschema dbsync +Deleg delegators DIND dockerhub @@ -77,7 +84,6 @@ fmtchk fmtfix fontawesome fontello -formz Formz fuzzer gapless @@ -113,6 +119,7 @@ junitreport junitxml Keyhash keyserver +keyspace KUBECONFIG kubernetescrd kubetail @@ -146,10 +153,10 @@ minicbor mithril mitigations moderations +moka msedgedriver multiasset multidex -multiplatform Multiplatform myproject Nami @@ -158,12 +165,14 @@ NDEBUG netifas netkey nextest +Nodetool OCSP Oleksandr onboarded oneshot openapi opentelemetry +overprovisioned pbxproj Pdart permissionless @@ -174,6 +183,7 @@ podfile podhelper postcss Pozhylenkov +Precache Precertificate preprod projectcatalyst @@ -181,6 +191,7 @@ Prokhorenko psql Ptarget pubkey +PUBLICKEY pubspec pytest qrcode @@ -195,6 +206,7 @@ rgloader ripgrep ristretto rlib +rngs RPATH rustc rustdoc @@ -204,23 +216,28 @@ rustfmt rustls rxdart saibatizoku -schemathesis Schemathesis Scripthash ScyllaDB seckey +Seedable sendfile servernum serviceworker slotno sqlfluff +sslmode +sstableinfo Stefano stevenj stringzilla +subchain Subkey submiting subosito +svgs SYSROOT +tablestats tacho testcov testdocs @@ -229,7 +246,6 @@ testunit thiserror thollander timelike -toastify Toastify todos toggleable @@ -238,14 +254,16 @@ Traceback traefik trailings TXNZD +txos Typer unawaited unchunk +Unlogged unmanaged -utxo +Unstaked UTXO -utxos Utxos +varint Vespr vite vitss @@ -272,11 +290,3 @@ xctestrun xcworkspace xvfb yoroi -vsync -damian-molinski -LTRB -hotspots -precache -Precache -svgs -Dreps diff --git a/.github/workflows/generate-allure-report.yml b/.github/workflows/generate-allure-report.yml index 91ae87570af..8da10eed64b 100644 --- a/.github/workflows/generate-allure-report.yml +++ b/.github/workflows/generate-allure-report.yml @@ -3,26 +3,26 @@ name: Allure Report Generation on: pull_request: push: - branches: 'main' + branches: "main" permissions: - contents: write - pull-requests: write - id-token: write + contents: write + pull-requests: write + id-token: write concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} cancel-in-progress: true env: - AWS_REGION: eu-central-1 - AWS_ROLE_ARN: arn:aws:iam::332405224602:role/ci - EARTHLY_TARGET: docker - ECR_REGISTRY: 332405224602.dkr.ecr.eu-central-1.amazonaws.com - ALLURE_REPORT_PATH: allure-report - COVERAGE_REPORT_PATH: coverage-report - REPORT_EXT: .junit-report.xml - COVERAGE_EXT: .info + AWS_REGION: eu-central-1 + AWS_ROLE_ARN: 
arn:aws:iam::332405224602:role/ci + EARTHLY_TARGET: docker + ECR_REGISTRY: 332405224602.dkr.ecr.eu-central-1.amazonaws.com + ALLURE_REPORT_PATH: allure-report + COVERAGE_REPORT_PATH: coverage-report + REPORT_EXT: .junit-report.xml + COVERAGE_EXT: .info jobs: generate-test-reports: @@ -34,21 +34,21 @@ jobs: - name: Setup CI uses: input-output-hk/catalyst-ci/actions/setup@master with: - aws_role_arn: ${{ env.AWS_ROLE_ARN }} - aws_region: ${{ env.AWS_REGION }} - earthly_runner_secret: ${{ secrets.EARTHLY_RUNNER_SECRET }} + aws_role_arn: ${{ env.AWS_ROLE_ARN }} + aws_region: ${{ env.AWS_REGION }} + earthly_runner_secret: ${{ secrets.EARTHLY_RUNNER_SECRET }} - name: Get catalyst gateway unit test report uses: input-output-hk/catalyst-ci/actions/run@master if: always() continue-on-error: true with: - earthfile: ./catalyst-gateway/ - flags: - targets: build - target_flags: - runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} - artifact: "false" + earthfile: ./catalyst-gateway/ + flags: + targets: build + target_flags: + runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} + artifact: "false" - name: Get schemathesis test report uses: input-output-hk/catalyst-ci/actions/run@master @@ -67,42 +67,42 @@ jobs: if: always() continue-on-error: true with: - earthfile: ./catalyst_voices/ - flags: - targets: test-unit - target_flags: - runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} - artifact: "false" + earthfile: ./catalyst_voices/ + flags: + targets: test-unit + target_flags: + runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} + artifact: "false" - name: Get python api test report uses: input-output-hk/catalyst-ci/actions/run@master if: always() continue-on-error: true with: - earthfile: ./catalyst-gateway/tests/api_tests/ - flags: --allow-privileged - targets: test - target_flags: - runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} - artifact: "false" + earthfile: ./catalyst-gateway/tests/api_tests/ + flags: --allow-privileged + targets: test + target_flags: + runner_address: ${{ secrets.EARTHLY_SATELLITE_ADDRESS }} + artifact: "false" - name: Collect and upload test reports uses: actions/upload-artifact@v4 if: always() with: - name: test-reports - path: '**/*${{ env.REPORT_EXT }}' - if-no-files-found: error - retention-days: 1 + name: test-reports + path: "**/*${{ env.REPORT_EXT }}" + if-no-files-found: error + retention-days: 1 - name: Collect and upload test coverage uses: actions/upload-artifact@v4 if: always() with: - name: coverage-reports - path: '**/*${{ env.COVERAGE_EXT }}' - if-no-files-found: error - retention-days: 1 + name: coverage-reports + path: "**/*${{ env.COVERAGE_EXT }}" + if-no-files-found: error + retention-days: 1 generate-allure-report: name: Generate allure report @@ -117,10 +117,10 @@ jobs: - name: Setup Allure report run: | - mkdir -p ${{ env.ALLURE_REPORT_PATH }} - shopt -s globstar - cp **/*${{ env.REPORT_EXT }} ${{ env.ALLURE_REPORT_PATH }} - ls ${{ env.ALLURE_REPORT_PATH }} + mkdir -p ${{ env.ALLURE_REPORT_PATH }} + shopt -s globstar + cp **/*${{ env.REPORT_EXT }} ${{ env.ALLURE_REPORT_PATH }} + ls ${{ env.ALLURE_REPORT_PATH }} - name: Checkout gh-pages uses: actions/checkout@v4 @@ -132,8 +132,8 @@ jobs: uses: mgrybyk/allure-report-branch-action@v1 id: allure with: - report_id: 'test-report' - gh_pages: 'gh-pages-dir' + report_id: "test-report" + gh_pages: "gh-pages-dir" report_dir: ${{ env.ALLURE_REPORT_PATH }} - name: Git push to gh-pages @@ -165,15 +165,15 @@ jobs: - name: Collect coverage report run: | - mkdir -p ${{ 
env.COVERAGE_REPORT_PATH }} - shopt -s globstar - cp **/*${{ env.COVERAGE_EXT }} ${{ env.COVERAGE_REPORT_PATH }} - ls ${{ env.COVERAGE_REPORT_PATH }} + mkdir -p ${{ env.COVERAGE_REPORT_PATH }} + shopt -s globstar + cp **/*${{ env.COVERAGE_EXT }} ${{ env.COVERAGE_REPORT_PATH }} + ls ${{ env.COVERAGE_REPORT_PATH }} - name: Normalize coverage report paths run: | - sed -i -e 's/SF:\/root/SF:catalyst-gateway/g' ${{ env.COVERAGE_REPORT_PATH }}/cat-gateway.coverage.info - sed -i -e 's/SF:/SF:catalyst-gateway\/tests\/api_tests\//g' ${{ env.COVERAGE_REPORT_PATH }}/api-tests.coverage.info + sed -i -e 's/SF:\/root/SF:catalyst-gateway/g' ${{ env.COVERAGE_REPORT_PATH }}/cat-gateway.coverage.info + # sed -i -e 's/SF:/SF:catalyst-gateway\/tests\/api_tests\//g' ${{ env.COVERAGE_REPORT_PATH }}/api-tests.coverage.info - name: Coveralls env: @@ -188,6 +188,7 @@ jobs: parallel: true - name: Coveralls + if: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} uses: coverallsapp/github-action@v2 @@ -211,15 +212,14 @@ jobs: base-path: "/home/runner/work/catalyst-voices/catalyst-voices/" parallel: true - upload-coverage-report: name: Upload coverage report needs: [generate-coverage-report] if: ${{ always() }} runs-on: ubuntu-latest steps: - - name: Coveralls Finished - uses: coverallsapp/github-action@v2 - with: - parallel-finished: true - carryforward: "rust-unit-test,flutter-test" + - name: Coveralls Finished + uses: coverallsapp/github-action@v2 + with: + parallel-finished: true + carryforward: "rust-unit-test,flutter-test" diff --git a/.gitignore b/.gitignore index 51902e75b4e..39169dbc566 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +## Secrets +**/.secret + ### Linux ### *~ diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index df0bd0d0c96..2278004089e 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -13,7 +13,8 @@ "CHANGELOG.md", "catalyst_voices_packages/**/CHANGELOG.md", "catalyst_voices/macos/Pods/**", - "**/node_modules/**" + "**/node_modules/**", + "**/.dart_tool/**" ], // Set standard config options in `/.markdownlint.jsonc` "config": { diff --git a/.markdownlint.jsonc b/.markdownlint.jsonc index a83548c0262..bee3d794e0c 100644 --- a/.markdownlint.jsonc +++ b/.markdownlint.jsonc @@ -1,7 +1,7 @@ { // markdownlint JSON(C) configuration for Catalyst Standards // Do not individually set markdown lint rules in documents. - // It is permissable to disable a rule in a document if it is a false positive. + // It is permissible to disable a rule in a document if it is a false positive. // Keep the scope of the lint disable to as small as possible. // See: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md // Default state for all rules @@ -211,7 +211,7 @@ "MD045": true, // MD046/code-block-style - Code block style // Code Blocks are used by Admonitions and need to be indented. - // Actual code should be fenced, this can;t be enforced by mdlint. + // Actual code should be fenced, this can't be enforced by mdlint. 
"MD046": { // Block style "style": "consistent" diff --git a/.secret.template b/.secret.template new file mode 100644 index 00000000000..072b7c4f22c --- /dev/null +++ b/.secret.template @@ -0,0 +1 @@ +GITHUB_TOKEN=Make One at https://github.com/settings/tokens only need public repo, read packages permissions diff --git a/.vscode/extensions.json b/.vscode/extensions.json index ad69134d3c4..7b455feb129 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -14,11 +14,12 @@ "tamasfe.even-better-toml", "rust-lang.rust-analyzer", "JScearcy.rust-doc-viewer", - "serayuzgur.crates", "anweiss.cddl-languageserver", "tintinweb.graphviz-interactive-preview", "terrastruct.d2", "bbenoist.vagrant", - "ms-kubernetes-tools.vscode-kubernetes-tools" + "ms-kubernetes-tools.vscode-kubernetes-tools", + "fill-labs.dependi", + "lawrencegrant.cql" ] } \ No newline at end of file diff --git a/Earthfile b/Earthfile index be95b3c69e3..95f6e0ee2c3 100644 --- a/Earthfile +++ b/Earthfile @@ -1,8 +1,8 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:v3.1.21 AS mdlint-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:v3.1.21 AS cspell-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.1.21 AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:v3.2.03 AS mdlint-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:v3.2.03 AS cspell-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.03 AS postgresql-ci FROM debian:stable-slim @@ -18,6 +18,10 @@ markdown-check-fix: DO mdlint-ci+MDLINT_LOCALLY --src=$(echo ${PWD}) --fix=--fix +# clean-spelling-list : Make sure the project dictionary is properly sorted. +clean-spelling-list: + DO cspell-ci+CLEAN + # check-spelling Check spelling in this repo inside a container. check-spelling: DO cspell-ci+CHECK diff --git a/catalyst-gateway/.cargo/config.toml b/catalyst-gateway/.cargo/config.toml index 2764f1df4e2..02c23140754 100644 --- a/catalyst-gateway/.cargo/config.toml +++ b/catalyst-gateway/.cargo/config.toml @@ -90,4 +90,4 @@ quiet = false # whether cargo output is quiet verbose = false # whether cargo provides verbose output color = "auto" # whether cargo colorizes output use `CARGO_TERM_COLOR="off"` to disable. progress.when = "never" # whether cargo shows progress bar -progress.width = 80 # width of progress bar \ No newline at end of file +progress.width = 80 # width of progress bar diff --git a/catalyst-gateway/.config/nextest.toml b/catalyst-gateway/.config/nextest.toml index de5cf9b1ef9..be3673830bb 100644 --- a/catalyst-gateway/.config/nextest.toml +++ b/catalyst-gateway/.config/nextest.toml @@ -46,4 +46,4 @@ store-success-output = true # # Note that if a description can be extracted from the output, it is always stored in the # element. 
-store-failure-output = true \ No newline at end of file +store-failure-output = true diff --git a/catalyst-gateway/Cargo.toml b/catalyst-gateway/Cargo.toml index cbe8cfed01c..b819a6f6ca0 100644 --- a/catalyst-gateway/Cargo.toml +++ b/catalyst-gateway/Cargo.toml @@ -2,7 +2,6 @@ resolver = "2" members = [ "bin", - # "crates/", ] [workspace.package] @@ -15,43 +14,6 @@ homepage = "https://input-output-hk.github.io/catalyst-voices" repository = "https://github.com/input-output-hk/catalyst-voices" license = "MIT OR Apache-2.0" -[workspace.dependencies] -clap = "4" -tracing = "0.1.37" -tracing-subscriber = "0.3.16" -serde = "1.0" -serde_json = "1.0" -poem = "3.0.0" -poem-openapi = "5.0.0" -prometheus = "0.13.0" -cryptoxide = "0.4.4" -uuid = "1" -lazy_static = "1.4" -panic-message = "0.3" -cpu-time = "1.0" -ulid = "1.0.1" -rust-embed = "8" -url = "2.4.1" -thiserror = "1.0" -chrono = "0.4" -async-trait = "0.1.64" -rust_decimal = "1.29" -bb8 = "0.8.1" -bb8-postgres = "0.8.1" -tokio-postgres = "0.7.10" -tokio = "1" -dotenvy = "0.15" -local-ip-address = "0.6.1" -gethostname = "0.4.3" -hex = "0.4.3" -handlebars = "5.1.2" -anyhow = "1.0.71" -cddl = "0.9.2" -ciborium = "0.2" -pallas = { git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "709acb19c52c6b789279ecc4bc8793b5d8b5abe9", version = "0.25.0" } -cardano-chain-follower = { git = "https://github.com/input-output-hk/hermes.git", version="0.0.1" } -stringzilla = "3.8.4" - [workspace.lints.rust] warnings = "deny" missing_docs = "deny" @@ -73,6 +35,8 @@ unescaped_backticks = "deny" pedantic = { level = "deny", priority = -1 } unwrap_used = "deny" expect_used = "deny" +todo = "deny" +unimplemented = "deny" exit = "deny" get_unwrap = "deny" index_refutable_slice = "deny" diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index a9748d0a0f1..0795a93f6f2 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -1,15 +1,19 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.21 AS rust-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/mithril_snapshot:v3.1.21 AS mithril-snapshot-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.2.03 AS rust-ci -#cspell: words rustfmt toolsets USERARCH +#cspell: words rustfmt toolsets USERARCH stdcfgs + +# sync-cfg: Synchronize local config with CI version. +# Must be run by the developer manually. +sync-cfg: + DO rust-ci+SYNC_STD_CFG # builder : Set up our target toolchains, and copy our files. builder: DO rust-ci+SETUP - COPY --dir .cargo .config Cargo.* clippy.toml deny.toml rustfmt.toml bin crates . + COPY --dir .cargo .config Cargo.* clippy.toml deny.toml rustfmt.toml bin . ## ----------------------------------------------------------------------------- ## @@ -69,18 +73,6 @@ package-cat-gateway: ENTRYPOINT ./entry.sh SAVE IMAGE cat-gateway:$tag -# package-cat-gateway : Create a deployable container for catalyst-gateway -# And bundle a Mithril snapshot of cardano preprod -nightly-package-cat-gateway-with-preprod: - ARG tag="latest" - - FROM +package-cat-gateway - - # copy preprod mithril snapshot to /tmp/preprod dir - COPY mithril-snapshot-ci+preprod/snapshot /tmp/preprod - - SAVE IMAGE cat-gateway:$tag - # Publish packages if all integration tests have passed. (Failure to pass tests will prevent packages being published.) 
# publish: # FROM scratch diff --git a/catalyst-gateway/Justfile b/catalyst-gateway/Justfile new file mode 100644 index 00000000000..e742d379102 --- /dev/null +++ b/catalyst-gateway/Justfile @@ -0,0 +1,55 @@ +# use with https://github.com/casey/just +# +# Hermes developer convenience functions + +# cspell: words prereqs, commitlog, rustls, nocapture + +default: + @just --list --unsorted + +# Show the dependency tree and all enabled feature flags of every crate. +cargo-tree: + cargo tree -e features,normal,build -f "{p}[{f}]" --workspace --frozen + +# Check Dependency licenses and CVE's +license-check: + cargo deny check --exclude-dev + +# Format the rust code +code-format: + cargo +nightly fmtfix + cargo +nightly fmtchk + +# Lint the rust code +code-lint: + cargo lintfix + cargo lint + +# Synchronize Rust Configs +sync-cfg: + earthly +sync-cfg + +# Pre Push Checks +pre-push: sync-cfg code-format code-lint license-check + # Make sure we can actually build inside Earthly which needs to happen in CI. + earthly +check + earthly +build + earthly +package-cat-gateway + +# Build Local release build of catalyst gateway +build-cat-gateway: code-format code-lint + cargo update + cargo build -r + +# Run cat-gateway natively on preprod +run-cat-gateway: build-cat-gateway + CHAIN_FOLLOWER_SYNC_TASKS="16" \ + RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ + CHAIN_NETWORK="Preprod" \ + ./catalyst-gateway/target/release/cat-gateway run --log-level debug + +# Run cat-gateway natively on mainnet +run-cat-gateway-mainnet: build-cat-gateway + CHAIN_FOLLOWER_SYNC_TASKS="1" \ + RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ + ./catalyst-gateway/target/release/cat-gateway run --log-level debug diff --git a/catalyst-gateway/bin/Cargo.toml b/catalyst-gateway/bin/Cargo.toml index 7d702c6e017..8d614a96b80 100644 --- a/catalyst-gateway/bin/Cargo.toml +++ b/catalyst-gateway/bin/Cargo.toml @@ -3,11 +3,11 @@ name = "cat-gateway" description = "The Catalyst Data Gateway" keywords = ["cardano", "catalyst", "gateway"] categories = ["command-line-utilities"] -version.workspace = true -authors.workspace = true -edition.workspace = true -license.workspace = true -repository.workspace = true +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -15,31 +15,62 @@ repository.workspace = true workspace = true [dependencies] -bb8 = { workspace = true } -bb8-postgres = { workspace = true } -tokio-postgres = { workspace = true, features = [ +cardano-chain-follower = { version = "0.0.2", git = "https://github.com/input-output-hk/catalyst-libs.git", tag = "v0.0.2" } + +pallas = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } +pallas-traverse = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } +#pallas-crypto = { version = "0.30.1", git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "9b5183c8b90b90fe2cc319d986e933e9518957b3" } + +clap = { version = "4.5.17", features = ["derive", "env"] } +tracing = { version = "0.1.40", features = ["log"] } +tracing-subscriber = { version = "0.3.18", features = [ + "fmt", + "json", + "registry", + "std", + "time", + "env-filter", +] } +serde = { version = 
"1.0.204", features = ["derive"] } +serde_json = "1.0.128" +thiserror = "1.0.63" +chrono = "0.4.38" +# async-trait = "0.1.82" +bb8 = "0.8.5" +bb8-postgres = "0.8.1" +tokio-postgres = { version = "0.7.11", features = [ "with-chrono-0_4", "with-serde_json-1", "with-time-0_3", ] } -clap = { workspace = true, features = ["derive", "env"] } -tracing = { workspace = true, features = ["log"] } -tracing-subscriber = { workspace = true, features = ["fmt", "json", "registry", "std", "time"] } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } -thiserror = { workspace = true } -rust_decimal = { workspace = true, features = [ +tokio = { version = "1.39.2", features = ["rt", "macros", "rt-multi-thread"] } +dotenvy = "0.15.7" +local-ip-address = "0.6.2" +gethostname = "0.5.0" +hex = "0.4.3" +handlebars = "6.0.0" +anyhow = "1.0.86" +#cddl = "0.9.4" +#ciborium = "0.2.2" +# stringzilla = "3.9.3" +duration-string = "0.4.0" +build-info = "0.0.38" +ed25519-dalek = "2.1.1" +scylla = { version = "0.14.0", features = ["cloud", "full-serialization"] } +strum = { version = "0.26.3", features = ["derive"] } +# strum_macros = "0.26.4" +openssl = { version = "0.10.66", features = ["vendored"] } +num-bigint = "0.4.6" +futures = "0.3.30" +rand = "0.8.5" +moka = { version = "0.12.8", features = ["future"] } +crossbeam-skiplist = "0.1.3" +rust_decimal = { version = "1.36.0", features = [ "serde-with-float", "db-tokio-postgres", ] } -chrono = { workspace = true } -poem = { workspace = true, features = [ - "embed", - "prometheus", - "compression", -] } -poem-openapi = { workspace = true, features = [ +poem = { version = "3.0.4", features = ["embed", "prometheus", "compression"] } +poem-openapi = { version = "5.0.3", features = [ "openapi-explorer", "rapidoc", "redoc", @@ -48,24 +79,17 @@ poem-openapi = { workspace = true, features = [ "url", "chrono", ] } -prometheus = { workspace = true } -cryptoxide = { workspace = true } -uuid = { workspace = true, features = ["v4", "serde"] } -lazy_static = { workspace = true } -url = { workspace = true } -dotenvy = { workspace = true } -panic-message = { workspace = true } -cpu-time = { workspace = true } -ulid = { workspace = true, features = ["serde", "uuid"] } -rust-embed = { workspace = true } -local-ip-address = { workspace = true } -gethostname = { workspace = true } -hex = { workspace = true } -pallas = { workspace = true } -cardano-chain-follower= { workspace = true } -anyhow = { workspace = true } -handlebars = { workspace = true } -cddl = { workspace = true } -ciborium = { workspace = true } -ed25519-dalek = "2.1.1" -stringzilla = { workspace = true } +uuid = { version = "1.10.0", features = ["v4", "serde"] } +ulid = { version = "1.1.3", features = ["serde", "uuid"] } +cryptoxide = "0.4.4" # TODO: For blake2b replace with blake2b_simd. +url = "2.5.2" +panic-message = "0.3.0" +cpu-time = "1.0.0" +prometheus = "0.13.4" +rust-embed = "8.5.0" +num-traits = "0.2.19" +base64 = "0.22.1" +dashmap = "6.0.1" + +[build-dependencies] +build-info-build = "0.0.38" \ No newline at end of file diff --git a/catalyst-gateway/bin/build.rs b/catalyst-gateway/bin/build.rs new file mode 100644 index 00000000000..76a7acdda99 --- /dev/null +++ b/catalyst-gateway/bin/build.rs @@ -0,0 +1,4 @@ +//! 
Build +fn main() { + build_info_build::build_script(); +} diff --git a/catalyst-gateway/bin/src/build_info.rs b/catalyst-gateway/bin/src/build_info.rs new file mode 100644 index 00000000000..5cfd1ba2e81 --- /dev/null +++ b/catalyst-gateway/bin/src/build_info.rs @@ -0,0 +1,111 @@ +//! Hermes binary build info + +use build_info::{self as build_info_crate}; +use local_ip_address::list_afinet_netifas; +use tracing::info; + +use crate::service::utilities; + +/// Formatted hermes binary build info +pub(crate) const BUILD_INFO: &str = build_info_crate::format!(" +version: {}, +git info: {{{}}} +compiler: {} +build time: {} +", + $.crate_info.version, + $.version_control, + $.compiler, + $.timestamp +); + +build_info_crate::build_info!(fn build_info); + +/// Log Build Info to our logs. +pub(crate) fn log_build_info() { + let info = build_info(); + let timestamp = info.timestamp.to_rfc3339(); + let profile = info.profile.clone(); + let optimization_level = info.optimization_level.to_string(); + + let name = info.crate_info.name.clone(); + let version = info.crate_info.version.to_string(); + let features = info.crate_info.enabled_features.join(","); + + let triple = info.target.triple.clone(); + let family = info.target.family.clone(); + let os = info.target.os.clone(); + let cpu_arch = info.target.cpu.arch.clone(); + let cpu_features = info.target.cpu.features.join(","); + + let compiler_channel = info.compiler.channel.to_string(); + let compiler_version = info.compiler.version.to_string(); + + let mut commit_id = "Unknown".to_string(); + let mut commit_timestamp = "Unknown".to_string(); + let mut branch = "Unknown".to_string(); + let mut tags = "Unknown".to_string(); + + if let Some(ref vc) = info.version_control { + if let Some(git) = vc.git() { + commit_id.clone_from(&git.commit_short_id); + commit_timestamp = git.commit_timestamp.to_rfc3339(); + if let Some(git_branch) = git.branch.clone() { + branch = git_branch; + } + tags = git.tags.join(","); + } + } + + let ipv4 = utilities::net::get_public_ipv4().to_string(); + let ipv6 = utilities::net::get_public_ipv6().to_string(); + + let mut interfaces: String = "Unknown".to_string(); + + // Get local IP address v4 and v6 + if let Ok(network_interfaces) = list_afinet_netifas() { + if !network_interfaces.is_empty() { + interfaces.clear(); + for iface in network_interfaces { + if !interfaces.is_empty() { + interfaces.push(','); + } + interfaces.push_str(&format!("{}:{}", iface.0, iface.1)); + } + } + } + + info!( + BuildTime = timestamp, + Profile = profile, + OptimizationLevel = optimization_level, + Name = name, + Version = version, + Features = features, + TargetTriple = triple, + TargetFamily = family, + TargetOs = os, + CPUArch = cpu_arch, + CPUFeatures = cpu_features, + RustChannel = compiler_channel, + RustVersion = compiler_version, + GitCommitId = commit_id, + GitCommitTimestamp = commit_timestamp, + GitBranch = branch, + GitTags = tags, + PublicIPv4 = ipv4, + PublicIPv6 = ipv6, + NetworkInterfaces = interfaces, + "Catalyst Gateway" + ); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn build_info_test() { + println!("{BUILD_INFO}"); + } +} diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/cip36.cddl b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36.cddl similarity index 100% rename from catalyst-gateway/bin/src/cardano/cip36_registration/cip36.cddl rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36.cddl diff --git 
a/catalyst-gateway/bin/src/cardano/cip36_registration/cip36_registration.cddl b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_registration.cddl similarity index 100% rename from catalyst-gateway/bin/src/cardano/cip36_registration/cip36_registration.cddl rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_registration.cddl diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/cip36_witness.cddl b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_witness.cddl similarity index 100% rename from catalyst-gateway/bin/src/cardano/cip36_registration/cip36_witness.cddl rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/cip36_witness.cddl diff --git a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/mod.rs.obsolete similarity index 97% rename from catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs rename to catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/mod.rs.obsolete index 1c3082b4398..ea3ab2b1a28 100644 --- a/catalyst-gateway/bin/src/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/cardano/cip36_registration_obsolete/mod.rs.obsolete @@ -30,10 +30,12 @@ pub(crate) struct VotingPurpose(u64); pub(crate) struct RewardsAddress(pub Vec); /// Error report for serializing +#[allow(dead_code)] pub(crate) type ErrorReport = Vec; impl PubKey { /// Get credentials, a blake2b 28 bytes hash of the pub key + #[allow(dead_code)] pub(crate) fn get_credentials(&self) -> [u8; 28] { let mut digest = [0u8; 28]; let mut context = Blake2b::new(28); @@ -71,6 +73,8 @@ pub(crate) enum VotingInfo { /// CIP-36 registration info part #[derive(Debug, Clone, PartialEq)] +#[allow(dead_code)] + pub(crate) struct Registration { /// Voting info pub(crate) voting_info: VotingInfo, @@ -86,6 +90,8 @@ pub(crate) struct Registration { /// A catalyst CIP-36 registration on Cardano #[derive(Debug, Clone, PartialEq)] +#[allow(dead_code)] + pub(crate) struct Cip36Metadata { /// CIP-36 registration 61284 pub(crate) registration: Option, @@ -226,12 +232,11 @@ fn is_valid_rewards_address(rewards_address_prefix: u8, network: Network) -> boo return false; } }, - Network::Testnet => { + _ => { if addr_net != 0 { return false; } }, - _ => (), } // Valid addrs: 0x0?, 0x1?, 0x2?, 0x3?, 0x4?, 0x5?, 0x6?, 0x7?, 0xE?, 0xF?. 
@@ -477,11 +482,11 @@ fn test_rewards_addr_permutations() { for addr_type in valid_addr_types { let test_addr = addr_type << 4; - assert!(is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(is_valid_rewards_address(test_addr, Network::Preprod)); assert!(!is_valid_rewards_address(test_addr, Network::Mainnet)); let test_addr = addr_type << 4 | 1; - assert!(!is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(!is_valid_rewards_address(test_addr, Network::Preprod)); assert!(is_valid_rewards_address(test_addr, Network::Mainnet)); } @@ -489,11 +494,11 @@ fn test_rewards_addr_permutations() { for addr_type in invalid_addr_types { let test_addr = addr_type << 4; - assert!(!is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(!is_valid_rewards_address(test_addr, Network::Preprod)); assert!(!is_valid_rewards_address(test_addr, Network::Mainnet)); let test_addr = addr_type << 4 | 1; - assert!(!is_valid_rewards_address(test_addr, Network::Testnet)); + assert!(!is_valid_rewards_address(test_addr, Network::Preprod)); assert!(!is_valid_rewards_address(test_addr, Network::Mainnet)); } } diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index b7a084d0ea7..08e1c9015b7 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -1,89 +1,390 @@ //! Logic for orchestrating followers -use std::{path::PathBuf, sync::Arc, time::Duration}; -/// Handler for follower tasks, allows for control over spawned follower threads -pub type ManageTasks = JoinHandle<()>; +use std::{fmt::Display, time::Duration}; -use anyhow::Context; use cardano_chain_follower::{ - network_genesis_values, ChainUpdate, Follower, FollowerConfigBuilder, Network, Point, + ChainFollower, ChainSyncConfig, Network, Point, ORIGIN_POINT, TIP_POINT, }; -use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock, MultiEraTx}; -use tokio::{sync::mpsc, task::JoinHandle, time}; -use tracing::{error, info}; - -use crate::event_db::{ - cardano::{ - chain_state::{IndexedFollowerDataParams, MachineId}, - cip36_registration::IndexedVoterRegistrationParams, - config::FollowerConfig, - utxo::{IndexedTxnInputParams, IndexedTxnOutputParams, IndexedTxnParams}, - }, - error::NotFoundError, - EventDB, +use duration_string::DurationString; +use futures::{stream::FuturesUnordered, StreamExt}; +use rand::{Rng, SeedableRng}; +use tracing::{error, info, warn}; + +use crate::{ + db::index::{block::index_block, session::CassandraSession}, + settings::Settings, }; -pub(crate) mod cip36_registration; +// pub(crate) mod cip36_registration_obsolete; pub(crate) mod util; /// Blocks batch length that will trigger the blocks buffer to be written to the database. +#[allow(dead_code)] const MAX_BLOCKS_BATCH_LEN: usize = 1024; -/// Returns a follower configs, waits until they present inside the db -async fn get_follower_config( - check_config_tick: u64, db: Arc, -) -> anyhow::Result> { - let mut interval = time::interval(time::Duration::from_secs(check_config_tick)); - loop { - // tick until config exists - interval.tick().await; +/// How long we wait between checks for connection to the indexing DB to be ready. 
+const INDEXING_DB_READY_WAIT_INTERVAL: Duration = Duration::from_secs(1); - match db.get_follower_config().await { - Ok(configs) => break Ok(configs), - Err(err) if err.is::() => { - error!("No follower config found"); - continue; - }, - Err(err) => break Err(err), +/// Start syncing a particular network +async fn start_sync_for(chain: Network) -> anyhow::Result<()> { + let cfg = ChainSyncConfig::default_for(chain); + info!(chain = %cfg.chain, "Starting Blockchain Sync"); + + if let Err(error) = cfg.run().await { + error!(chain=%chain, error=%error, "Failed to start chain sync task"); + Err(error)?; + } + + Ok(()) +} + +/// Data we return from a sync task. +struct SyncParams { + /// What blockchain are we syncing. + chain: Network, + /// The starting point of this sync. + start: Point, + /// The ending point of this sync. + end: Point, + /// The first block we successfully synced. + first_indexed_block: Option, + /// The last block we successfully synced. + last_indexed_block: Option, + /// The number of blocks we successfully synced overall. + total_blocks_synced: u64, + /// The number of blocks we successfully synced, in the last attempt. + last_blocks_synced: u64, + /// The number of retries so far on this sync task. + retries: u64, + /// The number of retries so far on this sync task. + backoff_delay: Option, + /// If the sync completed without error or not. + result: Option>, +} + +impl Display for SyncParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.result.is_none() { + write!(f, "Sync_Params {{ ")?; + } else { + write!(f, "Sync_Result {{ ")?; + } + + write!(f, "start: {}, end: {}", self.start, self.end)?; + + if let Some(first) = self.first_indexed_block.as_ref() { + write!(f, ", first_indexed_block: {first}")?; + } + + if let Some(last) = self.last_indexed_block.as_ref() { + write!(f, ", last_indexed_block: {last}")?; + } + + if self.retries > 0 { + write!(f, ", retries: {}", self.retries)?; + } + + if self.retries > 0 || self.result.is_some() { + write!(f, ", synced_blocks: {}", self.total_blocks_synced)?; + } + + if self.result.is_some() { + write!(f, ", last_sync: {}", self.last_blocks_synced)?; + } + + if let Some(backoff) = self.backoff_delay.as_ref() { + write!(f, ", backoff: {}", DurationString::from(*backoff))?; } + + if let Some(result) = self.result.as_ref() { + match result { + Ok(()) => write!(f, ", Success")?, + Err(error) => write!(f, ", {error}")?, + }; + } + + f.write_str(" }") } } -/// Start followers as per defined in the config -pub(crate) async fn start_followers( - db: Arc, check_config_tick: u64, data_refresh_tick: u64, machine_id: String, -) -> anyhow::Result<()> { - let mut current_config = get_follower_config(check_config_tick, db.clone()).await?; - loop { - // spawn followers and obtain thread handlers for control and future cancellation - let follower_tasks = spawn_followers( - current_config.clone(), - db.clone(), - data_refresh_tick, - machine_id.clone(), - ) - .await?; +/// The range we generate random backoffs within given a base backoff value. +const BACKOFF_RANGE_MULTIPLIER: u32 = 3; + +impl SyncParams { + /// Create a new `SyncParams`. 
+ fn new(chain: Network, start: Point, end: Point) -> Self { + Self { + chain, + start, + end, + first_indexed_block: None, + last_indexed_block: None, + total_blocks_synced: 0, + last_blocks_synced: 0, + retries: 0, + backoff_delay: None, + result: None, + } + } - // Followers should continue indexing until config has changed - current_config = loop { - let new_config = get_follower_config(check_config_tick, db.clone()).await?; - if new_config != current_config { - info!("Config has changed! restarting"); - break new_config; - } - }; + /// Convert a result back into parameters for a retry. + fn retry(&self) -> Self { + let retry_count = self.retries + 1; + + let mut backoff = None; + + // If we did sync any blocks last time, first retry is immediate. + // Otherwise we backoff progressively more as we do more retries. + if self.last_blocks_synced == 0 { + // Calculate backoff based on number of retries so far. + backoff = match retry_count { + 1 => Some(Duration::from_secs(1)), // 1-3 seconds + 2..5 => Some(Duration::from_secs(10)), // 10-30 seconds + _ => Some(Duration::from_secs(30)), // 30-90 seconds. + }; + } + + Self { + chain: self.chain, + start: self.start.clone(), + end: self.end.clone(), + first_indexed_block: self.first_indexed_block.clone(), + last_indexed_block: self.last_indexed_block.clone(), + total_blocks_synced: self.total_blocks_synced, + last_blocks_synced: 0, + retries: retry_count, + backoff_delay: backoff, + result: None, + } + } + + /// Convert Params into the result of the sync. + fn done( + &self, first: Option, last: Option, synced: u64, result: anyhow::Result<()>, + ) -> Self { + Self { + chain: self.chain, + start: self.start.clone(), + end: self.end.clone(), + first_indexed_block: first, + last_indexed_block: last, + total_blocks_synced: synced + self.total_blocks_synced, + last_blocks_synced: synced, + retries: self.retries, + backoff_delay: self.backoff_delay, + result: Some(result), + } + } - // Config has changed, terminate all followers and restart with new config. - info!("Terminating followers"); - for task in follower_tasks { - task.abort(); + /// Get where this sync run actually needs to start from. + fn actual_start(&self) -> Point { + self.last_indexed_block + .as_ref() + .unwrap_or(&self.start) + .clone() + } + + /// Do the backoff delay processing. + /// + /// The actual delay is a random time from the Delay itself to + /// `BACKOFF_RANGE_MULTIPLIER` times the delay. This is to prevent hammering the + /// service at regular intervals. + async fn backoff(&self) { + if let Some(backoff) = self.backoff_delay { + let mut rng = rand::rngs::StdRng::from_entropy(); + let actual_backoff = + rng.gen_range(backoff..backoff.saturating_mul(BACKOFF_RANGE_MULTIPLIER)); + + tokio::time::sleep(actual_backoff).await; } } } +/// Sync a portion of the blockchain. +/// Set end to `TIP_POINT` to sync the tip continuously. +fn sync_subchain(params: SyncParams) -> tokio::task::JoinHandle { + tokio::spawn(async move { + info!(chain = %params.chain, params=%params, "Indexing Blockchain"); + + // Backoff hitting the database if we need to. + params.backoff().await; + + // Wait for indexing DB to be ready before continuing. 
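+        // Worked illustration of the two waits here, assuming `BACKOFF_RANGE_MULTIPLIER`
+        // stays at 3: `params.backoff().await` only sleeps when the previous attempt made
+        // no progress, and with the staged base delays from `retry()` (1s, 10s, 30s) the
+        // jittered sleep lands in roughly 1-3s, 10-30s or 30-90s. A minimal sketch of that
+        // jitter, mirroring the `backoff()` method above:
+        //
+        //     use rand::{Rng, SeedableRng};
+        //     use std::time::Duration;
+        //
+        //     fn jittered(base: Duration) -> Duration {
+        //         rand::rngs::StdRng::from_entropy().gen_range(base..base.saturating_mul(3))
+        //     }
+        //
+        // The wait below is assumed to re-check the indexing DB roughly once per
+        // `INDEXING_DB_READY_WAIT_INTERVAL` (1 second), so a sync task started before the
+        // index DB is available simply blocks here rather than failing.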
+ CassandraSession::wait_is_ready(INDEXING_DB_READY_WAIT_INTERVAL).await; + info!(chain=%params.chain, params=%params,"Indexing DB is ready"); + + let mut first_indexed_block = params.first_indexed_block.clone(); + let mut last_indexed_block = params.last_indexed_block.clone(); + let mut blocks_synced = 0u64; + + let mut follower = + ChainFollower::new(params.chain, params.actual_start(), params.end.clone()).await; + while let Some(chain_update) = follower.next().await { + match chain_update.kind { + cardano_chain_follower::Kind::ImmutableBlockRollForward => { + // We only process these on the follower tracking the TIP. + if params.end == TIP_POINT { + warn!("TODO: Immutable Chain roll forward"); + }; + }, + cardano_chain_follower::Kind::Block => { + let block = chain_update.block_data(); + + if let Err(error) = index_block(block).await { + let error_msg = format!("Failed to index block {}", block.point()); + error!(chain=%params.chain, error=%error, params=%params, error_msg); + return params.done( + first_indexed_block, + last_indexed_block, + blocks_synced, + Err(error.context(error_msg)), + ); + } + + if first_indexed_block.is_none() { + first_indexed_block = Some(block.point()); + } + last_indexed_block = Some(block.point()); + blocks_synced += 1; + }, + cardano_chain_follower::Kind::Rollback => { + warn!("TODO: Live Chain rollback"); + }, + } + } + + let result = params.done( + first_indexed_block, + last_indexed_block, + blocks_synced, + Ok(()), + ); + + info!(chain = %params.chain, result=%result, "Indexing Blockchain Completed: OK"); + + result + }) +} + +/// Start followers as per defined in the config +#[allow(unused)] +pub(crate) async fn start_followers() -> anyhow::Result<()> { + let cfg = Settings::follower_cfg(); + + // Log the chain follower configuration. + cfg.log(); + + // Start Syncing the blockchain, so we can consume its data as required. + start_sync_for(cfg.chain).await?; + info!(chain=%cfg.chain,"Chain Sync is started."); + + tokio::spawn(async move { + // We can't sync until the local chain data is synced. + // This call will wait until we sync. + let tips = cardano_chain_follower::ChainFollower::get_tips(cfg.chain).await; + let immutable_tip_slot = tips.0.slot_or_default(); + let live_tip_slot = tips.1.slot_or_default(); + info!(chain=%cfg.chain, immutable_tip=immutable_tip_slot, live_tip=live_tip_slot, "Blockchain ready to sync from."); + + let mut sync_tasks: FuturesUnordered> = + FuturesUnordered::new(); + + // Start the Immutable Chain sync tasks. + // If the number of sync tasks is zero, just have one. + // Note: this shouldn't be possible, but easy to handle if it is. + let sub_chain_slots = immutable_tip_slot + .checked_div(cfg.sync_tasks.into()) + .unwrap_or(immutable_tip_slot); + // Need steps in a usize, in the highly unlikely event the steps are > max usize, make + // them max usize. + let sub_chain_steps: usize = sub_chain_slots.try_into().unwrap_or(usize::MAX); + + let mut start_point = ORIGIN_POINT; + for slot_end in (sub_chain_slots..immutable_tip_slot).step_by(sub_chain_steps) { + let next_point = cardano_chain_follower::Point::fuzzy(slot_end); + + sync_tasks.push(sync_subchain(SyncParams::new( + cfg.chain, + start_point, + next_point.clone(), + ))); + + // Next start == last end. + start_point = next_point; + } + + // Start the Live Chain sync task - This never stops syncing. + sync_tasks.push(sync_subchain(SyncParams::new( + cfg.chain, + start_point, + TIP_POINT, + ))); + + // Wait Sync tasks to complete. 
If they fail before completing, reschedule them. + // They will return from this iterator in the order they complete. + while let Some(completed) = sync_tasks.next().await { + let remaining_followers = sync_tasks.len(); + + match completed { + Ok(finished) => { + // Sync task finished. Check if it completed OK or had an error. + // If it failed, we need to reschedule it. + + let last_block = finished + .last_indexed_block + .clone() + .map_or("None".to_string(), |v| v.to_string()); + + let first_block = finished + .first_indexed_block + .clone() + .map_or("None".to_string(), |v| v.to_string()); + + // The TIP follower should NEVER end, even without error, so report that as an + // error. It can fail if the index DB goes down in some way. + // Restart it always. + if finished.end == TIP_POINT { + error!(chain=%cfg.chain, report=%finished, + "The TIP follower failed, restarting it."); + + // Start the Live Chain sync task again from where it left off. + sync_tasks.push(sync_subchain(finished.retry())); + } else if let Some(result) = finished.result.as_ref() { + match result { + Ok(()) => { + info!(chain=%cfg.chain, report=%finished, + "The Immutable follower completed successfully."); + }, + Err(error) => { + // let report = &finished.to_string(); + error!(chain=%cfg.chain, report=%finished, + "An Immutable follower failed, restarting it."); + // Start the Immutable Chain sync task again from where it left off. + sync_tasks.push(sync_subchain(finished.retry())); + }, + } + } else { + error!(chain=%cfg.chain, report=%finished, + "The Immutable follower completed, but without a proper result."); + } + }, + Err(error) => { + error!(error=%error, "Sync task failed. Cannot restart it, not enough information. Sync has probably failed at this point."); + }, + } + } + + error!("Sync tasks have all stopped. This is an unexpected error!"); + }); + + Ok(()) +} + +const _UNUSED_CODE: &str = r#" + /// Spawn follower threads and return associated handlers async fn spawn_followers( - configs: Vec<FollowerConfig>, db: Arc<EventDB>, _data_refresh_tick: u64, machine_id: String, + configs: Vec<FollowerConfig>, _data_refresh_tick: u64, machine_id: String, ) -> anyhow::Result<Vec<ManageTasks>> { let mut follower_tasks = Vec::new(); @@ -91,7 +392,6 @@ async fn spawn_followers( let follower_handler = spawn_follower( config.network, &config.relay, - db.clone(), machine_id.clone(), &config.mithril_snapshot.path, ) @@ -106,12 +406,12 @@ async fn spawn_followers( /// Initiate single follower and returns associated task handler /// which facilitates future control over spawned threads. async fn spawn_follower( - network: Network, relay: &str, db: Arc<EventDB>, machine_id: MachineId, snapshot: &str, + network: Network, relay: &str, machine_id: MachineId, snapshot: &str, ) -> anyhow::Result<ManageTasks> { // Establish point at which the last follower stopped updating in order to pick up // where it left off. If there was no previous follower, start indexing from // genesis point.
- let start_from = match db.last_updated_state(network).await { + let start_from = match EventDB::last_updated_state(network).await { Ok((slot_no, block_hash, _)) => Point::new(slot_no.try_into()?, block_hash), Err(err) if err.is::() => Point::Origin, Err(err) => return Err(err), @@ -125,7 +425,7 @@ async fn spawn_follower( .ok_or(anyhow::anyhow!("Obtaining genesis values failed"))?; let task = tokio::spawn(async move { - process_blocks(&mut follower, db, network, machine_id, &genesis_values).await; + process_blocks(&mut follower, network, machine_id, &genesis_values).await; }); Ok(task) @@ -133,7 +433,7 @@ async fn spawn_follower( /// Process next block from the follower async fn process_blocks( - follower: &mut Follower, db: Arc, network: Network, machine_id: MachineId, + follower: &mut Follower, network: Network, machine_id: MachineId, genesis_values: &GenesisValues, ) { info!("Follower started processing blocks"); @@ -157,7 +457,7 @@ async fn process_blocks( blocks_buffer.push(block_data); if blocks_buffer.len() >= MAX_BLOCKS_BATCH_LEN { - index_block_buffer(db.clone(), &genesis_values, network, &machine_id, std::mem::take(&mut blocks_buffer)).await; + index_block_buffer(&genesis_values, network, &machine_id, std::mem::take(&mut blocks_buffer)).await; // Reset batch ticker since we just indexed the blocks buffer ticker.reset(); @@ -184,7 +484,7 @@ async fn process_blocks( } let current_buffer = std::mem::take(&mut blocks_buffer); - index_block_buffer(db.clone(), &genesis_values, network, &machine_id, current_buffer).await; + index_block_buffer(&genesis_values, network, &machine_id, current_buffer).await; // Reset the ticker so it counts the interval as starting after we wrote everything // to the database. @@ -235,7 +535,7 @@ async fn process_blocks( /// Consumes a block buffer and indexes its data. async fn index_block_buffer( - db: Arc, genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, + genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, buffer: Vec, ) { info!("Starting data batch indexing"); @@ -251,7 +551,7 @@ async fn index_block_buffer( } } - match index_many_blocks(db.clone(), genesis_values, network, machine_id, &blocks).await { + match index_many_blocks(genesis_values, network, machine_id, &blocks).await { Ok(()) => { info!("Finished indexing data batch"); }, @@ -263,7 +563,7 @@ async fn index_block_buffer( /// Index a slice of blocks. 
async fn index_many_blocks( - db: Arc, genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, + genesis_values: &GenesisValues, network: Network, machine_id: &MachineId, blocks: &[MultiEraBlock<'_>], ) -> anyhow::Result<()> { let Some(last_block) = blocks.last() else { @@ -272,19 +572,18 @@ async fn index_many_blocks( let network_str = network.to_string(); - index_blocks(&db, genesis_values, &network_str, blocks).await?; - index_transactions(&db, blocks, &network_str).await?; - index_voter_registrations(&db, blocks, network).await?; - - match db - .refresh_last_updated( - chrono::offset::Utc::now(), - last_block.slot().try_into()?, - last_block.hash().to_vec(), - network, - machine_id, - ) - .await + index_blocks(genesis_values, &network_str, blocks).await?; + index_transactions(blocks, &network_str).await?; + index_voter_registrations(blocks, network).await?; + + match EventDB::refresh_last_updated( + chrono::offset::Utc::now(), + last_block.slot().try_into()?, + last_block.hash().to_vec(), + network, + machine_id, + ) + .await { Ok(()) => {}, Err(err) => { @@ -297,7 +596,7 @@ async fn index_many_blocks( /// Index the data from the given blocks. async fn index_blocks( - db: &EventDB, genesis_values: &GenesisValues, network_str: &str, blocks: &[MultiEraBlock<'_>], + genesis_values: &GenesisValues, network_str: &str, blocks: &[MultiEraBlock<'_>], ) -> anyhow::Result { let values: Vec<_> = blocks .iter() @@ -306,7 +605,7 @@ async fn index_blocks( }) .collect(); - db.index_many_follower_data(&values) + EventDB::index_many_follower_data(&values) .await .context("Indexing block data")?; @@ -314,24 +613,22 @@ async fn index_blocks( } /// Index transactions (and its inputs and outputs) from a slice of blocks. -async fn index_transactions( - db: &EventDB, blocks: &[MultiEraBlock<'_>], network_str: &str, -) -> anyhow::Result<()> { +async fn index_transactions(blocks: &[MultiEraBlock<'_>], network_str: &str) -> anyhow::Result<()> { let blocks_txs: Vec<_> = blocks .iter() .flat_map(|b| b.txs().into_iter().map(|tx| (b.slot(), tx))) .collect(); - index_transactions_data(db, network_str, &blocks_txs).await?; - index_transaction_outputs_data(db, &blocks_txs).await?; - index_transaction_inputs_data(db, &blocks_txs).await?; + index_transactions_data(network_str, &blocks_txs).await?; + index_transaction_outputs_data(&blocks_txs).await?; + index_transaction_inputs_data(&blocks_txs).await?; Ok(()) } /// Index transactions data. async fn index_transactions_data( - db: &EventDB, network_str: &str, blocks_txs: &[(u64, MultiEraTx<'_>)], + network_str: &str, blocks_txs: &[(u64, MultiEraTx<'_>)], ) -> anyhow::Result { let values: Vec<_> = blocks_txs .iter() @@ -344,7 +641,7 @@ async fn index_transactions_data( }) .collect::>>()?; - db.index_many_txn_data(&values) + EventDB::index_many_txn_data(&values) .await .context("Indexing transaction data")?; @@ -353,14 +650,14 @@ async fn index_transactions_data( /// Index transaction outputs data. async fn index_transaction_outputs_data( - db: &EventDB, blocks_txs: &[(u64, MultiEraTx<'_>)], + blocks_txs: &[(u64, MultiEraTx<'_>)], ) -> anyhow::Result { let values: Vec<_> = blocks_txs .iter() .flat_map(|(_, tx)| IndexedTxnOutputParams::from_txn_data(tx)) .collect(); - db.index_many_txn_output_data(&values) + EventDB::index_many_txn_output_data(&values) .await .context("Indexing transaction outputs")?; @@ -369,14 +666,14 @@ async fn index_transaction_outputs_data( /// Index transaction inputs data. 
async fn index_transaction_inputs_data( - db: &EventDB, blocks_txs: &[(u64, MultiEraTx<'_>)], + blocks_txs: &[(u64, MultiEraTx<'_>)], ) -> anyhow::Result { let values: Vec<_> = blocks_txs .iter() .flat_map(|(_, tx)| IndexedTxnInputParams::from_txn_data(tx)) .collect(); - db.index_many_txn_input_data(&values) + EventDB::index_many_txn_input_data(&values) .await .context("Indexing transaction inputs")?; @@ -385,7 +682,7 @@ async fn index_transaction_inputs_data( /// Index voter registrations from a slice of blocks. async fn index_voter_registrations( - db: &EventDB, blocks: &[MultiEraBlock<'_>], network: Network, + blocks: &[MultiEraBlock<'_>], network: Network, ) -> anyhow::Result { let values: Vec<_> = blocks .iter() @@ -393,7 +690,7 @@ async fn index_voter_registrations( .flatten() .collect(); - db.index_many_voter_registration_data(&values) + EventDB::index_many_voter_registration_data(&values) .await .context("Indexing voter registration")?; @@ -424,3 +721,5 @@ async fn instantiate_follower( Ok(follower) } + +"#; diff --git a/catalyst-gateway/bin/src/cardano/util.rs b/catalyst-gateway/bin/src/cardano/util.rs index fb54d3217a4..9916797b7f5 100644 --- a/catalyst-gateway/bin/src/cardano/util.rs +++ b/catalyst-gateway/bin/src/cardano/util.rs @@ -1,8 +1,7 @@ //! Block stream parsing and filtering utils - use cryptoxide::{blake2b::Blake2b, digest::Digest}; use pallas::ledger::{ - primitives::conway::{StakeCredential, VKeyWitness}, + primitives::conway::StakeCredential, traverse::{Era, MultiEraAsset, MultiEraCert, MultiEraPolicyAssets}, }; use serde::Serialize; @@ -20,9 +19,11 @@ pub type StakeCredentialHash = String; pub type StakeCredentialKey = String; /// Hash size +#[allow(dead_code)] pub(crate) const BLAKE_2B_256_HASH_SIZE: usize = 256 / 8; /// Helper function to generate the `blake2b_256` hash of a byte slice +#[allow(dead_code)] pub(crate) fn hash(bytes: &[u8]) -> [u8; BLAKE_2B_256_HASH_SIZE] { let mut digest = [0u8; BLAKE_2B_256_HASH_SIZE]; let mut context = Blake2b::new(BLAKE_2B_256_HASH_SIZE); @@ -53,6 +54,7 @@ pub struct PolicyAsset { } /// Extract assets +#[allow(dead_code)] pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec { assets .iter() @@ -66,6 +68,7 @@ pub(crate) fn parse_policy_assets(assets: &[MultiEraPolicyAssets<'_>]) -> Vec Vec { assets .iter() @@ -93,6 +96,7 @@ fn parse_child_assets(assets: &[MultiEraAsset]) -> Vec { } /// Eras before staking should be ignored +#[allow(dead_code)] pub fn valid_era(era: Era) -> bool { !matches!(era, Era::Byron) } @@ -127,29 +131,6 @@ pub fn extract_stake_credentials_from_certs( stake_credentials } -/// Extract witness pub keys and pair with blake2b hash of the pub key. -/// Hashes are generally 32-byte long on Cardano (or 256 bits), -/// except for credentials (i.e. keys or scripts) which are 28-byte long (or 224 bits) -#[allow(dead_code)] -pub fn extract_hashed_witnesses( - witnesses: &[VKeyWitness], -) -> anyhow::Result> { - let mut hashed_witnesses = Vec::new(); - for witness in witnesses { - let pub_key_bytes: [u8; 32] = witness.vkey.as_slice().try_into()?; - - let pub_key_hex = hex::encode(pub_key_bytes); - - let mut digest = [0u8; 28]; - let mut context = Blake2b::new(28); - context.input(&pub_key_bytes); - context.result(&mut digest); - hashed_witnesses.push((pub_key_hex, hex::encode(digest))); - } - - Ok(hashed_witnesses) -} - /// Match hashed witness pub keys with hashed stake credentials from the TX certificates /// to identify the correct stake credential key. 
#[allow(dead_code)] diff --git a/catalyst-gateway/bin/src/cli.rs b/catalyst-gateway/bin/src/cli.rs index 502b626c0f8..e24f938befb 100644 --- a/catalyst-gateway/bin/src/cli.rs +++ b/catalyst-gateway/bin/src/cli.rs @@ -1,15 +1,14 @@ //! CLI interpreter for the service -use std::{io::Write, sync::Arc}; +use std::io::Write; use clap::Parser; use tracing::{error, info}; use crate::{ cardano::start_followers, - logger, + db::{self, index::session::CassandraSession}, service::{self, started}, - settings::{DocsSettings, ServiceSettings}, - state::State, + settings::{DocsSettings, ServiceSettings, Settings}, }; #[derive(Parser)] @@ -38,39 +37,39 @@ impl Cli { pub(crate) async fn exec(self) -> anyhow::Result<()> { match self { Self::Run(settings) => { - let logger_handle = logger::init(settings.log_level); + Settings::init(settings)?; - // Unique machine id - let machine_id = settings.follower_settings.machine_uid; + let mut tasks = Vec::new(); - let state = Arc::new(State::new(Some(settings.database_url), logger_handle).await?); - let event_db = state.event_db(); - event_db - .modify_deep_query(settings.deep_query_inspection.into()) - .await; + info!("Catalyst Gateway - Starting"); - tokio::spawn(async move { - match service::run(&settings.docs_settings, state.clone()).await { + // Start the DB's + CassandraSession::init(); + db::event::establish_connection(); + + // Start the chain indexing follower. + start_followers().await?; + + let handle = tokio::spawn(async move { + match service::run().await { Ok(()) => info!("Endpoints started ok"), Err(err) => { error!("Error starting endpoints {err}"); }, } }); + tasks.push(handle); - let followers_fut = start_followers( - event_db.clone(), - settings.follower_settings.check_config_tick, - settings.follower_settings.data_refresh_tick, - machine_id, - ); started(); - followers_fut.await?; - Ok(()) + for task in tasks { + task.await?; + } + + info!("Catalyst Gateway - Shut Down"); }, Self::Docs(settings) => { - let docs = service::get_app_docs(&settings); + let docs = service::get_app_docs(); match settings.output { Some(path) => { let mut docs_file = std::fs::File::create(path)?; @@ -78,8 +77,9 @@ impl Cli { }, None => println!("{docs}"), } - Ok(()) }, } + + Ok(()) } } diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/insert_update_state.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/insert_update_state.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/insert_update_state.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/mod.rs similarity index 90% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/mod.rs index a7c8dc8a2ac..ff4294df355 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/chain_state/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/mod.rs @@ -6,7 +6,7 @@ use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock}; use tokio_postgres::{binary_copy::BinaryCopyInWriter, types::Type}; use tracing::error; -use crate::event_db::{error::NotFoundError, EventDB}; +use crate::db::event::{error::NotFoundError, Error, EventDB, EVENT_DB_POOL}; /// Block time pub type DateTime = chrono::DateTime; @@ -102,6 +102,7 @@ pub(crate) struct 
IndexedFollowerDataParams<'a> { impl<'a> IndexedFollowerDataParams<'a> { /// Creates a [`IndexedFollowerDataParams`] from block data. + #[allow(dead_code)] pub(crate) fn from_block_data( genesis_values: &GenesisValues, network: &'a str, block: &MultiEraBlock<'a>, ) -> Option { @@ -141,14 +142,16 @@ impl<'a> IndexedFollowerDataParams<'a> { impl EventDB { /// Batch writes follower data. + #[allow(dead_code)] pub(crate) async fn index_many_follower_data( - &self, values: &[IndexedFollowerDataParams<'_>], + values: &[IndexedFollowerDataParams<'_>], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -195,14 +198,13 @@ impl EventDB { /// Get slot info for the provided date-time and network and query type pub(crate) async fn get_slot_info( - &self, date_time: DateTime, network: Network, query_type: SlotInfoQueryType, + date_time: DateTime, network: Network, query_type: SlotInfoQueryType, ) -> anyhow::Result<(SlotNumber, BlockHash, DateTime)> { - let rows = self - .query(&query_type.get_sql_query()?, &[ - &network.to_string(), - &date_time, - ]) - .await?; + let rows = Self::query(&query_type.get_sql_query()?, &[ + &network.to_string(), + &date_time, + ]) + .await?; let row = rows.first().ok_or(NotFoundError)?; @@ -214,11 +216,9 @@ impl EventDB { /// Check when last update chain state occurred. pub(crate) async fn last_updated_state( - &self, network: Network, + network: Network, ) -> anyhow::Result<(SlotNumber, BlockHash, DateTime)> { - let rows = self - .query(SELECT_UPDATE_STATE_SQL, &[&network.to_string()]) - .await?; + let rows = Self::query(SELECT_UPDATE_STATE_SQL, &[&network.to_string()]).await?; let row = rows.first().ok_or(NotFoundError)?; @@ -231,9 +231,10 @@ impl EventDB { /// Mark point in time where the last follower finished indexing in order for future /// followers to pick up from this point + #[allow(dead_code)] pub(crate) async fn refresh_last_updated( - &self, last_updated: DateTime, slot_no: SlotNumber, block_hash: BlockHash, - network: Network, machine_id: &MachineId, + last_updated: DateTime, slot_no: SlotNumber, block_hash: BlockHash, network: Network, + machine_id: &MachineId, ) -> anyhow::Result<()> { // Rollback or update let update = true; @@ -242,7 +243,7 @@ impl EventDB { // An insert only happens once when there is no update metadata available // All future additions are just updates on ended, slot_no and block_hash - self.modify(INSERT_UPDATE_STATE_SQL, &[ + Self::modify(INSERT_UPDATE_STATE_SQL, &[ &i64::try_from(network_id)?, &last_updated, &last_updated, diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/select_slot_info_by_datetime.sql.hbs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_slot_info_by_datetime.sql.hbs similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/select_slot_info_by_datetime.sql.hbs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_slot_info_by_datetime.sql.hbs diff --git a/catalyst-gateway/bin/src/event_db/cardano/chain_state/select_update_state.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_update_state.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/chain_state/select_update_state.sql rename to 
catalyst-gateway/bin/src/db/event/cardano.obsolete/chain_state/select_update_state.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs similarity index 88% rename from catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs index c267790bdc9..2624934aea0 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/mod.rs @@ -9,7 +9,9 @@ use crate::{ cip36_registration::{Cip36Metadata, VotingInfo}, util::valid_era, }, - event_db::{cardano::chain_state::SlotNumber, error::NotFoundError, EventDB}, + db::event::{ + cardano::chain_state::SlotNumber, error::NotFoundError, Error, EventDB, EVENT_DB_POOL, + }, }; /// Transaction id @@ -134,14 +136,16 @@ impl IndexedVoterRegistrationParams { impl EventDB { /// Batch writes voter registration data. + #[allow(dead_code)] pub(crate) async fn index_many_voter_registration_data( - &self, values: &[IndexedVoterRegistrationParams], + values: &[IndexedVoterRegistrationParams], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -154,16 +158,19 @@ impl EventDB { let sink = tx .copy_in("COPY tmp_cardano_voter_registration (tx_id, stake_credential, public_voting_key, payment_address, nonce, metadata_cip36, stats, valid) FROM STDIN BINARY") .await?; - let writer = BinaryCopyInWriter::new(sink, &[ - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::BYTEA, - Type::INT8, - Type::BYTEA, - Type::JSONB, - Type::BOOL, - ]); + let writer = BinaryCopyInWriter::new( + sink, + &[ + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::BYTEA, + Type::INT8, + Type::BYTEA, + Type::JSONB, + Type::BOOL, + ], + ); tokio::pin!(writer); for params in values { @@ -197,15 +204,13 @@ impl EventDB { /// Get registration info pub(crate) async fn get_registration_info( - &self, stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, + stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, ) -> anyhow::Result<(TxId, PaymentAddress, PublicVotingInfo, Nonce)> { - let rows = self - .query(SELECT_VOTER_REGISTRATION_SQL, &[ - &stake_credential, - &network.to_string(), - &slot_num, - ]) - .await?; + let rows = Self::query( + SELECT_VOTER_REGISTRATION_SQL, + &[&stake_credential, &network.to_string(), &slot_num], + ) + .await?; let row = rows.first().ok_or(NotFoundError)?; diff --git a/catalyst-gateway/bin/src/event_db/cardano/cip36_registration/select_cip36_registration.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/cip36_registration/select_cip36_registration.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/cip36_registration/select_cip36_registration.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/mod.rs similarity index 91% rename from catalyst-gateway/bin/src/event_db/cardano/config/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/config/mod.rs index bbf7b42ff78..b511989fbfe 100644 --- 
a/catalyst-gateway/bin/src/event_db/cardano/config/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/mod.rs @@ -4,7 +4,7 @@ use std::str::FromStr; use cardano_chain_follower::Network; use serde::{Deserialize, Serialize}; -use crate::event_db::{error::NotFoundError, EventDB}; +use crate::db::event::{error::NotFoundError, EventDB}; /// Representation of the `config` table id fields `id`, `id2`, `id3` enum ConfigId { @@ -54,11 +54,12 @@ const SELECT_CONFIG_SQL: &str = include_str!("select_config.sql"); impl EventDB { /// Config query - pub(crate) async fn get_follower_config(&self) -> anyhow::Result> { + #[allow(dead_code)] + pub(crate) async fn get_follower_config() -> anyhow::Result> { let id = "cardano"; let id2 = "follower"; - let rows = self.query(SELECT_CONFIG_SQL, &[&id, &id2]).await?; + let rows = Self::query(SELECT_CONFIG_SQL, &[&id, &id2]).await?; let mut follower_configs = Vec::new(); for row in rows { diff --git a/catalyst-gateway/bin/src/event_db/cardano/config/select_config.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/config/select_config.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/config/select_config.sql diff --git a/catalyst-gateway/bin/src/event_db/cardano/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/mod.rs.obsolete similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/mod.rs.obsolete diff --git a/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs similarity index 84% rename from catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs index 17ab9c80a55..999771620c7 100644 --- a/catalyst-gateway/bin/src/event_db/cardano/utxo/mod.rs +++ b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/mod.rs @@ -1,22 +1,17 @@ //! Utxo Queries -use cardano_chain_follower::Network; use pallas::ledger::{addresses::Address, traverse::MultiEraTx}; use tokio_postgres::{binary_copy::BinaryCopyInWriter, types::Type}; use tracing::error; -use super::{chain_state::SlotNumber, cip36_registration::StakeCredential}; use crate::{ cardano::util::parse_policy_assets, - event_db::{error::NotFoundError, EventDB}, + db::event::{Error, EventDB, EVENT_DB_POOL}, }; /// Stake amount. pub(crate) type StakeAmount = i64; -/// `select_total_utxo_amount.sql` -const SELECT_TOTAL_UTXO_AMOUNT_SQL: &str = include_str!("select_total_utxo_amount.sql"); - /// Data required to index transactions. pub(crate) struct IndexedTxnParams<'a> { /// Transaction id @@ -43,6 +38,7 @@ pub(crate) struct IndexedTxnOutputParams { impl IndexedTxnOutputParams { /// Creates transaction indexing data from transaction data. + #[allow(dead_code)] pub(crate) fn from_txn_data(tx: &MultiEraTx) -> Vec { tx.outputs() .into_iter() @@ -100,6 +96,7 @@ pub(crate) struct IndexedTxnInputParams { impl IndexedTxnInputParams { /// Creates transaction indexing data from transaction data. + #[allow(dead_code)] pub(crate) fn from_txn_data(tx: &MultiEraTx) -> Vec { tx.inputs() .into_iter() @@ -124,14 +121,16 @@ impl IndexedTxnInputParams { impl EventDB { /// Batch writes transaction output indexing data. 
+ #[allow(dead_code)] pub(crate) async fn index_many_txn_output_data( - &self, values: &[IndexedTxnOutputParams], + values: &[IndexedTxnOutputParams], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -177,14 +176,16 @@ impl EventDB { } /// Batch writes transaction input indexing data. + #[allow(dead_code)] pub(crate) async fn index_many_txn_input_data( - &self, values: &[IndexedTxnInputParams], + values: &[IndexedTxnInputParams], ) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -224,14 +225,14 @@ impl EventDB { } /// Batch writes transaction indexing data. - pub(crate) async fn index_many_txn_data( - &self, values: &[IndexedTxnParams<'_>], - ) -> anyhow::Result<()> { + #[allow(dead_code)] + pub(crate) async fn index_many_txn_data(values: &[IndexedTxnParams<'_>]) -> anyhow::Result<()> { if values.is_empty() { return Ok(()); } - let mut conn = self.pool.get().await?; + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; let tx = conn.transaction().await?; tx.execute( @@ -267,28 +268,4 @@ impl EventDB { Ok(()) } - - /// Get total utxo amount - pub(crate) async fn total_utxo_amount( - &self, stake_credential: StakeCredential, network: Network, slot_num: SlotNumber, - ) -> anyhow::Result<(StakeAmount, SlotNumber)> { - let row = self - .query_one(SELECT_TOTAL_UTXO_AMOUNT_SQL, &[ - &stake_credential, - &network.to_string(), - &slot_num, - ]) - .await?; - - // Aggregate functions as SUM and MAX return NULL if there are no rows, so we need to - // check for it. - // https://www.postgresql.org/docs/8.2/functions-aggregate.html - if let Some(amount) = row.try_get("total_utxo_amount")? 
{ - let slot_number = row.try_get("slot_no")?; - - Ok((amount, slot_number)) - } else { - Err(NotFoundError.into()) - } - } } diff --git a/catalyst-gateway/bin/src/event_db/cardano/utxo/select_total_utxo_amount.sql b/catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/cardano/utxo/select_total_utxo_amount.sql rename to catalyst-gateway/bin/src/db/event/cardano.obsolete/utxo/select_total_utxo_amount.sql diff --git a/catalyst-gateway/bin/src/event_db/error.rs b/catalyst-gateway/bin/src/db/event/error.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/error.rs rename to catalyst-gateway/bin/src/db/event/error.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs similarity index 84% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs index 87b0a5ed04f..f88799c8f68 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/ballot.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/ballot.rs @@ -1,7 +1,7 @@ //! Ballot Queries use std::collections::HashMap; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ ballot::{ @@ -51,25 +51,23 @@ impl EventDB { /// Get ballot query #[allow(dead_code)] pub(crate) async fn get_ballot( - &self, event: EventId, objective: ObjectiveId, proposal: ProposalId, + event: EventId, objective: ObjectiveId, proposal: ProposalId, ) -> anyhow::Result { - let rows = self - .query(Self::BALLOT_VOTE_OPTIONS_QUERY, &[ - &event.0, - &objective.0, - &proposal.0, - ]) - .await?; + let rows = Self::query(Self::BALLOT_VOTE_OPTIONS_QUERY, &[ + &event.0, + &objective.0, + &proposal.0, + ]) + .await?; let row = rows.first().ok_or(NotFoundError)?; let choices = row.try_get("objective")?; - let rows = self - .query(Self::BALLOT_VOTE_PLANS_QUERY, &[ - &event.0, - &objective.0, - &proposal.0, - ]) - .await?; + let rows = Self::query(Self::BALLOT_VOTE_PLANS_QUERY, &[ + &event.0, + &objective.0, + &proposal.0, + ]) + .await?; let mut voteplans = Vec::new(); for row in rows { voteplans.push(VotePlan { @@ -94,25 +92,23 @@ impl EventDB { pub(crate) async fn get_objective_ballots( &self, event: EventId, objective: ObjectiveId, ) -> anyhow::Result> { - let rows = self - .query(Self::BALLOTS_VOTE_OPTIONS_PER_OBJECTIVE_QUERY, &[ - &event.0, - &objective.0, - ]) - .await?; + let rows = Self::query(Self::BALLOTS_VOTE_OPTIONS_PER_OBJECTIVE_QUERY, &[ + &event.0, + &objective.0, + ]) + .await?; let mut ballots = Vec::new(); for row in rows { let choices = row.try_get("objective")?; let proposal_id = ProposalId(row.try_get("proposal_id")?); - let rows = self - .query(Self::BALLOT_VOTE_PLANS_QUERY, &[ - &event.0, - &objective.0, - &proposal_id.0, - ]) - .await?; + let rows = Self::query(Self::BALLOT_VOTE_PLANS_QUERY, &[ + &event.0, + &objective.0, + &proposal_id.0, + ]) + .await?; let mut voteplans = Vec::new(); for row in rows { voteplans.push(VotePlan { @@ -142,22 +138,19 @@ impl EventDB { pub(crate) async fn get_event_ballots( &self, event: EventId, ) -> anyhow::Result> { - let rows = 
self - .query(Self::BALLOTS_VOTE_OPTIONS_PER_EVENT_QUERY, &[&event.0]) - .await?; + let rows = Self::query(Self::BALLOTS_VOTE_OPTIONS_PER_EVENT_QUERY, &[&event.0]).await?; let mut ballots = HashMap::>::new(); for row in rows { let choices = row.try_get("objective")?; let proposal_id = ProposalId(row.try_get("proposal_id")?); let objective_id = ObjectiveId(row.try_get("objective_id")?); - let rows = self - .query(Self::BALLOT_VOTE_PLANS_QUERY, &[ - &event.0, - &objective_id.0, - &proposal_id.0, - ]) - .await?; + let rows = Self::query(Self::BALLOT_VOTE_PLANS_QUERY, &[ + &event.0, + &objective_id.0, + &proposal_id.0, + ]) + .await?; let mut voteplans = Vec::new(); for row in rows { voteplans.push(VotePlan { diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/mod.rs similarity index 94% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/mod.rs index 329426a1518..1acf7ddcb1c 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/mod.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/mod.rs @@ -1,7 +1,7 @@ //! Event Queries use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::event::{ Event, EventDetails, EventGoal, EventId, EventRegistration, EventSchedule, EventSummary, @@ -45,9 +45,7 @@ impl EventDB { pub(crate) async fn get_events( &self, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::EVENTS_QUERY, &[&limit, &offset.unwrap_or(0)]) - .await?; + let rows = Self::query(Self::EVENTS_QUERY, &[&limit, &offset.unwrap_or(0)]).await?; let mut events = Vec::new(); for row in rows { @@ -74,8 +72,8 @@ impl EventDB { /// Get event query #[allow(dead_code)] - pub(crate) async fn get_event(&self, event: EventId) -> anyhow::Result { - let rows = self.query(Self::EVENT_QUERY, &[&event.0]).await?; + pub(crate) async fn get_event(event: EventId) -> anyhow::Result { + let rows = Self::query(Self::EVENT_QUERY, &[&event.0]).await?; let row = rows.first().ok_or(NotFoundError)?; let ends = row @@ -129,7 +127,7 @@ impl EventDB { .map(|val| val.and_local_timezone(Utc).unwrap()), }; - let rows = self.query(Self::EVENT_GOALS_QUERY, &[&event.0]).await?; + let rows = Self::query(Self::EVENT_GOALS_QUERY, &[&event.0]).await?; let mut goals = Vec::new(); for row in rows { goals.push(EventGoal { diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs similarity index 88% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs index d702d2efb8e..78c1391559a 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/objective.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/objective.rs @@ -1,5 +1,5 @@ //! 
Objective Queries -use crate::event_db::{ +use crate::db::event::{ legacy::types::{ event::EventId, objective::{ @@ -31,15 +31,14 @@ impl EventDB { /// Get objectives query #[allow(dead_code)] pub(crate) async fn get_objectives( - &self, event: EventId, limit: Option, offset: Option, + event: EventId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::OBJECTIVES_QUERY, &[ - &event.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query(Self::OBJECTIVES_QUERY, &[ + &event.0, + &limit, + &offset.unwrap_or(0), + ]) + .await?; let mut objectives = Vec::new(); for row in rows { @@ -62,7 +61,7 @@ impl EventDB { }; let mut groups = Vec::new(); - let rows = self.query(Self::VOTING_GROUPS_QUERY, &[&row_id]).await?; + let rows = Self::query(Self::VOTING_GROUPS_QUERY, &[&row_id]).await?; for row in rows { let group = row.try_get::<_, Option>("group")?.map(VoterGroupId); let voting_token: Option<_> = row.try_get("voting_token")?; diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs similarity index 85% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs index f4465a4e7a4..ec45313ee1a 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/proposal.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/proposal.rs @@ -1,5 +1,5 @@ //! Proposal Queries -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ event::EventId, @@ -31,11 +31,10 @@ impl EventDB { /// Get proposal query #[allow(dead_code)] pub(crate) async fn get_proposal( - &self, event: EventId, objective: ObjectiveId, proposal: ProposalId, + event: EventId, objective: ObjectiveId, proposal: ProposalId, ) -> anyhow::Result { - let rows = self - .query(Self::PROPOSAL_QUERY, &[&event.0, &objective.0, &proposal.0]) - .await?; + let rows = + Self::query(Self::PROPOSAL_QUERY, &[&event.0, &objective.0, &proposal.0]).await?; let row = rows.first().ok_or(NotFoundError)?; let proposer = vec![ProposerDetails { @@ -68,14 +67,13 @@ impl EventDB { pub(crate) async fn get_proposals( &self, event: EventId, objective: ObjectiveId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::PROPOSALS_QUERY, &[ - &event.0, - &objective.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query(Self::PROPOSALS_QUERY, &[ + &event.0, + &objective.0, + &limit, + &offset.unwrap_or(0), + ]) + .await?; let mut proposals = Vec::new(); for row in rows { diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs similarity index 81% rename from catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs index 7faa54c32d4..e53a4cbd6eb 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/event/review.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/event/review.rs @@ -1,5 +1,5 @@ //! 
Review Queries -use crate::event_db::{ +use crate::db::event::{ legacy::types::{ event::EventId, objective::ObjectiveId, @@ -38,18 +38,17 @@ impl EventDB { /// Get reviews query #[allow(dead_code)] pub(crate) async fn get_reviews( - &self, event: EventId, objective: ObjectiveId, proposal: ProposalId, limit: Option, + event: EventId, objective: ObjectiveId, proposal: ProposalId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::REVIEWS_QUERY, &[ - &event.0, - &objective.0, - &proposal.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query(Self::REVIEWS_QUERY, &[ + &event.0, + &objective.0, + &proposal.0, + &limit, + &offset.unwrap_or(0), + ]) + .await?; let mut reviews = Vec::new(); for row in rows { @@ -57,9 +56,7 @@ impl EventDB { let review_id: i32 = row.try_get("row_id")?; let mut ratings = Vec::new(); - let rows = self - .query(Self::RATINGS_PER_REVIEW_QUERY, &[&review_id]) - .await?; + let rows = Self::query(Self::RATINGS_PER_REVIEW_QUERY, &[&review_id]).await?; for row in rows { ratings.push(Rating { review_type: row.try_get("metric")?, @@ -79,14 +76,13 @@ impl EventDB { pub(crate) async fn get_review_types( &self, event: EventId, objective: ObjectiveId, limit: Option, offset: Option, ) -> anyhow::Result> { - let rows = self - .query(Self::REVIEW_TYPES_QUERY, &[ - &event.0, - &objective.0, - &limit, - &offset.unwrap_or(0), - ]) - .await?; + let rows = Self::query(Self::REVIEW_TYPES_QUERY, &[ + &event.0, + &objective.0, + &limit, + &offset.unwrap_or(0), + ]) + .await?; let mut review_types = Vec::new(); for row in rows { let map = row diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/queries/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs similarity index 89% rename from catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs index 805fe73222d..cf7ae11a4dc 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/registration.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/registration.rs @@ -1,7 +1,7 @@ //! Registration Queries use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ event::EventId, @@ -74,14 +74,12 @@ impl EventDB { /// Get voter query #[allow(dead_code)] pub(crate) async fn get_voter( - &self, event: &Option, voting_key: String, with_delegations: bool, + event: &Option, voting_key: String, with_delegations: bool, ) -> anyhow::Result { let rows = if let Some(event) = event { - self.query(Self::VOTER_BY_EVENT_QUERY, &[&voting_key, &event.0]) - .await? + Self::query(Self::VOTER_BY_EVENT_QUERY, &[&voting_key, &event.0]).await? } else { - self.query(Self::VOTER_BY_LAST_EVENT_QUERY, &[&voting_key]) - .await? + Self::query(Self::VOTER_BY_LAST_EVENT_QUERY, &[&voting_key]).await? }; let voter = rows.first().ok_or(NotFoundError)?; @@ -89,14 +87,13 @@ impl EventDB { let voting_power = voter.try_get("voting_power")?; let rows = if let Some(event) = event { - self.query(Self::TOTAL_BY_EVENT_VOTING_QUERY, &[ + Self::query(Self::TOTAL_BY_EVENT_VOTING_QUERY, &[ &voting_group.0, &event.0, ]) .await? 
} else { - self.query(Self::TOTAL_BY_LAST_EVENT_VOTING_QUERY, &[&voting_group.0]) - .await? + Self::query(Self::TOTAL_BY_LAST_EVENT_VOTING_QUERY, &[&voting_group.0]).await? }; let total_voting_power_per_group: i64 = rows @@ -118,10 +115,9 @@ impl EventDB { let delegator_addresses = if with_delegations { let rows = if let Some(event) = event { - self.query(Self::VOTER_DELEGATORS_LIST_QUERY, &[&voting_key, &event.0]) - .await? + Self::query(Self::VOTER_DELEGATORS_LIST_QUERY, &[&voting_key, &event.0]).await? } else { - self.query(Self::VOTER_DELEGATORS_LIST_QUERY, &[ + Self::query(Self::VOTER_DELEGATORS_LIST_QUERY, &[ &voting_key, &voter.try_get::<_, i32>("event")?, ]) @@ -161,25 +157,23 @@ impl EventDB { /// Get delegator query #[allow(dead_code)] pub(crate) async fn get_delegator( - &self, event: &Option, stake_public_key: String, + event: &Option, stake_public_key: String, ) -> anyhow::Result { let rows = if let Some(event) = event { - self.query(Self::DELEGATOR_SNAPSHOT_INFO_BY_EVENT_QUERY, &[&event.0]) - .await? + Self::query(Self::DELEGATOR_SNAPSHOT_INFO_BY_EVENT_QUERY, &[&event.0]).await? } else { - self.query(Self::DELEGATOR_SNAPSHOT_INFO_BY_LAST_EVENT_QUERY, &[]) - .await? + Self::query(Self::DELEGATOR_SNAPSHOT_INFO_BY_LAST_EVENT_QUERY, &[]).await? }; let delegator_snapshot_info = rows.first().ok_or(NotFoundError)?; let delegation_rows = if let Some(event) = event { - self.query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ + Self::query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ &stake_public_key, &event.0, ]) .await? } else { - self.query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ + Self::query(Self::DELEGATIONS_BY_EVENT_QUERY, &[ &stake_public_key, &delegator_snapshot_info.try_get::<_, i32>("event")?, ]) @@ -200,11 +194,9 @@ impl EventDB { } let rows = if let Some(version) = event { - self.query(Self::TOTAL_POWER_BY_EVENT_QUERY, &[&version.0]) - .await? + Self::query(Self::TOTAL_POWER_BY_EVENT_QUERY, &[&version.0]).await? } else { - self.query(Self::TOTAL_POWER_BY_LAST_EVENT_QUERY, &[]) - .await? + Self::query(Self::TOTAL_POWER_BY_LAST_EVENT_QUERY, &[]).await? }; let total_power: i64 = rows .first() diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs similarity index 86% rename from catalyst-gateway/bin/src/event_db/legacy/queries/search.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/search.rs index 4ea8d650be4..b26c96961e1 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/search.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/search.rs @@ -1,7 +1,7 @@ //! Search Queries use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::{ event::{EventId, EventSummary}, @@ -105,10 +105,10 @@ impl EventDB { /// Search for a total. 
async fn search_total( - &self, search_query: SearchQuery, limit: Option, offset: Option, + search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_count_query(&search_query), &[ + let rows: Vec = + Self::query(&Self::construct_count_query(&search_query), &[ &limit, &offset.unwrap_or(0), ]) @@ -124,15 +124,14 @@ impl EventDB { /// Search for events async fn search_events( - &self, search_query: SearchQuery, limit: Option, offset: Option, + search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query(&Self::construct_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) + .await + .map_err(|_| NotFoundError)?; let mut events = Vec::new(); for row in rows { @@ -166,13 +165,12 @@ impl EventDB { async fn search_objectives( &self, search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query(&Self::construct_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) + .await + .map_err(|_| NotFoundError)?; let mut objectives = Vec::new(); for row in rows { @@ -201,13 +199,12 @@ impl EventDB { async fn search_proposals( &self, search_query: SearchQuery, limit: Option, offset: Option, ) -> anyhow::Result { - let rows: Vec = self - .query(&Self::construct_query(&search_query), &[ - &limit, - &offset.unwrap_or(0), - ]) - .await - .map_err(|_| NotFoundError)?; + let rows: Vec = Self::query(&Self::construct_query(&search_query), &[ + &limit, + &offset.unwrap_or(0), + ]) + .await + .map_err(|_| NotFoundError)?; let mut proposals = Vec::new(); for row in rows { @@ -237,10 +234,10 @@ impl EventDB { &self, search_query: SearchQuery, total: bool, limit: Option, offset: Option, ) -> anyhow::Result { if total { - self.search_total(search_query, limit, offset).await + Self::search_total(search_query, limit, offset).await } else { match search_query.table { - SearchTable::Events => self.search_events(search_query, limit, offset).await, + SearchTable::Events => Self::search_events(search_query, limit, offset).await, SearchTable::Objectives => { self.search_objectives(search_query, limit, offset).await }, diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs similarity index 96% rename from catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs index 844846ae266..1fac627dc53 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/fund.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/fund.rs @@ -1,6 +1,6 @@ use chrono::{NaiveDateTime, Utc}; -use crate::event_db::{ +use crate::db::event::{ error::NotFoundError, legacy::types::vit_ss::{ challenge::{Challenge, ChallengeHighlights}, @@ -108,8 +108,8 @@ impl EventDB { /// Get fund query // TODO(stevenj): https://github.com/input-output-hk/catalyst-voices/issues/68 #[allow(dead_code, clippy::too_many_lines)] - pub(crate) async fn get_fund(&self) -> anyhow::Result { - let rows = self.query(Self::FUND_QUERY, &[]).await?; + pub(crate) async fn get_fund() -> anyhow::Result { + let rows = 
Self::query(Self::FUND_QUERY, &[]).await?; let row = rows.first().ok_or(NotFoundError)?; let fund_id = row.try_get("id")?; @@ -130,7 +130,7 @@ impl EventDB { .and_local_timezone(Utc) .unwrap(); - let rows = self.query(Self::FUND_VOTE_PLANS_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_VOTE_PLANS_QUERY, &[&fund_id]).await?; let mut chain_vote_plans = Vec::new(); for row in rows { chain_vote_plans.push(Voteplan { @@ -150,7 +150,7 @@ impl EventDB { }); } - let rows = self.query(Self::FUND_CHALLENGES_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_CHALLENGES_QUERY, &[&fund_id]).await?; let mut challenges = Vec::new(); for row in rows { challenges.push(Challenge { @@ -175,7 +175,7 @@ impl EventDB { }); } - let rows = self.query(Self::FUND_GOALS_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_GOALS_QUERY, &[&fund_id]).await?; let mut goals = Vec::new(); for row in rows { goals.push(Goal { @@ -185,7 +185,7 @@ impl EventDB { }); } - let rows = self.query(Self::FUND_GROUPS_QUERY, &[&fund_id]).await?; + let rows = Self::query(Self::FUND_GROUPS_QUERY, &[&fund_id]).await?; let mut groups = Vec::new(); for row in rows { groups.push(Group { diff --git a/catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/queries/vit_ss/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/queries/vit_ss/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/ballot.rs b/catalyst-gateway/bin/src/db/event/legacy/types/ballot.rs similarity index 95% rename from catalyst-gateway/bin/src/event_db/legacy/types/ballot.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/ballot.rs index 46d1d2f56af..7527756c20f 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/types/ballot.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/types/ballot.rs @@ -1,6 +1,6 @@ //! Ballot types use super::{objective::ObjectiveId, proposal::ProposalId}; -use crate::event_db::legacy::types::registration::VoterGroupId; +use crate::db::event::legacy::types::registration::VoterGroupId; #[derive(Debug, Clone, PartialEq, Eq)] /// Objective Choices diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/event.rs b/catalyst-gateway/bin/src/db/event/legacy/types/event.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/event.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/event.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/types/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/objective.rs b/catalyst-gateway/bin/src/db/event/legacy/types/objective.rs similarity index 96% rename from catalyst-gateway/bin/src/event_db/legacy/types/objective.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/objective.rs index 3ccd708bcc5..920cccba2a6 100644 --- a/catalyst-gateway/bin/src/event_db/legacy/types/objective.rs +++ b/catalyst-gateway/bin/src/db/event/legacy/types/objective.rs @@ -1,7 +1,7 @@ //! 
Objective Types use serde_json::Value; -use crate::event_db::legacy::types::registration::VoterGroupId; +use crate::db::event::legacy::types::registration::VoterGroupId; #[allow(clippy::module_name_repetitions)] #[derive(Debug, Clone, PartialEq, Eq, Hash)] diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/proposal.rs b/catalyst-gateway/bin/src/db/event/legacy/types/proposal.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/proposal.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/proposal.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/registration.rs b/catalyst-gateway/bin/src/db/event/legacy/types/registration.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/registration.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/registration.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/review.rs b/catalyst-gateway/bin/src/db/event/legacy/types/review.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/review.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/review.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/search.rs b/catalyst-gateway/bin/src/db/event/legacy/types/search.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/search.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/search.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/challenge.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/challenge.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/challenge.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/challenge.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/fund.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/fund.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/fund.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/fund.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/goal.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/goal.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/goal.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/goal.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/group.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/group.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/group.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/group.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/mod.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/mod.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/mod.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/mod.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/vote_plan.rs b/catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/vote_plan.rs similarity index 100% rename from catalyst-gateway/bin/src/event_db/legacy/types/vit_ss/vote_plan.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/vit_ss/vote_plan.rs diff --git a/catalyst-gateway/bin/src/event_db/legacy/types/voting_status.rs b/catalyst-gateway/bin/src/db/event/legacy/types/voting_status.rs similarity index 100% rename from 
catalyst-gateway/bin/src/event_db/legacy/types/voting_status.rs rename to catalyst-gateway/bin/src/db/event/legacy/types/voting_status.rs diff --git a/catalyst-gateway/bin/src/db/event/mod.rs b/catalyst-gateway/bin/src/db/event/mod.rs new file mode 100644 index 00000000000..39e5ed3b869 --- /dev/null +++ b/catalyst-gateway/bin/src/db/event/mod.rs @@ -0,0 +1,237 @@ +//! Catalyst Election Database crate +use std::{ + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, OnceLock, + }, +}; + +use bb8::Pool; +use bb8_postgres::PostgresConnectionManager; +use tokio_postgres::{types::ToSql, NoTls, Row}; +use tracing::{debug, debug_span, error, Instrument}; + +use crate::settings::Settings; + +pub(crate) mod error; +pub(crate) mod legacy; +pub(crate) mod schema_check; + +/// Database version this crate matches. +/// Must equal the last Migrations Version Number from `event-db/migrations`. +pub(crate) const DATABASE_SCHEMA_VERSION: i32 = 9; + +/// Postgres Connection Manager DB Pool +type SqlDbPool = Arc>>; + +/// Postgres Connection Manager DB Pool Instance +static EVENT_DB_POOL: OnceLock = OnceLock::new(); + +/// Is Deep Query Analysis enabled or not? +static DEEP_QUERY_INSPECT: AtomicBool = AtomicBool::new(false); + +/// The Catalyst Event SQL Database +pub(crate) struct EventDB {} + +/// `EventDB` Errors +#[derive(thiserror::Error, Debug, PartialEq, Eq)] +pub(crate) enum Error { + /// Failed to get a DB Pool + #[error("DB Pool uninitialized")] + DbPoolUninitialized, +} + +impl EventDB { + /// Determine if deep query inspection is enabled. + pub(crate) fn is_deep_query_enabled() -> bool { + DEEP_QUERY_INSPECT.load(Ordering::SeqCst) + } + + /// Modify the deep query inspection setting. + /// + /// # Arguments + /// + /// * `enable` - Set the `DeepQueryInspection` setting to this value. + pub(crate) fn modify_deep_query(enable: bool) { + DEEP_QUERY_INSPECT.store(enable, Ordering::SeqCst); + } + + /// Query the database. + /// + /// If deep query inspection is enabled, this will log the query plan inside a + /// rolled-back transaction, before running the query. + /// + /// # Arguments + /// + /// * `stmt` - `&str` SQL statement. + /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. + /// + /// # Returns + /// + /// `Result, anyhow::Error>` + #[must_use = "ONLY use this function for SELECT type operations which return row data, otherwise use `modify()`"] + pub(crate) async fn query( + stmt: &str, params: &[&(dyn ToSql + Sync)], + ) -> Result, anyhow::Error> { + if Self::is_deep_query_enabled() { + Self::explain_analyze_rollback(stmt, params).await?; + } + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let conn = pool.get().await?; + let rows = conn.query(stmt, params).await?; + Ok(rows) + } + + /// Query the database for a single row. + /// + /// # Arguments + /// + /// * `stmt` - `&str` SQL statement. + /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. + /// + /// # Returns + /// + /// `Result` + #[must_use = "ONLY use this function for SELECT type operations which return row data, otherwise use `modify()`"] + pub(crate) async fn query_one( + stmt: &str, params: &[&(dyn ToSql + Sync)], + ) -> Result { + if Self::is_deep_query_enabled() { + Self::explain_analyze_rollback(stmt, params).await?; + } + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let conn = pool.get().await?; + let row = conn.query_one(stmt, params).await?; + Ok(row) + } + + /// Modify the database. 
+ /// + /// Use this for `UPDATE`, `DELETE`, and other DB statements that + /// don't return data. + /// + /// # Arguments + /// + /// * `stmt` - `&str` SQL statement. + /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. + /// + /// # Returns + /// + /// `anyhow::Result<()>` + #[allow(dead_code)] + pub(crate) async fn modify(stmt: &str, params: &[&(dyn ToSql + Sync)]) -> anyhow::Result<()> { + if Self::is_deep_query_enabled() { + Self::explain_analyze_commit(stmt, params).await?; + } else { + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let conn = pool.get().await?; + conn.query(stmt, params).await?; + } + Ok(()) + } + + /// Prepend `EXPLAIN ANALYZE` to the query, and rollback the transaction. + async fn explain_analyze_rollback( + stmt: &str, params: &[&(dyn ToSql + Sync)], + ) -> anyhow::Result<()> { + Self::explain_analyze(stmt, params, true).await + } + + /// Prepend `EXPLAIN ANALYZE` to the query, and commit the transaction. + #[allow(dead_code)] + async fn explain_analyze_commit( + stmt: &str, params: &[&(dyn ToSql + Sync)], + ) -> anyhow::Result<()> { + Self::explain_analyze(stmt, params, false).await + } + + /// Prepend `EXPLAIN ANALYZE` to the query. + /// + /// Log the query plan inside a transaction that may be committed or rolled back. + /// + /// # Arguments + /// + /// * `stmt` - `&str` SQL statement. + /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. + /// * `rollback` - `bool` whether to roll back the transaction or not. + async fn explain_analyze( + stmt: &str, params: &[&(dyn ToSql + Sync)], rollback: bool, + ) -> anyhow::Result<()> { + let span = debug_span!( + "query_plan", + query_statement = stmt, + params = format!("{:?}", params), + uuid = uuid::Uuid::new_v4().to_string() + ); + + async move { + let pool = EVENT_DB_POOL.get().ok_or(Error::DbPoolUninitialized)?; + let mut conn = pool.get().await?; + let transaction = conn.transaction().await?; + let explain_stmt = transaction + .prepare(format!("EXPLAIN ANALYZE {stmt}").as_str()) + .await?; + let rows = transaction.query(&explain_stmt, params).await?; + for r in rows { + let query_plan_str: String = r.get("QUERY PLAN"); + debug!("{}", query_plan_str); + } + if rollback { + transaction.rollback().await?; + } else { + transaction.commit().await?; + } + Ok(()) + } + .instrument(span) + .await + } +} + +/// Establish a connection to the database, and check the schema is up-to-date. +/// +/// # Parameters +/// +/// * `url` set to the postgres connection string needed to connect to the database. IF +/// it is None, then the env var "`DATABASE_URL`" will be used for this connection +/// string. eg: "`postgres://catalyst-dev:CHANGE_ME@localhost/CatalystDev`" +/// * `do_schema_check` boolean flag to decide whether to verify the schema version or +/// not. If it is `true`, a query is made to verify the DB schema version. +/// +/// # Errors +/// +/// This function will return an error if: +/// * `url` is None and the environment variable "`DATABASE_URL`" isn't set. +/// * There is any error communicating the the database to check its schema. +/// * The database schema in the DB does not 100% match the schema supported by this +/// library. +/// +/// # Notes +/// +/// The env var "`DATABASE_URL`" can be set directly as an anv var, or in a +/// `.env` file. +pub(crate) fn establish_connection() { + let (url, user, pass) = Settings::event_db_settings(); + + // This was pre-validated and can't fail, but provide default in the impossible case it + // does. 
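With the connection pool now held in a process-wide `OnceLock`, these are associated functions rather than methods on an `EventDB` instance. A minimal usage sketch (illustrative only, not part of this patch; the `funds(id, title)` table and column names are invented for the example):

use crate::db::event::EventDB;

/// Illustrative read path: SELECT-style statements go through `query`/`query_one`.
async fn example_read(fund_id: i32) -> anyhow::Result<String> {
    // Optionally log an `EXPLAIN ANALYZE` plan (inside a rolled-back transaction)
    // before every read.
    EventDB::modify_deep_query(true);

    let row = EventDB::query_one("SELECT title FROM funds WHERE id = $1", &[&fund_id]).await?;
    Ok(row.try_get("title")?)
}

/// Illustrative write path: statements that return no rows go through `modify`.
async fn example_write(fund_id: i32, title: &str) -> anyhow::Result<()> {
    EventDB::modify("UPDATE funds SET title = $1 WHERE id = $2", &[&title, &fund_id]).await
}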
+ let mut config = tokio_postgres::config::Config::from_str(url).unwrap_or_else(|_| { + error!(url = url, "Postgres URL Pre Validation has failed."); + tokio_postgres::config::Config::default() + }); + if let Some(user) = user { + config.user(user); + } + if let Some(pass) = pass { + config.password(pass); + } + + let pg_mgr = PostgresConnectionManager::new(config, tokio_postgres::NoTls); + + let pool = Pool::builder().build_unchecked(pg_mgr); + + if EVENT_DB_POOL.set(Arc::new(pool)).is_err() { + error!("Failed to set event db pool. Called Twice?"); + } +} diff --git a/catalyst-gateway/bin/src/event_db/schema_check/mod.rs b/catalyst-gateway/bin/src/db/event/schema_check/mod.rs similarity index 83% rename from catalyst-gateway/bin/src/event_db/schema_check/mod.rs rename to catalyst-gateway/bin/src/db/event/schema_check/mod.rs index 2fe81bbef9c..f5a0ce379f5 100644 --- a/catalyst-gateway/bin/src/event_db/schema_check/mod.rs +++ b/catalyst-gateway/bin/src/db/event/schema_check/mod.rs @@ -1,6 +1,6 @@ //! Check if the schema is up-to-date. -use crate::event_db::{EventDB, DATABASE_SCHEMA_VERSION}; +use crate::db::event::{EventDB, DATABASE_SCHEMA_VERSION}; /// Schema in database does not match schema supported by the Crate. #[derive(thiserror::Error, Debug, PartialEq, Eq)] @@ -19,8 +19,8 @@ impl EventDB { /// Check the schema version. /// return the current schema version if its current. /// Otherwise return an error. - pub(crate) async fn schema_version_check(&self) -> anyhow::Result { - let schema_check = self.query_one(SELECT_MAX_VERSION_SQL, &[]).await?; + pub(crate) async fn schema_version_check() -> anyhow::Result { + let schema_check = Self::query_one(SELECT_MAX_VERSION_SQL, &[]).await?; let current_ver = schema_check.try_get("max")?; diff --git a/catalyst-gateway/bin/src/event_db/schema_check/select_max_version.sql b/catalyst-gateway/bin/src/db/event/schema_check/select_max_version.sql similarity index 100% rename from catalyst-gateway/bin/src/event_db/schema_check/select_max_version.sql rename to catalyst-gateway/bin/src/db/event/schema_check/select_max_version.sql diff --git a/catalyst-gateway/bin/src/db/index/block/certs.rs b/catalyst-gateway/bin/src/db/index/block/certs.rs new file mode 100644 index 00000000000..3c7ec9bcca9 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/certs.rs @@ -0,0 +1,259 @@ +//! Index certs found in a transaction. + +use std::sync::Arc; + +use cardano_chain_follower::MultiEraBlock; +use pallas::ledger::primitives::{alonzo, conway}; +use scylla::{frame::value::MaybeUnset, SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + service::utilities::convert::u16_from_saturating, + settings::CassandraEnvVars, +}; + +/// Insert TXI Query and Parameters +#[derive(SerializeRow)] +pub(crate) struct StakeRegistrationInsertQuery { + /// Stake key hash + stake_hash: Vec, + /// Slot Number the cert is in. + slot_no: num_bigint::BigInt, + /// Transaction Index. + txn: i16, + /// Full Stake Address (not hashed, 32 byte ED25519 Public key). + stake_address: MaybeUnset>, + /// Is the stake address a script or not. + script: bool, + /// Is the Certificate Registered? + register: MaybeUnset, + /// Is the Certificate Deregistered? 
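Taken together with the `schema_version_check` change above (also now an associated function), the expected startup order is: build the global pool, then validate the migrations version. A sketch of that sequence (the call site and function name are assumptions, not part of this patch):

use crate::db::event::{self, EventDB};

/// Illustrative startup sketch.
async fn init_event_db() -> anyhow::Result<()> {
    // Builds the bb8 pool from `Settings::event_db_settings()` and stores it in the
    // process-wide `EVENT_DB_POOL`; later queries fail with `DbPoolUninitialized`
    // if this has not run.
    event::establish_connection();

    // Errors if the live schema does not match `DATABASE_SCHEMA_VERSION`.
    EventDB::schema_version_check().await?;
    Ok(())
}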
+ deregister: MaybeUnset, + /// Pool Delegation Address + pool_delegation: MaybeUnset>, +} + +/// TXI by Txn hash Index +const INSERT_STAKE_REGISTRATION_QUERY: &str = include_str!("./cql/insert_stake_registration.cql"); + +impl StakeRegistrationInsertQuery { + /// Create a new Insert Query. + #[allow(clippy::too_many_arguments)] + pub fn new( + stake_hash: Vec, slot_no: u64, txn: i16, stake_address: Vec, script: bool, + register: bool, deregister: bool, pool_delegation: Option>, + ) -> Self { + StakeRegistrationInsertQuery { + stake_hash, + slot_no: slot_no.into(), + txn, + stake_address: if stake_address.is_empty() { + MaybeUnset::Unset + } else { + MaybeUnset::Set(stake_address) + }, + script, + register: if register { + MaybeUnset::Set(true) + } else { + MaybeUnset::Unset + }, + deregister: if deregister { + MaybeUnset::Set(true) + } else { + MaybeUnset::Unset + }, + pool_delegation: if let Some(pool_delegation) = pool_delegation { + MaybeUnset::Set(pool_delegation) + } else { + MaybeUnset::Unset + }, + } + } + + /// Prepare Batch of Insert TXI Index Data Queries + pub(crate) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_STAKE_REGISTRATION_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = insert_queries { + error!(error=%error,"Failed to prepare Insert Stake Registration Query."); + }; + + insert_queries + } +} + +/// Insert Cert Queries +pub(crate) struct CertInsertQuery { + /// Stake Registration Data captured during indexing. + stake_reg_data: Vec, +} + +impl CertInsertQuery { + /// Create new data set for Cert Insert Query Batch. + pub(crate) fn new() -> Self { + CertInsertQuery { + stake_reg_data: Vec::new(), + } + } + + /// Prepare Batch of Insert TXI Index Data Queries + pub(crate) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + // Note: for now we have one query, but there are many certs, and later we may have more + // to add here. + StakeRegistrationInsertQuery::prepare_batch(session, cfg).await + } + + /// Get the stake address for a hash, return an empty address if one can not be found. + #[allow(clippy::too_many_arguments)] + fn stake_address( + &mut self, cred: &alonzo::StakeCredential, slot_no: u64, txn: i16, register: bool, + deregister: bool, delegation: Option>, block: &MultiEraBlock, + ) { + let default_addr = Vec::new(); + let (key_hash, pubkey, script) = match cred { + pallas::ledger::primitives::conway::StakeCredential::AddrKeyhash(cred) => { + let addr = block + .witness_for_tx(cred, u16_from_saturating(txn)) + .unwrap_or(default_addr); + // Note: it is totally possible for the Registration Certificate to not be + // witnessed. + (cred.to_vec(), addr.clone(), false) + }, + pallas::ledger::primitives::conway::StakeCredential::Scripthash(script) => { + (script.to_vec(), default_addr, true) + }, + }; + + if pubkey.is_empty() && !script && deregister { + error!( + "Stake Deregistration Certificate {:?} is NOT Witnessed.", + key_hash + ); + } + + if pubkey.is_empty() && !script && delegation.is_some() { + error!( + "Stake Delegation Certificate {:?} is NOT Witnessed.", + key_hash + ); + } + + // This may not be witnessed, its normal but disappointing. 
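The `MaybeUnset` wrapping in `StakeRegistrationInsertQuery::new` above keeps optional columns out of the write entirely, rather than writing explicit NULLs (which would create tombstones). A small self-contained illustration of the same pattern (the struct and field names here are hypothetical, not part of this patch):

use scylla::{frame::value::MaybeUnset, SerializeRow};

/// Hypothetical row: only `Set` fields are bound with a value; `Unset` fields are
/// left untouched by the database instead of being overwritten with NULL.
#[derive(SerializeRow)]
struct ExampleRow {
    key: Vec<u8>,
    pool_delegation: MaybeUnset<Vec<u8>>,
}

/// Build a row where `pool_delegation` is only written when present.
fn example_row(key: Vec<u8>, pool: Option<Vec<u8>>) -> ExampleRow {
    ExampleRow {
        key,
        pool_delegation: match pool {
            Some(p) => MaybeUnset::Set(p),
            None => MaybeUnset::Unset,
        },
    }
}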
+ self.stake_reg_data.push(StakeRegistrationInsertQuery::new( + key_hash, slot_no, txn, pubkey, script, register, deregister, delegation, + )); + } + + /// Index an Alonzo Era certificate into the database. + fn index_alonzo_cert( + &mut self, cert: &alonzo::Certificate, slot_no: u64, txn: i16, block: &MultiEraBlock, + ) { + #[allow(clippy::match_same_arms)] + match cert { + pallas::ledger::primitives::alonzo::Certificate::StakeRegistration(cred) => { + // This may not be witnessed, its normal but disappointing. + self.stake_address(cred, slot_no, txn, true, false, None, block); + }, + pallas::ledger::primitives::alonzo::Certificate::StakeDeregistration(cred) => { + self.stake_address(cred, slot_no, txn, false, true, None, block); + }, + pallas::ledger::primitives::alonzo::Certificate::StakeDelegation(cred, pool) => { + self.stake_address(cred, slot_no, txn, false, false, Some(pool.to_vec()), block); + }, + pallas::ledger::primitives::alonzo::Certificate::PoolRegistration { .. } => {}, + pallas::ledger::primitives::alonzo::Certificate::PoolRetirement(..) => {}, + pallas::ledger::primitives::alonzo::Certificate::GenesisKeyDelegation(..) => {}, + pallas::ledger::primitives::alonzo::Certificate::MoveInstantaneousRewardsCert(_) => {}, + } + } + + /// Index a certificate from a conway transaction. + fn index_conway_cert( + &mut self, cert: &conway::Certificate, slot_no: u64, txn: i16, block: &MultiEraBlock, + ) { + #[allow(clippy::match_same_arms)] + match cert { + pallas::ledger::primitives::conway::Certificate::StakeRegistration(cred) => { + // This may not be witnessed, its normal but disappointing. + self.stake_address(cred, slot_no, txn, true, false, None, block); + }, + pallas::ledger::primitives::conway::Certificate::StakeDeregistration(cred) => { + self.stake_address(cred, slot_no, txn, false, true, None, block); + }, + pallas::ledger::primitives::conway::Certificate::StakeDelegation(cred, pool) => { + self.stake_address(cred, slot_no, txn, false, false, Some(pool.to_vec()), block); + }, + pallas::ledger::primitives::conway::Certificate::PoolRegistration { .. } => {}, + pallas::ledger::primitives::conway::Certificate::PoolRetirement(..) => {}, + pallas::ledger::primitives::conway::Certificate::Reg(..) => {}, + pallas::ledger::primitives::conway::Certificate::UnReg(..) => {}, + pallas::ledger::primitives::conway::Certificate::VoteDeleg(..) => {}, + pallas::ledger::primitives::conway::Certificate::StakeVoteDeleg(..) => {}, + pallas::ledger::primitives::conway::Certificate::StakeRegDeleg(..) => {}, + pallas::ledger::primitives::conway::Certificate::VoteRegDeleg(..) => {}, + pallas::ledger::primitives::conway::Certificate::StakeVoteRegDeleg(..) => {}, + pallas::ledger::primitives::conway::Certificate::AuthCommitteeHot(..) => {}, + pallas::ledger::primitives::conway::Certificate::ResignCommitteeCold(..) => {}, + pallas::ledger::primitives::conway::Certificate::RegDRepCert(..) => {}, + pallas::ledger::primitives::conway::Certificate::UnRegDRepCert(..) => {}, + pallas::ledger::primitives::conway::Certificate::UpdateDRepCert(..) => {}, + } + } + + /// Index the certificates in a transaction. 
+ pub(crate) fn index( + &mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn: i16, + block: &MultiEraBlock, + ) { + #[allow(clippy::match_same_arms)] + txs.certs().iter().for_each(|cert| { + match cert { + pallas::ledger::traverse::MultiEraCert::NotApplicable => {}, + pallas::ledger::traverse::MultiEraCert::AlonzoCompatible(cert) => { + self.index_alonzo_cert(cert, slot_no, txn, block); + }, + pallas::ledger::traverse::MultiEraCert::Conway(cert) => { + self.index_conway_cert(cert, slot_no, txn, block); + }, + _ => {}, + } + }); + } + + /// Execute the Certificate Indexing Queries. + /// + /// Consumes the `self` and returns a vector of futures. + pub(crate) fn execute(self, session: &Arc) -> FallibleQueryTasks { + let mut query_handles: FallibleQueryTasks = Vec::new(); + + let inner_session = session.clone(); + + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::StakeRegistrationInsertQuery, + self.stake_reg_data, + ) + .await + })); + + query_handles + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql new file mode 100644 index 00000000000..220954045c8 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36.cql @@ -0,0 +1,22 @@ +-- Index CIP-36 Registrations (Valid) +INSERT INTO cip36_registration ( + stake_address, + nonce, + slot_no, + txn, + vote_key, + payment_address, + is_payable, + raw_nonce, + cip36, +) VALUES ( + :stake_address, + :nonce, + :slot_no, + :txn, + :vote_key, + :payment_address, + :is_payable, + :raw_nonce, + :cip36, +); diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql new file mode 100644 index 00000000000..a09d36d3f55 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_for_vote_key.cql @@ -0,0 +1,14 @@ +-- Index CIP-36 Registration (Valid) +INSERT INTO cip36_registration_for_stake_addr ( + vote_key, + stake_address, + slot_no, + txn, + valid, +) VALUES ( + :vote_key, + :stake_address, + :slot_no, + :txn, + :valid, +); diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql new file mode 100644 index 00000000000..06162661fd0 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/cql/insert_cip36_invalid.cql @@ -0,0 +1,26 @@ +-- Index CIP-36 Registrations by Vote Key +INSERT INTO cip36_registration_invalid ( + stake_address, + slot_no, + txn, + vote_key, + payment_address, + is_payable, + raw_nonce, + nonce, + cip36, + signed, + error_report, +) VALUES ( + :stake_address, + :slot_no, + :txn, + :vote_key, + :payment_address, + :is_payable, + :raw_nonce, + :nonce, + :cip36, + :signed, + :error_report +); diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs new file mode 100644 index 00000000000..d346124998d --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs @@ -0,0 +1,83 @@ +//! 
Insert CIP36 Registration Query + +use std::sync::Arc; + +use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey}; +use scylla::{frame::value::MaybeUnset, SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Index Registration by Stake Address +const INSERT_CIP36_REGISTRATION_QUERY: &str = include_str!("./cql/insert_cip36.cql"); + +/// Insert CIP-36 Registration Query Parameters +#[derive(SerializeRow, Clone)] +pub(super) struct Params { + /// Full Stake Address (not hashed, 32 byte ED25519 Public key). + stake_address: Vec, + /// Nonce value after normalization. + nonce: num_bigint::BigInt, + /// Slot Number the cert is in. + slot_no: num_bigint::BigInt, + /// Transaction Index. + txn: i16, + /// Voting Public Key + vote_key: Vec, + /// Full Payment Address (not hashed, 32 byte ED25519 Public key). + payment_address: MaybeUnset>, + /// Is the stake address a script or not. + is_payable: bool, + /// Raw nonce value. + raw_nonce: num_bigint::BigInt, + /// Is the Registration CIP36 format, or CIP15 + cip36: bool, +} + +impl Params { + /// Create a new Insert Query. + pub fn new(vote_key: &VotingPubKey, slot_no: u64, txn: i16, cip36: &Cip36) -> Self { + Params { + stake_address: cip36 + .stake_pk + .map(|s| s.to_bytes().to_vec()) + .unwrap_or_default(), + nonce: cip36.nonce.into(), + slot_no: slot_no.into(), + txn, + vote_key: vote_key.voting_pk.to_bytes().to_vec(), + payment_address: if cip36.payment_addr.is_empty() { + MaybeUnset::Unset + } else { + MaybeUnset::Set(cip36.payment_addr.clone()) + }, + is_payable: cip36.payable, + raw_nonce: cip36.raw_nonce.into(), + cip36: cip36.cip36.unwrap_or_default(), + } + } + + /// Prepare Batch of Insert CIP-36 Registration Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_CIP36_REGISTRATION_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = insert_queries { + error!(error=%error,"Failed to prepare Insert CIP-36 Registration Query."); + }; + + insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs new file mode 100644 index 00000000000..67a892d4f86 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs @@ -0,0 +1,70 @@ +//! Insert CIP36 Registration Query + +use std::sync::Arc; + +use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey}; +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Index Registration by Vote Key +const INSERT_CIP36_REGISTRATION_FOR_VOTE_KEY_QUERY: &str = + include_str!("./cql/insert_cip36_for_vote_key.cql"); + +/// Insert CIP-36 Registration Invalid Query Parameters +#[derive(SerializeRow, Clone)] +pub(super) struct Params { + /// Voting Public Key + vote_key: Vec, + /// Full Stake Address (not hashed, 32 byte ED25519 Public key). + stake_address: Vec, + /// Slot Number the cert is in. + slot_no: num_bigint::BigInt, + /// Transaction Index. + txn: i16, + /// Is the registration Valid or not. + valid: bool, +} + +impl Params { + /// Create a new Insert Query. 
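The CQL above binds by name (`:stake_address`, `:slot_no`, ...), so the `#[derive(SerializeRow)]` parameter structs must use field names that match those bind markers. A minimal self-contained illustration (the table, columns, and struct here are hypothetical):

use scylla::SerializeRow;

/// Hypothetical parameters for:
///   INSERT INTO example (slot_no, txn) VALUES (:slot_no, :txn);
/// Field names must match the named bind markers in the CQL.
#[derive(SerializeRow)]
struct ExampleParams {
    /// Slot numbers are carried as `num_bigint::BigInt`, as in the queries above.
    slot_no: num_bigint::BigInt,
    txn: i16,
}

fn example_params(slot_no: u64, txn: i16) -> ExampleParams {
    ExampleParams {
        slot_no: slot_no.into(),
        txn,
    }
}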
+ pub fn new( + vote_key: &VotingPubKey, slot_no: u64, txn: i16, cip36: &Cip36, valid: bool, + ) -> Self { + Params { + vote_key: vote_key.voting_pk.to_bytes().to_vec(), + stake_address: cip36 + .stake_pk + .map(|s| s.to_bytes().to_vec()) + .unwrap_or_default(), + slot_no: slot_no.into(), + txn, + valid, + } + } + + /// Prepare Batch of Insert CIP-36 Registration Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_CIP36_REGISTRATION_FOR_VOTE_KEY_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = insert_queries { + error!(error=%error,"Failed to prepare Insert CIP-36 Registration Query."); + }; + + insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs new file mode 100644 index 00000000000..0ee5a4e5b19 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs @@ -0,0 +1,98 @@ +//! Insert CIP36 Registration Query (Invalid Records) + +use std::sync::Arc; + +use cardano_chain_follower::Metadata::cip36::{Cip36, VotingPubKey}; +use scylla::{frame::value::MaybeUnset, SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Index Registration by Stake Address (Invalid Registrations) +const INSERT_CIP36_REGISTRATION_INVALID_QUERY: &str = + include_str!("./cql/insert_cip36_invalid.cql"); + +/// Insert CIP-36 Registration Invalid Query Parameters +#[derive(SerializeRow, Clone)] +pub(super) struct Params { + /// Full Stake Address (not hashed, 32 byte ED25519 Public key). + stake_address: Vec, + /// Slot Number the cert is in. + slot_no: num_bigint::BigInt, + /// Transaction Index. + txn: i16, + /// Voting Public Key + vote_key: Vec, + /// Full Payment Address (not hashed, 32 byte ED25519 Public key). + payment_address: Vec, + /// Is the stake address a script or not. + is_payable: bool, + /// Raw nonce value. + raw_nonce: num_bigint::BigInt, + /// Nonce value after normalization. + nonce: num_bigint::BigInt, + /// Strict Catalyst validated. + cip36: MaybeUnset, + /// Signature validates. + signed: bool, + /// List of serialization errors. + error_report: Vec, +} + +impl Params { + /// Create a new Insert Query. 
+ pub fn new( + vote_key: Option<&VotingPubKey>, slot_no: u64, txn: i16, cip36: &Cip36, + error_report: Vec, + ) -> Self { + let vote_key = if let Some(vote_key) = vote_key { + vote_key.voting_pk.to_bytes().to_vec() + } else { + Vec::new() + }; + Params { + stake_address: cip36 + .stake_pk + .map(|s| s.to_bytes().to_vec()) + .unwrap_or_default(), + slot_no: slot_no.into(), + txn, + vote_key, + payment_address: cip36.payment_addr.clone(), + is_payable: cip36.payable, + raw_nonce: cip36.raw_nonce.into(), + nonce: cip36.nonce.into(), + cip36: if let Some(cip36) = cip36.cip36 { + MaybeUnset::Set(cip36) + } else { + MaybeUnset::Unset + }, + signed: cip36.signed, + error_report, + } + } + + /// Prepare Batch of Insert CIP-36 Registration Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_CIP36_REGISTRATION_INVALID_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = insert_queries { + error!(error=%error,"Failed to prepare Insert CIP-36 Registration Invalid Query."); + }; + + insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs new file mode 100644 index 00000000000..aa7efe29b8a --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs @@ -0,0 +1,150 @@ +//! Index CIP-36 Registrations. + +mod insert_cip36; +mod insert_cip36_for_vote_key; +mod insert_cip36_invalid; + +use std::sync::Arc; + +use cardano_chain_follower::{Metadata, MultiEraBlock}; +use scylla::Session; + +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + settings::CassandraEnvVars, +}; + +/// Insert CIP-36 Registration Queries +pub(crate) struct Cip36InsertQuery { + /// Stake Registration Data captured during indexing. + registrations: Vec, + /// Stake Registration Data captured during indexing. + invalid: Vec, + /// Stake Registration Data captured during indexing. + for_vote_key: Vec, +} + +impl Cip36InsertQuery { + /// Create new data set for CIP-36 Registrations Insert Query Batch. + pub(crate) fn new() -> Self { + Cip36InsertQuery { + registrations: Vec::new(), + invalid: Vec::new(), + for_vote_key: Vec::new(), + } + } + + /// Prepare Batch of Insert Cip36 Registration Data Queries + pub(crate) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch)> { + let insert_cip36_batch = insert_cip36::Params::prepare_batch(session, cfg).await; + let insert_cip36_invalid_batch = + insert_cip36_invalid::Params::prepare_batch(session, cfg).await; + let insert_cip36_for_vote_key_addr_batch = + insert_cip36_for_vote_key::Params::prepare_batch(session, cfg).await; + + Ok(( + insert_cip36_batch?, + insert_cip36_invalid_batch?, + insert_cip36_for_vote_key_addr_batch?, + )) + } + + /// Index the CIP-36 registrations in a transaction. + pub(crate) fn index( + &mut self, txn: usize, txn_index: i16, slot_no: u64, block: &MultiEraBlock, + ) { + if let Some(decoded_metadata) = block.txn_metadata(txn, Metadata::cip36::LABEL) { + #[allow(irrefutable_let_patterns)] + if let Metadata::DecodedMetadataValues::Cip36(cip36) = &decoded_metadata.value { + // Check if we are indexing a valid or invalid registration. 
+ // Note, we ONLY care about catalyst, we should only have 1 voting key, if not, call + // it an error. + if decoded_metadata.report.is_empty() && cip36.voting_keys.len() == 1 { + // Always true, because we already checked if the array has only one entry. + if let Some(vote_key) = cip36.voting_keys.first() { + self.registrations.push(insert_cip36::Params::new( + vote_key, slot_no, txn_index, cip36, + )); + self.for_vote_key + .push(insert_cip36_for_vote_key::Params::new( + vote_key, slot_no, txn_index, cip36, true, + )); + } + } else { + if cip36.voting_keys.is_empty() { + self.invalid.push(insert_cip36_invalid::Params::new( + None, + slot_no, + txn_index, + cip36, + decoded_metadata.report.clone(), + )); + } + for vote_key in &cip36.voting_keys { + self.invalid.push(insert_cip36_invalid::Params::new( + Some(vote_key), + slot_no, + txn_index, + cip36, + decoded_metadata.report.clone(), + )); + self.for_vote_key + .push(insert_cip36_for_vote_key::Params::new( + vote_key, slot_no, txn_index, cip36, false, + )); + } + } + } + } + } + + /// Execute the CIP-36 Registration Indexing Queries. + /// + /// Consumes the `self` and returns a vector of futures. + pub(crate) fn execute(self, session: &Arc) -> FallibleQueryTasks { + let mut query_handles: FallibleQueryTasks = Vec::new(); + + if !self.registrations.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::Cip36RegistrationInsertQuery, + self.registrations, + ) + .await + })); + } + + if !self.invalid.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::Cip36RegistrationInsertErrorQuery, + self.invalid, + ) + .await + })); + } + + if !self.for_vote_key.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::Cip36RegistrationForStakeAddrInsertQuery, + self.for_vote_key, + ) + .await + })); + } + + query_handles + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/cql/insert_stake_registration.cql b/catalyst-gateway/bin/src/db/index/block/cql/insert_stake_registration.cql new file mode 100644 index 00000000000..76907c19dbb --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cql/insert_stake_registration.cql @@ -0,0 +1,20 @@ +-- Index Stake Registrations +INSERT INTO stake_registration ( + stake_hash, + slot_no, + txn, + stake_address, + script, + register, + deregister, + pool_delegation +) VALUES ( + :stake_hash, + :slot_no, + :txn, + :stake_address, + :script, + :register, + :deregister, + :pool_delegation +); \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/block/cql/insert_txi.cql b/catalyst-gateway/bin/src/db/index/block/cql/insert_txi.cql new file mode 100644 index 00000000000..c25bb41a1ff --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/cql/insert_txi.cql @@ -0,0 +1,10 @@ +-- Create the TXI Record for a transaction hash, +INSERT INTO txi_by_txn_hash ( + txn_hash, + txo, + slot_no +) VALUES ( + :txn_hash, + :txo, + :slot_no +); diff --git a/catalyst-gateway/bin/src/db/index/block/mod.rs b/catalyst-gateway/bin/src/db/index/block/mod.rs new file mode 100644 index 00000000000..dd586ed6f81 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/mod.rs @@ -0,0 +1,94 @@ +//! Index a block +//! 
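`Cip36InsertQuery::index` above treats a registration as valid only when the metadata decode produced an empty error report and exactly one voting key; everything else is pushed to the invalid tables along with its error report. A self-contained restatement of that rule (function and test names are illustrative, not part of this patch):

/// Illustrative restatement of the classification used by `Cip36InsertQuery::index`.
fn is_indexed_as_valid(error_report_is_empty: bool, voting_key_count: usize) -> bool {
    error_report_is_empty && voting_key_count == 1
}

#[cfg(test)]
mod classification_tests {
    use super::is_indexed_as_valid;

    #[test]
    fn classification() {
        assert!(is_indexed_as_valid(true, 1));
        assert!(!is_indexed_as_valid(true, 0)); // no voting key -> invalid tables
        assert!(!is_indexed_as_valid(true, 2)); // multiple voting keys -> invalid tables
        assert!(!is_indexed_as_valid(false, 1)); // decode errors -> invalid tables
    }
}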
Primary Data Indexing - Upsert operations + +pub(crate) mod certs; +pub(crate) mod cip36; +pub(crate) mod txi; +pub(crate) mod txo; + +use cardano_chain_follower::MultiEraBlock; +use certs::CertInsertQuery; +use cip36::Cip36InsertQuery; +use tracing::{debug, error}; +use txi::TxiInsertQuery; +use txo::TxoInsertQuery; + +use super::{queries::FallibleQueryTasks, session::CassandraSession}; +use crate::service::utilities::convert::i16_from_saturating; + +/// Add all data needed from the block into the indexes. +pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { + // Get the session. This should never fail. + let Some(session) = CassandraSession::get(block.immutable()) else { + anyhow::bail!("Failed to get Index DB Session. Can not index block."); + }; + + let mut cert_index = CertInsertQuery::new(); + let mut cip36_index = Cip36InsertQuery::new(); + + let mut txi_index = TxiInsertQuery::new(); + let mut txo_index = TxoInsertQuery::new(); + + let block_data = block.decode(); + let slot_no = block_data.slot(); + + // We add all transactions in the block to their respective index data sets. + for (txn_index, txs) in block_data.txs().iter().enumerate() { + let txn = i16_from_saturating(txn_index); + + let txn_hash = txs.hash().to_vec(); + + // Index the TXIs. + txi_index.index(txs, slot_no); + + // TODO: Index minting. + // let mint = txs.mints().iter() {}; + + // TODO: Index Metadata. + cip36_index.index(txn_index, txn, slot_no, block); + + // Index Certificates inside the transaction. + cert_index.index(txs, slot_no, txn, block); + + // Index the TXOs. + txo_index.index(txs, slot_no, &txn_hash, txn); + } + + // We then execute each batch of data from the block. + // This maximizes batching opportunities. + let mut query_handles: FallibleQueryTasks = Vec::new(); + + query_handles.extend(txo_index.execute(&session)); + query_handles.extend(txi_index.execute(&session)); + query_handles.extend(cert_index.execute(&session)); + query_handles.extend(cip36_index.execute(&session)); + + let mut result: anyhow::Result<()> = Ok(()); + + // Wait for operations to complete, and display any errors + for handle in query_handles { + if result.is_err() { + // Try and cancel all futures waiting tasks and return the first error we encountered. + handle.abort(); + continue; + } + match handle.await { + Ok(join_res) => { + match join_res { + Ok(res) => debug!(res=?res,"Query OK"), + Err(error) => { + // IF a query fails, assume everything else is broken. + error!(error=%error,"Query Failed"); + result = Err(error); + }, + } + }, + Err(error) => { + error!(error=%error,"Query Join Failed"); + result = Err(error.into()); + }, + } + } + + result +} diff --git a/catalyst-gateway/bin/src/db/index/block/txi.rs b/catalyst-gateway/bin/src/db/index/block/txi.rs new file mode 100644 index 00000000000..d3a37b3055f --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txi.rs @@ -0,0 +1,104 @@ +//! Insert TXI Index Data Queries. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + settings::CassandraEnvVars, +}; + +/// Insert TXI Query and Parameters +#[derive(SerializeRow)] +pub(crate) struct TxiInsertParams { + /// Spent Transactions Hash + txn_hash: Vec, + /// TXO Index spent. + txo: i16, + /// Block Slot Number when spend occurred. 
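`index_block` above is the single entry point for indexing one block: it fills the per-table insert buffers, walks every transaction once, then fans the batches out as spawned tasks and waits on them. A sketch of how a block source might drive it (the surrounding loop is an assumption; only `index_block` itself comes from this patch):

use cardano_chain_follower::MultiEraBlock;

use crate::db::index::block::index_block;

/// Illustrative only: index already-fetched blocks in order, stopping on the
/// first failure.
async fn index_blocks(blocks: &[MultiEraBlock]) -> anyhow::Result<()> {
    for block in blocks {
        // All inserts for one block are batched, spawned and awaited before
        // moving to the next block.
        index_block(block).await?;
    }
    Ok(())
}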
+ slot_no: num_bigint::BigInt, +} + +impl TxiInsertParams { + /// Create a new record for this transaction. + pub fn new(txn_hash: &[u8], txo: i16, slot_no: u64) -> Self { + Self { + txn_hash: txn_hash.to_vec(), + txo, + slot_no: slot_no.into(), + } + } +} + +/// Insert TXI Query and Parameters +pub(crate) struct TxiInsertQuery { + /// Transaction Input Data to be inserted. + txi_data: Vec, +} + +/// TXI by Txn hash Index +const INSERT_TXI_QUERY: &str = include_str!("./cql/insert_txi.cql"); + +impl TxiInsertQuery { + /// Create a new record for this transaction. + pub(crate) fn new() -> Self { + Self { + txi_data: Vec::new(), + } + } + + /// Prepare Batch of Insert TXI Index Data Queries + pub(crate) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txi_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_TXI_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txi_insert_queries { + error!(error=%error,"Failed to prepare Insert TXI Query."); + }; + + txi_insert_queries + } + + /// Index the transaction Inputs. + pub(crate) fn index(&mut self, txs: &pallas_traverse::MultiEraTx<'_>, slot_no: u64) { + // Index the TXI's. + for txi in txs.inputs() { + let txn_hash = txi.hash().to_vec(); + let txo: i16 = txi.index().try_into().unwrap_or(i16::MAX); + + self.txi_data + .push(TxiInsertParams::new(&txn_hash, txo, slot_no)); + } + } + + /// Execute the Certificate Indexing Queries. + /// + /// Consumes the `self` and returns a vector of futures. + pub(crate) fn execute(self, session: &Arc) -> FallibleQueryTasks { + let mut query_handles: FallibleQueryTasks = Vec::new(); + + let inner_session = session.clone(); + + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch(PreparedQuery::TxiInsertQuery, self.txi_data) + .await + })); + + query_handles + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo.cql new file mode 100644 index 00000000000..22293ae6acb --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo.cql @@ -0,0 +1,18 @@ +-- Create the TXO Record for a stake address, +INSERT INTO txo_by_stake ( + stake_address, + slot_no, + txn, + txo, + address, + value, + txn_hash +) VALUES ( + :stake_address, + :slot_no, + :txn, + :txo, + :address, + :value, + :txn_hash +); diff --git a/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo_asset.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo_asset.cql new file mode 100644 index 00000000000..3bdb6342c4b --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_txo_asset.cql @@ -0,0 +1,19 @@ +-- Create the TXO Record for a stake address, +-- Will not overwrite anything if it already exists. +INSERT INTO txo_assets_by_stake ( + stake_address, + slot_no, + txn, + txo, + policy_id, + policy_name, + value +) VALUES ( + :stake_address, + :slot_no, + :txn, + :txo, + :policy_id, + :policy_name, + :value +); diff --git a/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo.cql new file mode 100644 index 00000000000..08cc91808e3 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo.cql @@ -0,0 +1,16 @@ +-- Create the TXO Record for when its not staked. 
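`TxiInsertQuery::prepare_batch` above pre-prepares the insert (at several batch sizes, see `SizedBatch` later in this patch) so nothing is prepared on the indexing hot path. Stripped of the batching machinery, the underlying idea in plain scylla terms is "prepare once, bind many times"; a generic sketch under that assumption, not this crate's `execute_batch`:

use scylla::{serialize::row::SerializeRow, Session};

/// Generic illustration: prepare one INSERT and execute it for many parameter rows.
/// (The real code instead packs rows into pre-prepared `Batch`es of matching size.)
async fn insert_many<R: SerializeRow>(
    session: &Session, cql: &str, rows: Vec<R>,
) -> anyhow::Result<()> {
    let prepared = session.prepare(cql).await?;
    for row in rows {
        session.execute(&prepared, row).await?;
    }
    Ok(())
}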
+INSERT INTO unstaked_txo_by_txn_hash ( + txn_hash, + txo, + slot_no, + txn, + address, + value +) VALUES ( + :txn_hash, + :txo, + :slot_no, + :txn, + :address, + :value +); diff --git a/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo_asset.cql b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo_asset.cql new file mode 100644 index 00000000000..e170a0b46c2 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/cql/insert_unstaked_txo_asset.cql @@ -0,0 +1,18 @@ +-- Create the TXO Record for an unstaked TXO Asset. +INSERT INTO unstaked_txo_assets_by_txn_hash ( + txn_hash, + txo, + policy_id, + policy_name, + slot_no, + txn, + value +) VALUES ( + :txn_hash, + :txo, + :policy_id, + :policy_name, + :slot_no, + :txn, + :value +); diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs new file mode 100644 index 00000000000..7d9c0b67216 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs @@ -0,0 +1,75 @@ +//! Insert TXO Indexed Data Queries. +//! +//! Note, there are multiple ways TXO Data is indexed and they all happen in here. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// TXO by Stake Address Indexing query +const INSERT_TXO_QUERY: &str = include_str!("./cql/insert_txo.cql"); + +/// Insert TXO Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Stake Address - Binary 28 bytes. 0 bytes = not staked. + stake_address: Vec, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Actual full TXO Address + address: String, + /// Actual TXO Value in lovelace + value: num_bigint::BigInt, + /// Transactions hash. + txn_hash: Vec, +} + +impl Params { + /// Create a new record for this transaction. + pub(super) fn new( + stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, address: &str, value: u64, + txn_hash: &[u8], + ) -> Self { + Self { + stake_address: stake_address.to_vec(), + slot_no: slot_no.into(), + txn, + txo, + address: address.to_string(), + value: value.into(), + txn_hash: txn_hash.to_vec(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_TXO_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs new file mode 100644 index 00000000000..9fa349237b4 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs @@ -0,0 +1,77 @@ +//! Insert TXO Native Assets into the DB. 
+ +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// TXO Asset by Stake Address Indexing Query +const INSERT_TXO_ASSET_QUERY: &str = include_str!("./cql/insert_txo_asset.cql"); + +/// Insert TXO Asset Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Stake Address - Binary 28 bytes. 0 bytes = not staked. + stake_address: Vec, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Policy hash of the asset + policy_id: Vec, + /// Policy name of the asset + policy_name: String, + /// Value of the asset + value: num_bigint::BigInt, +} + +impl Params { + /// Create a new record for this transaction. + /// + /// Note Value can be either a u64 or an i64, so use a i128 to represent all possible + /// values. + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + stake_address: &[u8], slot_no: u64, txn: i16, txo: i16, policy_id: &[u8], + policy_name: &str, value: i128, + ) -> Self { + Self { + stake_address: stake_address.to_vec(), + slot_no: slot_no.into(), + txn, + txo, + policy_id: policy_id.to_vec(), + policy_name: policy_name.to_owned(), + value: value.into(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_TXO_ASSET_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs new file mode 100644 index 00000000000..e27c7651c23 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs @@ -0,0 +1,68 @@ +//! Insert Unstaked TXOs into the DB. +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Unstaked TXO by Stake Address Indexing query +const INSERT_UNSTAKED_TXO_QUERY: &str = include_str!("./cql/insert_unstaked_txo.cql"); + +/// Insert TXO Unstaked Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Transactions hash. + txn_hash: Vec, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Actual full TXO Address + address: String, + /// Actual TXO Value in lovelace + value: num_bigint::BigInt, +} + +impl Params { + /// Create a new record for this transaction. 
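Both the lovelace `value` in the staked TXO parameters and the asset `value` here are widened into `num_bigint::BigInt` before insertion, which losslessly covers the full `u64` and `i128` ranges (the asset amount may arrive as either a `u64` or an `i64` upstream). A one-line illustration of the conversions (assuming the `value` columns are the CQL `varint` type that `BigInt` serializes to):

use num_bigint::BigInt;

/// Illustrative: lovelace amounts (`u64`) and native-asset amounts (carried as `i128`)
/// both convert losslessly into the `BigInt`-backed columns.
fn to_varint_values(lovelace: u64, asset_amount: i128) -> (BigInt, BigInt) {
    (BigInt::from(lovelace), BigInt::from(asset_amount))
}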
+ pub(super) fn new( + txn_hash: &[u8], txo: i16, slot_no: u64, txn: i16, address: &str, value: u64, + ) -> Self { + Self { + txn_hash: txn_hash.to_vec(), + txo, + slot_no: slot_no.into(), + txn, + address: address.to_string(), + value: value.into(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_UNSTAKED_TXO_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs new file mode 100644 index 00000000000..8ac33aa129d --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs @@ -0,0 +1,77 @@ +//! Insert Unstaked TXO Native Assets into the DB. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::queries::{PreparedQueries, SizedBatch}, + settings::CassandraEnvVars, +}; + +/// Unstaked TXO Asset by Stake Address Indexing Query +const INSERT_UNSTAKED_TXO_ASSET_QUERY: &str = include_str!("./cql/insert_unstaked_txo_asset.cql"); + +/// Insert TXO Asset Query Parameters +/// (Superset of data to support both Staked and Unstaked TXO records.) +#[derive(SerializeRow)] +pub(super) struct Params { + /// Transactions hash. + txn_hash: Vec, + /// Transaction Output Offset inside the transaction. + txo: i16, + /// Policy hash of the asset + policy_id: Vec, + /// Policy name of the asset + policy_name: String, + /// Block Slot Number + slot_no: num_bigint::BigInt, + /// Transaction Offset inside the block. + txn: i16, + /// Value of the asset + value: num_bigint::BigInt, +} + +impl Params { + /// Create a new record for this transaction. + /// + /// Note Value can be either a u64 or an i64, so use a i128 to represent all possible + /// values. + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + txn_hash: &[u8], txo: i16, policy_id: &[u8], policy_name: &str, slot_no: u64, txn: i16, + value: i128, + ) -> Self { + Self { + txn_hash: txn_hash.to_vec(), + txo, + policy_id: policy_id.to_vec(), + policy_name: policy_name.to_owned(), + slot_no: slot_no.into(), + txn, + value: value.into(), + } + } + + /// Prepare Batch of Staked Insert TXO Asset Index Data Queries + pub(super) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let txo_insert_queries = PreparedQueries::prepare_batch( + session.clone(), + INSERT_UNSTAKED_TXO_ASSET_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = txo_insert_queries { + error!(error=%error,"Failed to prepare Insert Unstaked TXO Asset Query."); + }; + + txo_insert_queries + } +} diff --git a/catalyst-gateway/bin/src/db/index/block/txo/mod.rs b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs new file mode 100644 index 00000000000..fc1ea2f306e --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs @@ -0,0 +1,264 @@ +//! Insert TXO Indexed Data Queries. +//! +//! Note, there are multiple ways TXO Data is indexed and they all happen in here. 
+ +mod insert_txo; +mod insert_txo_asset; +mod insert_unstaked_txo; +mod insert_unstaked_txo_asset; + +use std::sync::Arc; + +use scylla::Session; +use tracing::{error, warn}; + +use crate::{ + db::index::{ + queries::{FallibleQueryTasks, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + service::utilities::convert::i16_from_saturating, + settings::CassandraEnvVars, +}; + +/// This is used to indicate that there is no stake address. +const NO_STAKE_ADDRESS: &[u8] = &[]; + +/// Insert TXO Query and Parameters +/// +/// There are multiple possible parameters to a query, which are represented separately. +#[allow(dead_code)] +pub(crate) struct TxoInsertQuery { + /// Staked TXO Data Parameters + staked_txo: Vec, + /// Unstaked TXO Data Parameters + unstaked_txo: Vec, + /// Staked TXO Asset Data Parameters + staked_txo_asset: Vec, + /// Unstaked TXO Asset Data Parameters + unstaked_txo_asset: Vec, +} + +impl TxoInsertQuery { + /// Create a new Insert TXO Query Batch + pub(crate) fn new() -> Self { + TxoInsertQuery { + staked_txo: Vec::new(), + unstaked_txo: Vec::new(), + staked_txo_asset: Vec::new(), + unstaked_txo_asset: Vec::new(), + } + } + + /// Prepare Batch of Insert TXI Index Data Queries + pub(crate) async fn prepare_batch( + session: &Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch, SizedBatch)> { + let txo_staked_insert_batch = insert_txo::Params::prepare_batch(session, cfg).await; + let txo_unstaked_insert_batch = + insert_unstaked_txo::Params::prepare_batch(session, cfg).await; + let txo_staked_asset_insert_batch = + insert_txo_asset::Params::prepare_batch(session, cfg).await; + let txo_unstaked_asset_insert_batch = + insert_unstaked_txo_asset::Params::prepare_batch(session, cfg).await; + + Ok(( + txo_staked_insert_batch?, + txo_unstaked_insert_batch?, + txo_staked_asset_insert_batch?, + txo_unstaked_asset_insert_batch?, + )) + } + + /// Extracts a stake address from a TXO if possible. + /// Returns None if it is not possible. + /// If we want to index, but can not determine a stake key hash, then return a Vec + /// with a single 0 byte. This is because the index DB needs data in the + /// primary key, so we use a single byte of 0 to indicate that there is no + /// stake address, and still have a primary key on the table. Otherwise return the + /// stake key hash as a vec of 28 bytes. + fn extract_stake_address( + txo: &pallas::ledger::traverse::MultiEraOutput<'_>, slot_no: u64, txn_id: &str, + ) -> Option<(Vec, String)> { + let stake_address = match txo.address() { + Ok(address) => { + match address { + // Byron addresses do not have stake addresses and are not supported. + pallas::ledger::addresses::Address::Byron(_) => { + return None; + }, + pallas::ledger::addresses::Address::Shelley(address) => { + let address_string = match address.to_bech32() { + Ok(address) => address, + Err(error) => { + // Shouldn't happen, but if it does error and don't index. + error!(error=%error, slot=slot_no, txn=txn_id,"Error converting to bech32: skipping."); + return None; + }, + }; + + match address.delegation() { + pallas::ledger::addresses::ShelleyDelegationPart::Script(hash) + | pallas::ledger::addresses::ShelleyDelegationPart::Key(hash) => { + (hash.to_vec(), address_string) + }, + pallas::ledger::addresses::ShelleyDelegationPart::Pointer(_pointer) => { + // These are not supported from Conway, so we don't support them + // either. 
+ (NO_STAKE_ADDRESS.to_vec(), address_string) + }, + pallas::ledger::addresses::ShelleyDelegationPart::Null => { + (NO_STAKE_ADDRESS.to_vec(), address_string) + }, + } + }, + pallas::ledger::addresses::Address::Stake(_) => { + // This should NOT appear in a TXO, so report if it does. But don't index it + // as a stake address. + warn!( + slot = slot_no, + txn = txn_id, + "Unexpected Stake address found in TXO. Refusing to index." + ); + return None; + }, + } + }, + Err(error) => { + // This should not ever happen. + error!(error=%error, slot = slot_no, txn = txn_id, "Failed to get Address from TXO. Skipping TXO."); + return None; + }, + }; + + Some(stake_address) + } + + /// Index the transaction Inputs. + pub(crate) fn index( + &mut self, txs: &pallas::ledger::traverse::MultiEraTx<'_>, slot_no: u64, txn_hash: &[u8], + txn: i16, + ) { + let txn_id = hex::encode_upper(txn_hash); + + // Accumulate all the data we want to insert from this transaction here. + for (txo_index, txo) in txs.outputs().iter().enumerate() { + // This will only return None if the TXO is not to be indexed (Byron Addresses) + let Some((stake_address, address)) = Self::extract_stake_address(txo, slot_no, &txn_id) + else { + continue; + }; + + let staked = stake_address != NO_STAKE_ADDRESS; + let txo_index = i16_from_saturating(txo_index); + + if staked { + let params = insert_txo::Params::new( + &stake_address, + slot_no, + txn, + txo_index, + &address, + txo.lovelace_amount(), + txn_hash, + ); + + self.staked_txo.push(params); + } else { + let params = insert_unstaked_txo::Params::new( + txn_hash, + txo_index, + slot_no, + txn, + &address, + txo.lovelace_amount(), + ); + + self.unstaked_txo.push(params); + } + + for asset in txo.non_ada_assets() { + let policy_id = asset.policy().to_vec(); + for policy_asset in asset.assets() { + if policy_asset.is_output() { + let policy_name = policy_asset.to_ascii_name().unwrap_or_default(); + let value = policy_asset.any_coin(); + + if staked { + let params = insert_txo_asset::Params::new( + &stake_address, + slot_no, + txn, + txo_index, + &policy_id, + &policy_name, + value, + ); + self.staked_txo_asset.push(params); + } else { + let params = insert_unstaked_txo_asset::Params::new( + txn_hash, + txo_index, + &policy_id, + &policy_name, + slot_no, + txn, + value, + ); + self.unstaked_txo_asset.push(params); + } + } else { + error!("Minting MultiAsset in TXO."); + } + } + } + } + } + + /// Index the transaction Inputs. + /// + /// Consumes `self` and returns a vector of futures. 
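`extract_stake_address` above skips Byron outputs entirely (they cannot be staked), and encodes "no usable delegation part" as an empty stake address so the table still has a primary key value; `index` then uses that emptiness check to route each TXO to the staked or unstaked tables. A tiny restatement of the routing convention (illustrative only):

/// Marker meaning "no stake address", matching the `NO_STAKE_ADDRESS` constant above.
const NO_STAKE_ADDRESS: &[u8] = &[];

/// Illustrative: decide whether a TXO goes to the staked or the unstaked index tables.
fn is_staked(stake_address: &[u8]) -> bool {
    stake_address != NO_STAKE_ADDRESS
}

fn routing_example() {
    assert!(!is_staked(&[])); // null/pointer delegation -> unstaked_txo_* tables
    assert!(is_staked(&[0x01; 28])); // 28-byte stake key hash -> txo_by_stake tables
}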
+ pub(crate) fn execute(self, session: &Arc) -> FallibleQueryTasks { + let mut query_handles: FallibleQueryTasks = Vec::new(); + + if !self.staked_txo.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch(PreparedQuery::TxoAdaInsertQuery, self.staked_txo) + .await + })); + } + + if !self.unstaked_txo.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch(PreparedQuery::UnstakedTxoAdaInsertQuery, self.unstaked_txo) + .await + })); + } + + if !self.staked_txo_asset.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch(PreparedQuery::TxoAssetInsertQuery, self.staked_txo_asset) + .await + })); + } + if !self.unstaked_txo_asset.is_empty() { + let inner_session = session.clone(); + query_handles.push(tokio::spawn(async move { + inner_session + .execute_batch( + PreparedQuery::UnstakedTxoAssetInsertQuery, + self.unstaked_txo_asset, + ) + .await + })); + } + + query_handles + } +} diff --git a/catalyst-gateway/bin/src/db/index/mod.rs b/catalyst-gateway/bin/src/db/index/mod.rs new file mode 100644 index 00000000000..f4157be8550 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/mod.rs @@ -0,0 +1,6 @@ +//! Blockchain Index Database + +pub(crate) mod block; +pub(crate) mod queries; +pub(crate) mod schema; +pub(crate) mod session; diff --git a/catalyst-gateway/bin/src/db/index/queries/cql/get_txi_by_txn_hashes.cql b/catalyst-gateway/bin/src/db/index/queries/cql/get_txi_by_txn_hashes.cql new file mode 100644 index 00000000000..b2ed6ee3e94 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/cql/get_txi_by_txn_hashes.cql @@ -0,0 +1,6 @@ +SELECT + txn_hash, + txo, + slot_no +FROM txi_by_txn_hash +WHERE txn_hash IN :txn_hashes diff --git a/catalyst-gateway/bin/src/db/index/queries/cql/get_txo_by_stake_address.cql b/catalyst-gateway/bin/src/db/index/queries/cql/get_txo_by_stake_address.cql new file mode 100644 index 00000000000..002e0532177 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/cql/get_txo_by_stake_address.cql @@ -0,0 +1,10 @@ +SELECT + txn_hash, + txn, + txo, + slot_no, + value, + spent_slot +FROM txo_by_stake +WHERE stake_address = :stake_address +AND slot_no <= :slot_no diff --git a/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql b/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql new file mode 100644 index 00000000000..e74704815c8 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/cql/update_txo_spent.cql @@ -0,0 +1,6 @@ +UPDATE txo_by_stake +SET spent_slot = :spent_slot +WHERE stake_address = :stake_address +AND txn = :txn +AND txo = :txo +AND slot_no = :slot_no diff --git a/catalyst-gateway/bin/src/db/index/queries/mod.rs b/catalyst-gateway/bin/src/db/index/queries/mod.rs new file mode 100644 index 00000000000..89129f8d84c --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/mod.rs @@ -0,0 +1,246 @@ +//! Pre-prepare queries for a given session. +//! +//! This improves query execution time. 
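For orientation, here is a rough sketch of how the TXO insert batch from `block/txo.rs` above is expected to be driven while indexing a block. It is illustrative only and not part of this patch: only `TxoInsertQuery::new()`, `index()` and `execute()` come from the code above; the wrapper function, the pallas block accessors (`slot()`, `txs()`, `hash()`) and the exact session type are assumptions.

async fn index_block_txos(
    session: &std::sync::Arc<CassandraSession>,
    block: &pallas::ledger::traverse::MultiEraBlock<'_>,
) -> anyhow::Result<()> {
    let mut txo_query = TxoInsertQuery::new();
    let slot_no = block.slot();

    // Accumulate insert parameters for every transaction in the block.
    for (txn_index, txs) in block.txs().iter().enumerate() {
        let txn_hash = txs.hash();
        txo_query.index(txs, slot_no, txn_hash.as_slice(), i16_from_saturating(txn_index));
    }

    // `execute` consumes the accumulated parameters and spawns one batch task per table.
    for handle in txo_query.execute(session) {
        handle.await??;
    }

    Ok(())
}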
+
+pub(crate) mod staked_ada;
+
+use std::sync::Arc;
+
+use anyhow::bail;
+use crossbeam_skiplist::SkipMap;
+use scylla::{
+    batch::Batch, prepared_statement::PreparedStatement, serialize::row::SerializeRow,
+    transport::iterator::RowIterator, QueryResult, Session,
+};
+use staked_ada::{
+    get_txi_by_txn_hash::GetTxiByTxnHashesQuery,
+    get_txo_by_stake_address::GetTxoByStakeAddressQuery, update_txo_spent::UpdateTxoSpentQuery,
+};
+
+use super::block::{
+    certs::CertInsertQuery, cip36::Cip36InsertQuery, txi::TxiInsertQuery, txo::TxoInsertQuery,
+};
+use crate::settings::{CassandraEnvVars, CASSANDRA_MIN_BATCH_SIZE};
+
+/// Batches of different sizes, prepared and ready for use.
+pub(crate) type SizedBatch = SkipMap<u16, Arc<Batch>>;
+
+/// All Prepared Queries that we know about.
+#[allow(clippy::enum_variant_names, dead_code)]
+pub(crate) enum PreparedQuery {
+    /// TXO Insert query.
+    TxoAdaInsertQuery,
+    /// TXO Asset Insert query.
+    TxoAssetInsertQuery,
+    /// Unstaked TXO Insert query.
+    UnstakedTxoAdaInsertQuery,
+    /// Unstaked TXO Asset Insert query.
+    UnstakedTxoAssetInsertQuery,
+    /// TXI Insert query.
+    TxiInsertQuery,
+    /// Stake Registration Insert query.
+    StakeRegistrationInsertQuery,
+    /// CIP 36 Registration Insert Query.
+    Cip36RegistrationInsertQuery,
+    /// CIP 36 Registration Error Insert query.
+    Cip36RegistrationInsertErrorQuery,
+    /// CIP 36 Registration for stake address Insert query.
+    Cip36RegistrationForStakeAddrInsertQuery,
+    /// TXO spent Update query.
+    TxoSpentUpdateQuery,
+}
+
+/// All prepared SELECT query statements.
+pub(crate) enum PreparedSelectQuery {
+    /// Get TXO by stake address query.
+    GetTxoByStakeAddress,
+    /// Get TXI by transaction hash query.
+    GetTxiByTransactionHash,
+}
+
+/// All prepared queries for a session.
+#[allow(clippy::struct_field_names)]
+pub(crate) struct PreparedQueries {
+    /// TXO Insert query.
+    txo_insert_queries: SizedBatch,
+    /// TXO Asset Insert query.
+    txo_asset_insert_queries: SizedBatch,
+    /// Unstaked TXO Insert query.
+    unstaked_txo_insert_queries: SizedBatch,
+    /// Unstaked TXO Asset Insert query.
+    unstaked_txo_asset_insert_queries: SizedBatch,
+    /// TXI Insert query.
+    txi_insert_queries: SizedBatch,
+    /// Stake Registration Insert query.
+    stake_registration_insert_queries: SizedBatch,
+    /// CIP36 Registrations.
+    cip36_registration_insert_queries: SizedBatch,
+    /// CIP36 Registration errors.
+    cip36_registration_error_insert_queries: SizedBatch,
+    /// CIP36 Registration for Stake Address Insert query.
+    cip36_registration_for_stake_address_insert_queries: SizedBatch,
+    /// Update TXO spent query.
+    txo_spent_update_queries: SizedBatch,
+    /// Get TXO by stake address query.
+    txo_by_stake_address_query: PreparedStatement,
+    /// Get TXI by transaction hash.
+    txi_by_txn_hash_query: PreparedStatement,
+}
+
+/// An individual query response that can fail
+#[allow(dead_code)]
+pub(crate) type FallibleQueryResult = anyhow::Result<QueryResult>;
+/// A set of query responses that can fail.
+pub(crate) type FallibleQueryResults = anyhow::Result<Vec<QueryResult>>;
+/// A set of query responses from tasks that can fail.
+pub(crate) type FallibleQueryTasks = Vec<tokio::task::JoinHandle<FallibleQueryResults>>;
+
+impl PreparedQueries {
+    /// Create new prepared queries for a given session.
+    pub(crate) async fn new(session: Arc<Session>, cfg: &CassandraEnvVars) -> anyhow::Result<Self> {
+        // We initialize like this, so that all errors preparing queries get shown before aborting.
+        let txi_insert_queries = TxiInsertQuery::prepare_batch(&session, cfg).await;
+        let all_txo_queries = TxoInsertQuery::prepare_batch(&session, cfg).await;
+        let stake_registration_insert_queries = CertInsertQuery::prepare_batch(&session, cfg).await;
+        let all_cip36_queries = Cip36InsertQuery::prepare_batch(&session, cfg).await;
+        let txo_spent_update_queries =
+            UpdateTxoSpentQuery::prepare_batch(session.clone(), cfg).await;
+        let txo_by_stake_address_query = GetTxoByStakeAddressQuery::prepare(session.clone()).await;
+        let txi_by_txn_hash_query = GetTxiByTxnHashesQuery::prepare(session.clone()).await;
+
+        let (
+            txo_insert_queries,
+            unstaked_txo_insert_queries,
+            txo_asset_insert_queries,
+            unstaked_txo_asset_insert_queries,
+        ) = all_txo_queries?;
+
+        let (
+            cip36_registration_insert_queries,
+            cip36_registration_error_insert_queries,
+            cip36_registration_for_stake_address_insert_queries,
+        ) = all_cip36_queries?;
+
+        Ok(Self {
+            txo_insert_queries,
+            txo_asset_insert_queries,
+            unstaked_txo_insert_queries,
+            unstaked_txo_asset_insert_queries,
+            txi_insert_queries: txi_insert_queries?,
+            stake_registration_insert_queries: stake_registration_insert_queries?,
+            cip36_registration_insert_queries,
+            cip36_registration_error_insert_queries,
+            cip36_registration_for_stake_address_insert_queries,
+            txo_spent_update_queries: txo_spent_update_queries?,
+            txo_by_stake_address_query: txo_by_stake_address_query?,
+            txi_by_txn_hash_query: txi_by_txn_hash_query?,
+        })
+    }
+
+    /// Prepares a statement.
+    pub(crate) async fn prepare(
+        session: Arc<Session>, query: &str, consistency: scylla::statement::Consistency,
+        idempotent: bool,
+    ) -> anyhow::Result<PreparedStatement> {
+        let mut prepared = session.prepare(query).await?;
+        prepared.set_consistency(consistency);
+        prepared.set_is_idempotent(idempotent);
+
+        Ok(prepared)
+    }
+
+    /// Prepares all permutations of the batch from 1 to max.
+    /// It is necessary to do this because batches are pre-sized; they cannot be dynamic.
+    /// Preparing the batches in advance is a large performance improvement.
+    pub(crate) async fn prepare_batch(
+        session: Arc<Session>, query: &str, cfg: &CassandraEnvVars,
+        consistency: scylla::statement::Consistency, idempotent: bool, logged: bool,
+    ) -> anyhow::Result<SizedBatch> {
+        let sized_batches: SizedBatch = SkipMap::new();
+
+        // First prepare the query. Only needs to be done once; all queries on a batch are the
+        // same.
+        let prepared = Self::prepare(session, query, consistency, idempotent).await?;
+
+        for batch_size in CASSANDRA_MIN_BATCH_SIZE..=cfg.max_batch_size {
+            let mut batch: Batch = Batch::new(if logged {
+                scylla::batch::BatchType::Logged
+            } else {
+                scylla::batch::BatchType::Unlogged
+            });
+            batch.set_consistency(consistency);
+            batch.set_is_idempotent(idempotent);
+            for _ in CASSANDRA_MIN_BATCH_SIZE..=batch_size {
+                batch.append_statement(prepared.clone());
+            }
+
+            sized_batches.insert(batch_size.try_into()?, Arc::new(batch));
+        }
+
+        Ok(sized_batches)
+    }
+
+    /// Executes a select query with the given parameters.
+    ///
+    /// Returns an iterator that iterates over all the result pages that the query
+    /// returns.
+    pub(crate) async fn execute_iter<P>

( + &self, session: Arc, select_query: PreparedSelectQuery, params: P, + ) -> anyhow::Result + where P: SerializeRow { + let prepared_stmt = match select_query { + PreparedSelectQuery::GetTxoByStakeAddress => &self.txo_by_stake_address_query, + PreparedSelectQuery::GetTxiByTransactionHash => &self.txi_by_txn_hash_query, + }; + + session + .execute_iter(prepared_stmt.clone(), params) + .await + .map_err(|e| anyhow::anyhow!(e)) + } + + /// Execute a Batch query with the given parameters. + /// + /// Values should be a Vec of values which implement `SerializeRow` and they MUST be + /// the same, and must match the query being executed. + /// + /// This will divide the batch into optimal sized chunks and execute them until all + /// values have been executed or the first error is encountered. + pub(crate) async fn execute_batch( + &self, session: Arc, cfg: Arc, query: PreparedQuery, + values: Vec, + ) -> FallibleQueryResults { + let query_map = match query { + PreparedQuery::TxoAdaInsertQuery => &self.txo_insert_queries, + PreparedQuery::TxoAssetInsertQuery => &self.txo_asset_insert_queries, + PreparedQuery::UnstakedTxoAdaInsertQuery => &self.unstaked_txo_insert_queries, + PreparedQuery::UnstakedTxoAssetInsertQuery => &self.unstaked_txo_asset_insert_queries, + PreparedQuery::TxiInsertQuery => &self.txi_insert_queries, + PreparedQuery::StakeRegistrationInsertQuery => &self.stake_registration_insert_queries, + PreparedQuery::Cip36RegistrationInsertQuery => &self.cip36_registration_insert_queries, + PreparedQuery::Cip36RegistrationInsertErrorQuery => { + &self.cip36_registration_error_insert_queries + }, + PreparedQuery::Cip36RegistrationForStakeAddrInsertQuery => { + &self.cip36_registration_for_stake_address_insert_queries + }, + PreparedQuery::TxoSpentUpdateQuery => &self.txo_spent_update_queries, + }; + + let mut results: Vec = Vec::new(); + + let chunks = values.chunks(cfg.max_batch_size.try_into().unwrap_or(1)); + + for chunk in chunks { + let chunk_size: u16 = chunk.len().try_into()?; + let Some(batch_query) = query_map.get(&chunk_size) else { + // This should not actually occur. + bail!("No batch query found for size {}", chunk_size); + }; + let batch_query_statements = batch_query.value().clone(); + results.push(session.batch(&batch_query_statements, chunk).await?); + } + + Ok(results) + } +} diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs new file mode 100644 index 00000000000..7c3cc6af048 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txi_by_txn_hash.rs @@ -0,0 +1,83 @@ +//! Get TXI by Transaction hash query + +use std::sync::Arc; + +use scylla::{ + prepared_statement::PreparedStatement, transport::iterator::TypedRowIterator, SerializeRow, + Session, +}; +use tracing::error; + +use crate::db::index::{ + queries::{PreparedQueries, PreparedSelectQuery}, + session::CassandraSession, +}; + +/// Get TXI query string. +const GET_TXI_BY_TXN_HASHES_QUERY: &str = include_str!("../cql/get_txi_by_txn_hashes.cql"); + +/// Get TXI query parameters. +#[derive(SerializeRow)] +pub(crate) struct GetTxiByTxnHashesQueryParams { + /// Transaction hashes. 
+ txn_hashes: Vec>, +} + +impl GetTxiByTxnHashesQueryParams { + /// Create a new instance of [`GetTxiByTxnHashesQueryParams`] + pub(crate) fn new(txn_hashes: Vec>) -> Self { + Self { txn_hashes } + } +} + +/// Get TXI Query Result +// TODO: https://github.com/input-output-hk/catalyst-voices/issues/828 +// The macro uses expect to signal an error in deserializing values. +#[allow(clippy::expect_used)] +mod result { + use scylla::FromRow; + + /// Get TXI query result. + #[derive(FromRow)] + pub(crate) struct GetTxiByTxnHashesQuery { + /// TXI transaction hash. + pub txn_hash: Vec, + /// TXI original TXO index. + pub txo: i16, + /// TXI slot number. + pub slot_no: num_bigint::BigInt, + } +} +/// Get TXI query. +pub(crate) struct GetTxiByTxnHashesQuery; + +impl GetTxiByTxnHashesQuery { + /// Prepares a get txi query. + pub(crate) async fn prepare(session: Arc) -> anyhow::Result { + let get_txi_by_txn_hashes_query = PreparedQueries::prepare( + session, + GET_TXI_BY_TXN_HASHES_QUERY, + scylla::statement::Consistency::All, + true, + ) + .await; + + if let Err(ref error) = get_txi_by_txn_hashes_query { + error!(error=%error, "Failed to prepare get TXI by txn hashes query."); + }; + + get_txi_by_txn_hashes_query + } + + /// Executes a get txi by transaction hashes query. + pub(crate) async fn execute( + session: &CassandraSession, params: GetTxiByTxnHashesQueryParams, + ) -> anyhow::Result> { + let iter = session + .execute_iter(PreparedSelectQuery::GetTxiByTransactionHash, params) + .await? + .into_typed::(); + + Ok(iter) + } +} diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs new file mode 100644 index 00000000000..2beee7e6467 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/get_txo_by_stake_address.rs @@ -0,0 +1,94 @@ +//! Get the TXO by Stake Address +use std::sync::Arc; + +use scylla::{ + prepared_statement::PreparedStatement, transport::iterator::TypedRowIterator, SerializeRow, + Session, +}; +use tracing::error; + +use crate::db::index::{ + queries::{PreparedQueries, PreparedSelectQuery}, + session::CassandraSession, +}; + +/// Get txo by stake address query string. +const GET_TXO_BY_STAKE_ADDRESS_QUERY: &str = include_str!("../cql/get_txo_by_stake_address.cql"); + +/// Get txo by stake address query parameters. +#[derive(SerializeRow)] +pub(crate) struct GetTxoByStakeAddressQueryParams { + /// Stake address. + stake_address: Vec, + /// Max slot num. + slot_no: num_bigint::BigInt, +} + +impl GetTxoByStakeAddressQueryParams { + /// Creates a new [`GetTxoByStakeAddressQueryParams`]. + pub(crate) fn new(stake_address: Vec, slot_no: num_bigint::BigInt) -> Self { + Self { + stake_address, + slot_no, + } + } +} + +/// Get TXO by stake address query row result +// TODO: https://github.com/input-output-hk/catalyst-voices/issues/828 +// The macro uses expect to signal an error in deserializing values. +#[allow(clippy::expect_used)] +mod result { + use scylla::FromRow; + + /// Get txo by stake address query result. + #[derive(FromRow)] + pub(crate) struct GetTxoByStakeAddressQuery { + /// TXO transaction hash. + pub txn_hash: Vec, + /// TXO transaction index within the slot. + pub txn: i16, + /// TXO index. + pub txo: i16, + /// TXO transaction slot number. + pub slot_no: num_bigint::BigInt, + /// TXO value. + pub value: num_bigint::BigInt, + /// TXO spent slot. + pub spent_slot: Option, + } +} + +/// Get staked ADA query. 
+pub(crate) struct GetTxoByStakeAddressQuery; + +impl GetTxoByStakeAddressQuery { + /// Prepares a get txo by stake address query. + pub(crate) async fn prepare(session: Arc) -> anyhow::Result { + let get_txo_by_stake_address_query = PreparedQueries::prepare( + session, + GET_TXO_BY_STAKE_ADDRESS_QUERY, + scylla::statement::Consistency::All, + true, + ) + .await; + + if let Err(ref error) = get_txo_by_stake_address_query { + error!(error=%error, "Failed to prepare get TXO by stake address"); + }; + + get_txo_by_stake_address_query + } + + /// Executes a get txo by stake address query. + pub(crate) async fn execute( + session: &CassandraSession, params: GetTxoByStakeAddressQueryParams, + ) -> anyhow::Result> { + let iter = session + .execute_iter(PreparedSelectQuery::GetTxoByStakeAddress, params) + .await? + .into_typed::(); + + Ok(iter) + } +} diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs new file mode 100644 index 00000000000..e7114f1d68a --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/mod.rs @@ -0,0 +1,4 @@ +//! Staked ADA related queries. +pub(crate) mod get_txi_by_txn_hash; +pub(crate) mod get_txo_by_stake_address; +pub(crate) mod update_txo_spent; diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs new file mode 100644 index 00000000000..21658d74e29 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs @@ -0,0 +1,69 @@ +//! Update the TXO Spent column to optimize future queries. + +use std::sync::Arc; + +use scylla::{SerializeRow, Session}; +use tracing::error; + +use crate::{ + db::index::{ + queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, SizedBatch}, + session::CassandraSession, + }, + settings::CassandraEnvVars, +}; + +/// Update TXO spent query string. +const UPDATE_TXO_SPENT_QUERY: &str = include_str!("../cql/update_txo_spent.cql"); + +/// Update TXO spent query params. +#[derive(SerializeRow)] +pub(crate) struct UpdateTxoSpentQueryParams { + /// TXO stake address. + pub stake_address: Vec, + /// TXO transaction index within the slot. + pub txn: i16, + /// TXO index. + pub txo: i16, + /// TXO slot number. + pub slot_no: num_bigint::BigInt, + /// TXO spent slot number. + pub spent_slot: num_bigint::BigInt, +} + +/// Update TXO spent query. +pub(crate) struct UpdateTxoSpentQuery; + +impl UpdateTxoSpentQuery { + /// Prepare a batch of update TXO spent queries. + pub(crate) async fn prepare_batch( + session: Arc, cfg: &CassandraEnvVars, + ) -> anyhow::Result { + let update_txo_spent_queries = PreparedQueries::prepare_batch( + session.clone(), + UPDATE_TXO_SPENT_QUERY, + cfg, + scylla::statement::Consistency::Any, + true, + false, + ) + .await; + + if let Err(ref error) = update_txo_spent_queries { + error!(error=%error,"Failed to prepare update TXO spent query."); + }; + + update_txo_spent_queries + } + + /// Executes a update txo spent query. 
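To show how the three staked-ADA queries in this module fit together, here is a condensed sketch of the read path the GET staked_ada endpoint is expected to follow. It is illustrative only and not part of this patch: the wrapper function, the row collection and the spent-marking policy are assumptions; only the query `execute` entry points and their parameter types come from the modules above (and from `UpdateTxoSpentQuery::execute` just below).

async fn staked_ada_read_path(
    session: &CassandraSession, stake_address: Vec<u8>, slot_no: num_bigint::BigInt,
) -> anyhow::Result<()> {
    // 1. Fetch every TXO indexed for this stake address up to `slot_no`.
    let _txos = GetTxoByStakeAddressQuery::execute(
        session,
        GetTxoByStakeAddressQueryParams::new(stake_address, slot_no),
    )
    .await?;

    // 2. Look up TXIs for the returned transaction hashes to detect spends.
    //    (Collecting the hashes from the row iterator is elided here.)
    let txn_hashes: Vec<Vec<u8>> = Vec::new();
    let _txis = GetTxiByTxnHashesQuery::execute(
        session,
        GetTxiByTxnHashesQueryParams::new(txn_hashes),
    )
    .await?;

    // 3. Record any newly detected spends so later queries can skip the TXI lookup.
    let updates: Vec<UpdateTxoSpentQueryParams> = Vec::new();
    UpdateTxoSpentQuery::execute(session, updates).await?;

    Ok(())
}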
+    pub(crate) async fn execute(
+        session: &CassandraSession, params: Vec<UpdateTxoSpentQueryParams>,
+    ) -> FallibleQueryResults {
+        let results = session
+            .execute_batch(PreparedQuery::TxoSpentUpdateQuery, params)
+            .await?;
+
+        Ok(results)
+    }
+}
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql
new file mode 100644
index 00000000000..17c6886e3b7
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration.cql
@@ -0,0 +1,18 @@
+-- Index of CIP-36 registrations. Valid.
+CREATE TABLE IF NOT EXISTS cip36_registration (
+    -- Primary Key Data
+    stake_address blob,     -- 32 Bytes of Stake Address.
+    nonce varint,           -- Nonce that has been slot corrected.
+    slot_no varint,         -- slot number when the key_was_registered/re-registered.
+    txn smallint,           -- Index of the TX which holds the registration data.
+
+    -- Non-Key Data
+    vote_key blob,          -- 32 Bytes of Vote Key.
+    payment_address blob,   -- Bytes of address for payment of rewards.
+    is_payable boolean,     -- True if payment to the address is possible.
+    raw_nonce varint,       -- Nonce that has not been slot corrected.
+    cip36 boolean,          -- True if the registration is CIP-36 format, Cip-15=False.
+
+    PRIMARY KEY (stake_address, nonce, slot_no, txn)
+)
+WITH CLUSTERING ORDER BY (nonce DESC, slot_no DESC, txn DESC);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
new file mode 100644
index 00000000000..3ab03c8f1ef
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_for_vote_key.cql
@@ -0,0 +1,14 @@
+-- Index of CIP-36 registrations searchable by Vote Key.
+-- Full registration data needs to be queried from the main cip36 registration tables.
+-- Includes both Valid and Invalid registrations.
+CREATE TABLE IF NOT EXISTS cip36_registration_for_stake_addr (
+    -- Primary Key Data
+    vote_key blob,          -- 32 Bytes of Vote Key.
+    stake_address blob,     -- 32 Bytes of Stake Address.
+    slot_no varint,         -- slot number when the key_was_registered/re-registered.
+    txn smallint,           -- Index of the TX which holds the registration data.
+    valid boolean,          -- True if the registration is valid.
+
+    PRIMARY KEY ((vote_key, stake_address), slot_no, txn, valid)
+)
+WITH CLUSTERING ORDER BY (slot_no DESC, txn DESC);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
new file mode 100644
index 00000000000..e72eaf304ea
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/cip36_registration_invalid.cql
@@ -0,0 +1,20 @@
+-- Index of CIP-36 registrations that are invalid.
+CREATE TABLE IF NOT EXISTS cip36_registration_invalid (
+    -- Primary Key Data
+    stake_address blob,     -- 32 Bytes of Stake Address.
+    slot_no varint,         -- slot number when the key_was_registered/re-registered.
+    txn smallint,           -- Index of the TX which holds the registration data.
+
+    -- Non-Key Data
+    vote_key blob,          -- 32 Bytes of Vote Key.
+    payment_address blob,   -- Bytes of address for payment of rewards.
+    is_payable boolean,     -- True if payment to the address is possible.
+    raw_nonce varint,       -- Nonce that has not been slot corrected.
+    nonce varint,           -- Nonce that has been slot corrected.
+    cip36 boolean,          -- True if CIP-36 Registration format used. CIP-15 = False.
+    signed boolean,         -- Signature validates.
+ error_report list, -- List of serialization errors in the registration. + + PRIMARY KEY (vote_key, slot_no, txn) +) +WITH CLUSTERING ORDER BY (slot_no DESC, txn DESC); diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/namespace.cql b/catalyst-gateway/bin/src/db/index/schema/cql/namespace.cql new file mode 100644 index 00000000000..29654d114e1 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/cql/namespace.cql @@ -0,0 +1,4 @@ +-- Create the namespace in the DB. +-- Template. +CREATE KEYSPACE IF NOT EXISTS {{keyspace}} + With replication = {'class': 'NetworkTopologyStrategy','replication_factor': 1}; \ No newline at end of file diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/stake_registration.cql b/catalyst-gateway/bin/src/db/index/schema/cql/stake_registration.cql new file mode 100644 index 00000000000..71afa7d5b8d --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/cql/stake_registration.cql @@ -0,0 +1,20 @@ +-- Index of stake registrations. +-- Can also be used to convert a known stake key hash back to a full stake address. +CREATE TABLE IF NOT EXISTS stake_registration ( + -- Primary Key Data + stake_hash blob, -- 28 Bytes Stake Key Hash. + slot_no varint, -- slot number when the key_was_registered/re-registered. + txn smallint, -- Index of the TX which holds the registration data. + + -- Non-Key Data + stake_address blob, -- 32 Bytes Stake address - not present for scripts and may not be present for `register`. + + -- Stake key lifecycle data, shows what happened with the stake key at this slot#. + script boolean, -- Is the address a script address. + register boolean, -- True if the stake was registered in this transaction. + deregister boolean, -- True if the stake key was deregistered in this transaction. + pool_delegation blob, -- Stake was delegated to this Pool address. + -- Not present if delegation did not change. + + PRIMARY KEY (stake_hash, script, slot_no, txn) +); diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/txi_by_txn_hash_table.cql b/catalyst-gateway/bin/src/db/index/schema/cql/txi_by_txn_hash_table.cql new file mode 100644 index 00000000000..4ed7bd12cd9 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/cql/txi_by_txn_hash_table.cql @@ -0,0 +1,11 @@ +-- This could be ADA or a native asset being spent. +-- This can represent a spend on either immutable data or volatile data. +CREATE TABLE IF NOT EXISTS txi_by_txn_hash ( + txn_hash blob, -- 32 Bytes Transaction Hash that was spent. + txo smallint, -- Index of the TXO which was spent + + -- Non key data, we can only spend a transaction hash/txo once, so this should be unique in any event. + slot_no varint, -- slot number when the spend occurred. + + PRIMARY KEY (txn_hash, txo) +); diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/txo_assets_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/cql/txo_assets_by_stake_table.cql new file mode 100644 index 00000000000..f575fffc757 --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/schema/cql/txo_assets_by_stake_table.cql @@ -0,0 +1,16 @@ +-- Transaction Outputs (Native Assets) per stake address. +-- Unstaked Assets are not present in this table. +CREATE TABLE IF NOT EXISTS txo_assets_by_stake ( + -- Primary Key Fields + stake_address blob, -- stake address hash (28 bytes stake address hash, zero length if not staked.) + slot_no varint, -- slot number the txo was created in. + txn smallint, -- Which Transaction in the Slot is the TXO. 
+    txo smallint,           -- offset in the txo list of the transaction the txo is in.
+    policy_id blob,         -- asset policy hash (id) (28 byte binary hash)
+    policy_name text,       -- name of the policy (UTF8)
+
+    -- Non-Key Data of the asset.
+    value varint,           -- Value of the asset (u64)
+
+    PRIMARY KEY (stake_address, slot_no, txn, txo, policy_id, policy_name)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/txo_by_stake_table.cql b/catalyst-gateway/bin/src/db/index/schema/cql/txo_by_stake_table.cql
new file mode 100644
index 00000000000..b9ad080358a
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/txo_by_stake_table.cql
@@ -0,0 +1,23 @@
+-- Transaction Outputs (ADA) per stake address.
+-- Unstaked ADA is not present in this table.
+CREATE TABLE IF NOT EXISTS txo_by_stake (
+    -- Primary Key Fields
+    stake_address blob,     -- stake address hash (28 bytes stake address hash, zero length if not staked.)
+    slot_no varint,         -- slot number the txo was created in.
+    txn smallint,           -- Which Transaction in the Slot is the TXO.
+    txo smallint,           -- offset in the txo list of the transaction the txo is in.
+
+    -- Transaction Output Data
+    address ascii,          -- TXO address (CIP19 Formatted Text).
+    value varint,           -- Lovelace value of the TXO (u64).
+
+    -- Data needed to correlate a spent TXO.
+    txn_hash blob,          -- 32 byte hash of this transaction.
+
+    spent_slot varint,      -- Slot this TXO was spent in.
+                            -- This is ONLY calculated/stored
+                            -- when first detected in a query lookup.
+                            -- It serves as an optimization on subsequent queries.
+
+    PRIMARY KEY (stake_address, slot_no, txn, txo)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_assets_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_assets_by_txn_hash.cql
new file mode 100644
index 00000000000..047567d895d
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_assets_by_txn_hash.cql
@@ -0,0 +1,17 @@
+-- Transaction Outputs (Native Assets) that are not staked, by their transaction hash.
+CREATE TABLE IF NOT EXISTS unstaked_txo_assets_by_txn_hash (
+    -- Primary Key Fields
+    txn_hash blob,          -- 32 byte hash of this transaction.
+    txo smallint,           -- offset in the txo list of the transaction the txo is in.
+    policy_id blob,         -- asset policy hash (id) (28 byte binary hash)
+    policy_name text,       -- name of the policy (UTF8)
+
+    -- Secondary Location information for the transaction.
+    slot_no varint,         -- slot number the txo was created in.
+    txn smallint,           -- Which Transaction in the Slot is the TXO.
+
+    -- Value of the asset.
+    value varint,           -- Value of the asset (u64)
+
+    PRIMARY KEY (txn_hash, txo, policy_id, policy_name)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_by_txn_hash.cql b/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_by_txn_hash.cql
new file mode 100644
index 00000000000..b6627cbbe38
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/cql/unstaked_txo_by_txn_hash.cql
@@ -0,0 +1,22 @@
+-- Transaction Outputs (ADA) that are not staked, by their transaction hash.
+CREATE TABLE IF NOT EXISTS unstaked_txo_by_txn_hash (
+    -- Primary Key Fields
+    txn_hash blob,          -- 32 byte hash of this transaction.
+    txo smallint,           -- offset in the txo list of the transaction the txo is in.
+
+    -- Secondary Location information for the transaction.
+    slot_no varint,         -- slot number the txo was created in.
+    txn smallint,           -- Which Transaction in the Slot is the TXO.
+
+
+    -- Transaction Output Data
+    address ascii,          -- TXO address (CIP19 Formatted Text).
+    value varint,           -- Lovelace value of the TXO (u64).
+
+    spent_slot varint,      -- Slot this TXO was spent in.
+                            -- This is ONLY calculated/stored
+                            -- when first detected in a query lookup.
+                            -- It serves as an optimization on subsequent queries.
+
+    PRIMARY KEY (txn_hash, txo)
+);
diff --git a/catalyst-gateway/bin/src/db/index/schema/mod.rs b/catalyst-gateway/bin/src/db/index/schema/mod.rs
new file mode 100644
index 00000000000..4bfd4725db9
--- /dev/null
+++ b/catalyst-gateway/bin/src/db/index/schema/mod.rs
@@ -0,0 +1,125 @@
+//! Index Schema
+
+use std::sync::Arc;
+
+use anyhow::Context;
+use handlebars::Handlebars;
+use scylla::Session;
+use serde_json::json;
+use tracing::error;
+
+use crate::settings::CassandraEnvVars;
+
+/// Keyspace Create (Templated)
+const CREATE_NAMESPACE_CQL: &str = include_str!("./cql/namespace.cql");
+
+/// The version of the Schema we are using.
+/// Must be incremented if there is a breaking change in any schema tables below.
+pub(crate) const SCHEMA_VERSION: u64 = 1;
+
+/// All Schema Creation Statements
+const SCHEMAS: &[(&str, &str)] = &[
+    (
+        // TXO by Stake Address Table Schema
+        include_str!("./cql/txo_by_stake_table.cql"),
+        "Create Table TXO By Stake Address",
+    ),
+    (
+        // TXO Assets by Stake Address Table Schema
+        include_str!("./cql/txo_assets_by_stake_table.cql"),
+        "Create Table TXO Assets By Stake Address",
+    ),
+    (
+        // TXO Unstaked Table Schema
+        include_str!("./cql/unstaked_txo_by_txn_hash.cql"),
+        "Create Table Unstaked TXO By Txn Hash",
+    ),
+    (
+        // TXO Unstaked Assets Table Schema
+        include_str!("./cql/unstaked_txo_assets_by_txn_hash.cql"),
+        "Create Table Unstaked TXO Assets By Txn Hash",
+    ),
+    (
+        // TXI by Txn Hash Table Schema
+        include_str!("./cql/txi_by_txn_hash_table.cql"),
+        "Create Table TXI By Txn Hash",
+    ),
+    (
+        // Stake Address/Registration Table Schema
+        include_str!("./cql/stake_registration.cql"),
+        "Create Table Stake Registration",
+    ),
+    (
+        // CIP-36 Registration Table Schema
+        include_str!("./cql/cip36_registration.cql"),
+        "Create Table CIP-36 Registration",
+    ),
+    (
+        // CIP-36 Registration Invalid Table Schema
+        include_str!("./cql/cip36_registration_invalid.cql"),
+        "Create Table CIP-36 Registration Invalid",
+    ),
+    (
+        // CIP-36 Registration For Vote Key Table Schema
+        include_str!("./cql/cip36_registration_for_vote_key.cql"),
+        "Create Table CIP-36 Registration For a stake address",
+    ),
+];
+
+/// Get the namespace for a particular db configuration
+pub(crate) fn namespace(cfg: &CassandraEnvVars) -> String {
+    // Build and set the Keyspace to use.
+    format!("{}_V{}", cfg.namespace.as_str(), SCHEMA_VERSION)
+}
+
+/// Create the namespace we will use for this session
+/// Ok to run this if the namespace already exists.
+async fn create_namespace(
+    session: &mut Arc<Session>, cfg: &CassandraEnvVars,
+) -> anyhow::Result<()> {
+    let keyspace = namespace(cfg);
+
+    let mut reg = Handlebars::new();
+    // disable default `html_escape` function
+    // which transforms `<`, `>` symbols to their HTML-escaped forms (`&lt;`, `&gt;`)
+    reg.register_escape_fn(|s| s.into());
+    let query = reg.render_template(CREATE_NAMESPACE_CQL, &json!({"keyspace": keyspace}))?;
+
+    // Create the Keyspace if it doesn't exist already.
+    let stmt = session.prepare(query).await?;
+    session.execute_unpaged(&stmt, ()).await?;
+
+    // Wait for the Schema to be ready.
+    session.await_schema_agreement().await?;
+
+    // Set the Keyspace to use for this session.
+ if let Err(error) = session.use_keyspace(keyspace.clone(), false).await { + error!(keyspace = keyspace, error = %error, "Failed to set keyspace"); + } + + Ok(()) +} + +/// Create the Schema on the connected Cassandra DB +pub(crate) async fn create_schema( + session: &mut Arc, cfg: &CassandraEnvVars, +) -> anyhow::Result<()> { + create_namespace(session, cfg).await?; + + for schema in SCHEMAS { + let stmt = session + .prepare(schema.0) + .await + .context(format!("{} : Prepared", schema.1))?; + + session + .execute_unpaged(&stmt, ()) + .await + .context(format!("{} : Executed", schema.1))?; + } + + // Wait for the Schema to be ready. + session.await_schema_agreement().await?; + + Ok(()) +} diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs new file mode 100644 index 00000000000..884f5a7a7ac --- /dev/null +++ b/catalyst-gateway/bin/src/db/index/session.rs @@ -0,0 +1,285 @@ +//! Session creation and storage + +use std::{ + path::PathBuf, + sync::{Arc, OnceLock}, + time::Duration, +}; + +use openssl::ssl::{SslContextBuilder, SslFiletype, SslMethod, SslVerifyMode}; +use scylla::{ + frame::Compression, serialize::row::SerializeRow, transport::iterator::RowIterator, + ExecutionProfile, Session, SessionBuilder, +}; +use tokio::fs; +use tracing::{error, info}; + +use super::{ + queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, PreparedSelectQuery}, + schema::create_schema, +}; +use crate::{ + db::index::queries, + settings::{CassandraEnvVars, Settings}, +}; + +/// Configuration Choices for compression +#[derive(Clone, strum::EnumString, strum::Display, strum::VariantNames)] +#[strum(ascii_case_insensitive)] +pub(crate) enum CompressionChoice { + /// LZ4 link data compression. + Lz4, + /// Snappy link data compression. + Snappy, + /// No compression. + None, +} + +/// Configuration Choices for TLS. +#[derive(Clone, strum::EnumString, strum::Display, strum::VariantNames, PartialEq)] +#[strum(ascii_case_insensitive)] +pub(crate) enum TlsChoice { + /// Disable TLS. + Disabled, + /// Verifies that the peer's certificate is trusted. + Verified, + /// Disables verification of the peer's certificate. + Unverified, +} + +/// All interaction with cassandra goes through this struct. +#[derive(Clone)] +pub(crate) struct CassandraSession { + /// Is the session to the persistent or volatile DB? + #[allow(dead_code)] + persistent: bool, + /// Configuration for this session. + cfg: Arc, + /// The actual session. + session: Arc, + /// All prepared queries we can use on this session. + queries: Arc, +} + +/// Persistent DB Session. +static PERSISTENT_SESSION: OnceLock> = OnceLock::new(); + +/// Volatile DB Session. +static VOLATILE_SESSION: OnceLock> = OnceLock::new(); + +impl CassandraSession { + /// Initialise the Cassandra Cluster Connections. 
+ pub(crate) fn init() { + let (persistent, volatile) = Settings::cassandra_db_cfg(); + + let _join_handle = tokio::task::spawn(async move { retry_init(persistent, true).await }); + let _join_handle = tokio::task::spawn(async move { retry_init(volatile, false).await }); + } + + /// Check to see if the Cassandra Indexing DB is ready for use + pub(crate) fn is_ready() -> bool { + PERSISTENT_SESSION.get().is_some() && VOLATILE_SESSION.get().is_some() + } + + /// Wait for the Cassandra Indexing DB to be ready before continuing + pub(crate) async fn wait_is_ready(interval: Duration) { + loop { + if Self::is_ready() { + break; + } + + tokio::time::sleep(interval).await; + } + } + + /// Get the session needed to perform a query. + pub(crate) fn get(persistent: bool) -> Option> { + if persistent { + PERSISTENT_SESSION.get().cloned() + } else { + VOLATILE_SESSION.get().cloned() + } + } + + /// Executes a select query with the given parameters. + /// + /// Returns an iterator that iterates over all the result pages that the query + /// returns. + pub(crate) async fn execute_iter

( + &self, select_query: PreparedSelectQuery, params: P, + ) -> anyhow::Result + where P: SerializeRow { + let session = self.session.clone(); + let queries = self.queries.clone(); + + queries.execute_iter(session, select_query, params).await + } + + /// Execute a Batch query with the given parameters. + /// + /// Values should be a Vec of values which implement `SerializeRow` and they MUST be + /// the same, and must match the query being executed. + /// + /// This will divide the batch into optimal sized chunks and execute them until all + /// values have been executed or the first error is encountered. + pub(crate) async fn execute_batch( + &self, query: PreparedQuery, values: Vec, + ) -> FallibleQueryResults { + let session = self.session.clone(); + let cfg = self.cfg.clone(); + let queries = self.queries.clone(); + + queries.execute_batch(session, cfg, query, values).await + } +} + +/// Create a new execution profile based on the given configuration. +/// +/// The intention here is that we should be able to tune this based on configuration, +/// but for now we don't so the `cfg` is not used yet. +fn make_execution_profile(_cfg: &CassandraEnvVars) -> ExecutionProfile { + ExecutionProfile::builder() + .consistency(scylla::statement::Consistency::LocalQuorum) + .serial_consistency(Some(scylla::statement::SerialConsistency::LocalSerial)) + .retry_policy(Box::new(scylla::retry_policy::DefaultRetryPolicy::new())) + .load_balancing_policy( + scylla::load_balancing::DefaultPolicyBuilder::new() + .permit_dc_failover(true) + .build(), + ) + .speculative_execution_policy(Some(Arc::new( + scylla::speculative_execution::SimpleSpeculativeExecutionPolicy { + max_retry_count: 3, + retry_interval: Duration::from_millis(100), + }, + ))) + .build() +} + +/// Construct a session based on the given configuration. +async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result> { + let cluster_urls: Vec<&str> = cfg.url.as_str().split(',').collect(); + + let mut sb = SessionBuilder::new() + .known_nodes(cluster_urls) + .auto_await_schema_agreement(false); + + let profile_handle = make_execution_profile(cfg).into_handle(); + sb = sb.default_execution_profile_handle(profile_handle); + + sb = match cfg.compression { + CompressionChoice::Lz4 => sb.compression(Some(Compression::Lz4)), + CompressionChoice::Snappy => sb.compression(Some(Compression::Snappy)), + CompressionChoice::None => sb.compression(None), + }; + + if cfg.tls != TlsChoice::Disabled { + let mut context_builder = SslContextBuilder::new(SslMethod::tls())?; + + if let Some(cert_name) = &cfg.tls_cert { + let certdir = fs::canonicalize(PathBuf::from(cert_name.as_str())).await?; + context_builder.set_certificate_file(certdir.as_path(), SslFiletype::PEM)?; + } + + if cfg.tls == TlsChoice::Verified { + context_builder.set_verify(SslVerifyMode::PEER); + } else { + context_builder.set_verify(SslVerifyMode::NONE); + } + + let ssl_context = context_builder.build(); + + sb = sb.ssl_context(Some(ssl_context)); + } + + // Set the username and password, if required. + if let Some(username) = &cfg.username { + if let Some(password) = &cfg.password { + sb = sb.user(username.as_str(), password.as_str()); + } + } + + let session = Box::pin(sb.build()).await?; + + Ok(Arc::new(session)) +} + +/// Continuously try and init the DB, if it fails, backoff. +/// +/// Display reasonable logs to help diagnose DB connection issues. 
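For context, a small sketch of how the session lifecycle above is expected to be used at service start-up. It is illustrative only and not part of this patch: the wrapper function and its call site are assumptions; `init`, `wait_is_ready` and `get` are the functions defined earlier in this file, and `Duration` is `std::time::Duration` as already imported above.

async fn start_index_db() {
    // Spawn the background connection/retry tasks for both the persistent and volatile DBs.
    CassandraSession::init();

    // Hold readiness-sensitive work until both sessions are connected and their
    // schema and prepared queries are set up.
    CassandraSession::wait_is_ready(Duration::from_secs(5)).await;

    // Later, request handlers obtain a handle and run queries through it.
    if let Some(_session) = CassandraSession::get(true) {
        // e.g. _session.execute_iter(..) or _session.execute_batch(..)
    }
}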
+async fn retry_init(cfg: CassandraEnvVars, persistent: bool) { + let mut retry_delay = Duration::from_secs(0); + let db_type = if persistent { "Persistent" } else { "Volatile" }; + + info!(db_type = db_type, "Index DB Session Creation: Started."); + + cfg.log(persistent); + + loop { + tokio::time::sleep(retry_delay).await; + retry_delay = Duration::from_secs(30); // 30 seconds if we every try again. + + info!( + db_type = db_type, + "Attempting to connect to Cassandra DB..." + ); + + // Create a Session to the Cassandra DB. + let session = match make_session(&cfg).await { + Ok(session) => session, + Err(error) => { + let error = format!("{error:?}"); + error!( + db_type = db_type, + error = error, + "Failed to Create Cassandra DB Session" + ); + continue; + }, + }; + + // Set up the Schema for it. + if let Err(error) = create_schema(&mut session.clone(), &cfg).await { + let error = format!("{error:?}"); + error!( + db_type = db_type, + error = error, + "Failed to Create Cassandra DB Schema" + ); + continue; + } + + let queries = match queries::PreparedQueries::new(session.clone(), &cfg).await { + Ok(queries) => Arc::new(queries), + Err(error) => { + error!( + db_type = db_type, + error = %error, + "Failed to Create Cassandra Prepared Queries" + ); + continue; + }, + }; + + let cassandra_session = CassandraSession { + persistent, + cfg: Arc::new(cfg), + session, + queries, + }; + + // Save the session so we can execute queries on the DB + if persistent { + if PERSISTENT_SESSION.set(Arc::new(cassandra_session)).is_err() { + error!("Persistent Session already set. This should not happen."); + }; + } else if VOLATILE_SESSION.set(Arc::new(cassandra_session)).is_err() { + error!("Volatile Session already set. This should not happen."); + }; + + // IF we get here, then everything seems to have worked, so finish init. + break; + } + + info!(db_type = db_type, "Index DB Session Creation: OK."); +} diff --git a/catalyst-gateway/bin/src/db/mod.rs b/catalyst-gateway/bin/src/db/mod.rs new file mode 100644 index 00000000000..6fcc83ba975 --- /dev/null +++ b/catalyst-gateway/bin/src/db/mod.rs @@ -0,0 +1,4 @@ +//! Database Interfaces + +pub(crate) mod event; +pub(crate) mod index; diff --git a/catalyst-gateway/bin/src/event_db/mod.rs b/catalyst-gateway/bin/src/event_db/mod.rs deleted file mode 100644 index 9dcb83b9aa3..00000000000 --- a/catalyst-gateway/bin/src/event_db/mod.rs +++ /dev/null @@ -1,306 +0,0 @@ -//! Catalyst Election Database crate -use std::{str::FromStr, sync::Arc}; - -use bb8::Pool; -use bb8_postgres::PostgresConnectionManager; -use dotenvy::dotenv; -use stringzilla::StringZilla; -use tokio::sync::RwLock; -use tokio_postgres::{types::ToSql, NoTls, Row}; -use tracing::{debug, debug_span, Instrument}; - -pub(crate) mod cardano; -pub(crate) mod error; -pub(crate) mod legacy; -pub(crate) mod schema_check; - -/// Database URL Environment Variable name. -/// eg: "`postgres://catalyst-dev:CHANGE_ME@localhost/CatalystDev`" -const DATABASE_URL_ENVVAR: &str = "EVENT_DB_URL"; - -/// Database version this crate matches. -/// Must equal the last Migrations Version Number. 
-pub(crate) const DATABASE_SCHEMA_VERSION: i32 = 9; - -#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] -/// Settings for deep query inspection -pub(crate) enum DeepQueryInspectionFlag { - /// Enable deep query inspection - Enabled, - /// Disable deep query inspection - #[default] - Disabled, -} - -impl From for DeepQueryInspectionFlag { - fn from(b: bool) -> Self { - if b { - Self::Enabled - } else { - Self::Disabled - } - } -} - -#[allow(unused)] -/// Connection to the Election Database -pub(crate) struct EventDB { - /// Internal database connection. DO NOT MAKE PUBLIC. - /// All database operations (queries, inserts, etc) should be constrained - /// to this crate and should be exported with a clean data access api. - pool: Pool>, - /// Deep query inspection flag. - deep_query_inspection_flag: Arc>, -} - -/// `EventDB` Errors -#[derive(thiserror::Error, Debug, PartialEq, Eq)] -pub(crate) enum Error { - /// Database statement is not a valid modify statement - #[error("Invalid Modify Statement")] - InvalidModifyStatement, - /// Database statement is not a valid query statement - #[error("Invalid Query Statement")] - InvalidQueryStatement, - /// No DB URL was provided - #[error("DB URL is undefined")] - NoDatabaseUrl, -} - -impl EventDB { - /// Determine if deep query inspection is enabled. - pub(crate) async fn is_deep_query_enabled(&self) -> bool { - *self.deep_query_inspection_flag.read().await == DeepQueryInspectionFlag::Enabled - } - - /// Modify the deep query inspection setting. - /// - /// # Arguments - /// - /// * `deep_query` - `DeepQueryInspection` setting. - pub(crate) async fn modify_deep_query( - &self, deep_query_inspection_flag: DeepQueryInspectionFlag, - ) { - let mut flag = self.deep_query_inspection_flag.write().await; - *flag = deep_query_inspection_flag; - } - - /// Query the database. - /// - /// If deep query inspection is enabled, this will log the query plan inside a - /// rolled-back transaction, before running the query. - /// - /// # Arguments - /// - /// * `stmt` - `&str` SQL statement. - /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. - /// - /// # Returns - /// - /// `Result, anyhow::Error>` - #[must_use = "ONLY use this function for SELECT type operations which return row data, otherwise use `modify()`"] - pub(crate) async fn query( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], - ) -> Result, anyhow::Error> { - if self.is_deep_query_enabled().await { - // Check if this is a query statement - // if is_query_stmt(stmt) { - // self.explain_analyze_rollback(stmt, params).await?; - // } else { - // return Err(Error::InvalidQueryStatement.into()); - // } - self.explain_analyze_rollback(stmt, params).await?; - } - let rows = self.pool.get().await?.query(stmt, params).await?; - Ok(rows) - } - - /// Query the database for a single row. - /// - /// # Arguments - /// - /// * `stmt` - `&str` SQL statement. - /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. 
- /// - /// # Returns - /// - /// `Result` - #[must_use = "ONLY use this function for SELECT type operations which return row data, otherwise use `modify()`"] - pub(crate) async fn query_one( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], - ) -> Result { - if self.is_deep_query_enabled().await { - // Check if this is a query statement - // if is_query_stmt(stmt) { - // self.explain_analyze_rollback(stmt, params).await?; - // } else { - // return Err(Error::InvalidQueryStatement.into()); - // } - self.explain_analyze_rollback(stmt, params).await?; - } - let row = self.pool.get().await?.query_one(stmt, params).await?; - Ok(row) - } - - /// Modify the database. - /// - /// Use this for `UPDATE`, `DELETE`, and other DB statements that - /// don't return data. - /// - /// # Arguments - /// - /// * `stmt` - `&str` SQL statement. - /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. - /// - /// # Returns - /// - /// `anyhow::Result<()>` - pub(crate) async fn modify( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], - ) -> anyhow::Result<()> { - if self.is_deep_query_enabled().await { - // Check if this is a query statement - // if is_query_stmt(stmt) { - // return Err(Error::InvalidModifyStatement.into()); - // } - self.explain_analyze_commit(stmt, params).await?; - } else { - self.pool.get().await?.query(stmt, params).await?; - } - Ok(()) - } - - /// Prepend `EXPLAIN ANALYZE` to the query, and rollback the transaction. - async fn explain_analyze_rollback( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], - ) -> anyhow::Result<()> { - self.explain_analyze(stmt, params, true).await - } - - /// Prepend `EXPLAIN ANALYZE` to the query, and commit the transaction. - async fn explain_analyze_commit( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], - ) -> anyhow::Result<()> { - self.explain_analyze(stmt, params, false).await - } - - /// Prepend `EXPLAIN ANALYZE` to the query. - /// - /// Log the query plan inside a transaction that may be committed or rolled back. - /// - /// # Arguments - /// - /// * `stmt` - `&str` SQL statement. - /// * `params` - `&[&(dyn ToSql + Sync)]` SQL parameters. - /// * `rollback` - `bool` whether to roll back the transaction or not. - async fn explain_analyze( - &self, stmt: &str, params: &[&(dyn ToSql + Sync)], rollback: bool, - ) -> anyhow::Result<()> { - let span = debug_span!( - "query_plan", - query_statement = stmt, - params = format!("{:?}", params), - uuid = uuid::Uuid::new_v4().to_string() - ); - - async move { - let mut conn = self.pool.get().await?; - let transaction = conn.transaction().await?; - let explain_stmt = transaction - .prepare(format!("EXPLAIN ANALYZE {stmt}").as_str()) - .await?; - let rows = transaction.query(&explain_stmt, params).await?; - for r in rows { - let query_plan_str: String = r.get("QUERY PLAN"); - debug!("{}", query_plan_str); - } - if rollback { - transaction.rollback().await?; - } else { - transaction.commit().await?; - } - Ok(()) - } - .instrument(span) - .await - } -} - -/// Establish a connection to the database, and check the schema is up-to-date. -/// -/// # Parameters -/// -/// * `url` set to the postgres connection string needed to connect to the database. IF -/// it is None, then the env var "`DATABASE_URL`" will be used for this connection -/// string. eg: "`postgres://catalyst-dev:CHANGE_ME@localhost/CatalystDev`" -/// * `do_schema_check` boolean flag to decide whether to verify the schema version or -/// not. If it is `true`, a query is made to verify the DB schema version. 
-/// -/// # Errors -/// -/// This function will return an error if: -/// * `url` is None and the environment variable "`DATABASE_URL`" isn't set. -/// * There is any error communicating the the database to check its schema. -/// * The database schema in the DB does not 100% match the schema supported by this -/// library. -/// -/// # Notes -/// -/// The env var "`DATABASE_URL`" can be set directly as an anv var, or in a -/// `.env` file. -pub(crate) async fn establish_connection(url: Option) -> anyhow::Result { - // Support env vars in a `.env` file, doesn't need to exist. - dotenv().ok(); - - let database_url = match url { - Some(url) => url, - // If the Database connection URL is not supplied, try and get from the env var. - None => std::env::var(DATABASE_URL_ENVVAR).map_err(|_| Error::NoDatabaseUrl)?, - }; - - let config = tokio_postgres::config::Config::from_str(&database_url)?; - - let pg_mgr = PostgresConnectionManager::new(config, tokio_postgres::NoTls); - - let pool = Pool::builder().build(pg_mgr).await?; - - Ok(EventDB { - pool, - deep_query_inspection_flag: Arc::default(), - }) -} - -/// Determine if the statement is a query statement. -/// -/// Returns true f the query statement starts with `SELECT` or contains `RETURNING`. -fn is_query_stmt(stmt: &str) -> bool { - // First, determine if the statement is a `SELECT` operation - if let Some(stmt) = &stmt.get(..6) { - if *stmt == "SELECT" { - return true; - } - } - // Otherwise, determine if the statement contains `RETURNING` - stmt.sz_rfind("RETURNING").is_some() -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_is_query_statement() { - let stmt = "SELECT * FROM dummy"; - assert!(is_query_stmt(stmt)); - let stmt = "UPDATE dummy SET foo = $1 WHERE bar = $2 RETURNING *"; - assert!(is_query_stmt(stmt)); - } - - #[test] - fn test_is_not_query_statement() { - let stmt = "UPDATE dummy SET foo_count = foo_count + 1 WHERE bar = (SELECT bar_id FROM foo WHERE name = 'FooBar')"; - assert!(!is_query_stmt(stmt)); - let stmt = "UPDATE dummy SET foo = $1 WHERE bar = $2"; - assert!(!is_query_stmt(stmt)); - } -} diff --git a/catalyst-gateway/bin/src/logger.rs b/catalyst-gateway/bin/src/logger.rs index 4fc0abcb6b0..104c3cce6fe 100644 --- a/catalyst-gateway/bin/src/logger.rs +++ b/catalyst-gateway/bin/src/logger.rs @@ -1,7 +1,9 @@ //! Setup for logging for the service. +use std::sync::OnceLock; + use clap::ValueEnum; -use tracing::level_filters::LevelFilter; +use tracing::{level_filters::LevelFilter, log::error}; use tracing_subscriber::{ fmt::{self, format::FmtSpan, time}, prelude::*, @@ -9,6 +11,7 @@ use tracing_subscriber::{ Registry, }; +use crate::settings::Settings; /// Default log level pub(crate) const LOG_LEVEL_DEFAULT: &str = "info"; @@ -47,31 +50,89 @@ impl From for tracing::log::LevelFilter { } } +/// Logger Handle for the Service. +static LOGGER_HANDLE: OnceLock = OnceLock::new(); + +/// Default Span Guard for the Service. +static GLOBAL_SPAN: OnceLock = OnceLock::new(); + +/// Default Span Guard for the Service. +static SPAN_GUARD: OnceLock = OnceLock::new(); + +/// Handle to our Logger +pub(crate) type LoggerHandle = Handle; + +/// Set the default fields in a log, using a global span. +fn set_default_span() { + let server_id = Settings::service_id(); + // This is a hacky way to add fields to every log line. + // Add Fields here, as required. + let global_span = tracing::info_span!("Global", ServerID = server_id); + if GLOBAL_SPAN.set(global_span).is_err() { + error!("Failed to set default span. 
Is it already set?"); + } + + // It MUST be Some because of the above. + if let Some(global_span) = GLOBAL_SPAN.get() { + let span_guard = global_span.enter(); + if SPAN_GUARD.set(span_guard).is_err() { + error!("Failed to set default span. Is it already set?"); + } + } +} + /// Initialize the tracing subscriber -pub(crate) fn init(log_level: LogLevel) -> Handle { +pub(crate) fn init(log_level: LogLevel) { // Create the formatting layer let layer = fmt::layer() .json() .with_timer(time::UtcTime::rfc_3339()) .with_span_events(FmtSpan::CLOSE) + .with_current_span(true) + .with_span_list(true) .with_target(true) .with_file(true) .with_line_number(true) .with_level(true) .with_thread_names(true) .with_thread_ids(true) - .with_current_span(true) - .with_span_list(true); + .flatten_event(true); + // Create a reloadable layer with the specified log_level let filter = LevelFilter::from_level(log_level.into()); let (filter, logger_handle) = reload::Layer::new(filter); tracing_subscriber::registry() .with(filter) .with(layer) + .with( + tracing_subscriber::EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .from_env_lossy(), + ) .init(); // Logging is globally disabled by default, so globally enable it to the required level. tracing::log::set_max_level(log_level.into()); - logger_handle + if LOGGER_HANDLE.set(logger_handle).is_err() { + error!("Failed to initialize logger handle. Called multiple times?"); + } + + set_default_span(); +} + +/// Modify the logger level setting. +/// This will reload the logger. +pub(crate) fn modify_logger_level(level: LogLevel) { + if let Some(logger_handle) = LOGGER_HANDLE.get() { + if let Err(error) = logger_handle.modify(|f| *f = LevelFilter::from_level(level.into())) { + error!("Failed to modify log level to {:?} : {}", level, error); + } + } else { + // This should never happen. + error!( + "Failed to modify log level to {:?} : Logger handle not available.", + level + ); + } } diff --git a/catalyst-gateway/bin/src/main.rs b/catalyst-gateway/bin/src/main.rs index 05b436d63d8..7331e373d34 100644 --- a/catalyst-gateway/bin/src/main.rs +++ b/catalyst-gateway/bin/src/main.rs @@ -1,14 +1,13 @@ //! Catalyst Data Gateway use clap::Parser; +mod build_info; mod cardano; mod cli; -#[allow(dead_code)] -mod event_db; +mod db; mod logger; mod service; mod settings; -mod state; #[tokio::main] async fn main() -> anyhow::Result<()> { diff --git a/catalyst-gateway/bin/src/service/api/auth/endpoint.rs b/catalyst-gateway/bin/src/service/api/auth/endpoint.rs new file mode 100644 index 00000000000..54bf7e46224 --- /dev/null +++ b/catalyst-gateway/bin/src/service/api/auth/endpoint.rs @@ -0,0 +1,128 @@ +use std::{sync::LazyLock, time::Duration}; + +use dashmap::DashMap; +use ed25519_dalek::{Signature, VerifyingKey, PUBLIC_KEY_LENGTH}; +use moka::future::Cache; +use poem::{error::ResponseError, http::StatusCode, Request}; +use poem_openapi::{auth::Bearer, SecurityScheme}; +use tracing::error; + +use super::token::{Kid, SignatureEd25519, UlidBytes}; +use crate::service::api::auth::token::decode_auth_token_ed25519; + +/// Decoded token consists of a Kid, Ulid and Signature +pub type DecodedAuthToken = (Kid, UlidBytes, SignatureEd25519); + +/// Auth token in the form of catv1.. 
+pub type EncodedAuthToken = String; + +/// Cached auth tokens +static CACHE: LazyLock> = LazyLock::new(|| { + Cache::builder() + // Time to live (TTL): 30 minutes + .time_to_live(Duration::from_secs(30 * 60)) + // Time to idle (TTI): 5 minutes + .time_to_idle(Duration::from_secs(5 * 60)) + // Create the cache. + .build() +}); + +/// Mocked Valid certificates +/// TODO: the following is temporary state for POC until RBAC database is complete. +static CERTS: LazyLock> = LazyLock::new(|| { + /// Mock KID + const KID: &str = "0467de6bd945b9207bfa09d846b77ef5"; + + let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [ + 180, 91, 130, 149, 226, 112, 29, 45, 188, 141, 64, 147, 250, 233, 75, 151, 151, 53, 248, + 197, 225, 122, 24, 67, 207, 100, 162, 152, 232, 102, 89, 162, + ]; + + let cert_map = DashMap::new(); + cert_map.insert(KID.to_string(), public_key_bytes); + cert_map +}); + +#[derive(SecurityScheme)] +#[oai( + rename = "CatalystSecurityScheme", + ty = "bearer", + key_in = "header", + key_name = "Bearer", + checker = "checker_api_catalyst_auth" +)] +#[allow(dead_code)] +/// Auth token security scheme +/// Add to endpoint params e.g async fn endpoint(&self, auth: `CatalystSecurityScheme`) +pub struct CatalystSecurityScheme(pub DecodedAuthToken); + +#[derive(Debug, thiserror::Error)] +#[error("Corrupt Auth Token")] +pub struct AuthTokenError; + +impl ResponseError for AuthTokenError { + fn status(&self) -> StatusCode { + StatusCode::FORBIDDEN + } +} + +/// When added to an endpoint, this hook is called per request to verify the bearer token +/// is valid. +async fn checker_api_catalyst_auth( + _req: &Request, bearer: Bearer, +) -> poem::Result { + if CACHE.contains_key(&bearer.token) { + // This get() will extend the entry life for another 5 minutes. + // Even though we keep calling get(), the entry will expire + // after 30 minutes (TTL) from the origin insert(). + if let Some((kid, ulid, sig)) = CACHE.get(&bearer.token).await { + Ok((kid, ulid, sig)) + } else { + error!("Auth token is not in the cache: {:?}", bearer.token); + Err(AuthTokenError)? + } + } else { + // Decode bearer token + let (kid, ulid, sig, msg) = match decode_auth_token_ed25519(&bearer.token.clone()) { + Ok((kid, ulid, sig, msg)) => (kid, ulid, sig, msg), + Err(err) => { + error!("Corrupt auth token: {:?}", err); + Err(AuthTokenError)? + }, + }; + + // Get pub key from CERTS state given decoded KID from decoded bearer token + let pub_key_bytes = if let Some(cert) = CERTS.get(&hex::encode(kid.0)) { + *cert + } else { + error!("Invalid KID {:?}", kid); + Err(AuthTokenError)? + }; + + let public_key = match VerifyingKey::from_bytes(&pub_key_bytes) { + Ok(pub_key) => pub_key, + Err(err) => { + error!("Invalid public key: {:?}", err); + Err(AuthTokenError)? + }, + }; + + // Strictly verify a signature on a message with this key-pair public key. + if public_key + .verify_strict(&msg, &Signature::from_bytes(&sig.0)) + .is_err() + { + error!( + "Message {:?} was not signed by this key-pair {:?}", + hex::encode(msg), + public_key, + ); + Err(AuthTokenError)?; + } + + // This entry will expire after 5 minutes (TTI) if there is no get(). 
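As an aside, this is roughly how the `CatalystSecurityScheme` defined above is meant to be attached to an endpoint, per its doc comment. The API struct, path, and response type here are hypothetical and purely illustrative; only the security-scheme parameter itself comes from this patch.

```rust
use poem_openapi::{payload::PlainText, OpenApi};

/// Hypothetical API used only to illustrate the security scheme.
struct ExampleApi;

#[OpenApi]
impl ExampleApi {
    /// Taking `CatalystSecurityScheme` as a parameter makes poem run
    /// `checker_api_catalyst_auth` on the bearer token before this body executes.
    #[oai(path = "/example", method = "get")]
    async fn example(&self, auth: CatalystSecurityScheme) -> PlainText<String> {
        // The decoded (kid, ulid, signature) tuple is available via `.0`.
        let (kid, _ulid, _sig) = auth.0;
        PlainText(hex::encode(kid.0))
    }
}
```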
+ CACHE.insert(bearer.token, (kid, ulid, sig.clone())).await; + + Ok((kid, ulid, sig)) + } +} diff --git a/catalyst-gateway/bin/src/service/api/auth/mod.rs b/catalyst-gateway/bin/src/service/api/auth/mod.rs new file mode 100644 index 00000000000..d54d3fd16ce --- /dev/null +++ b/catalyst-gateway/bin/src/service/api/auth/mod.rs @@ -0,0 +1,4 @@ +/// Cat security scheme +pub mod endpoint; +/// Token encoding decoding logic +mod token; diff --git a/catalyst-gateway/bin/src/service/api/auth/token.rs b/catalyst-gateway/bin/src/service/api/auth/token.rs new file mode 100644 index 00000000000..7684f3fc3b1 --- /dev/null +++ b/catalyst-gateway/bin/src/service/api/auth/token.rs @@ -0,0 +1,145 @@ +use anyhow::Ok; +use base64::{prelude::BASE64_STANDARD, Engine}; +use ed25519_dalek::{Signer, SigningKey, SECRET_KEY_LENGTH, SIGNATURE_LENGTH}; +use pallas::codec::minicbor; + +/// Key ID - Blake2b-128 hash of the Role 0 Certificate defining the Session public key. +/// BLAKE2b-128 produces digest side of 16 bytes. +#[derive(Debug, Clone, Copy)] +pub struct Kid(pub [u8; 16]); + +/// Identifier for this token, encodes both the time the token was issued and a random +/// nonce. +#[derive(Debug, Clone, Copy)] +pub struct UlidBytes(pub [u8; 16]); + +/// Ed25519 signatures are (64 bytes) +#[derive(Debug, Clone)] +pub struct SignatureEd25519(pub [u8; 64]); + +/// The Encoded Binary Auth Token is a [CBOR sequence] that consists of 3 fields [ kid, +/// ulid, signature ]. ED25519 Signature over the preceding two fields - sig(cbor(kid), +/// cbor(ulid)) +#[allow(dead_code)] +pub fn encode_auth_token_ed25519( + kid: &Kid, ulid: &UlidBytes, secret_key_bytes: [u8; SECRET_KEY_LENGTH], +) -> anyhow::Result { + /// Auth token prefix as per spec + const AUTH_TOKEN_PREFIX: &str = "catv1"; + + let sk: SigningKey = SigningKey::from_bytes(&secret_key_bytes); + + let out: Vec = Vec::new(); + let mut encoder = minicbor::Encoder::new(out); + + encoder.bytes(&kid.0)?; + encoder.bytes(&ulid.0)?; + + let signature: [u8; SIGNATURE_LENGTH] = sk.sign(encoder.writer()).to_bytes(); + + encoder.bytes(&signature)?; + + Ok(format!( + "{}.{}", + AUTH_TOKEN_PREFIX, + BASE64_STANDARD.encode(encoder.writer()) + )) +} + +/// Decode base64 cbor encoded auth token into constituent parts of (kid, ulid, signature) +/// e.g catv1.UAARIjNEVWZ3iJmqu8zd7v9QAZEs7HHPLEwUpV1VhdlNe1hAAAAAAAAAAAAA... +#[allow(dead_code)] +pub fn decode_auth_token_ed25519( + auth_token: &str, +) -> anyhow::Result<(Kid, UlidBytes, SignatureEd25519, Vec)> { + /// The message is a Cbor sequence (cbor(kid) + cbor(ulid)): + /// kid + ulid are 16 bytes a piece, with 1 byte extra due to cbor encoding, + /// The two fields include their encoding resulting in 17 bytes each. 
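The 34-byte figure used for `KID_ULID_CBOR_ENCODED_BYTES` just below follows from the doc comment above: a definite-length CBOR byte string of 16 bytes carries a 1-byte header (major type 2, length 16, i.e. `0x50`), so each field costs 17 bytes. A minimal standalone sketch (not part of the patch) that checks this with the same `minicbor` API the encoder uses:

```rust
use pallas::codec::minicbor;

/// Confirms that cbor(kid) + cbor(ulid) occupies 34 bytes when both are 16-byte fields.
fn signed_message_len() -> usize {
    let mut encoder = minicbor::Encoder::new(Vec::new());
    // Each 16-byte byte string encodes as 0x50 (header) + 16 payload bytes = 17 bytes.
    encoder.bytes(&[0u8; 16]).expect("encoding to a Vec cannot fail"); // cbor(kid)
    encoder.bytes(&[0u8; 16]).expect("encoding to a Vec cannot fail"); // cbor(ulid)
    encoder.writer().len() // 2 * 17 = 34
}
```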
+ const KID_ULID_CBOR_ENCODED_BYTES: u8 = 34; + /// Auth token prefix + const AUTH_TOKEN_PREFIX: &str = "catv1"; + + let token = auth_token.split('.').collect::>(); + + let prefix = token.first().ok_or(anyhow::anyhow!("No valid prefix"))?; + if *prefix != AUTH_TOKEN_PREFIX { + return Err(anyhow::anyhow!("Corrupt token, invalid prefix")); + } + let token_base64 = token.get(1).ok_or(anyhow::anyhow!("No valid token"))?; + let token_cbor_encoded = BASE64_STANDARD.decode(token_base64)?; + + // We verify the signature on the message which corresponds to a Cbor sequence (cbor(kid) + // + cbor(ulid)): + let message_cbor_encoded = &token_cbor_encoded + .get(0..KID_ULID_CBOR_ENCODED_BYTES.into()) + .ok_or(anyhow::anyhow!("No valid token"))?; + + // Decode cbor to bytes + let mut cbor_decoder = minicbor::Decoder::new(&token_cbor_encoded); + + // Raw kid bytes + let kid = Kid(cbor_decoder + .bytes() + .map_err(|e| anyhow::anyhow!(format!("Invalid cbor for kid : {e}")))? + .try_into()?); + + // Raw ulid bytes + let ulid = UlidBytes( + cbor_decoder + .bytes() + .map_err(|e| anyhow::anyhow!(format!("Invalid cbor for ulid : {e}")))? + .try_into()?, + ); + + // Raw signature + let signature = SignatureEd25519( + cbor_decoder + .bytes() + .map_err(|e| anyhow::anyhow!(format!("Invalid cbor for sig : {e}")))? + .try_into()?, + ); + + Ok((kid, ulid, signature, message_cbor_encoded.to_vec())) +} + +#[cfg(test)] +mod tests { + + use ed25519_dalek::{Signature, SigningKey, Verifier, SECRET_KEY_LENGTH}; + use rand::rngs::OsRng; + + use super::{encode_auth_token_ed25519, Kid, UlidBytes}; + use crate::service::api::auth::token::decode_auth_token_ed25519; + + #[test] + fn test_token_generation_and_decoding() { + let kid: [u8; 16] = hex::decode("00112233445566778899aabbccddeeff") + .unwrap() + .try_into() + .unwrap(); + let ulid: [u8; 16] = hex::decode("01912cec71cf2c4c14a55d5585d94d7b") + .unwrap() + .try_into() + .unwrap(); + + let mut random_seed = OsRng; + let signing_key: SigningKey = SigningKey::generate(&mut random_seed); + + let verifying_key = signing_key.verifying_key(); + + let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = *signing_key.as_bytes(); + + let auth_token = + encode_auth_token_ed25519(&Kid(kid), &UlidBytes(ulid), secret_key_bytes).unwrap(); + + let (decoded_kid, decoded_ulid, decoded_sig, message) = + decode_auth_token_ed25519(&auth_token).unwrap(); + + assert_eq!(decoded_kid.0, kid); + assert_eq!(decoded_ulid.0, ulid); + + verifying_key + .verify(&message, &Signature::from(&decoded_sig.0)) + .unwrap(); + } +} diff --git a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs index bc9738e8f02..b459460b9ae 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/date_time_to_slot_number_get.rs @@ -1,20 +1,14 @@ //! 
Implementation of the GET `/date_time_to_slot_number` endpoint -use poem_openapi::{payload::Json, ApiResponse}; +use poem_openapi::{payload::Json, types::Example, ApiResponse}; -use crate::{ - event_db::{ - cardano::chain_state::{BlockHash, DateTime, SlotInfoQueryType, SlotNumber}, - error::NotFoundError, +use super::types::DateTime; +use crate::service::common::{ + objects::cardano::{ + network::Network, + slot_info::{Slot, SlotInfo}, }, - service::common::{ - objects::cardano::{ - network::Network, - slot_info::{Slot, SlotInfo}, - }, - responses::WithErrorResponses, - }, - state::State, + responses::WithErrorResponses, }; /// Endpoint responses. @@ -29,56 +23,60 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/date_time_to_slot_number` -#[allow(clippy::unused_async)] +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( - state: &State, date_time: Option, network: Option, + date_time: Option, network: Option, ) -> AllResponses { - let event_db = state.event_db(); + let _date_time = date_time.unwrap_or_else(chrono::Utc::now); + let _network = network.unwrap_or(Network::Mainnet); - let date_time = date_time.unwrap_or_else(chrono::Utc::now); - let network = network.unwrap_or(Network::Mainnet); + let previous = Some(Slot::example()); + let current = Some(Slot::example()); + let next = Some(Slot::example()); - let (previous, current, next) = tokio::join!( - event_db.get_slot_info( - date_time, - network.clone().into(), - SlotInfoQueryType::Previous - ), - event_db.get_slot_info( - date_time, - network.clone().into(), - SlotInfoQueryType::Current - ), - event_db.get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) - ); + let _unused = " + let (previous, current, next) = tokio::join!( + EventDB::get_slot_info( + date_time, + network.clone().into(), + SlotInfoQueryType::Previous + ), + EventDB::get_slot_info( + date_time, + network.clone().into(), + SlotInfoQueryType::Current + ), + EventDB::get_slot_info(date_time, network.into(), SlotInfoQueryType::Next) + ); - let process_slot_info_result = - |slot_info_result: anyhow::Result<(SlotNumber, BlockHash, DateTime)>| { - match slot_info_result { - Ok((slot_number, block_hash, block_time)) => { - Ok(Some(Slot { - slot_number, - block_hash: From::from(block_hash), - block_time, - })) - }, - Err(err) if err.is::() => Ok(None), - Err(err) => Err(err), - } - }; + let process_slot_info_result = + |slot_info_result: anyhow::Result<(SlotNumber, BlockHash, DateTime)>| { + match slot_info_result { + Ok((slot_number, block_hash, block_time)) => { + Ok(Some(Slot { + slot_number, + block_hash: From::from(block_hash), + block_time, + })) + }, + Err(err) if err.is::() => Ok(None), + Err(err) => Err(err), + } + }; - let current = match process_slot_info_result(current) { - Ok(current) => current, - Err(err) => return AllResponses::handle_error(&err), - }; - let previous = match process_slot_info_result(previous) { - Ok(current) => current, - Err(err) => return AllResponses::handle_error(&err), - }; - let next = match process_slot_info_result(next) { - Ok(current) => current, - Err(err) => return AllResponses::handle_error(&err), - }; + let current = match process_slot_info_result(current) { + Ok(current) => current, + Err(err) => return AllResponses::handle_error(&err), + }; + let previous = match process_slot_info_result(previous) { + Ok(current) => current, + Err(err) => return AllResponses::handle_error(&err), + }; + let next = match 
process_slot_info_result(next) { + Ok(current) => current, + Err(err) => return AllResponses::handle_error(&err), + }; + "; Responses::Ok(Json(SlotInfo { previous, diff --git a/catalyst-gateway/bin/src/service/api/cardano/mod.rs b/catalyst-gateway/bin/src/service/api/cardano/mod.rs index 727d2b1bed9..9e83da3c475 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/mod.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/mod.rs @@ -1,29 +1,23 @@ //! Cardano API endpoints - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{ param::{Path, Query}, OpenApi, }; +use types::{DateTime, SlotNumber}; -use crate::{ - event_db::cardano::chain_state::{DateTime, SlotNumber}, - service::{ - common::{ - objects::cardano::{network::Network, stake_address::StakeAddress}, - tags::ApiTags, - }, - utilities::middleware::schema_validation::schema_version_validation, +use crate::service::{ + common::{ + objects::cardano::{network::Network, stake_address::StakeAddress}, + tags::ApiTags, }, - state::State, + utilities::middleware::schema_validation::schema_version_validation, }; mod date_time_to_slot_number_get; mod registration_get; mod staked_ada_get; mod sync_state_get; +pub(crate) mod types; /// Cardano Follower API Endpoints pub(crate) struct CardanoApi; @@ -41,7 +35,7 @@ impl CardanoApi { /// This endpoint returns the total Cardano's staked ada amount to the corresponded /// user's stake address. async fn staked_ada_get( - &self, data: Data<&Arc>, + &self, /// The stake address of the user. /// Should a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses. stake_address: Path, @@ -59,7 +53,7 @@ impl CardanoApi { #[oai(validator(minimum(value = "0"), maximum(value = "9223372036854775807")))] slot_number: Query>, ) -> staked_ada_get::AllResponses { - staked_ada_get::endpoint(&data, stake_address.0, network.0, slot_number.0).await + staked_ada_get::endpoint(stake_address.0, network.0, slot_number.0).await } #[oai( @@ -73,7 +67,7 @@ impl CardanoApi { /// This endpoint returns the registration info followed by the [CIP-36](https://cips.cardano.org/cip/CIP-36/) to the /// corresponded user's stake address. async fn registration_get( - &self, data: Data<&Arc>, + &self, /// The stake address of the user. /// Should a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses. stake_address: Path, @@ -91,7 +85,7 @@ impl CardanoApi { #[oai(validator(minimum(value = "0"), maximum(value = "9223372036854775807")))] slot_number: Query>, ) -> registration_get::AllResponses { - registration_get::endpoint(&data, stake_address.0, network.0, slot_number.0).await + registration_get::endpoint(stake_address.0, network.0, slot_number.0).await } #[oai( @@ -104,7 +98,7 @@ impl CardanoApi { /// /// This endpoint returns the current cardano follower's sync state info. async fn sync_state_get( - &self, data: Data<&Arc>, + &self, /// Cardano network type. /// If omitted `mainnet` network type is defined. /// As `preprod` and `preview` network types in the stake address encoded as a @@ -112,7 +106,7 @@ impl CardanoApi { /// query parameter. network: Query>, ) -> sync_state_get::AllResponses { - sync_state_get::endpoint(&data, network.0).await + sync_state_get::endpoint(network.0).await } #[oai( @@ -126,7 +120,7 @@ impl CardanoApi { /// This endpoint returns the closest cardano slot info to the provided /// date-time. 
async fn date_time_to_slot_number_get( - &self, data: Data<&Arc>, + &self, /// The date-time for which the slot number should be calculated. /// If omitted current date time is used. date_time: Query>, @@ -137,6 +131,6 @@ impl CardanoApi { /// query parameter. network: Query>, ) -> date_time_to_slot_number_get::AllResponses { - date_time_to_slot_number_get::endpoint(&data, date_time.0, network.0).await + date_time_to_slot_number_get::endpoint(date_time.0, network.0).await } } diff --git a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs index 86f284dfbf5..00c4e89d1bd 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/registration_get.rs @@ -2,22 +2,20 @@ use poem_openapi::{payload::Json, ApiResponse}; -use crate::{ - event_db::{cardano::chain_state::SlotNumber, error::NotFoundError}, - service::{ - common::{ - objects::cardano::{ - network::Network, registration_info::RegistrationInfo, stake_address::StakeAddress, - }, - responses::WithErrorResponses, +use super::types::SlotNumber; +use crate::service::{ + common::{ + objects::cardano::{ + network::Network, registration_info::RegistrationInfo, stake_address::StakeAddress, }, - utilities::check_network, + responses::WithErrorResponses, }, - state::State, + utilities::check_network, }; /// Endpoint responses #[derive(ApiResponse)] +#[allow(dead_code)] pub(crate) enum Responses { /// The registration information for the stake address queried. #[oai(status = 200)] @@ -32,24 +30,20 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/registration` +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( - state: &State, stake_address: StakeAddress, provided_network: Option, - slot_num: Option, + stake_address: StakeAddress, provided_network: Option, slot_num: Option, ) -> AllResponses { - let event_db = state.event_db(); - - let date_time = slot_num.unwrap_or(SlotNumber::MAX); - let stake_credential = stake_address.payload().as_hash().to_vec(); - let network = match check_network(stake_address.network(), provided_network) { + let _date_time = slot_num.unwrap_or(SlotNumber::MAX); + let _stake_credential = stake_address.payload().as_hash().to_vec(); + let _network = match check_network(stake_address.network(), provided_network) { Ok(network) => network, Err(err) => return AllResponses::handle_error(&err), }; + let _unused = " // get the total utxo amount from the database - match event_db - .get_registration_info(stake_credential, network.into(), date_time) - .await - { + match EventDB::get_registration_info(stake_credential, network.into(), date_time).await { Ok((tx_id, payment_address, voting_info, nonce)) => { Responses::Ok(Json(RegistrationInfo::new( tx_id, @@ -62,4 +56,7 @@ pub(crate) async fn endpoint( Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } + "; + + Responses::NotFound.into() } diff --git a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs index 7e039c3f5a6..5ad8f1fef27 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/staked_ada_get.rs @@ -1,27 +1,39 @@ //! 
Implementation of the GET `/staked_ada` endpoint +use std::collections::HashMap; +use anyhow::anyhow; +use futures::StreamExt; use poem_openapi::{payload::Json, ApiResponse}; +use super::types::SlotNumber; use crate::{ - event_db::{cardano::chain_state::SlotNumber, error::NotFoundError}, - service::{ - common::{ - objects::cardano::{ - network::Network, stake_address::StakeAddress, stake_info::StakeInfo, + db::index::{ + queries::staked_ada::{ + get_txi_by_txn_hash::{GetTxiByTxnHashesQuery, GetTxiByTxnHashesQueryParams}, + get_txo_by_stake_address::{ + GetTxoByStakeAddressQuery, GetTxoByStakeAddressQueryParams, }, - responses::WithErrorResponses, + update_txo_spent::{UpdateTxoSpentQuery, UpdateTxoSpentQueryParams}, }, - utilities::check_network, + session::CassandraSession, + }, + service::common::{ + objects::cardano::{ + network::Network, + stake_address::StakeAddress, + stake_info::{FullStakeInfo, StakeInfo}, + }, + responses::WithErrorResponses, }, - state::State, }; /// Endpoint responses. #[derive(ApiResponse)] +#[allow(dead_code)] pub(crate) enum Responses { /// The amount of ADA staked by the queried stake address, as at the indicated slot. #[oai(status = 200)] - Ok(Json), + Ok(Json), /// The queried stake address was not found at the requested slot number. #[oai(status = 404)] NotFound, @@ -31,33 +43,184 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/staked_ada` +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] pub(crate) async fn endpoint( - state: &State, stake_address: StakeAddress, provided_network: Option, - slot_num: Option, + stake_address: StakeAddress, _provided_network: Option, slot_num: Option, ) -> AllResponses { - let event_db = state.event_db(); - - let date_time = slot_num.unwrap_or(SlotNumber::MAX); - let stake_credential = stake_address.payload().as_hash().to_vec(); + let persistent_res = calculate_stake_info(true, stake_address.clone(), slot_num).await; + let persistent_stake_info = match persistent_res { + Ok(stake_info) => stake_info, + Err(err) => return AllResponses::handle_error(&err), + }; - let network = match check_network(stake_address.network(), provided_network) { - Ok(network) => network, + let volatile_res = calculate_stake_info(false, stake_address, slot_num).await; + let volatile_stake_info = match volatile_res { + Ok(stake_info) => stake_info, Err(err) => return AllResponses::handle_error(&err), }; - // get the total utxo amount from the database - match event_db - .total_utxo_amount(stake_credential, network.into(), date_time) - .await - { - Ok((amount, slot_number)) => { - Responses::Ok(Json(StakeInfo { - amount, - slot_number, - })) - .into() - }, - Err(err) if err.is::() => Responses::NotFound.into(), - Err(err) => AllResponses::handle_error(&err), + if persistent_stake_info.is_none() && volatile_stake_info.is_none() { + return Responses::NotFound.into(); + } + + Responses::Ok(Json(FullStakeInfo { + volatile: volatile_stake_info.unwrap_or_default(), + persistent: persistent_stake_info.unwrap_or_default(), + })) + .into() +} + +/// TXO information used when calculating a user's stake info. +struct TxoInfo { + /// TXO value. + value: num_bigint::BigInt, + /// TXO transaction index within the slot. + txn: i16, + /// TXO index. + txo: i16, + /// TXO transaction slot number. + slot_no: num_bigint::BigInt, + /// Whether the TXO was spent. + spent_slot_no: Option, +} + +/// Calculate the stake info for a given stake address. 
+/// +/// This function also updates the spent column if it detects that a TXO was spent +/// between lookups. +async fn calculate_stake_info( + persistent: bool, stake_address: StakeAddress, slot_num: Option, +) -> anyhow::Result> { + let Some(session) = CassandraSession::get(persistent) else { + anyhow::bail!("Failed to acquire db session"); + }; + + let stake_address_bytes = stake_address.payload().as_hash().to_vec(); + + let mut txos_by_txn = get_txo_by_txn(&session, stake_address_bytes.clone(), slot_num).await?; + if txos_by_txn.is_empty() { + return Ok(None); + } + + check_and_set_spent(&session, &mut txos_by_txn).await?; + update_spent(&session, stake_address_bytes, &txos_by_txn).await?; + + let stake_info = build_stake_info(txos_by_txn)?; + + Ok(Some(stake_info)) +} + +/// Returns a map of TXO infos by transaction hash for the given stake address. +async fn get_txo_by_txn( + session: &CassandraSession, stake_address: Vec, slot_num: Option, +) -> anyhow::Result, HashMap>> { + let mut txos_iter = GetTxoByStakeAddressQuery::execute( + session, + GetTxoByStakeAddressQueryParams::new( + stake_address, + num_bigint::BigInt::from(slot_num.unwrap_or(i64::MAX)), + ), + ) + .await?; + + let mut txos_by_txn = HashMap::new(); + while let Some(row_res) = txos_iter.next().await { + let row = row_res?; + + // Filter out already known spent TXOs. + if row.spent_slot.is_some() { + continue; + } + + let txn_map = txos_by_txn.entry(row.txn_hash).or_insert(HashMap::new()); + txn_map.insert(row.txo, TxoInfo { + value: row.value, + txn: row.txn, + txo: row.txo, + slot_no: row.slot_no, + spent_slot_no: None, + }); + } + + Ok(txos_by_txn) +} + +/// Checks if the given TXOs were spent and mark then as such. +async fn check_and_set_spent( + session: &CassandraSession, txos_by_txn: &mut HashMap, HashMap>, +) -> anyhow::Result<()> { + let txn_hashes = txos_by_txn.keys().cloned().collect::>(); + + for chunk in txn_hashes.chunks(100) { + let mut txi_iter = GetTxiByTxnHashesQuery::execute( + session, + GetTxiByTxnHashesQueryParams::new(chunk.to_vec()), + ) + .await?; + + while let Some(row_res) = txi_iter.next().await { + let row = row_res?; + + if let Some(txn_map) = txos_by_txn.get_mut(&row.txn_hash) { + if let Some(txo_info) = txn_map.get_mut(&row.txo) { + if row.slot_no >= num_bigint::BigInt::ZERO { + txo_info.spent_slot_no = Some(row.slot_no); + } + } + } + } + } + + Ok(()) +} + +/// Sets TXOs as spent in the database if they are marked as spent in the map. +async fn update_spent( + session: &CassandraSession, stake_address: Vec, + txos_by_txn: &HashMap, HashMap>, +) -> anyhow::Result<()> { + let mut params = Vec::new(); + for txn_map in txos_by_txn.values() { + for txo_info in txn_map.values() { + if txo_info.spent_slot_no.is_none() { + continue; + } + + if let Some(spent_slot) = &txo_info.spent_slot_no { + params.push(UpdateTxoSpentQueryParams { + stake_address: stake_address.clone(), + txn: txo_info.txn, + txo: txo_info.txo, + slot_no: txo_info.slot_no.clone(), + spent_slot: spent_slot.clone(), + }); + } + } } + + UpdateTxoSpentQuery::execute(session, params).await?; + + Ok(()) +} + +/// Builds an instance of [`StakeInfo`] based on the TXOs given. 
+fn build_stake_info( + txos_by_txn: HashMap, HashMap>, +) -> anyhow::Result { + let mut stake_info = StakeInfo::default(); + for txn_map in txos_by_txn.into_values() { + for txo_info in txn_map.into_values() { + if txo_info.spent_slot_no.is_none() { + stake_info.amount += i64::try_from(txo_info.value).map_err(|err| anyhow!(err))?; + + let slot_no = i64::try_from(txo_info.slot_no).map_err(|err| anyhow!(err))?; + + if stake_info.slot_number < slot_no { + stake_info.slot_number = slot_no; + } + } + } + } + + Ok(stake_info) } diff --git a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs index e80e2a3b60d..9b93a3c005b 100644 --- a/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs +++ b/catalyst-gateway/bin/src/service/api/cardano/sync_state_get.rs @@ -2,17 +2,14 @@ use poem_openapi::{payload::Json, ApiResponse}; -use crate::{ - event_db::error::NotFoundError, - service::common::{ - objects::cardano::{network::Network, sync_state::SyncState}, - responses::WithErrorResponses, - }, - state::State, +use crate::service::common::{ + objects::cardano::{network::Network, sync_state::SyncState}, + responses::WithErrorResponses, }; /// Endpoint responses. #[derive(ApiResponse)] +#[allow(dead_code)] pub(crate) enum Responses { /// The synchronisation state of the blockchain with the catalyst gateway service. #[oai(status = 200)] @@ -26,13 +23,12 @@ pub(crate) enum Responses { pub(crate) type AllResponses = WithErrorResponses; /// # GET `/sync_state` -#[allow(clippy::unused_async)] -pub(crate) async fn endpoint(state: &State, network: Option) -> AllResponses { - let event_db = state.event_db(); +#[allow(clippy::unused_async, clippy::no_effect_underscore_binding)] +pub(crate) async fn endpoint(network: Option) -> AllResponses { + let _network = network.unwrap_or(Network::Mainnet); - let network = network.unwrap_or(Network::Mainnet); - - match event_db.last_updated_state(network.into()).await { + let _unused = " + match EventDB::last_updated_state(network.into()).await { Ok((slot_number, block_hash, last_updated)) => { Responses::Ok(Json(SyncState { slot_number, @@ -44,4 +40,7 @@ pub(crate) async fn endpoint(state: &State, network: Option) -> AllResp Err(err) if err.is::() => Responses::NotFound.into(), Err(err) => AllResponses::handle_error(&err), } + "; + + Responses::NotFound.into() } diff --git a/catalyst-gateway/bin/src/service/api/cardano/types.rs b/catalyst-gateway/bin/src/service/api/cardano/types.rs new file mode 100644 index 00000000000..f12c4c649c9 --- /dev/null +++ b/catalyst-gateway/bin/src/service/api/cardano/types.rs @@ -0,0 +1,74 @@ +//! Cardano Specific Types +//! +//! These are temporary types are needed to prevent breakage due to the removal of the +//! Event DB logic for chain-sync. They should be replaced with proper types in a better +//! place. 
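As context for the `VotingInfo` type declared below, `#[serde(untagged)]` means the variants are distinguished purely by shape: `Direct` serializes as the bare key bytes, while `Delegated` serializes as an ordered list of `[key, weight]` pairs, which is why element order must be preserved. A small sketch, assuming `serde_json` is available and re-declaring the types locally so the example is self-contained:

```rust
use serde::Serialize;

/// Local stand-ins for the crate-private types below, for illustration only.
#[derive(Serialize)]
struct PubKey(Vec<u8>);

#[derive(Serialize)]
#[serde(untagged)]
enum VotingInfo {
    Direct(PubKey),
    Delegated(Vec<(PubKey, i64)>),
}

fn main() -> serde_json::Result<()> {
    let direct = VotingInfo::Direct(PubKey(vec![0xAA, 0xBB]));
    let delegated = VotingInfo::Delegated(vec![(PubKey(vec![0xAA, 0xBB]), 3)]);
    // Untagged: no variant name appears in the output, only the payload shape.
    assert_eq!(serde_json::to_string(&direct)?, "[170,187]");
    assert_eq!(serde_json::to_string(&delegated)?, "[[[170,187],3]]");
    Ok(())
}
```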
+ +use cryptoxide::{blake2b::Blake2b, digest::Digest}; +use serde::{Deserialize, Serialize}; + +/// Pub key +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub(crate) struct PubKey(Vec); + +impl PubKey { + /// Get credentials, a blake2b 28 bytes hash of the pub key + #[allow(dead_code)] + pub(crate) fn get_credentials(&self) -> [u8; 28] { + let mut digest = [0u8; 28]; + let mut context = Blake2b::new(28); + context.input(&self.0); + context.result(&mut digest); + digest + } + + /// Get bytes + pub(crate) fn bytes(&self) -> &[u8] { + &self.0 + } +} + +/// The source of voting power for a given registration +/// +/// The voting power can either come from: +/// - a single wallet, OR +/// - a set of delegations +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum VotingInfo { + /// Direct voting + /// + /// Voting power is based on the staked ada of the given key + Direct(PubKey), + + /// Delegated voting + /// + /// Voting power is based on the staked ada of the delegated keys + /// order of elements is important and must be preserved. + Delegated(Vec<(PubKey, i64)>), +} + +/// Block time +pub(crate) type DateTime = chrono::DateTime; +/// Slot +pub(crate) type SlotNumber = i64; +/// Transaction id +#[allow(dead_code)] +pub(crate) type TxId = Vec; +/// Stake credential +#[allow(dead_code)] +pub(crate) type StakeCredential = Vec; +/// Public voting key +#[allow(dead_code)] +pub(crate) type PublicVotingInfo = VotingInfo; +/// Payment address +#[allow(dead_code)] +pub(crate) type PaymentAddress = Vec; +/// Nonce +pub(crate) type Nonce = i64; +/// Metadata 61284 +#[allow(dead_code)] +pub(crate) type MetadataCip36 = Vec; +/// Stake amount. +pub(crate) type StakeAmount = i64; diff --git a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs index 77031ff07af..56850e491f0 100644 --- a/catalyst-gateway/bin/src/service/api/health/inspection_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/inspection_get.rs @@ -1,12 +1,8 @@ //! Implementation of the GET /health/inspection endpoint - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{ApiResponse, Enum}; use tracing::debug; -use crate::{event_db, logger, service::common::responses::WithErrorResponses, state::State}; +use crate::{db::event::EventDB, logger, service::common::responses::WithErrorResponses}; /// `LogLevel` Open API definition. #[derive(Debug, Clone, Copy, Enum)] @@ -43,11 +39,11 @@ pub(crate) enum DeepQueryInspectionFlag { Disabled, } -impl From for event_db::DeepQueryInspectionFlag { +impl From for bool { fn from(val: DeepQueryInspectionFlag) -> Self { match val { - DeepQueryInspectionFlag::Enabled => event_db::DeepQueryInspectionFlag::Enabled, - DeepQueryInspectionFlag::Disabled => event_db::DeepQueryInspectionFlag::Disabled, + DeepQueryInspectionFlag::Enabled => true, + DeepQueryInspectionFlag::Disabled => false, } } } @@ -66,20 +62,16 @@ pub(crate) type AllResponses = WithErrorResponses; /// # GET /health/inspection /// /// Inspection settings endpoint. 
+#[allow(clippy::unused_async)] pub(crate) async fn endpoint( - state: Data<&Arc>, log_level: Option, - query_inspection: Option, + log_level: Option, query_inspection: Option, ) -> AllResponses { if let Some(level) = log_level { - match state.modify_logger_level(level.into()) { - Ok(()) => debug!("successfully set log level to: {:?}", level), - Err(err) => return AllResponses::handle_error(&err), - } + logger::modify_logger_level(level.into()); } if let Some(inspection_mode) = query_inspection { - let event_db = state.event_db(); - event_db.modify_deep_query(inspection_mode.into()).await; + EventDB::modify_deep_query(inspection_mode.into()); debug!( "successfully set deep query inspection mode to: {:?}", inspection_mode diff --git a/catalyst-gateway/bin/src/service/api/health/live_get.rs b/catalyst-gateway/bin/src/service/api/health/live_get.rs index 3acf18bf278..84a6cafaf0a 100644 --- a/catalyst-gateway/bin/src/service/api/health/live_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/live_get.rs @@ -4,7 +4,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; use poem_openapi::ApiResponse; -use crate::service::common::responses::WithErrorResponses; +use crate::{db::index::session::CassandraSession, service::common::responses::WithErrorResponses}; /// Flag to determine if the service has started static IS_LIVE: AtomicBool = AtomicBool::new(true); @@ -17,7 +17,7 @@ pub(crate) fn set_live(flag: bool) { /// Get the started flag #[allow(dead_code)] fn is_live() -> bool { - IS_LIVE.load(Ordering::Acquire) + IS_LIVE.load(Ordering::Acquire) && CassandraSession::is_ready() } /// Endpoint responses. diff --git a/catalyst-gateway/bin/src/service/api/health/mod.rs b/catalyst-gateway/bin/src/service/api/health/mod.rs index c5b0b12a726..24c9b20d96c 100644 --- a/catalyst-gateway/bin/src/service/api/health/mod.rs +++ b/catalyst-gateway/bin/src/service/api/health/mod.rs @@ -1,16 +1,12 @@ //! Health Endpoints -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{param::Query, OpenApi}; -use crate::{service::common::tags::ApiTags, state::State}; +use crate::service::common::tags::ApiTags; mod inspection_get; mod live_get; mod ready_get; mod started_get; - pub(crate) use started_get::started; /// Health API Endpoints @@ -42,8 +38,8 @@ impl HealthApi { /// /// *This endpoint is for internal use of the service deployment infrastructure. /// It may not be exposed publicly.* - async fn ready_get(&self, state: Data<&Arc>) -> ready_get::AllResponses { - ready_get::endpoint(state).await + async fn ready_get(&self) -> ready_get::AllResponses { + ready_get::endpoint().await } #[oai(path = "/live", method = "get", operation_id = "healthLive")] @@ -66,9 +62,9 @@ impl HealthApi { )] /// Options for service inspection. async fn inspection( - &self, state: Data<&Arc>, log_level: Query>, + &self, log_level: Query>, query_inspection: Query>, ) -> inspection_get::AllResponses { - inspection_get::endpoint(state, log_level.0, query_inspection.0).await + inspection_get::endpoint(log_level.0, query_inspection.0).await } } diff --git a/catalyst-gateway/bin/src/service/api/health/ready_get.rs b/catalyst-gateway/bin/src/service/api/health/ready_get.rs index cdf1d2ebb23..df5db8db55b 100644 --- a/catalyst-gateway/bin/src/service/api/health/ready_get.rs +++ b/catalyst-gateway/bin/src/service/api/health/ready_get.rs @@ -1,13 +1,9 @@ //! 
Implementation of the GET /health/ready endpoint - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::ApiResponse; use crate::{ - event_db::schema_check::MismatchedSchemaError, service::common::responses::WithErrorResponses, - state::State, + db::event::{schema_check::MismatchedSchemaError, EventDB}, + service::common::responses::WithErrorResponses, }; /// Endpoint responses. @@ -41,8 +37,8 @@ pub(crate) type AllResponses = WithErrorResponses; /// and is not able to properly service requests while it is occurring. /// This would let the load balancer shift traffic to other instances of this /// service that are ready. -pub(crate) async fn endpoint(state: Data<&Arc>) -> AllResponses { - match state.event_db().schema_version_check().await { +pub(crate) async fn endpoint() -> AllResponses { + match EventDB::schema_version_check().await { Ok(_) => { tracing::debug!("DB schema version status ok"); Responses::NoContent.into() diff --git a/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs b/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs index 398c68c1c46..239fa942e16 100644 --- a/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs +++ b/catalyst-gateway/bin/src/service/api/legacy/registration/mod.rs @@ -1,7 +1,4 @@ //! Registration Endpoints -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{ param::{Path, Query}, payload::Json, @@ -9,19 +6,16 @@ use poem_openapi::{ ApiResponse, OpenApi, }; -use crate::{ - service::{ - common::{ - objects::legacy::{ - event_id::EventId, voter_registration::VoterRegistration, - voting_public_key::VotingPublicKey, - }, - responses::WithErrorResponses, - tags::ApiTags, +use crate::service::{ + common::{ + objects::legacy::{ + event_id::EventId, voter_registration::VoterRegistration, + voting_public_key::VotingPublicKey, }, - utilities::middleware::schema_validation::schema_version_validation, + responses::WithErrorResponses, + tags::ApiTags, }, - state::State, + utilities::middleware::schema_validation::schema_version_validation, }; /// Registration API Endpoints @@ -58,7 +52,7 @@ impl RegistrationApi { #[allow(clippy::unused_async)] #[allow(unused_variables)] async fn get_voter_info( - &self, pool: Data<&Arc>, + &self, /// A Voters Public ED25519 Key (as registered in their most recent valid /// [CIP-15](https://cips.cardano.org/cips/cip15) or [CIP-36](https://cips.cardano.org/cips/cip36) registration). #[oai(validator(max_length = 66, min_length = 66, pattern = "0x[0-9a-f]{64}"))] diff --git a/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs b/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs index 060b7b81042..1f7cadcfe11 100644 --- a/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs +++ b/catalyst-gateway/bin/src/service/api/legacy/v0/mod.rs @@ -1,15 +1,8 @@ //! 
`v0` Endpoints - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{payload::Binary, OpenApi}; -use crate::{ - service::{ - common::tags::ApiTags, utilities::middleware::schema_validation::schema_version_validation, - }, - state::State, +use crate::service::{ + common::tags::ApiTags, utilities::middleware::schema_validation::schema_version_validation, }; mod message_post; @@ -43,7 +36,7 @@ impl V0Api { transform = "schema_version_validation", deprecated = true )] - async fn plans_get(&self, state: Data<&Arc>) -> plans_get::AllResponses { - plans_get::endpoint(state).await + async fn plans_get(&self) -> plans_get::AllResponses { + plans_get::endpoint().await } } diff --git a/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs b/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs index 8f7c600d883..b5b5065ea46 100644 --- a/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs +++ b/catalyst-gateway/bin/src/service/api/legacy/v0/plans_get.rs @@ -1,14 +1,7 @@ //! Implementation of the GET /vote/active/plans endpoint - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{payload::Json, ApiResponse}; -use crate::{ - service::common::{objects::legacy::vote_plan::VotePlan, responses::WithErrorResponses}, - state::State, -}; +use crate::service::common::{objects::legacy::vote_plan::VotePlan, responses::WithErrorResponses}; /// Endpoint responses #[derive(ApiResponse)] @@ -25,6 +18,6 @@ pub(crate) type AllResponses = WithErrorResponses; /// /// Get all active vote plans endpoint. #[allow(clippy::unused_async)] -pub(crate) async fn endpoint(_state: Data<&Arc>) -> AllResponses { +pub(crate) async fn endpoint() -> AllResponses { Responses::Ok(Json(Vec::new())).into() } diff --git a/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs b/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs index 70dea8d4d24..855e84c6c06 100644 --- a/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs +++ b/catalyst-gateway/bin/src/service/api/legacy/v1/account_votes_get.rs @@ -1,16 +1,9 @@ //! Implementation of the `GET /v1/votes/plan/account-votes/:account_id` endpoint - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{param::Path, payload::Json, ApiResponse}; -use crate::{ - service::common::{ - objects::legacy::account_votes::{AccountId, AccountVote}, - responses::WithErrorResponses, - }, - state::State, +use crate::service::common::{ + objects::legacy::account_votes::{AccountId, AccountVote}, + responses::WithErrorResponses, }; /// Endpoint responses @@ -31,8 +24,6 @@ pub(crate) type AllResponses = WithErrorResponses; /// For each active vote plan, this endpoint returns an array /// with the proposal index number that the account voted for. #[allow(clippy::unused_async)] -pub(crate) async fn endpoint( - _state: Data<&Arc>, _account_id: Path, -) -> AllResponses { +pub(crate) async fn endpoint(_account_id: Path) -> AllResponses { Responses::Ok(Json(Vec::new())).into() } diff --git a/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs b/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs index 692ec585362..2fa186e035d 100644 --- a/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs +++ b/catalyst-gateway/bin/src/service/api/legacy/v1/mod.rs @@ -1,26 +1,19 @@ //! 
`v1` Endpoints - -use std::sync::Arc; - -use poem::web::Data; use poem_openapi::{ param::{Path, Query}, payload::Json, OpenApi, }; -use crate::{ - service::{ - common::{ - objects::legacy::{ - account_votes::AccountId, fragments_batch::FragmentsBatch, - fragments_processing_summary::FragmentId, - }, - tags::ApiTags, +use crate::service::{ + common::{ + objects::legacy::{ + account_votes::AccountId, fragments_batch::FragmentsBatch, + fragments_processing_summary::FragmentId, }, - utilities::middleware::schema_validation::schema_version_validation, + tags::ApiTags, }, - state::State, + utilities::middleware::schema_validation::schema_version_validation, }; mod account_votes_get; @@ -45,11 +38,10 @@ impl V1Api { /// Get from all active vote plans, the index of the voted proposals /// by the given account ID. async fn get_account_votes( - &self, state: Data<&Arc>, - /// A account ID to get the votes for. + &self, /// A account ID to get the votes for. account_id: Path, ) -> account_votes_get::AllResponses { - account_votes_get::endpoint(state, account_id).await + account_votes_get::endpoint(account_id).await } /// Process fragments diff --git a/catalyst-gateway/bin/src/service/api/mod.rs b/catalyst-gateway/bin/src/service/api/mod.rs index 0af519e0481..2ab54ffb2ff 100644 --- a/catalyst-gateway/bin/src/service/api/mod.rs +++ b/catalyst-gateway/bin/src/service/api/mod.rs @@ -11,9 +11,10 @@ use local_ip_address::list_afinet_netifas; use poem_openapi::{ContactObject, LicenseObject, OpenApiService, ServerObject}; use self::cardano::CardanoApi; -use crate::settings::{DocsSettings, API_URL_PREFIX}; - -mod cardano; +use crate::settings::Settings; +/// Auth +mod auth; +pub(crate) mod cardano; mod health; mod legacy; @@ -58,9 +59,7 @@ const TERMS_OF_SERVICE: &str = "https://github.com/input-output-hk/catalyst-voices/blob/main/CODE_OF_CONDUCT.md"; /// Create the `OpenAPI` definition -pub(crate) fn mk_api( - hosts: Vec, settings: &DocsSettings, -) -> OpenApiService<(HealthApi, CardanoApi, LegacyApi), ()> { +pub(crate) fn mk_api() -> OpenApiService<(HealthApi, CardanoApi, LegacyApi), ()> { let mut service = OpenApiService::new( ( HealthApi, @@ -74,25 +73,24 @@ pub(crate) fn mk_api( .description(API_DESCRIPTION) .license(get_api_license()) .terms_of_service(TERMS_OF_SERVICE) - .url_prefix(API_URL_PREFIX.as_str()); - - // Retrieve the port from the socket address - let port = settings.address.port().to_string(); + .url_prefix(Settings::api_url_prefix()); - let server_name = &settings.server_name; + let hosts = Settings::api_host_names(); for host in hosts { service = service.server(ServerObject::new(host).description("API Host")); } // Add server name if it is set - if let Some(name) = server_name { + if let Some(name) = Settings::server_name() { service = service.server(ServerObject::new(name).description("Server at server name")); } + let port = Settings::bound_address().port(); + // Get localhost name if let Ok(hostname) = gethostname().into_string() { - let hostname_address = format!("http://{hostname}:{port}"); + let hostname_address = format!("http://{hostname}:{port}",); service = service .server(ServerObject::new(hostname_address).description("Server at localhost name")); } diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs index 7ce7cd464fc..c40e4e36deb 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/network.rs 
@@ -8,9 +8,6 @@ pub(crate) enum Network { /// Cardano mainnet. #[oai(rename = "mainnet")] Mainnet, - /// Cardano testnet. - #[oai(rename = "testnet")] - Testnet, /// Cardano preprod. #[oai(rename = "preprod")] Preprod, @@ -23,7 +20,6 @@ impl From for cardano_chain_follower::Network { fn from(value: Network) -> Self { match value { Network::Mainnet => Self::Mainnet, - Network::Testnet => Self::Testnet, Network::Preprod => Self::Preprod, Network::Preview => Self::Preview, } diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs index 581b0fe7506..6a855cc7998 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/registration_info.rs @@ -2,9 +2,10 @@ use poem_openapi::{types::Example, Object, Union}; -use crate::{ - event_db::cardano::cip36_registration::{Nonce, PaymentAddress, PublicVotingInfo, TxId}, - service::{common::objects::cardano::hash::Hash, utilities::to_hex_with_prefix}, +use crate::service::{ + api::cardano::types::{Nonce, PaymentAddress, PublicVotingInfo, TxId}, + common::objects::cardano::hash::Hash, + utilities::to_hex_with_prefix, }; /// Delegation type @@ -68,6 +69,7 @@ pub(crate) struct RegistrationInfo { impl RegistrationInfo { /// Creates a new `RegistrationInfo` + #[allow(dead_code)] pub(crate) fn new( tx_hash: TxId, rewards_address: &PaymentAddress, voting_info: PublicVotingInfo, nonce: Nonce, diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs index f4d22157265..5a741887fc1 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/slot_info.rs @@ -2,9 +2,9 @@ use poem_openapi::{types::Example, Object}; -use crate::{ - event_db::cardano::chain_state::{DateTime, SlotNumber}, - service::common::objects::cardano::hash::Hash, +use crate::service::{ + api::cardano::types::{DateTime, SlotNumber}, + common::objects::cardano::hash::Hash, }; /// Cardano block's slot data. diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs index 52ac82d61c4..e1428c13fa8 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_address.rs @@ -10,7 +10,7 @@ use poem_openapi::{ /// Cardano stake address of the user. /// Should a valid Bech32 encoded stake address followed by the `https://cips.cardano.org/cip/CIP-19/#stake-addresses.` -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct StakeAddress(StakeAddressPallas); impl StakeAddress { diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs index bff2f734896..eb718f899df 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/stake_info.rs @@ -2,10 +2,10 @@ use poem_openapi::{types::Example, Object}; -use crate::event_db::cardano::{chain_state::SlotNumber, utxo::StakeAmount}; +use crate::service::api::cardano::types::{SlotNumber, StakeAmount}; /// User's cardano stake info. 
-#[derive(Object)] +#[derive(Object, Default)] #[oai(example = true)] pub(crate) struct StakeInfo { /// Total stake amount. @@ -27,3 +27,22 @@ impl Example for StakeInfo { } } } + +/// Full user's cardano stake info. +#[derive(Object, Default)] +#[oai(example = true)] +pub(crate) struct FullStakeInfo { + /// Volatile stake information. + pub(crate) volatile: StakeInfo, + /// Persistent stake information. + pub(crate) persistent: StakeInfo, +} + +impl Example for FullStakeInfo { + fn example() -> Self { + Self { + volatile: StakeInfo::example(), + persistent: StakeInfo::example(), + } + } +} diff --git a/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs b/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs index d736775dc2a..326d1633406 100644 --- a/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs +++ b/catalyst-gateway/bin/src/service/common/objects/cardano/sync_state.rs @@ -2,9 +2,9 @@ use poem_openapi::{types::Example, Object}; -use crate::{ - event_db::cardano::chain_state::{DateTime, SlotNumber}, - service::common::objects::cardano::hash::Hash, +use crate::service::{ + api::cardano::types::{DateTime, SlotNumber}, + common::objects::cardano::hash::Hash, }; /// Cardano follower's sync state info. diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs index 5f47b9f51a7..ba393ac3fad 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/event_id.rs @@ -13,8 +13,8 @@ impl Example for EventId { } } -impl From for crate::event_db::legacy::types::event::EventId { +impl From for crate::db::event::legacy::types::event::EventId { fn from(event_id: EventId) -> Self { - crate::event_db::legacy::types::event::EventId(event_id.0) + crate::db::event::legacy::types::event::EventId(event_id.0) } } diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs index 00b9516d522..1a7d1a4b908 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_group_id.rs @@ -19,11 +19,11 @@ impl Example for VoterGroupId { } } -impl TryFrom for VoterGroupId { +impl TryFrom for VoterGroupId { type Error = String; fn try_from( - value: crate::event_db::legacy::types::registration::VoterGroupId, + value: crate::db::event::legacy::types::registration::VoterGroupId, ) -> Result { match value.0.as_str() { "rep" => Ok(Self::Rep), diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs index 1a18140e050..ff6afdc24ac 100644 --- a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_info.rs @@ -54,11 +54,11 @@ impl Example for VoterInfo { } } -impl TryFrom for VoterInfo { +impl TryFrom for VoterInfo { type Error = String; fn try_from( - value: crate::event_db::legacy::types::registration::VoterInfo, + value: crate::db::event::legacy::types::registration::VoterInfo, ) -> Result { Ok(Self { voting_power: value.voting_power, diff --git a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs index 197436dfd8a..0a60bad3621 100644 
--- a/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs +++ b/catalyst-gateway/bin/src/service/common/objects/legacy/voter_registration.rs @@ -34,11 +34,11 @@ impl Example for VoterRegistration { } } -impl TryFrom for VoterRegistration { +impl TryFrom for VoterRegistration { type Error = String; fn try_from( - value: crate::event_db::legacy::types::registration::Voter, + value: crate::db::event::legacy::types::registration::Voter, ) -> Result { Ok(Self { voter_info: value.info.try_into()?, diff --git a/catalyst-gateway/bin/src/service/common/objects/server_error.rs b/catalyst-gateway/bin/src/service/common/objects/server_error.rs index 980f4329c62..b8499ea30c0 100644 --- a/catalyst-gateway/bin/src/service/common/objects/server_error.rs +++ b/catalyst-gateway/bin/src/service/common/objects/server_error.rs @@ -6,7 +6,7 @@ use uuid::Uuid; /// While using macro-vis lib, you will get the `uncommon_codepoints` warning, so you will /// probably want to place this in your crate root -use crate::settings::generate_github_issue_url; +use crate::settings::Settings; #[derive(Debug, Object)] #[oai(example, skip_serializing_if_is_none)] @@ -33,7 +33,7 @@ impl ServerError { ); let id = Uuid::new_v4(); let issue_title = format!("Internal Server Error - {id}"); - let issue = generate_github_issue_url(&issue_title); + let issue = Settings::generate_github_issue_url(&issue_title); Self { id, msg, issue } } diff --git a/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs b/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs index 315e4b3100f..6a3874e5622 100644 --- a/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs +++ b/catalyst-gateway/bin/src/service/docs/stoplight_elements/mod.rs @@ -43,7 +43,7 @@ fn create_html(document: &str) -> String { .replace("{:spec}", document) } -/// Create an endpoint to return teh Stoplight documentation for our API. +/// Create an endpoint to return the Stoplight documentation for our API. pub(crate) fn create_endpoint(document: &str) -> impl Endpoint { let ui_html = create_html(document); poem::Route::new().at("/", make_sync(move |_| Html(ui_html.clone()))) diff --git a/catalyst-gateway/bin/src/service/mod.rs b/catalyst-gateway/bin/src/service/mod.rs index 30aa724ffca..3a46f96aabe 100644 --- a/catalyst-gateway/bin/src/service/mod.rs +++ b/catalyst-gateway/bin/src/service/mod.rs @@ -1,15 +1,13 @@ //! Main entrypoint to the service -use std::sync::Arc; - -use crate::{settings::DocsSettings, state::State}; // These Modules contain endpoints mod api; + mod docs; // These modules are utility or common types/functions mod common; mod poem_service; -mod utilities; +pub(crate) mod utilities; pub(crate) use api::started; pub(crate) use poem_service::get_app_docs; @@ -28,6 +26,6 @@ pub(crate) use poem_service::get_app_docs; /// `Error::CannotRunService` - cannot run the service /// `Error::EventDbError` - cannot connect to the event db /// `Error::IoError` - An IO error has occurred. -pub(crate) async fn run(settings: &DocsSettings, state: Arc) -> anyhow::Result<()> { - poem_service::run(settings, state).await +pub(crate) async fn run() -> anyhow::Result<()> { + poem_service::run().await } diff --git a/catalyst-gateway/bin/src/service/poem_service.rs b/catalyst-gateway/bin/src/service/poem_service.rs index 41067f31aa6..ebc795f3917 100644 --- a/catalyst-gateway/bin/src/service/poem_service.rs +++ b/catalyst-gateway/bin/src/service/poem_service.rs @@ -2,8 +2,6 @@ //! //! 
This provides only the primary entrypoint to the service. -use std::sync::Arc; - use poem::{ endpoint::PrometheusExporter, listener::TcpListener, @@ -21,27 +19,24 @@ use crate::{ middleware::tracing_mw::{init_prometheus, Tracing}, }, }, - settings::{get_api_host_names, DocsSettings, API_URL_PREFIX}, - state::State, + settings::Settings, }; /// This exists to allow us to add extra routes to the service for testing purposes. -fn mk_app( - hosts: Vec, base_route: Option, state: &Arc, settings: &DocsSettings, -) -> impl Endpoint { +fn mk_app(base_route: Option) -> impl Endpoint { // Get the base route if defined, or a new route if not. let base_route = match base_route { Some(route) => route, None => Route::new(), }; - let api_service = mk_api(hosts, settings); + let api_service = mk_api(); let docs = docs(&api_service); let prometheus_registry = init_prometheus(); base_route - .nest(API_URL_PREFIX.as_str(), api_service) + .nest(Settings::api_url_prefix(), api_service) .nest("/docs", docs) .nest("/metrics", PrometheusExporter::new(prometheus_registry)) .nest("/favicon.ico", favicon()) @@ -49,12 +44,11 @@ fn mk_app( .with(Compression::new().with_quality(CompressionLevel::Fastest)) .with(CatchPanic::new().with_handler(ServicePanicHandler)) .with(Tracing) - .data(state.clone()) } /// Get the API docs as a string in the JSON format. -pub(crate) fn get_app_docs(setting: &DocsSettings) -> String { - let api_service = mk_api(vec![], setting); +pub(crate) fn get_app_docs() -> String { + let api_service = mk_api(); api_service.spec() } @@ -71,11 +65,12 @@ pub(crate) fn get_app_docs(setting: &DocsSettings) -> String { /// * `Error::CannotRunService` - cannot run the service /// * `Error::EventDbError` - cannot connect to the event db /// * `Error::IoError` - An IO error has occurred. -pub(crate) async fn run(settings: &DocsSettings, state: Arc) -> anyhow::Result<()> { +pub(crate) async fn run() -> anyhow::Result<()> { // The address to listen on - let addr = settings.address; - tracing::info!("Starting Poem Service ..."); - tracing::info!("Listening on {addr}"); + tracing::info!( + ServiceAddr = Settings::bound_address().to_string(), + "Starting Cat-Gateway API Service ..." + ); // Set a custom panic hook, so we can catch panics and not crash the service. // And also get data from the panic so we can log it. @@ -83,11 +78,13 @@ pub(crate) async fn run(settings: &DocsSettings, state: Arc) -> anyhow::R // help find them in the logs if they happen in production. set_panic_hook(); - let hosts = get_api_host_names(&addr); - - let app = mk_app(hosts, None, &state, settings); + let app = mk_app(None); - Ok(poem::Server::new(TcpListener::bind(addr)).run(app).await?) + Ok( + poem::Server::new(TcpListener::bind(Settings::bound_address())) + .run(app) + .await?, + ) } #[cfg(test)] diff --git a/catalyst-gateway/bin/src/service/utilities/convert.rs b/catalyst-gateway/bin/src/service/utilities/convert.rs new file mode 100644 index 00000000000..f5733f1360e --- /dev/null +++ b/catalyst-gateway/bin/src/service/utilities/convert.rs @@ -0,0 +1,94 @@ +//! Simple general purpose utility functions. + +/// Convert T to an i16. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn i16_from_saturating>(value: T) -> i16 { + match value.try_into() { + Ok(value) => value, + Err(_) => i16::MAX, + } +} + +/// Convert an `` to `u16`. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. 
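A brief usage sketch for the saturating converters in this module (the assertion values are illustrative, not part of the patch): out-of-range inputs clamp to the target type's bounds instead of erroring.

```rust
fn saturating_examples() {
    // Values above the target range clamp to MAX.
    assert_eq!(i16_from_saturating(70_000_i64), i16::MAX);
    assert_eq!(u16_from_saturating(1_000_000_i64), u16::MAX);
    // Negative inputs to the unsigned converters clamp to MIN (zero).
    assert_eq!(u32_from_saturating(-5_i64), u32::MIN);
    // In-range values convert unchanged.
    assert_eq!(u64_from_saturating(42_u32), 42_u64);
}
```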
+pub(crate) fn u16_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> u16 { + if value < T::zero() { + u16::MIN + } else { + match value.try_into() { + Ok(value) => value, + Err(_) => u16::MAX, + } + } +} + +/// Convert an `` to `usize`. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn usize_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> usize { + if value < T::zero() { + usize::MIN + } else { + match value.try_into() { + Ok(value) => value, + Err(_) => usize::MAX, + } + } +} + +/// Convert an `` to `u32`. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn u32_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> u32 { + if value < T::zero() { + u32::MIN + } else { + match value.try_into() { + Ok(converted) => converted, + Err(_) => u32::MAX, + } + } +} + +/// Convert an `` to `u64`. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn u64_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> u64 { + if value < T::zero() { + u64::MIN + } else { + match value.try_into() { + Ok(converted) => converted, + Err(_) => u64::MAX, + } + } +} diff --git a/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs b/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs index 8279731ef1f..9f4d70fc0b2 100644 --- a/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs +++ b/catalyst-gateway/bin/src/service/utilities/middleware/schema_validation.rs @@ -7,11 +7,9 @@ //! This middleware checks the `State.schema_version_status` value, if it is Ok, //! the wrapped endpoint is called and its response is returned. -use std::sync::Arc; +use poem::{http::StatusCode, Endpoint, EndpointExt, Middleware, Request, Result}; -use poem::{http::StatusCode, web::Data, Endpoint, EndpointExt, Middleware, Request, Result}; - -use crate::state::State; +use crate::db::event::EventDB; /// A middleware that raises an error with `ServiceUnavailable` and 503 status code /// if a DB schema version mismatch is found the existing `State`. @@ -35,12 +33,10 @@ impl Endpoint for SchemaVersionValidationImpl { type Output = E::Output; async fn call(&self, req: Request) -> Result { - if let Some(state) = req.data::>>() { - // Check if the inner schema version status is set to `Mismatch`, - // if so, return the `StatusCode::SERVICE_UNAVAILABLE` code. - if state.event_db().schema_version_check().await.is_err() { - return Err(StatusCode::SERVICE_UNAVAILABLE.into()); - } + // Check if the inner schema version status is set to `Mismatch`, + // if so, return the `StatusCode::SERVICE_UNAVAILABLE` code. + if EventDB::schema_version_check().await.is_err() { + return Err(StatusCode::SERVICE_UNAVAILABLE.into()); } // Calls the endpoint with the request, and returns the response. 
self.ep.call(req).await diff --git a/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs b/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs index b31d140ddd0..7ac2cfbfcea 100644 --- a/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs +++ b/catalyst-gateway/bin/src/service/utilities/middleware/tracing_mw.rs @@ -1,9 +1,8 @@ //! Full Tracing and metrics middleware. -use std::time::Instant; +use std::{sync::LazyLock, time::Instant}; use cpu_time::ProcessTime; // ThreadTime doesn't work. use cryptoxide::{blake2b::Blake2b, digest::Digest}; -use lazy_static::lazy_static; use poem::{ http::{header, HeaderMap}, web::RealIp, @@ -18,7 +17,7 @@ use tracing::{error, field, Instrument, Level, Span}; use ulid::Ulid; use uuid::Uuid; -use crate::settings::CLIENT_ID_KEY; +use crate::settings::Settings; /// Labels for the metrics const METRIC_LABELS: [&str; 3] = ["endpoint", "method", "status_code"]; @@ -26,78 +25,84 @@ const METRIC_LABELS: [&str; 3] = ["endpoint", "method", "status_code"]; const CLIENT_METRIC_LABELS: [&str; 2] = ["client", "status_code"]; // Prometheus Metrics maintained by the service -lazy_static! { - static ref HTTP_REQ_DURATION_MS: HistogramVec = + +/// HTTP Request duration histogram. +static HTTP_REQ_DURATION_MS: LazyLock = LazyLock::new(|| { #[allow(clippy::ignored_unit_patterns)] register_histogram_vec!( "http_request_duration_ms", "Duration of HTTP requests in milliseconds", &METRIC_LABELS ) - .unwrap(); + .unwrap() +}); - static ref HTTP_REQ_CPU_TIME_MS: HistogramVec = +/// HTTP Request CPU Time histogram. +static HTTP_REQ_CPU_TIME_MS: LazyLock = LazyLock::new(|| { #[allow(clippy::ignored_unit_patterns)] register_histogram_vec!( "http_request_cpu_time_ms", "CPU Time of HTTP requests in milliseconds", &METRIC_LABELS ) - .unwrap(); - - // No Tacho implemented to enable this. - /* - static ref HTTP_REQUEST_RATE: GaugeVec = register_gauge_vec!( - "http_request_rate", - "Rate of HTTP requests per second", - &METRIC_LABELS - ) - .unwrap(); - */ - - static ref HTTP_REQUEST_COUNT: IntCounterVec = + .unwrap() +}); + +// No Tacho implemented to enable this. +// static ref HTTP_REQUEST_RATE: GaugeVec = register_gauge_vec!( +// "http_request_rate", +// "Rate of HTTP requests per second", +// &METRIC_LABELS +// ) +// .unwrap(); + +/// HTTP Request count histogram. +static HTTP_REQUEST_COUNT: LazyLock = LazyLock::new(|| { #[allow(clippy::ignored_unit_patterns)] register_int_counter_vec!( "http_request_count", "Number of HTTP requests", &METRIC_LABELS ) - .unwrap(); + .unwrap() +}); - static ref CLIENT_REQUEST_COUNT: IntCounterVec = +/// Client Request Count histogram. +static CLIENT_REQUEST_COUNT: LazyLock = LazyLock::new(|| { #[allow(clippy::ignored_unit_patterns)] register_int_counter_vec!( "client_request_count", "Number of HTTP requests per client", &CLIENT_METRIC_LABELS ) - .unwrap(); - - static ref PANIC_REQUEST_COUNT: IntCounterVec = - #[allow(clippy::ignored_unit_patterns)] - register_int_counter_vec!( - "panic_request_count", - "Number of HTTP requests that panicked", - &METRIC_LABELS - ) - .unwrap(); - - // Currently no way to get these values without reading the whole response which is BAD. 
- /* - static ref HTTP_REQUEST_SIZE_BYTES: HistogramVec = register_histogram_vec!( - "http_request_size_bytes", - "Size of HTTP requests in bytes", - &METRIC_LABELS - ) - .unwrap(); - static ref HTTP_RESPONSE_SIZE_BYTES: HistogramVec = register_histogram_vec!( - "http_response_size_bytes", - "Size of HTTP responses in bytes", - &METRIC_LABELS - ) - .unwrap(); - */ -} + .unwrap() +}); + +// Currently no way to get these values. TODO. +// Panic Request Count histogram. +// static PANIC_REQUEST_COUNT: LazyLock = LazyLock::new(|| { +// #[allow(clippy::ignored_unit_patterns)] +// register_int_counter_vec!( +// "panic_request_count", +// "Number of HTTP requests that panicked", +// &METRIC_LABELS +// ) +// .unwrap() +// }); + +// Currently no way to get these values without reading the whole response which is BAD. +// static ref HTTP_REQUEST_SIZE_BYTES: HistogramVec = register_histogram_vec!( +// "http_request_size_bytes", +// "Size of HTTP requests in bytes", +// &METRIC_LABELS +// ) +// .unwrap(); +// static ref HTTP_RESPONSE_SIZE_BYTES: HistogramVec = register_histogram_vec!( +// "http_response_size_bytes", +// "Size of HTTP responses in bytes", +// &METRIC_LABELS +// ) +// .unwrap(); /// Middleware for [`tracing`](https://crates.io/crates/tracing). #[derive(Default)] @@ -117,6 +122,24 @@ pub(crate) struct TracingEndpoint { inner: E, } +/// Given a Clients IP Address, return the anonymized version of it. +fn anonymize_ip_address(remote_addr: &str) -> String { + // We are going to represent it as a UUID. + let mut b2b = Blake2b::new_keyed(16, Settings::client_id_key().as_bytes()); + let mut out = [0; 16]; + + b2b.input_str(Settings::client_id_key()); + b2b.input_str(remote_addr); + b2b.result(&mut out); + + uuid::Builder::from_bytes(out) + .with_version(uuid::Version::Random) + .with_variant(uuid::Variant::RFC4122) + .into_uuid() + .hyphenated() + .to_string() +} + /// Get an anonymized client ID from the request. /// /// This simply takes the clients IP address, @@ -125,31 +148,13 @@ pub(crate) struct TracingEndpoint { /// The Hash is unique per client IP, but not able to /// be reversed or analyzed without both the client IP and the key. async fn anonymous_client_id(req: &Request) -> String { - let mut b2b = Blake2b::new(16); // We are going to represent it as a UUID. - let mut out = [0; 16]; - let remote_addr = RealIp::from_request_without_body(req) .await .ok() .and_then(|real_ip| real_ip.0) .map_or_else(|| req.remote_addr().to_string(), |addr| addr.to_string()); - b2b.input_str(CLIENT_ID_KEY.as_str()); - b2b.input_str(&remote_addr); - b2b.result(&mut out); - - // Note: This will only panic if the `out` is not 16 bytes long. - // Which it is. - // Therefore the `unwrap()` is safe and will not cause a panic here under any - // circumstances. - #[allow(clippy::unwrap_used)] - uuid::Builder::from_slice(&out) - .unwrap() - .with_version(uuid::Version::Random) - .with_variant(uuid::Variant::RFC4122) - .into_uuid() - .hyphenated() - .to_string() + anonymize_ip_address(&remote_addr) } /// Data we collected about the response diff --git a/catalyst-gateway/bin/src/service/utilities/mod.rs b/catalyst-gateway/bin/src/service/utilities/mod.rs index 7765b0064b9..796aca69224 100644 --- a/catalyst-gateway/bin/src/service/utilities/mod.rs +++ b/catalyst-gateway/bin/src/service/utilities/mod.rs @@ -1,6 +1,8 @@ //! 
`API` Utility operations pub(crate) mod catch_panic; +pub(crate) mod convert; pub(crate) mod middleware; +pub(crate) mod net; use pallas::ledger::addresses::Network as PallasNetwork; use poem_openapi::types::ToJSON; @@ -46,10 +48,7 @@ pub(crate) fn check_network( // one, and if not - we return an error. // if the `provided_network` omitted - we return the `testnet` network type if let Some(network) = provided_network { - if !matches!( - network, - Network::Testnet | Network::Preprod | Network::Preview - ) { + if !matches!(network, Network::Preprod | Network::Preview) { return Err(NetworkValidationError::NetworkMismatch( network.to_json_string(), "Testnet".to_string(), @@ -58,7 +57,7 @@ pub(crate) fn check_network( } Ok(network) } else { - Ok(Network::Testnet) + Ok(Network::Preprod) } }, PallasNetwork::Other(x) => Err(NetworkValidationError::UnknownNetwork(x).into()), diff --git a/catalyst-gateway/bin/src/service/utilities/net.rs b/catalyst-gateway/bin/src/service/utilities/net.rs new file mode 100644 index 00000000000..e047e610057 --- /dev/null +++ b/catalyst-gateway/bin/src/service/utilities/net.rs @@ -0,0 +1,42 @@ +//! Networking utility functions. +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket}; + +use tracing::error; + +/// Get the public IPv4 Address of the Service. +/// +/// In the unlikely event this fails, the address will be 0.0.0.0 +pub(crate) fn get_public_ipv4() -> IpAddr { + if let Ok(socket) = UdpSocket::bind("0.0.0.0:0") { + // Note: UDP is connection-less, we don't actually connect to google here. + if let Err(error) = socket.connect("8.8.8.8:53") { + error!("Failed to connect IPv4 to Google DNS : {}", error); + } else if let Ok(local_addr) = socket.local_addr() { + return local_addr.ip().to_canonical(); + } else { + error!("Failed to get local address"); + } + } else { + error!("Failed to bind IPv4 Address"); + } + IpAddr::V4(Ipv4Addr::from([0, 0, 0, 0])) +} + +/// Get the public IPv4 Address of the Service. +/// +/// In the unlikely event this fails, the address will be `::` +pub(crate) fn get_public_ipv6() -> IpAddr { + if let Ok(socket) = UdpSocket::bind("[::]:0") { + // Note: UDP is connection-less, we don't actually connect to google here. + if let Err(error) = socket.connect("[2001:4860:4860::8888]:53") { + error!("Failed to connect IPv6 to Google DNS : {}", error); + } else if let Ok(local_addr) = socket.local_addr() { + return local_addr.ip().to_canonical(); + } else { + error!("Failed to get local IPv6 address"); + } + } else { + error!("Failed to bind IPv6 Address"); + } + IpAddr::V6(Ipv6Addr::from(0)) +} diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings.rs index 0d566bada8e..b6d2d99b8f1 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings.rs @@ -1,17 +1,33 @@ //! 
Command line and environment variable settings for the service use std::{ - env, - net::{IpAddr, SocketAddr}, + env::{self, VarError}, + fmt::{self, Display}, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, + str::FromStr, + sync::{LazyLock, OnceLock}, + time::Duration, }; +use anyhow::anyhow; +use cardano_chain_follower::Network; use clap::Args; +use cryptoxide::{blake2b::Blake2b, mac::Mac}; use dotenvy::dotenv; -use lazy_static::lazy_static; -use tracing::log::error; +use duration_string::DurationString; +use strum::VariantNames; +use tracing::{error, info}; use url::Url; -use crate::logger::{LogLevel, LOG_LEVEL_DEFAULT}; +use crate::{ + build_info::{log_build_info, BUILD_INFO}, + db::{ + self, + index::session::{CompressionChoice, TlsChoice}, + }, + logger::{self, LogLevel, LOG_LEVEL_DEFAULT}, + service::utilities::net::{get_public_ipv4, get_public_ipv6}, +}; /// Default address to start service on. const ADDRESS_DEFAULT: &str = "0.0.0.0:3030"; @@ -36,13 +52,62 @@ const API_HOST_NAMES_DEFAULT: &str = "https://api.prod.projectcatalyst.io"; const API_URL_PREFIX_DEFAULT: &str = "/api"; /// Default `CHECK_CONFIG_TICK` used in development. -const CHECK_CONFIG_TICK_DEFAULT: &str = "5"; +const CHECK_CONFIG_TICK_DEFAULT: &str = "5s"; + +/// Default Event DB URL. +const EVENT_DB_URL_DEFAULT: &str = + "postgresql://postgres:postgres@localhost/catalyst_events?sslmode=disable"; + +/// Default Cassandra DB URL for the Persistent DB. +const CASSANDRA_PERSISTENT_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; + +/// Default Cassandra DB Namespace for the Persistent DB. +const CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT: &str = "persistent"; + +/// Default Cassandra DB URL for the Volatile DB. +const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; + +/// Default Cassandra DB Namespace for the Volatile DB. +const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile"; + +/// Default maximum batch size. +/// This comes from: +/// +/// Scylla may support larger batches for better performance. +/// Larger batches will incur more memory overhead to store the prepared batches. +const CASSANDRA_MAX_BATCH_SIZE_DEFAULT: i64 = 30; + +/// Minimum possible batch size. +pub(crate) const CASSANDRA_MIN_BATCH_SIZE: i64 = 1; + +/// Maximum possible batch size. +const CASSANDRA_MAX_BATCH_SIZE: i64 = 256; + +/// Default chain to follow. +const CHAIN_FOLLOWER_DEFAULT: Network = Network::Mainnet; + +/// Default number of sync tasks (must be in the range 1 to 255 inclusive.) +const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u16 = 16; + +/// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 +/// UUID. +fn calculate_service_uuid() -> String { + let mut hasher = Blake2b::new_keyed(16, "Catalyst-Gateway-Machine-UID".as_bytes()); -/// Default `DATA_REFRESH_TICK` used in development -const DATA_REFRESH_TICK_DEFAULT: &str = "5"; + let ipv4 = get_public_ipv4().to_string(); + let ipv6 = get_public_ipv6().to_string(); -/// Default `MACHINE_UID` used in development -const MACHINE_UID_DEFAULT: &str = "UID"; + hasher.input(ipv4.as_bytes()); + hasher.input(ipv6.as_bytes()); + + let mut hash = [0u8; 16]; + + hasher.raw_result(&mut hash); + uuid::Builder::from_custom_bytes(hash) + .into_uuid() + .hyphenated() + .to_string() +} /// Settings for the application. /// /// This struct represents the configuration for the application, /// including the address to start the service on, /// the URL to the `PostgreSQL` event database, /// and the logging level. 
#[derive(Args, Clone)] +#[clap(version = BUILD_INFO)] pub(crate) struct ServiceSettings { /// Url to the postgres event db #[clap(long, env)] - pub(crate) database_url: String, + pub(crate) event_db_url: Option, /// Logging level #[clap(long, default_value = LOG_LEVEL_DEFAULT)] @@ -63,14 +129,6 @@ pub(crate) struct ServiceSettings { /// Docs settings. #[clap(flatten)] pub(crate) docs_settings: DocsSettings, - - /// Follower settings. - #[clap(flatten)] - pub(crate) follower_settings: FollowerSettings, - - /// Enable deep query inspection. - #[clap(long, action = clap::ArgAction::SetTrue)] - pub(crate) deep_query_inspection: bool, } /// Settings specifies `OpenAPI` docs generation. @@ -88,24 +146,54 @@ pub(crate) struct DocsSettings { pub(crate) server_name: Option, } -/// Settings for follower mechanics. -#[derive(Args, Clone)] -pub(crate) struct FollowerSettings { - /// Check config tick - #[clap(long, default_value = CHECK_CONFIG_TICK_DEFAULT, env = "CHECK_CONFIG_TICK")] - pub(crate) check_config_tick: u64, - - /// Data Refresh tick - #[clap(long, default_value = DATA_REFRESH_TICK_DEFAULT, env = "DATA_REFRESH_TICK")] - pub(crate) data_refresh_tick: u64, - - /// Machine UID - #[clap(long, default_value = MACHINE_UID_DEFAULT, env = "MACHINE_UID")] - pub(crate) machine_uid: String, +/// An environment variable read as a string. +#[derive(Clone)] +pub(crate) struct StringEnvVar { + /// Value of the env var. + value: String, + /// Whether the env var is displayed redacted or not. + redacted: bool, } -/// An environment variable read as a string. -pub(crate) struct StringEnvVar(String); +/// Ergonomic way of specifying if a env var needs to be redacted or not. +enum StringEnvVarParams { + /// The env var is plain and should not be redacted. + Plain(String, Option), + /// The env var is redacted and should be redacted. + Redacted(String, Option), +} + +impl From<&str> for StringEnvVarParams { + fn from(s: &str) -> Self { + StringEnvVarParams::Plain(String::from(s), None) + } +} + +impl From for StringEnvVarParams { + fn from(s: String) -> Self { + StringEnvVarParams::Plain(s, None) + } +} + +impl From<(&str, bool)> for StringEnvVarParams { + fn from((s, r): (&str, bool)) -> Self { + if r { + StringEnvVarParams::Redacted(String::from(s), None) + } else { + StringEnvVarParams::Plain(String::from(s), None) + } + } +} + +impl From<(&str, bool, &str)> for StringEnvVarParams { + fn from((s, r, c): (&str, bool, &str)) -> Self { + if r { + StringEnvVarParams::Redacted(String::from(s), Some(String::from(c))) + } else { + StringEnvVarParams::Plain(String::from(s), Some(String::from(c))) + } + } +} /// An environment variable read as a string. 
impl StringEnvVar { @@ -131,10 +219,154 @@ impl StringEnvVar { /// let var = StringEnvVar::new("MY_VAR", "default"); /// assert_eq!(var.as_str(), "default"); /// ``` - fn new(var_name: &str, default_value: &str) -> Self { - dotenv().ok(); - let value = env::var(var_name).unwrap_or_else(|_| default_value.to_owned()); - Self(value) + fn new(var_name: &str, param: StringEnvVarParams) -> Self { + let (default_value, redacted, choices) = match param { + StringEnvVarParams::Plain(s, c) => (s, false, c), + StringEnvVarParams::Redacted(s, c) => (s, true, c), + }; + + match env::var(var_name) { + Ok(value) => { + if redacted { + info!(env = var_name, value = "Redacted", "Env Var Defined"); + } else { + info!(env = var_name, value = value, "Env Var Defined"); + } + Self { value, redacted } + }, + Err(VarError::NotPresent) => { + if let Some(choices) = choices { + if redacted { + info!( + env = var_name, + default = "Default Redacted", + choices = choices, + "Env Var Defaulted" + ); + } else { + info!( + env = var_name, + default = default_value, + choices = choices, + "Env Var Defaulted" + ); + }; + } else if redacted { + info!( + env = var_name, + default = "Default Redacted", + "Env Var Defined" + ); + } else { + info!(env = var_name, default = default_value, "Env Var Defaulted"); + } + + Self { + value: default_value, + redacted, + } + }, + Err(error) => { + error!( + env = var_name, + default = default_value, + error = ?error, + "Env Var Error" + ); + Self { + value: default_value, + redacted, + } + }, + } + } + + /// New Env Var that is optional. + fn new_optional(var_name: &str, redacted: bool) -> Option { + match env::var(var_name) { + Ok(value) => { + if redacted { + info!(env = var_name, value = "Redacted", "Env Var Defined"); + } else { + info!(env = var_name, value = value, "Env Var Defined"); + } + Some(Self { value, redacted }) + }, + Err(VarError::NotPresent) => { + info!(env = var_name, "Env Var Not Set"); + None + }, + Err(error) => { + error!( + env = var_name, + error = ?error, + "Env Var Error" + ); + None + }, + } + } + + /// Convert an Envvar into the required Enum Type. + fn new_as_enum( + var_name: &str, default: T, redacted: bool, + ) -> T + where ::Err: std::fmt::Display { + let mut choices = String::new(); + for name in T::VARIANTS { + if choices.is_empty() { + choices.push('['); + } else { + choices.push(','); + } + choices.push_str(name); + } + choices.push(']'); + + let choice = StringEnvVar::new( + var_name, + (default.to_string().as_str(), redacted, choices.as_str()).into(), + ); + + let value = match T::from_str(choice.as_str()) { + Ok(var) => var, + Err(error) => { + error!(error=%error, default=%default, choices=choices, choice=%choice, "Invalid choice. Using Default."); + default + }, + }; + + value + } + + /// Convert an Envvar into an integer in the bounded range. + fn new_as_i64(var_name: &str, default: i64, min: i64, max: i64) -> i64 +where { + let choices = format!("A value in the range {min} to {max} inclusive"); + + let raw_value = StringEnvVar::new( + var_name, + (default.to_string().as_str(), false, choices.as_str()).into(), + ) + .as_string(); + + match raw_value.parse::() { + Ok(value) => { + if value < min { + error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {min}"); + min + } else if value > max { + error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {max}"); + max + } else { + value + } + }, + Err(error) => { + error!(error=%error, default=default, "{var_name} not an integer. 
Range = {min} to {max} inclusive. Defaulted"); + default + }, + } } /// Get the read env var as a str. @@ -143,38 +375,415 @@ impl StringEnvVar { /// /// * &str - the value pub(crate) fn as_str(&self) -> &str { - &self.0 + &self.value + } + + /// Get the read env var as a `String`. + /// + /// # Returns + /// + /// * String - the value + pub(crate) fn as_string(&self) -> String { + self.value.clone() } } -// Lazy initialization of all env vars which are not command line parameters. -// All env vars used by the application should be listed here and all should have a -// default. The default for all NON Secret values should be suitable for Production, and -// NOT development. Secrets however should only be used with the default value in -// development. -lazy_static! { +impl fmt::Display for StringEnvVar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.redacted { + return write!(f, "REDACTED"); + } + write!(f, "{}", self.value) + } +} + +impl fmt::Debug for StringEnvVar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.redacted { + return write!(f, "REDACTED"); + } + write!(f, "env: {}", self.value) + } +} + +/// Configuration for an individual cassandra cluster. +#[derive(Clone)] +pub(crate) struct CassandraEnvVars { + /// The Address/s of the DB. + pub(crate) url: StringEnvVar, + + /// The Namespace of Cassandra DB. + pub(crate) namespace: StringEnvVar, + + /// The `UserName` to use for the Cassandra DB. + pub(crate) username: Option<StringEnvVar>, + + /// The Password to use for the Cassandra DB. + pub(crate) password: Option<StringEnvVar>, + + /// Use TLS for the connection? + pub(crate) tls: TlsChoice, + + /// The TLS Certificate to use for the connection, if any. + pub(crate) tls_cert: Option<StringEnvVar>, + + /// Compression to use. + pub(crate) compression: CompressionChoice, + + /// Maximum Configured Batch size. + pub(crate) max_batch_size: i64, +} + +impl CassandraEnvVars { + /// Create a config for a cassandra cluster, identified by a default namespace. + fn new(url: &str, namespace: &str) -> Self { + let name = namespace.to_uppercase(); + + // We can actually change the namespace, but can't change the name used for env vars. 
+ let namespace = StringEnvVar::new(&format!("CASSANDRA_{name}_NAMESPACE"), namespace.into()); + + let tls = + StringEnvVar::new_as_enum(&format!("CASSANDRA_{name}_TLS"), TlsChoice::Disabled, false); + let compression = StringEnvVar::new_as_enum( + &format!("CASSANDRA_{name}_COMPRESSION"), + CompressionChoice::Lz4, + false, + ); + + Self { + url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url.into()), + namespace, + username: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_USERNAME"), false), + password: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_PASSWORD"), true), + tls, + tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT"), false), + compression, + max_batch_size: StringEnvVar::new_as_i64( + &format!("CASSANDRA_{name}_BATCH_SIZE"), + CASSANDRA_MAX_BATCH_SIZE_DEFAULT, + CASSANDRA_MIN_BATCH_SIZE, + CASSANDRA_MAX_BATCH_SIZE, + ), + } + } + + /// Log the configuration of this Cassandra DB + pub(crate) fn log(&self, persistent: bool) { + let db_type = if persistent { "Persistent" } else { "Volatile" }; + + let auth = match (&self.username, &self.password) { + (Some(u), Some(_)) => format!("Username: {} Password: REDACTED", u.as_str()), + _ => "No Authentication".to_string(), + }; + + let tls_cert = match &self.tls_cert { + None => "No TLS Certificate Defined".to_string(), + Some(cert) => cert.as_string(), + }; + + info!( + url = self.url.as_str(), + namespace = db::index::schema::namespace(self), + auth = auth, + tls = self.tls.to_string(), + cert = tls_cert, + compression = self.compression.to_string(), + "Cassandra {db_type} DB Configuration" + ); + } +} + +/// Configuration for the chain follower. +#[derive(Clone)] +pub(crate) struct ChainFollowerEnvVars { + /// The Blockchain we sync from. + pub(crate) chain: Network, + + /// The maximum number of sync tasks. + pub(crate) sync_tasks: u16, +} + +impl ChainFollowerEnvVars { + /// Create a config for a cassandra cluster, identified by a default namespace. + fn new() -> Self { + let chain = StringEnvVar::new_as_enum("CHAIN_NETWORK", CHAIN_FOLLOWER_DEFAULT, false); + let sync_tasks: u16 = StringEnvVar::new_as_i64( + "CHAIN_FOLLOWER_SYNC_TASKS", + CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT.into(), + 1, + u16::MAX.into(), + ) + .try_into() + .unwrap_or(CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT); + + Self { chain, sync_tasks } + } + + /// Log the configuration of this Chain Follower + pub(crate) fn log(&self) { + info!( + chain = self.chain.to_string(), + sync_tasks = self.sync_tasks, + "Chain Follower Configuration" + ); + } +} + +/// All the `EnvVars` used by the service. +struct EnvVars { /// The github repo owner - pub(crate) static ref GITHUB_REPO_OWNER: StringEnvVar = StringEnvVar::new("GITHUB_REPO_OWNER", GITHUB_REPO_OWNER_DEFAULT); + github_repo_owner: StringEnvVar, /// The github repo name - pub(crate) static ref GITHUB_REPO_NAME: StringEnvVar = StringEnvVar::new("GITHUB_REPO_NAME", GITHUB_REPO_NAME_DEFAULT); + github_repo_name: StringEnvVar, /// The github issue template to use - pub(crate) static ref GITHUB_ISSUE_TEMPLATE: StringEnvVar = StringEnvVar::new("GITHUB_ISSUE_TEMPLATE", GITHUB_ISSUE_TEMPLATE_DEFAULT); + github_issue_template: StringEnvVar, + + /// The Service ID used to anonymize client connections. + service_id: StringEnvVar, /// The client id key used to anonymize client connections. 
- pub(crate) static ref CLIENT_ID_KEY: StringEnvVar = StringEnvVar::new("CLIENT_ID_KEY", CLIENT_ID_KEY_DEFAULT); + client_id_key: StringEnvVar, - /// A List of servers to provideThe client id key used to anonymize client connections. - pub(crate) static ref API_HOST_NAMES: StringEnvVar = StringEnvVar::new("API_HOST_NAMES", API_HOST_NAMES_DEFAULT); + /// A List of servers to provide + api_host_names: StringEnvVar, /// The base path the API is served at. - pub(crate) static ref API_URL_PREFIX: StringEnvVar = StringEnvVar::new("API_URL_PREFIX", API_URL_PREFIX_DEFAULT); + api_url_prefix: StringEnvVar, + + /// The Address of the Event DB. + event_db_url: StringEnvVar, + + /// The `UserName` to use for the Event DB. + event_db_username: Option<StringEnvVar>, + + /// The Password to use for the Event DB. + event_db_password: Option<StringEnvVar>, + + /// The Config of the Persistent Cassandra DB. + cassandra_persistent_db: CassandraEnvVars, + + /// The Config of the Volatile Cassandra DB. + cassandra_volatile_db: CassandraEnvVars, + + /// The Chain Follower configuration + chain_follower: ChainFollowerEnvVars, /// Tick every N seconds until config exists in db - pub(crate) static ref CHECK_CONFIG_TICK: StringEnvVar = StringEnvVar::new("CHECK_CONFIG_TICK", CHECK_CONFIG_TICK_DEFAULT); + #[allow(unused)] + check_config_tick: Duration, +} + +// Lazy initialization of all env vars which are not command line parameters. +// All env vars used by the application should be listed here and all should have a +// default. The default for all NON Secret values should be suitable for Production, and +// NOT development. Secrets however should only be used with the default value in +// development + +/// Lazily initialized set of all the Env Vars used by the service. +static ENV_VARS: LazyLock<EnvVars> = LazyLock::new(|| { + // Support env vars in a `.env` file, doesn't need to exist. + dotenv().ok(); + + let check_interval = StringEnvVar::new("CHECK_CONFIG_TICK", CHECK_CONFIG_TICK_DEFAULT.into()); + let check_config_tick = match DurationString::try_from(check_interval.as_string()) { + Ok(duration) => duration.into(), + Err(error) => { + error!( + "Invalid Check Config Tick Duration: {} : {}. 
Defaulting to 5 seconds.", + check_interval.as_str(), + error + ); + Duration::from_secs(5) + }, + }; + + EnvVars { + github_repo_owner: StringEnvVar::new("GITHUB_REPO_OWNER", GITHUB_REPO_OWNER_DEFAULT.into()), + github_repo_name: StringEnvVar::new("GITHUB_REPO_NAME", GITHUB_REPO_NAME_DEFAULT.into()), + github_issue_template: StringEnvVar::new( + "GITHUB_ISSUE_TEMPLATE", + GITHUB_ISSUE_TEMPLATE_DEFAULT.into(), + ), + service_id: StringEnvVar::new("SERVICE_ID", calculate_service_uuid().into()), + client_id_key: StringEnvVar::new("CLIENT_ID_KEY", CLIENT_ID_KEY_DEFAULT.into()), + api_host_names: StringEnvVar::new("API_HOST_NAMES", API_HOST_NAMES_DEFAULT.into()), + api_url_prefix: StringEnvVar::new("API_URL_PREFIX", API_URL_PREFIX_DEFAULT.into()), + event_db_url: StringEnvVar::new("EVENT_DB_URL", EVENT_DB_URL_DEFAULT.into()), + event_db_username: StringEnvVar::new_optional("EVENT_DB_USERNAME", false), + event_db_password: StringEnvVar::new_optional("EVENT_DB_PASSWORD", true), + cassandra_persistent_db: CassandraEnvVars::new( + CASSANDRA_PERSISTENT_DB_URL_DEFAULT, + CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT, + ), + cassandra_volatile_db: CassandraEnvVars::new( + CASSANDRA_VOLATILE_DB_URL_DEFAULT, + CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT, + ), + chain_follower: ChainFollowerEnvVars::new(), + check_config_tick, + } +}); + +impl EnvVars { + /// Validate env vars in ways we couldn't when they were first loaded. + pub(crate) fn validate() -> anyhow::Result<()> { + let mut status = Ok(()); + + let url = ENV_VARS.event_db_url.as_str(); + if let Err(error) = tokio_postgres::config::Config::from_str(url) { + error!(error=%error, url=url, "Invalid Postgres DB URL."); + status = Err(anyhow!("Environment Variable Validation Error.")); + } + + status + } +} + +/// All Settings/Options for the Service. +static SERVICE_SETTINGS: OnceLock = OnceLock::new(); + +/// Our Global Settings for this running service. +pub(crate) struct Settings(); + +impl Settings { + /// Initialize the settings data. + pub(crate) fn init(settings: ServiceSettings) -> anyhow::Result<()> { + let log_level = settings.log_level; + + if SERVICE_SETTINGS.set(settings).is_err() { + // We use println here, because logger not yet configured. + println!("Failed to initialize service settings. Called multiple times?"); + } + + // Init the logger. + logger::init(log_level); + + log_build_info(); + + // Validate any settings we couldn't validate when loaded. + EnvVars::validate() + } + + /// Get the current Event DB settings for this service. + pub(crate) fn event_db_settings() -> (&'static str, Option<&'static str>, Option<&'static str>) + { + let url = ENV_VARS.event_db_url.as_str(); + let user = ENV_VARS + .event_db_username + .as_ref() + .map(StringEnvVar::as_str); + let pass = ENV_VARS + .event_db_password + .as_ref() + .map(StringEnvVar::as_str); + + (url, user, pass) + } + + /// Get the Persistent & Volatile Cassandra DB config for this service. + pub(crate) fn cassandra_db_cfg() -> (CassandraEnvVars, CassandraEnvVars) { + ( + ENV_VARS.cassandra_persistent_db.clone(), + ENV_VARS.cassandra_volatile_db.clone(), + ) + } + + /// Get the configuration of the chain follower. + pub(crate) fn follower_cfg() -> ChainFollowerEnvVars { + ENV_VARS.chain_follower.clone() + } + + /// The API Url prefix + pub(crate) fn api_url_prefix() -> &'static str { + ENV_VARS.api_url_prefix.as_str() + } + + /// The Key used to anonymize client connections in the logs. 
+ pub(crate) fn client_id_key() -> &'static str { + ENV_VARS.client_id_key.as_str() + } + + /// The Service UUID + #[allow(unused)] + pub(crate) fn service_id() -> &'static str { + ENV_VARS.service_id.as_str() + } + + /// Get a list of all host names to serve the API on. + /// + /// Used by the `OpenAPI` Documentation to point to the correct backend. + /// Take a list of [scheme://] + host names from the env var and turns it into + /// a lits of strings. + /// + /// Host names are taken from the `API_HOST_NAMES` environment variable. + /// If that is not set, `addr` is used. + pub(crate) fn api_host_names() -> Vec { + if let Some(settings) = SERVICE_SETTINGS.get() { + let addr = settings.docs_settings.address; + string_to_api_host_names(&addr, ENV_VARS.api_host_names.as_str()) + } else { + Vec::new() + } + } + + /// The socket address we are bound to. + pub(crate) fn bound_address() -> SocketAddr { + if let Some(settings) = SERVICE_SETTINGS.get() { + settings.docs_settings.address + } else { + // This should never happen, needed to satisfy the compiler. + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + } + /// Get the server name to be used in the `Server` object of the `OpenAPI` Document. + pub(crate) fn server_name() -> Option { + if let Some(settings) = SERVICE_SETTINGS.get() { + settings.docs_settings.server_name.clone() + } else { + None + } + } + /// Generate a github issue url with a given title + /// + /// ## Arguments + /// + /// * `title`: &str - the title to give the issue + /// + /// ## Returns + /// + /// * String - the url + /// + /// ## Example + /// + /// ```rust,no_run + /// # use cat_data_service::settings::generate_github_issue_url; + /// assert_eq!( + /// generate_github_issue_url("Hello, World! How are you?"), + /// "https://github.com/input-output-hk/catalyst-voices/issues/new?template=bug_report.yml&title=Hello%2C%20World%21%20How%20are%20you%3F" + /// ); + /// ``` + pub(crate) fn generate_github_issue_url(title: &str) -> Option { + let path = format!( + "https://github.com/{}/{}/issues/new", + ENV_VARS.github_repo_owner.as_str(), + ENV_VARS.github_repo_name.as_str() + ); + + match Url::parse_with_params(&path, &[ + ("template", ENV_VARS.github_issue_template.as_str()), + ("title", title), + ]) { + Ok(url) => Some(url), + Err(e) => { + error!("Failed to generate github issue url {:?}", e.to_string()); + None + }, + } + } } /// Transform a string list of host names into a vec of host names. @@ -242,80 +851,23 @@ fn string_to_api_host_names(addr: &SocketAddr, hosts: &str) -> Vec { } } -/// Get a list of all host names to serve the API on. -/// -/// Used by the `OpenAPI` Documentation to point to the correct backend. -/// Take a list of [scheme://] + host names from the env var and turns it into -/// a lits of strings. -/// -/// Host names are taken from the `API_HOST_NAMES` environment variable. -/// If that is not set, `addr` is used. -pub(crate) fn get_api_host_names(addr: &SocketAddr) -> Vec { - string_to_api_host_names(addr, API_HOST_NAMES.as_str()) -} - -/// Generate a github issue url with a given title -/// -/// ## Arguments -/// -/// * `title`: &str - the title to give the issue -/// -/// ## Returns -/// -/// * String - the url -/// -/// ## Example -/// -/// ```rust,no_run -/// # use cat_data_service::settings::generate_github_issue_url; -/// assert_eq!( -/// generate_github_issue_url("Hello, World! 
How are you?"), -/// "https://github.com/input-output-hk/catalyst-voices/issues/new?template=bug_report.yml&title=Hello%2C%20World%21%20How%20are%20you%3F" -/// ); -/// ``` -pub(crate) fn generate_github_issue_url(title: &str) -> Option { - let path = format!( - "https://github.com/{}/{}/issues/new", - GITHUB_REPO_OWNER.as_str(), - GITHUB_REPO_NAME.as_str() - ); - - match Url::parse_with_params(&path, &[ - ("template", GITHUB_ISSUE_TEMPLATE.as_str()), - ("title", title), - ]) { - Ok(url) => Some(url), - Err(e) => { - error!("Failed to generate github issue url {:?}", e.to_string()); - None - }, - } -} - #[cfg(test)] mod tests { use super::*; - #[test] - fn github_repo_name_default() { - assert_eq!(GITHUB_REPO_NAME.as_str(), GITHUB_REPO_NAME_DEFAULT); - } - #[test] fn generate_github_issue_url_test() { let title = "Hello, World! How are you?"; assert_eq!( - generate_github_issue_url(title).expect("Failed to generate url").as_str(), + Settings::generate_github_issue_url(title).expect("Failed to generate url").as_str(), "https://github.com/input-output-hk/catalyst-voices/issues/new?template=bug_report.yml&title=Hello%2C+World%21+How+are+you%3F" ); } #[test] fn configured_hosts_default() { - let configured_hosts = get_api_host_names(&SocketAddr::from(([127, 0, 0, 1], 8080))); - assert_eq!(configured_hosts, vec![ - "https://api.prod.projectcatalyst.io" - ]); + let configured_hosts = Settings::api_host_names(); + assert_eq!(configured_hosts, Vec::::new()); } #[test] diff --git a/catalyst-gateway/bin/src/state/mod.rs b/catalyst-gateway/bin/src/state/mod.rs deleted file mode 100644 index 31a78c3560a..00000000000 --- a/catalyst-gateway/bin/src/state/mod.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! Shared state used by all endpoints. -use std::sync::Arc; - -use tracing::level_filters::LevelFilter; -use tracing_subscriber::{reload::Handle, Registry}; - -use crate::{ - event_db::{establish_connection, EventDB}, - logger::LogLevel, -}; - -/// Settings for logger level -pub(crate) struct LoggerSettings { - /// Logger handle for formatting layer. - logger_handle: Handle, -} - -/// Global State of the service -pub(crate) struct State { - /// This can be None, or a handle to the DB. - /// If the DB fails, it can be set to None. - /// If its None, an attempt to get it will try and connect to the DB. - /// This is Private, it needs to be accessed with a function. - // event_db_handle: Arc>>, - // Private need to get it with a function. - event_db: Arc, /* This needs to be obsoleted, we want the DB - * to be able to be down. */ - /// Logger settings - logger_settings: Arc, -} - -impl State { - /// Create a new global [`State`] - pub(crate) async fn new( - database_url: Option, logger_handle: Handle, - ) -> anyhow::Result { - // Get a configured pool to the Database, runs schema version check internally. - let event_db = Arc::new(establish_connection(database_url).await?); - let logger_settings = Arc::new(LoggerSettings { logger_handle }); - - let state = Self { - event_db, - logger_settings, - }; - - // We don't care if this succeeds or not. - // We just try our best to connect to the event DB. - // let _ = state.event_db().await; - - Ok(state) - } - - /// Get the reference to the database connection pool for `EventDB`. - pub(crate) fn event_db(&self) -> Arc { - self.event_db.clone() - } - - /// Modify the logger level setting. - /// This will reload the logger. 
- pub(crate) fn modify_logger_level(&self, level: LogLevel) -> anyhow::Result<()> { - self.logger_settings - .logger_handle - .modify(|f| *f = LevelFilter::from_level(level.into()))?; - Ok(()) - } -} diff --git a/catalyst-gateway/clippy.toml b/catalyst-gateway/clippy.toml index 6933b816419..0358cdb508c 100644 --- a/catalyst-gateway/clippy.toml +++ b/catalyst-gateway/clippy.toml @@ -1 +1,2 @@ +allow-unwrap-in-tests = true allow-expect-in-tests = true diff --git a/catalyst-gateway/crates/README.md b/catalyst-gateway/crates/README.md deleted file mode 100644 index 5ee79b849de..00000000000 --- a/catalyst-gateway/crates/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Catalyst Data Gateway - Crates - -These are fully re-usable generalized `rust` crates that the Catalyst Gateway uses and are developed with it. -They are also able to be used stand-alone in other projects and can be published separately. diff --git a/catalyst-gateway/deny.toml b/catalyst-gateway/deny.toml index 7e6bd8d8121..26ec8794bbf 100644 --- a/catalyst-gateway/deny.toml +++ b/catalyst-gateway/deny.toml @@ -18,17 +18,21 @@ targets = [ version = 2 ignore = [ { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." }, - { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don;t use a custom global allocator." }, + { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don't use a custom global allocator." }, + { id = "RUSTSEC-2024-0370", reason = "`proc-macro-error` is used by crates we rely on, we can't control what they use."}, ] [bans] multiple-versions = "warn" wildcards = 'deny' deny = [ - # { crate = "git2", use-instead = "gix" }, - { crate = "openssl", use-instead = "rustls" }, - { crate = "openssl-sys", use-instead = "rustls" }, + # Scylla DB Drivers currently require OpenSSL. Its unavoidable. + # However, there is movement to enable support for Rustls. + # So, for now, allow open-ssl but it needs to be disabled as soon as Scylla DB enables Rustls. 
+ #{ crate = "openssl", use-instead = "rustls" }, + #{ crate = "openssl-sys", use-instead = "rustls" }, "libssh2-sys", + # { crate = "git2", use-instead = "gix" }, # { crate = "cmake", use-instead = "cc" }, # { crate = "windows", reason = "bloated and unnecessary", use-instead = "ideally inline bindings, practically, windows-sys" }, ] @@ -49,8 +53,9 @@ unknown-git = "deny" # List of URLs for allowed Git repositories allow-git = [ - "https://github.com/input-output-hk/hermes.git", + "https://github.com/input-output-hk/catalyst-libs.git", "https://github.com/input-output-hk/catalyst-pallas.git", + "https://github.com/input-output-hk/catalyst-mithril.git", "https://github.com/bytecodealliance/wasmtime", "https://github.com/aldanor/hdf5-rust", ] @@ -73,6 +78,7 @@ allow = [ "ISC", "Unicode-3.0", "MPL-2.0", + "Zlib", ] exceptions = [ #{ allow = ["Zlib"], crate = "tinyvec" }, @@ -94,6 +100,7 @@ license-files = [{ path = "../LICENSE-MIT", hash = 0x001c7e6c }] crate = "ring" expression = "MIT" license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] + # SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses # https://spdx.org/licenses/OpenSSL.html # ISC - Both BoringSSL and ring use this for their new files @@ -113,4 +120,4 @@ license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] #[[licenses.clarify]] #crate = "rustls-webpki" #expression = "ISC" -#license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] \ No newline at end of file +#license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] diff --git a/catalyst-gateway/event-db/Earthfile b/catalyst-gateway/event-db/Earthfile index c85600ef886..823d60a0c81 100644 --- a/catalyst-gateway/event-db/Earthfile +++ b/catalyst-gateway/event-db/Earthfile @@ -3,7 +3,7 @@ # the database and its associated software. VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.1.21 AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.03 AS postgresql-ci # cspell: words diff --git a/catalyst-gateway/rust-toolchain.toml b/catalyst-gateway/rust-toolchain.toml index f175cc34180..20a42f2a9f7 100644 --- a/catalyst-gateway/rust-toolchain.toml +++ b/catalyst-gateway/rust-toolchain.toml @@ -1,5 +1,3 @@ [toolchain] -channel = "1.75.0" -profile = "default" -components = [] -targets = ["x86_64-unknown-linux-musl"] \ No newline at end of file +channel = "1.80" +profile = "default" \ No newline at end of file diff --git a/catalyst-gateway/tests/Earthfile b/catalyst-gateway/tests/Earthfile index 16aed63c4a5..41d344b0241 100644 --- a/catalyst-gateway/tests/Earthfile +++ b/catalyst-gateway/tests/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.1.21 AS spectral-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.2.03 AS spectral-ci # test-lint-openapi - OpenAPI linting from an artifact # testing whether the OpenAPI generated during build stage follows good practice. diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile index 4cbe641d7bb..0aac1a2e786 100644 --- a/catalyst-gateway/tests/api_tests/Earthfile +++ b/catalyst-gateway/tests/api_tests/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.1.21 AS python-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.2.03 AS python-ci builder: FROM python-ci+python-base @@ -10,7 +10,7 @@ builder: COPY ./snapshot_tool-56364174.json . 
DO python-ci+BUILDER -test: +disabled-test: FROM +builder RUN apk update && apk add iptables-legacy # workaround for https://github.com/earthly/earthly/issues/3784 @@ -29,7 +29,7 @@ test: SAVE ARTIFACT coverage.lcov AS LOCAL api-tests.coverage.info END -nightly-test: +disabled-nightly-test: FROM +builder RUN apk update && apk add iptables-legacy # workaround for https://github.com/earthly/earthly/issues/3784 @@ -38,7 +38,7 @@ nightly-test: WITH DOCKER \ --compose docker-compose.yml \ --load event-db:latest=(../../event-db+build) \ - --load cat-gateway:latest=(../../+nightly-package-cat-gateway-with-preprod) \ + --load cat-gateway:latest=(../../+package-cat-gateway) \ --service cat-gateway \ --allow-privileged RUN poetry run pytest -s -m nightly --junitxml=junit-report.xml --cov=api_tests --cov-report lcov diff --git a/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart b/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart index 394d7989131..d22ae32e641 100644 --- a/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart +++ b/catalyst_voices/packages/catalyst_voices_repositories/lib/src/catalyst_data_gateway_repository.dart @@ -81,7 +81,7 @@ final class CatalystDataGatewayRepository { } } - Future> getCardanoStakedAdaStakeAddress({ + Future> getCardanoStakedAdaStakeAddress({ required String stakeAddress, enums.Network network = enums.Network.mainnet, int? slotNumber, diff --git a/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart b/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart index 746020be8f8..d215bcfbd5b 100644 --- a/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart +++ b/catalyst_voices/packages/catalyst_voices_repositories/test/src/catalyst_data_gateway_repository/catalyst_data_gateway_repository_test.dart @@ -30,12 +30,12 @@ class FakeCatGatewayApi extends Fake implements CatGatewayApi { Future> apiHealthLiveGet() async => response; @override - Future> apiCardanoStakedAdaStakeAddressGet({ + Future> apiCardanoStakedAdaStakeAddressGet({ required String? stakeAddress, enums.Network? network, int? 
slotNumber, }) async => - response as chopper.Response; + response as chopper.Response; @override Future> apiCardanoSyncStateGet({ @@ -155,17 +155,22 @@ void main() { amount: 1, slotNumber: 5, ); - final repository = setupRepository( - chopper.Response(http.Response('', HttpStatus.ok), stakeInfo), + const fullStakeInfo = FullStakeInfo( + volatile: stakeInfo, + persistent: stakeInfo, + ); + + final repository = setupRepository( + chopper.Response(http.Response('', HttpStatus.ok), fullStakeInfo), ); final result = await repository.getCardanoStakedAdaStakeAddress( stakeAddress: validStakeAddress, ); expect(result.isSuccess, true); - expect(result.success, equals(stakeInfo)); + expect(result.success, equals(fullStakeInfo)); }); test('getCardanoStakedAdaStakeAddress Bad request', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response(http.Response('', HttpStatus.badRequest), null), ); final result = await repository.getCardanoStakedAdaStakeAddress( @@ -176,7 +181,7 @@ void main() { }); test('getCardanoStakedAdaStakeAddress Not found', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response(http.Response('', HttpStatus.notFound), null), ); final result = await repository.getCardanoStakedAdaStakeAddress( @@ -186,7 +191,7 @@ void main() { expect(result.failure, equals(NetworkErrors.notFound)); }); test('getCardanoStakedAdaStakeAddress Server Error', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response( http.Response('', HttpStatus.internalServerError), null, @@ -200,7 +205,7 @@ void main() { }); test('getCardanoStakedAdaStakeAddress Service Unavailable', () async { - final repository = setupRepository( + final repository = setupRepository( chopper.Response( http.Response('', HttpStatus.serviceUnavailable), null, diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart index bfe9f1faa12..a88cc123f58 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.enums.swagger.dart @@ -39,8 +39,6 @@ enum Network { @JsonValue('mainnet') mainnet('mainnet'), - @JsonValue('testnet') - testnet('testnet'), @JsonValue('preprod') preprod('preprod'), @JsonValue('preview') diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart index 9b06413c74a..3385365eaf1 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.dart @@ -432,6 +432,62 @@ extension $FragmentsProcessingSummaryExtension on FragmentsProcessingSummary { } } +@JsonSerializable(explicitToJson: true) +class FullStakeInfo { + const FullStakeInfo({ + required this.volatile, + required this.persistent, + }); + + factory FullStakeInfo.fromJson(Map json) => + _$FullStakeInfoFromJson(json); + + static const toJsonFactory = _$FullStakeInfoToJson; + Map toJson() => 
_$FullStakeInfoToJson(this); + + @JsonKey(name: 'volatile') + final StakeInfo volatile; + @JsonKey(name: 'persistent') + final StakeInfo persistent; + static const fromJsonFactory = _$FullStakeInfoFromJson; + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other is FullStakeInfo && + (identical(other.volatile, volatile) || + const DeepCollectionEquality() + .equals(other.volatile, volatile)) && + (identical(other.persistent, persistent) || + const DeepCollectionEquality() + .equals(other.persistent, persistent))); + } + + @override + String toString() => jsonEncode(this); + + @override + int get hashCode => + const DeepCollectionEquality().hash(volatile) ^ + const DeepCollectionEquality().hash(persistent) ^ + runtimeType.hashCode; +} + +extension $FullStakeInfoExtension on FullStakeInfo { + FullStakeInfo copyWith({StakeInfo? volatile, StakeInfo? persistent}) { + return FullStakeInfo( + volatile: volatile ?? this.volatile, + persistent: persistent ?? this.persistent); + } + + FullStakeInfo copyWithWrapped( + {Wrapped? volatile, Wrapped? persistent}) { + return FullStakeInfo( + volatile: (volatile != null ? volatile.value : this.volatile), + persistent: (persistent != null ? persistent.value : this.persistent)); + } +} + @JsonSerializable(explicitToJson: true) class Hash { const Hash({ diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart index 9a106a7e118..4dba49dd5a9 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.models.swagger.g.dart @@ -113,6 +113,19 @@ Map _$FragmentsProcessingSummaryToJson( 'rejected': instance.rejected.map((e) => e.toJson()).toList(), }; +FullStakeInfo _$FullStakeInfoFromJson(Map json) => + FullStakeInfo( + volatile: StakeInfo.fromJson(json['volatile'] as Map), + persistent: + StakeInfo.fromJson(json['persistent'] as Map), + ); + +Map _$FullStakeInfoToJson(FullStakeInfo instance) => + { + 'volatile': instance.volatile.toJson(), + 'persistent': instance.persistent.toJson(), + }; + Hash _$HashFromJson(Map json) => Hash( hash: json['hash'] as String, ); diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart index cbd9f774aa5..88a535d0873 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.chopper.dart @@ -70,7 +70,7 @@ final class _$CatGatewayApi extends CatGatewayApi { } @override - Future> _apiCardanoStakedAdaStakeAddressGet({ + Future> _apiCardanoStakedAdaStakeAddressGet({ required String? stakeAddress, String? network, int? 
slotNumber, @@ -86,7 +86,7 @@ final class _$CatGatewayApi extends CatGatewayApi { client.baseUrl, parameters: $params, ); - return client.send($request); + return client.send($request); } @override diff --git a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart index f8c79a33455..121209f0e74 100644 --- a/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart +++ b/catalyst_voices/packages/catalyst_voices_services/lib/generated/catalyst_gateway/cat_gateway_api.swagger.dart @@ -100,12 +100,13 @@ abstract class CatGatewayApi extends ChopperService { ///@param stake_address The stake address of the user. Should a valid Bech32 encoded address followed by the https://cips.cardano.org/cip/CIP-19/#stake-addresses. ///@param network Cardano network type. If omitted network type is identified from the stake address. If specified it must be correspondent to the network type encoded in the stake address. As `preprod` and `preview` network types in the stake address encoded as a `testnet`, to specify `preprod` or `preview` network type use this query parameter. ///@param slot_number Slot number at which the staked ada amount should be calculated. If omitted latest slot number is used. - Future> apiCardanoStakedAdaStakeAddressGet({ + Future> apiCardanoStakedAdaStakeAddressGet({ required String? stakeAddress, enums.Network? network, int? slotNumber, }) { - generatedMapping.putIfAbsent(StakeInfo, () => StakeInfo.fromJsonFactory); + generatedMapping.putIfAbsent( + FullStakeInfo, () => FullStakeInfo.fromJsonFactory); return _apiCardanoStakedAdaStakeAddressGet( stakeAddress: stakeAddress, @@ -118,7 +119,7 @@ abstract class CatGatewayApi extends ChopperService { ///@param network Cardano network type. If omitted network type is identified from the stake address. If specified it must be correspondent to the network type encoded in the stake address. As `preprod` and `preview` network types in the stake address encoded as a `testnet`, to specify `preprod` or `preview` network type use this query parameter. ///@param slot_number Slot number at which the staked ada amount should be calculated. If omitted latest slot number is used. @Get(path: '/api/cardano/staked_ada/{stake_address}') - Future> _apiCardanoStakedAdaStakeAddressGet({ + Future> _apiCardanoStakedAdaStakeAddressGet({ @Path('stake_address') required String? stakeAddress, @Query('network') String? network, @Query('slot_number') int? slotNumber, diff --git a/catalyst_voices_packages/README.md b/catalyst_voices_packages/README.md index 1e7c5ac0c25..41f0d73fe19 100644 --- a/catalyst_voices_packages/README.md +++ b/catalyst_voices_packages/README.md @@ -14,15 +14,15 @@ A collection of Catalyst packages and plugins for Flutter and Dart. 
| Name | Pub | Documentation | Android | iOS | Web | macOS | Windows | Linux | |--------|-----|---------------| ------- |-----|-------|-----|---------|-------| -| [`catalyst_cardano_serialization`](catalyst_cardano_serialization) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_serialization.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano_serialization/latest/catalyst_cardano_serialization/catalyst_cardano_serialization-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | -| [`catalyst_analysis`](catalyst_analysis) | ![pub package](https://img.shields.io/pub/v/catalyst_analysis.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_analysis/latest/) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | -| [`catalyst_cardano`](catalyst_cardano/catalyst_cardano) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano/latest/catalyst_cardano/catalyst_cardano-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | N/A | -| [`catalyst_cardano_platform_interface`](catalyst_cardano/catalyst_cardano_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_platform_interface.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano_platform_interface/latest/catalyst_cardano_platform_interface/catalyst_cardano_platform_interface-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | -| [`catalyst_cardano_web`](catalyst_cardano/catalyst_cardano_web) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_web.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano_web/latest/catalyst_cardano_web/catalyst_cardano_web-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | N/A | -| [`catalyst_compression`](catalyst_compression/catalyst_compression) | ![pub package](https://img.shields.io/pub/v/catalyst_compression.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_compression/latest/catalyst_compression/catalyst_compression-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | N/A | -| [`catalyst_compression_platform_interface`](catalyst_compression/catalyst_compression_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_platform_interface.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_compression_platform_interface/latest/catalyst_compression_platform_interface/catalyst_compression_platform_interface-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | -| [`catalyst_compression_web`](catalyst_compression/catalyst_compression_web) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_web.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_compression_web/latest/catalyst_compression_web/catalyst_compression_web-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | N/A | -| [`catalyst_cose`](catalyst_cose) | ![pub package](https://img.shields.io/pub/v/catalyst_cose.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cose/latest/catalyst_cose/catalyst_cose-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| [`catalyst_cardano_serialization`](catalyst_cardano_serialization) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_serialization.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano_serialization/latest/catalyst_cardano_serialization/catalyst_cardano_serialization-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| [`catalyst_analysis`](catalyst_analysis) | 
![pub package](https://img.shields.io/pub/v/catalyst_analysis.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_analysis/latest/) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| [`catalyst_cardano`](catalyst_cardano/catalyst_cardano) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano/latest/catalyst_cardano/catalyst_cardano-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | +| [`catalyst_cardano_platform_interface`](catalyst_cardano/catalyst_cardano_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_platform_interface.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano_platform_interface/latest/catalyst_cardano_platform_interface/catalyst_cardano_platform_interface-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| [`catalyst_cardano_web`](catalyst_cardano/catalyst_cardano_web) | ![pub package](https://img.shields.io/pub/v/catalyst_cardano_web.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cardano_web/latest/catalyst_cardano_web/catalyst_cardano_web-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | +| [`catalyst_compression`](catalyst_compression/catalyst_compression) | ![pub package](https://img.shields.io/pub/v/catalyst_compression.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_compression/latest/catalyst_compression/catalyst_compression-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | +| [`catalyst_compression_platform_interface`](catalyst_compression/catalyst_compression_platform_interface) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_platform_interface.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_compression_platform_interface/latest/catalyst_compression_platform_interface/catalyst_compression_platform_interface-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| [`catalyst_compression_web`](catalyst_compression/catalyst_compression_web) | ![pub package](https://img.shields.io/pub/v/catalyst_compression_web.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_compression_web/latest/catalyst_compression_web/catalyst_compression_web-library.html) | N/A | N/A | āœ”ļø | N/A | N/A | N/A | +| [`catalyst_cose`](catalyst_cose) | ![pub package](https://img.shields.io/pub/v/catalyst_cose.svg) | [šŸ“–](https://pub.dev/documentation/catalyst_cose/latest/catalyst_cose/catalyst_cose-library.html) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | ## Requirements diff --git a/docs/Earthfile b/docs/Earthfile index 2aa57213a4f..79eb3c40d2d 100644 --- a/docs/Earthfile +++ b/docs/Earthfile @@ -1,7 +1,6 @@ VERSION 0.8 IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.2.03 AS docs-ci -# IMPORT /Users/alexeypoghilenkov/Work/catalyst-ci/earthly/docs AS docs-ci IMPORT .. AS repo IMPORT ../catalyst-gateway AS catalyst-gateway diff --git a/docs/src/catalyst-standards/permissionless-auth/auth-header.md b/docs/src/catalyst-standards/permissionless-auth/auth-header.md new file mode 100644 index 00000000000..a32ccbfcfc1 --- /dev/null +++ b/docs/src/catalyst-standards/permissionless-auth/auth-header.md @@ -0,0 +1,106 @@ +# Permission-less Authentication for Catalyst + +## Overview + +There is a requirement to establish identity with the Catalyst backend to provide secure and +contextual access to resources managed by project Catalyst. + +For example, a query of a voter's current voting power should provide that information based on the voter's identity.
+ +This provides better security and also simplifies APIs because they can have implicit parameters based on +the verified identity of the user. + +This document defines the format of the Authentication Token, and how it should be used. + +## Token Format + +The Authentication Token is based loosely on JWT. +It consists of an Authentication Header attached to every authenticated request, and an encoded signed token. + +This token can be attached either to individual HTTP requests or to the beginning of a web socket connection. + +The authentication header is in the format: + +```http +Authorization: Bearer catv1.<encoded-binary-token> +``` + +The `<encoded-binary-token>` is a [base64-url] encoded binary token whose format is defined in +[auth-token.cddl](./auth-token.cddl). + +### Encoded Binary Token Format + +The Encoded Binary Token is a [CBOR sequence] that consists of 3 fields. + +* `kid` : The key identifier. +* `ulid` : A ULID which defines when the token was issued, and a random nonce. +* `signature` : The signature over the `kid` and `ulid` fields. + +#### kid + +The Key ID is used to identify the Public Key Certificate, which identifies the Public Key used to sign the token. +Because this certificate is the Role 0 Certificate from the on-chain Role-Based Access Control specification, +it can also be used to provide identifying information about the user, such as: + +* Stake Address +* Registered Rewards Address +* The Identity of the issuer of the Certificate (Self Signed, or issued by an Authority). +* Other Role keys they have registered. +* Or any other data attached to the registration. + +The `kid` is simply the Blake2b-128 hash of the Role 0 Certificate. + +The backend will use this hash to identify the certificate from the on-chain registration and use +that information both to authenticate the user and to provide identifying information about them. + +#### ulid + +A standard [ULID] will be created when the token is first issued. +The [ULID] contains a timestamp of when it was created, and a random nonce. +The timestamp is used to protect against replay attacks by allowing the backend to reject +authentication if the timestamp is too old (or too far into the future). + +#### signature + +Initially, the only supported signature algorithm is ED25519. +However, the `kid` could in future refer to a certificate which uses different cryptography. +Accordingly, the formal specification of the signature is that it is as many bytes as required to +embed a signature of the type defined by the certificate identified by the `kid`. + +For ED25519, the signature will be 64 bytes. + +## Example Token + +The [CDDL Specification](./auth-token.cddl) contains an example token. +The example token is binary.
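+The sketch below is illustrative only and is not part of this specification.
+It assumes Dart (used elsewhere in this repository) and a small hand-written helper
+(`cborBstr`) for the CBOR byte-string headers involved.
+It assembles the example token from [auth-token.cddl](./auth-token.cddl) (with its
+all-zero signature) and [base64-url] encodes it without padding:
+
+```dart
+import 'dart:convert';
+
+/// CBOR byte-string header for the sizes used here:
+/// length < 24  -> a single byte, 0x40 + length.
+/// length < 256 -> 0x58 followed by the length byte.
+List<int> cborBstr(List<int> value) => value.length < 24
+    ? [0x40 + value.length, ...value]
+    : [0x58, value.length, ...value];
+
+void main() {
+  // Example values from auth-token.cddl (the signature is all zeros in the example).
+  final kid = [for (var i = 0; i < 16; i++) i * 0x11]; // 00 11 22 ... FF
+  final ulid = [
+    0x01, 0x91, 0x2C, 0xEC, 0x71, 0xCF, 0x2C, 0x4C,
+    0x14, 0xA5, 0x5D, 0x55, 0x85, 0xD9, 0x4D, 0x7B,
+  ];
+  final signature = List<int>.filled(64, 0);
+
+  // CBOR sequence: the three byte strings one after another, not wrapped in an array.
+  final token = [...cborBstr(kid), ...cborBstr(ulid), ...cborBstr(signature)];
+
+  // base64-url encode and drop the trailing '=' padding.
+  final encoded = base64Url.encode(token).replaceAll('=', '');
+  print('Authorization: Bearer catv1.$encoded');
+}
+```
+
+Running this should print the same header shown at the end of this section.
+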
+ +The binary of that example is: + +```hex +50 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF +50 01 91 2C EC 71 CF 2C 4C 14 A5 5D 55 85 D9 4D 7B +58 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +``` + +When [base64-url] encoded it becomes: + +```base64 +UAARIjNEVWZ3iJmqu8zd7v9QAZEs7HHPLEwUpV1VhdlNe1hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +``` + +The full token header would then be: + +```http +Authorization: Bearer catv1.UAARIjNEVWZ3iJmqu8zd7v9QAZEs7HHPLEwUpV1VhdlNe1hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +``` + +* [base64-url] +* [CBOR sequence] +* [ULID] diff --git a/docs/src/catalyst-standards/permissionless-auth/auth-token.cddl b/docs/src/catalyst-standards/permissionless-auth/auth-token.cddl new file mode 100644 index 00000000000..bd12a907b78 --- /dev/null +++ b/docs/src/catalyst-standards/permissionless-auth/auth-token.cddl @@ -0,0 +1,34 @@ +; Permissionless Authorization using RBAC Certificates for Catalyst. +; +; Token Data Definition + +auth-token-v1 = bytes .cborseq auth-token-v1-fields + +; Note: This is NOT an array, it is a set of fields in a CBOR sequence. +auth-token-v1-fields = [ kid, ulid, signature ] + +; Key ID - Blake2b-128 hash of the Role 0 Certificate defining the Session public key. +; This Certificate defines the cryptography used to sign the token. +; Currently, ONLY ed25519 is supported, but other signature cryptography may be allowed in future. +kid = (bstr .size 16) + +; ULID - Identifier for this token, encodes both the time the token was issued and a random nonce. +ulid = (bstr .size 16) + +; Signature - ED25519 Signature over the preceding two fields. +; Must be signed using the Private Key of the Role 0 Certificate identified by the kid field. +signature = (bstr .size 64) + +; Example Signed Token +; # CBOR sequence with 3 elements +; 50 # bytes(16) +; 00112233445566778899AABBCCDDEEFF +; 50 # bytes(16) +; 01912CEC71CF2C4C14A55D5585D94D7B +; 58 40 # bytes(64) +; 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +; +; Where: +; kid = 0x00112233445566778899aabbccddeeff +; ulid = 0x01912cec71cf2c4c14a55d5585d94d7b +; signature = 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 diff --git a/justfile b/justfile new file mode 100644 index 00000000000..88cd8c7737e --- /dev/null +++ b/justfile @@ -0,0 +1,30 @@ +# use with https://github.com/casey/just +# + +# cspell: words prereqs, commitlog + +default: + @just --list --unsorted + + +# Fix and Check Markdown files +check-markdown: + earthly +markdown-check-fix + +# Check Spelling +check-spelling: + earthly +clean-spelling-list + earthly +check-spelling + +# Pre Push Checks - intended to be run by a git pre-push hook.
+pre-push: check-markdown check-spelling + just catalyst-gateway/pre-push + earthly ./catalyst_voices+code-generator --platform=linux/amd64 --save_locally=true + +# Run cat-gateway natively on preprod +run-cat-gateway: + just catalyst-gateway/run-cat-gateway + +# Run cat-gateway natively on mainnet +run-cat-gateway-mainnet: + just catalyst-gateway/run-cat-gateway-mainnet diff --git a/utilities/local-cluster/Readme.md b/utilities/local-cluster/Readme.md index 7029822ff15..43032c69757 100644 --- a/utilities/local-cluster/Readme.md +++ b/utilities/local-cluster/Readme.md @@ -54,10 +54,10 @@ For testing purposes, the ScyllaDB is accessible on the Cluster IP Address: `192 ## Deploying the Cluster -### Setup hosts on windows +### Setup hosts on Windows -On Windows you need to setup the hosts before starting the cluster -From Windows terminal open the hosts file: +On Windows, you need to set up the hosts before starting the cluster. +From the Windows terminal, open the hosts file: ```sh notepad %SystemRoot%\System32\drivers\etc\hosts ``` @@ -75,7 +75,7 @@ From the root of the repo: just start-cluster ``` -#### MacOS +#### macOS From the root of the repo: @@ -176,11 +176,11 @@ vagrant ssh agent99 Found (and tested) description how to connect using only open-source via DBeaver: -1. Download dbeaver (Community Edition) -2. Download cassandra jdbc jar files: +1. Download DBeaver (Community Edition) +2. Download Cassandra JDBC jar files: (Downloading and Testing the Driver Binaries section have links to binary and source) -3. extract cassandra jdbc zip -4. run dbeaver +3. extract Cassandra JDBC zip +4. run DBeaver 5. go to Database > Driver Manager 6. click New 7. Fill in details as follows: @@ -192,7 +192,7 @@ Found (and tested) description how to connect using only open-source via DBeaver * Embedded: `no` * Category: * Description: `Cassandra` (or whatever you want it to say) -8. click Add File and add all the jars in the cassandra jdbc zip file. +8. click Add File and add all the jars in the Cassandra JDBC zip file. 9. click Find Class to make sure the Class Name is found okay 10. click OK 11.
Create New Connection, selecting the database driver you just added diff --git a/utilities/local-cluster/Vagrantfile b/utilities/local-cluster/Vagrantfile index bca02ee7620..68c633b9041 100644 --- a/utilities/local-cluster/Vagrantfile +++ b/utilities/local-cluster/Vagrantfile @@ -180,7 +180,7 @@ Vagrant.configure("2") do |config| control.vm.provision "shell", inline: helm_install_script control.vm.provision "shell", inline: control_plane_script control.vm.provision "shell", inline: cert_manager_install_script - # We use longhorn, so don;t setup the local-path-provisioner + # We use longhorn, so don't setup the local-path-provisioner # control.vm.provision "shell", inline: local_path_provisioner_script control.vm.provision "shell", inline: longhorn_install_script control.vm.provision "shell", inline: monitoring_install_script diff --git a/utilities/local-cluster/justfile b/utilities/local-cluster/justfile index 22d44ce6b54..0a202096dec 100644 --- a/utilities/local-cluster/justfile +++ b/utilities/local-cluster/justfile @@ -3,6 +3,12 @@ # Catalyst Voices Test Cluster basic Control export KUBECONFIG := "shared/k3s.yaml" +#set dotenv-required := true +#set dotenv-path := "./scylla/.env" +#set dotenv-load + +host_ip := `hostname -i | cut -d " " -f 1` + # cspell: words prereqs, commitlog default: @@ -54,13 +60,3 @@ ssh-into-agent99: get-all-logs: mkdir -p logs kail --log-level=error --since 6h > cluster.logs - -# Temporary local scylla dev DB until we can get it exposed from the cluster. -# TODO: Get the cluster scylla DB exposed on port 9042 of the cluster. -temp-scylla-dev-db: - mkdir -p /var/lib/scylla/data /var/lib/scylla/commitlog /var/lib/scylla/hints /var/lib/scylla/view_hints - docker run --privileged -p 9042:9042 --name scylla-dev --volume /var/lib/scylla:/var/lib/scylla -d scylladb/scylla --developer-mode=0 --smp 24 - -stop-temp-scylla-dev-db: - docker stop scylla-dev - docker rm scylla-dev \ No newline at end of file diff --git a/utilities/local-scylla/Readme.md b/utilities/local-scylla/Readme.md new file mode 100644 index 00000000000..881b935e817 --- /dev/null +++ b/utilities/local-scylla/Readme.md @@ -0,0 +1,165 @@ +# Local Scylla Cluster + +This is a setup for a local Scylla cluster using Docker for local testing and development. + +## Prerequisites + +* [Just](https://github.com/casey/just) +* [Docker](https://www.docker.com/) + +## Cluster Architecture + +The Cluster is based on the published Scylla Docker images. + +The Cluster is 4 nodes, each with 2 cores and 1GB of RAM. +They are exposed on ports 9042-9045. + +## Starting the Scylla Cluster + +```sh +just +``` + +## Getting Cluster Status and Metrics + +### Setup hosts on Windows + +On Windows, you need to set up the hosts before starting the cluster. +From the Windows terminal, open the hosts file: + +```sh +notepad %SystemRoot%\System32\drivers\etc\hosts +``` + +and copy the hosts from `./shared/extra.hosts` into the Windows host file + +### Startup + +#### Linux/Windows + +From the root of the repo: + +```sh +just start-cluster +``` + +#### macOS + +From the root of the repo: + +```sh +just start-cluster-aarch64-macos +``` + +### Getting Basic Cluster details + +From the root of the repo: + +```sh +just show-cluster +``` + +Note the report is **VERY** wide. +Best viewed with a small terminal font. + +### Suspending the Cluster + +The cluster can be suspended to save local system resources without tearing it down.
+ +```sh +just suspend-cluster +``` + +### Resuming a suspended Cluster + +The suspended cluster can then be resumed with: + +```sh +just resume-cluster +``` + +### Stopping the Cluster + +```sh +just stop-cluster +``` + +## Catalyst Voices Services + +These services are not deployed by default. + +* [Catalyst Voices Frontend](http://voices.cluster.test/) + * [HTTPS](https://voices.cluster.test/) +* [Catalyst Voices Backend](http://voices.cluster.test/api/) + * [HTTPS](https://voices.cluster.test/api/) +* [Catalyst Voices Documentation](http://docs.voices.cluster.test/) + * [HTTPS](https://docs.voices.cluster.test/) + +### Deploying Catalyst Voices Frontend and Backend Services + +TODO. + +### Deploying Catalyst Voices Documentation Service + +From the root of the repo: + +1. Make sure the documentation is built, and its container pushed to the container repo: + + ```sh + earthly --push ./docs+local + ``` + +2. Deploy the Documentation Service: + + ```sh + earthly ./utilities/local-cluster+deploy-docs + ``` + +3. Stop the Documentation Service: + + ```sh + earthly ./utilities/local-cluster+stop-docs + ``` + +## Debugging the cluster + +### SSH into a running VM + +To SSH into a VM running the cluster, use `vagrant`: + +```sh +vagrant ssh control +``` + +```sh +vagrant ssh agent86 +``` + +```sh +vagrant ssh agent99 +``` + +## Local UI to access ScyllaDB + +Found (and tested) a description of how to connect using only open-source tools via DBeaver: + +1. Download DBeaver (Community Edition) +2. Download Cassandra JDBC jar files: + (Downloading and Testing the Driver Binaries section has links to binary and source) +3. extract Cassandra JDBC zip +4. run DBeaver +5. go to Database > Driver Manager +6. click New +7. Fill in details as follows: + * Driver Name: `Cassandra` (or whatever you want it to say) + * Driver Type: `Generic` + * Class Name: `com.dbschema.CassandraJdbcDriver` + * URL Template: `jdbc:cassandra://{host}[:{port}][/{database}]` + * Default Port: `9042` + * Embedded: `no` + * Category: + * Description: `Cassandra` (or whatever you want it to say) +8. click Add File and add all the jars in the Cassandra JDBC zip file. +9. click Find Class to make sure the Class Name is found okay +10. click OK +11.
Create New Connection, selecting the database driver you just added diff --git a/utilities/local-scylla/docker-compose.yml b/utilities/local-scylla/docker-compose.yml new file mode 100644 index 00000000000..fdb237c914d --- /dev/null +++ b/utilities/local-scylla/docker-compose.yml @@ -0,0 +1,56 @@ +services: + scylla-node1: + container_name: scylla-node1 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9042:9042" + volumes: + - "/var/lib/scylla/1:/var/lib/scylla" + - "./node1-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + + scylla-node2: + container_name: scylla-node2 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9043:9043" + volumes: + - "/var/lib/scylla/2:/var/lib/scylla" + - "./node2-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + + scylla-node3: + container_name: scylla-node3 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9044:9044" + volumes: + - "/var/lib/scylla/3:/var/lib/scylla" + - "./node3-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + + scylla-node4: + container_name: scylla-node4 + image: scylladb/scylla:latest + restart: unless-stopped + command: --seeds=scylla-node1 --smp 2 --memory 1G --overprovisioned 1 --api-address 0.0.0.0 --broadcast-rpc-address ${HOST_IP} + ports: + - "9045:9045" + volumes: + - "/var/lib/scylla/4:/var/lib/scylla" + - "./node4-scylla.yaml:/etc/scylla/scylla.yaml" + networks: + cluster: + +networks: + cluster: + driver: bridge diff --git a/utilities/local-scylla/justfile b/utilities/local-scylla/justfile new file mode 100644 index 00000000000..2cbf82e207f --- /dev/null +++ b/utilities/local-scylla/justfile @@ -0,0 +1,58 @@ +# use with https://github.com/casey/just +# + +host_ip := `hostname -i | cut -d " " -f 1` + +default: + @just --list --unsorted + +# Local scylla dev DB (Developer Mode) - Starts with pre-existing data. +scylla-dev-db: + docker run --name scylla-node1 --volume /var/lib/scylla/1:/var/lib/scylla -d scylladb/scylla --developer-mode=1 --smp 1 + +# Local scylla dev DB Logs (Developer Mode) - Follow the running scylla DB logs. +scylla-dev-db-logs: + docker logs scylla-node1 -f + +# Local scylla dev DB (Developer Mode) - Stop the running scylla DB. +scylla-dev-db-stop: + docker stop scylla-node1 + +# Local scylla dev DB CLUSTER - Starts with pre-existing data. +scylla-dev-db-cluster: + HOST_IP="{{host_ip}}" \ + docker compose up + +# Stop the scylla development DB CLUSTER +scylla-dev-db-stop-cluster: + HOST_IP="{{host_ip}}" \ + docker compose down + +# Reset the dev mode scylla instance and start a new dev scylla instance +scylla-dev-db-reset: scylla-dev-db-purge scylla-dev-db + +# Reset the cluster storage and start a new dev scylla cluster +scylla-dev-db-reset-cluster: scylla-dev-db-purge scylla-dev-db-cluster + +# Run CQLSH on the dev Scylla cluster +scylla-dev-db-cqlsh: + docker run --rm -it scylladb/scylla-cqlsh `hostname` 9043 + +# Run Nodetool on the dev Scylla cluster to dump status info.
+scylla-dev-db-nodetool: + docker exec -it scylla-node1 nodetool status + docker exec -it scylla-node1 nodetool info + docker exec -it scylla-node1 nodetool tablestats + docker exec -it scylla-node1 nodetool sstableinfo + +# Shell into running node 1. +scylla-dev-db-shell: + docker exec -it scylla-node1 sh + +# Purge the storage used by the local test scylla cluster +scylla-dev-db-purge: + sudo rm -rf /var/lib/scylla/* + mkdir -p /var/lib/scylla/1/data /var/lib/scylla/1/commitlog /var/lib/scylla/1/hints /var/lib/scylla/1/view_hints + mkdir -p /var/lib/scylla/2/data /var/lib/scylla/2/commitlog /var/lib/scylla/2/hints /var/lib/scylla/2/view_hints + mkdir -p /var/lib/scylla/3/data /var/lib/scylla/3/commitlog /var/lib/scylla/3/hints /var/lib/scylla/3/view_hints + mkdir -p /var/lib/scylla/4/data /var/lib/scylla/4/commitlog /var/lib/scylla/4/hints /var/lib/scylla/4/view_hints diff --git a/utilities/local-scylla/node1-scylla.yaml b/utilities/local-scylla/node1-scylla.yaml new file mode 100644 index 00000000000..63267c2bcad --- /dev/null +++ b/utilities/local-scylla/node1-scylla.yaml @@ -0,0 +1,627 @@ +# Scylla storage config YAML + +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + +####################################### +# This file is split to two sections: +# 1. Supported parameters +# 2. Unsupported parameters: reserved for future use or backwards +# compatibility. +# Scylla will only read and use the first segment +####################################### + +### Supported Parameters + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +# It is recommended to change the default value when creating a new cluster. +# You can NOT modify this value for an existing cluster +#cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +num_tokens: 256 + +# Directory where Scylla should store all its files, which are commitlog, +# data, hints, view_hints and saved_caches subdirectories. All of these +# subs can be overridden by the respective options below. +# If unset, the value defaults to /var/lib/scylla +# workdir: /var/lib/scylla + +# Directory where Scylla should store data on disk. +# data_file_directories: +# - /var/lib/scylla/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# commitlog_directory: /var/lib/scylla/commitlog + +# schema commit log. A special commitlog instance +# used for schema and system tables. +# When running on magnetic HDD, this should be a +# separate spindle than the data directories. +# schema_commitlog_directory: /var/lib/scylla/commitlog/schema + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Scylla won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) 
+# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# The size of the individual schema commitlog file segments. +# +# The default size is 128, which is 4 times larger than the default +# size of the data commitlog. It's because the segment size puts +# a limit on the mutation size that can be written at once, and some +# schema mutation writes are much larger than average. +schema_commitlog_segment_size_in_mb: 128 + +# seed_provider class_name is saved for future use. +# A seed address is mandatory. +seed_provider: + # The addresses of hosts that will serve as contact points for the joining node. + # It allows the node to discover the cluster ring topology on startup (when + # joining the cluster). + # Once the node has joined the cluster, the seed list has no function. + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # In a new cluster, provide the address of the first node. + # In an existing cluster, specify the address of at least one existing node. + # If you specify addresses of more than one node, use a comma to separate them. + # For example: ",," + - seeds: "127.0.0.1" + +# Address to bind to and tell other Scylla nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# If you leave broadcast_address (below) empty, then setting listen_address +# to 0.0.0.0 is wrong as other nodes will not know how to reach this node. +# If you set broadcast_address, then you can set listen_address to 0.0.0.0. +listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9042 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19042 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. 
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. +#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. 
+endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. +# authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for +# authorizing permission management. Otherwise, it allows all. Use for upgrading +# clusters' auth. +# authorizer: AllowAllAuthorizer + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# RPC address to broadcast to drivers and other Scylla nodes. This cannot +# be set to 0.0.0.0. 
If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +# broadcast_rpc_address: 1.2.3.4 + +# Uncomment to enable experimental features +# experimental_features: +# - udf +# - alternator-streams +# - broadcast-tables +# - keyspace-storage-options +# - tablets + +# The directory where hints files are stored if hinted handoff is enabled. +# hints_directory: /var/lib/scylla/hints + +# The directory where hints files are stored for materialized-view updates +# view_hints_directory: /var/lib/scylla/view_hints + +# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +# hinted_handoff_enabled: true + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +# max_hint_window_in_ms: 10800000 # 3 hours + + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 10000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 10000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this also must have +# a non-zero value. Defaults to 2000. It's recommended to set this value to +# be at least 3 times smaller than the permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Murmur3Partitioner is currently the only supported partitioner, +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Total space to use for commitlogs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. 
Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. 
+# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. +# client_encryption_options: +# enabled: false +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +# internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +# inter_dc_tcp_nodelay: false + +# Relaxation of environment checks. +# +# Scylla places certain requirements on its environment. If these requirements are +# not met, performance and reliability can be degraded. +# +# These requirements include: +# - A filesystem with good support for asynchronous I/O (AIO). Currently, +# this means XFS. 
+# +# false: strict environment checks are in place; do not start if they are not met. +# true: relaxed environment checks; performance and reliability may degraade. +# +# developer_mode: false + + +# Idle-time background processing +# +# Scylla can perform certain jobs in the background while the system is otherwise idle, +# freeing processor resources when there is other work to be done. +# +# defragment_memory_on_idle: true +# +# prometheus port +# By default, Scylla opens prometheus API port on port 9180 +# setting the port to 0 will disable the prometheus API. +# prometheus_port: 9180 +# +# prometheus address +# Leaving this blank will set it to the same value as listen_address. +# This means that by default, Scylla listens to the prometheus API on the same +# listening address (and therefore network interface) used to listen for +# internal communication. If the monitoring node is not in this internal +# network, you can override prometheus_address explicitly - e.g., setting +# it to 0.0.0.0 to listen on all interfaces. +# prometheus_address: 1.2.3.4 + +# Distribution of data among cores (shards) within a node +# +# Scylla distributes data within a node among shards, using a round-robin +# strategy: +# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ... +# +# Scylla versions 1.6 and below used just one repetition of the pattern; +# this interfered with data placement among nodes (vnodes). +# +# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this +# provides for better data distribution. +# +# the value below is log (base 2) of the number of repetitions. +# +# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and +# below. +# +# Keep at 12 for new clusters. +murmur3_partitioner_ignore_msb_bits: 12 + +# Bypass in-memory data cache (the row cache) when performing reversed queries. +# reversed_reads_auto_bypass_cache: false + +# Use a new optimized algorithm for performing reversed reads. +# Set to `false` to fall-back to the old algorithm. +# enable_optimized_reversed_reads: true + +# Use on a new, parallel algorithm for performing aggregate queries. +# Set to `false` to fall-back to the old algorithm. +# enable_parallelized_aggregation: true + +# Time for which task manager task is kept in memory after it completes. +# task_ttl_in_seconds: 0 + +# In materialized views, restrictions are allowed only on the view's primary key columns. +# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part +# of the view's primary key. These invalid restrictions were ignored. +# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions. +# +# Can be true, false, or warn. +# * `true`: IS NOT NULL is allowed only on the view's primary key columns, +# trying to use it on other columns will cause an error, as it should. +# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored. +# It's useful for backwards compatibility. +# * `warn`: The same as false, but there's a warning about invalid view restrictions. +# +# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`. +# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`) +# to make sure that trying to create an invalid view causes an error. +strict_is_not_null_in_views: true + +# The Unix Domain Socket the node uses for maintenance socket. 
+# The possible options are: +# * ignore: the node will not open the maintenance socket, +# * workdir: the node will open the maintenance socket on the path /cql.m, +# where is a path defined by the workdir configuration option, +# * : the node will open the maintenance socket on the path . +maintenance_socket: ignore + +# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL +# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime +# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating +# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false +# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers. +# live_updatable_config_params_changeable_via_cql: true + +# **************** +# * GUARDRAILS * +# **************** + +# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold. +# Please note that the value of 0 is always allowed, +# which means that having no replication at all, i.e. RF = 0, is always valid. +# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled. +# Commenting out a guardrail also means it is disabled. +# minimum_replication_factor_fail_threshold: -1 +# minimum_replication_factor_warn_threshold: 3 +# maximum_replication_factor_warn_threshold: -1 +# maximum_replication_factor_fail_threshold: -1 + +# Guardrails to warn about or disallow creating a keyspace with specific replication strategy. +# Each of these 2 settings is a list storing replication strategies considered harmful. +# The replication strategies to choose from are: +# 1) SimpleStrategy, +# 2) NetworkTopologyStrategy, +# 3) LocalStrategy, +# 4) EverywhereStrategy +# +# replication_strategy_warn_list: +# - SimpleStrategy +# replication_strategy_fail_list: + +# This enables tablets on newly created keyspaces +enable_tablets: true +api_ui_dir: /opt/scylladb/swagger-ui/dist/ +api_doc_dir: /opt/scylladb/api/api-doc/ \ No newline at end of file diff --git a/utilities/local-scylla/node2-scylla.yaml b/utilities/local-scylla/node2-scylla.yaml new file mode 100644 index 00000000000..d0f15402524 --- /dev/null +++ b/utilities/local-scylla/node2-scylla.yaml @@ -0,0 +1,624 @@ +# Scylla storage config YAML + +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + +####################################### +# This file is split to two sections: +# 1. Supported parameters +# 2. Unsupported parameters: reserved for future use or backwards +# compatibility. +# Scylla will only read and use the first segment +####################################### + +### Supported Parameters + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +# It is recommended to change the default value when creating a new cluster. +# You can NOT modify this value for an existing cluster +#cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. 
+num_tokens: 256 + +# Directory where Scylla should store all its files, which are commitlog, +# data, hints, view_hints and saved_caches subdirectories. All of these +# subs can be overridden by the respective options below. +# If unset, the value defaults to /var/lib/scylla +# workdir: /var/lib/scylla + +# Directory where Scylla should store data on disk. +# data_file_directories: +# - /var/lib/scylla/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# commitlog_directory: /var/lib/scylla/commitlog + +# schema commit log. A special commitlog instance +# used for schema and system tables. +# When running on magnetic HDD, this should be a +# separate spindle than the data directories. +# schema_commitlog_directory: /var/lib/scylla/commitlog/schema + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Scylla won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# The size of the individual schema commitlog file segments. +# +# The default size is 128, which is 4 times larger than the default +# size of the data commitlog. It's because the segment size puts +# a limit on the mutation size that can be written at once, and some +# schema mutation writes are much larger than average. +schema_commitlog_segment_size_in_mb: 128 + +# seed_provider class_name is saved for future use. +# A seed address is mandatory. +seed_provider: + # The addresses of hosts that will serve as contact points for the joining node. + # It allows the node to discover the cluster ring topology on startup (when + # joining the cluster). + # Once the node has joined the cluster, the seed list has no function. + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # In a new cluster, provide the address of the first node. + # In an existing cluster, specify the address of at least one existing node. + # If you specify addresses of more than one node, use a comma to separate them. + # For example: ",," + - seeds: "127.0.0.1" + +# Address to bind to and tell other Scylla nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# If you leave broadcast_address (below) empty, then setting listen_address +# to 0.0.0.0 is wrong as other nodes will not know how to reach this node. +# If you set broadcast_address, then you can set listen_address to 0.0.0.0. 
+listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9043 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19043 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. +#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. 
+# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. 
+# authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for +# authorizing permission management. Otherwise, it allows all. Use for upgrading +# clusters' auth. +# authorizer: AllowAllAuthorizer + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# RPC address to broadcast to drivers and other Scylla nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +# broadcast_rpc_address: 1.2.3.4 + +# Uncomment to enable experimental features +# experimental_features: +# - udf +# - alternator-streams +# - broadcast-tables +# - keyspace-storage-options +# - tablets + +# The directory where hints files are stored if hinted handoff is enabled. +# hints_directory: /var/lib/scylla/hints + +# The directory where hints files are stored for materialized-view updates +# view_hints_directory: /var/lib/scylla/view_hints + +# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +# hinted_handoff_enabled: true + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +# max_hint_window_in_ms: 10800000 # 3 hours + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 10000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 10000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this also must have +# a non-zero value. Defaults to 2000. It's recommended to set this value to +# be at least 3 times smaller than the permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Murmur3Partitioner is currently the only supported partitioner, +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Total space to use for commitlogs. 
+# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. 
The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. +# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. 
+# client_encryption_options: +# enabled: false +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +# internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +# inter_dc_tcp_nodelay: false + +# Relaxation of environment checks. +# +# Scylla places certain requirements on its environment. If these requirements are +# not met, performance and reliability can be degraded. +# +# These requirements include: +# - A filesystem with good support for asynchronous I/O (AIO). Currently, +# this means XFS. +# +# false: strict environment checks are in place; do not start if they are not met. +# true: relaxed environment checks; performance and reliability may degraade. +# +# developer_mode: false + +# Idle-time background processing +# +# Scylla can perform certain jobs in the background while the system is otherwise idle, +# freeing processor resources when there is other work to be done. +# +# defragment_memory_on_idle: true +# +# prometheus port +# By default, Scylla opens prometheus API port on port 9180 +# setting the port to 0 will disable the prometheus API. +# prometheus_port: 9180 +# +# prometheus address +# Leaving this blank will set it to the same value as listen_address. +# This means that by default, Scylla listens to the prometheus API on the same +# listening address (and therefore network interface) used to listen for +# internal communication. If the monitoring node is not in this internal +# network, you can override prometheus_address explicitly - e.g., setting +# it to 0.0.0.0 to listen on all interfaces. +# prometheus_address: 1.2.3.4 + +# Distribution of data among cores (shards) within a node +# +# Scylla distributes data within a node among shards, using a round-robin +# strategy: +# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ... +# +# Scylla versions 1.6 and below used just one repetition of the pattern; +# this interfered with data placement among nodes (vnodes). +# +# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this +# provides for better data distribution. +# +# the value below is log (base 2) of the number of repetitions. +# +# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and +# below. +# +# Keep at 12 for new clusters. +murmur3_partitioner_ignore_msb_bits: 12 + +# Bypass in-memory data cache (the row cache) when performing reversed queries. +# reversed_reads_auto_bypass_cache: false + +# Use a new optimized algorithm for performing reversed reads. +# Set to `false` to fall-back to the old algorithm. +# enable_optimized_reversed_reads: true + +# Use on a new, parallel algorithm for performing aggregate queries. +# Set to `false` to fall-back to the old algorithm. +# enable_parallelized_aggregation: true + +# Time for which task manager task is kept in memory after it completes. +# task_ttl_in_seconds: 0 + +# In materialized views, restrictions are allowed only on the view's primary key columns. 
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part +# of the view's primary key. These invalid restrictions were ignored. +# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions. +# +# Can be true, false, or warn. +# * `true`: IS NOT NULL is allowed only on the view's primary key columns, +# trying to use it on other columns will cause an error, as it should. +# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored. +# It's useful for backwards compatibility. +# * `warn`: The same as false, but there's a warning about invalid view restrictions. +# +# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`. +# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`) +# to make sure that trying to create an invalid view causes an error. +strict_is_not_null_in_views: true + +# The Unix Domain Socket the node uses for maintenance socket. +# The possible options are: +# * ignore: the node will not open the maintenance socket, +# * workdir: the node will open the maintenance socket on the path /cql.m, +# where is a path defined by the workdir configuration option, +# * : the node will open the maintenance socket on the path . +maintenance_socket: ignore + +# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL +# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime +# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating +# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false +# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers. +# live_updatable_config_params_changeable_via_cql: true + +# **************** +# * GUARDRAILS * +# **************** + +# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold. +# Please note that the value of 0 is always allowed, +# which means that having no replication at all, i.e. RF = 0, is always valid. +# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled. +# Commenting out a guardrail also means it is disabled. +# minimum_replication_factor_fail_threshold: -1 +# minimum_replication_factor_warn_threshold: 3 +# maximum_replication_factor_warn_threshold: -1 +# maximum_replication_factor_fail_threshold: -1 + +# Guardrails to warn about or disallow creating a keyspace with specific replication strategy. +# Each of these 2 settings is a list storing replication strategies considered harmful. 
+# The replication strategies to choose from are: +# 1) SimpleStrategy, +# 2) NetworkTopologyStrategy, +# 3) LocalStrategy, +# 4) EverywhereStrategy +# +# replication_strategy_warn_list: +# - SimpleStrategy +# replication_strategy_fail_list: + +# This enables tablets on newly created keyspaces +enable_tablets: true +api_ui_dir: /opt/scylladb/swagger-ui/dist/ +api_doc_dir: /opt/scylladb/api/api-doc/ diff --git a/utilities/local-scylla/node3-scylla.yaml b/utilities/local-scylla/node3-scylla.yaml new file mode 100644 index 00000000000..aec5e3d224f --- /dev/null +++ b/utilities/local-scylla/node3-scylla.yaml @@ -0,0 +1,624 @@ +# Scylla storage config YAML + +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + +####################################### +# This file is split to two sections: +# 1. Supported parameters +# 2. Unsupported parameters: reserved for future use or backwards +# compatibility. +# Scylla will only read and use the first segment +####################################### + +### Supported Parameters + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +# It is recommended to change the default value when creating a new cluster. +# You can NOT modify this value for an existing cluster +#cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +num_tokens: 256 + +# Directory where Scylla should store all its files, which are commitlog, +# data, hints, view_hints and saved_caches subdirectories. All of these +# subs can be overridden by the respective options below. +# If unset, the value defaults to /var/lib/scylla +# workdir: /var/lib/scylla + +# Directory where Scylla should store data on disk. +# data_file_directories: +# - /var/lib/scylla/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# commitlog_directory: /var/lib/scylla/commitlog + +# schema commit log. A special commitlog instance +# used for schema and system tables. +# When running on magnetic HDD, this should be a +# separate spindle than the data directories. +# schema_commitlog_directory: /var/lib/scylla/commitlog/schema + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Scylla won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. 
+# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# The size of the individual schema commitlog file segments. +# +# The default size is 128, which is 4 times larger than the default +# size of the data commitlog. It's because the segment size puts +# a limit on the mutation size that can be written at once, and some +# schema mutation writes are much larger than average. +schema_commitlog_segment_size_in_mb: 128 + +# seed_provider class_name is saved for future use. +# A seed address is mandatory. +seed_provider: + # The addresses of hosts that will serve as contact points for the joining node. + # It allows the node to discover the cluster ring topology on startup (when + # joining the cluster). + # Once the node has joined the cluster, the seed list has no function. + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # In a new cluster, provide the address of the first node. + # In an existing cluster, specify the address of at least one existing node. + # If you specify addresses of more than one node, use a comma to separate them. + # For example: ",," + - seeds: "127.0.0.1" + +# Address to bind to and tell other Scylla nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# If you leave broadcast_address (below) empty, then setting listen_address +# to 0.0.0.0 is wrong as other nodes will not know how to reach this node. +# If you set broadcast_address, then you can set listen_address to 0.0.0.0. +listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9044 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19044 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. 
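Because every node in this local cluster binds to 127.0.0.1, the per-node files differ mainly in their client-facing ports (9043 for the previous node, 9044 here, with the remaining nodes configured analogously and the shard-aware ports following the same pattern). A client can list more than one of them as contact points so the driver still connects when a node is down. The sketch below is illustrative only; the ports shown are the ones that appear in these files.

    use scylla::{Session, SessionBuilder};

    /// Connects using several of the local nodes as contact points; the driver
    /// needs only one of them to be reachable to discover the rest of the ring.
    async fn connect_local_cluster() -> Result<Session, Box<dyn std::error::Error>> {
        let session = SessionBuilder::new()
            .known_node("127.0.0.1:9043") // previous node
            .known_node("127.0.0.1:9044") // this node
            .build()
            .await?;
        Ok(session)
    }
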
+#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. 
+# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. +# authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for +# authorizing permission management. Otherwise, it allows all. Use for upgrading +# clusters' auth. +# authorizer: AllowAllAuthorizer + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# RPC address to broadcast to drivers and other Scylla nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +# broadcast_rpc_address: 1.2.3.4 + +# Uncomment to enable experimental features +# experimental_features: +# - udf +# - alternator-streams +# - broadcast-tables +# - keyspace-storage-options +# - tablets + +# The directory where hints files are stored if hinted handoff is enabled. 
+# hints_directory: /var/lib/scylla/hints + +# The directory where hints files are stored for materialized-view updates +# view_hints_directory: /var/lib/scylla/view_hints + +# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +# hinted_handoff_enabled: true + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +# max_hint_window_in_ms: 10800000 # 3 hours + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 10000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 10000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this also must have +# a non-zero value. Defaults to 2000. It's recommended to set this value to +# be at least 3 times smaller than the permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Murmur3Partitioner is currently the only supported partitioner, +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Total space to use for commitlogs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. 
Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. 
+# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. +# client_encryption_options: +# enabled: false +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +# internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +# inter_dc_tcp_nodelay: false + +# Relaxation of environment checks. +# +# Scylla places certain requirements on its environment. If these requirements are +# not met, performance and reliability can be degraded. +# +# These requirements include: +# - A filesystem with good support for asynchronous I/O (AIO). Currently, +# this means XFS. 
+# +# false: strict environment checks are in place; do not start if they are not met. +# true: relaxed environment checks; performance and reliability may degraade. +# +# developer_mode: false + +# Idle-time background processing +# +# Scylla can perform certain jobs in the background while the system is otherwise idle, +# freeing processor resources when there is other work to be done. +# +# defragment_memory_on_idle: true +# +# prometheus port +# By default, Scylla opens prometheus API port on port 9180 +# setting the port to 0 will disable the prometheus API. +# prometheus_port: 9180 +# +# prometheus address +# Leaving this blank will set it to the same value as listen_address. +# This means that by default, Scylla listens to the prometheus API on the same +# listening address (and therefore network interface) used to listen for +# internal communication. If the monitoring node is not in this internal +# network, you can override prometheus_address explicitly - e.g., setting +# it to 0.0.0.0 to listen on all interfaces. +# prometheus_address: 1.2.3.4 + +# Distribution of data among cores (shards) within a node +# +# Scylla distributes data within a node among shards, using a round-robin +# strategy: +# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ... +# +# Scylla versions 1.6 and below used just one repetition of the pattern; +# this interfered with data placement among nodes (vnodes). +# +# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this +# provides for better data distribution. +# +# the value below is log (base 2) of the number of repetitions. +# +# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and +# below. +# +# Keep at 12 for new clusters. +murmur3_partitioner_ignore_msb_bits: 12 + +# Bypass in-memory data cache (the row cache) when performing reversed queries. +# reversed_reads_auto_bypass_cache: false + +# Use a new optimized algorithm for performing reversed reads. +# Set to `false` to fall-back to the old algorithm. +# enable_optimized_reversed_reads: true + +# Use on a new, parallel algorithm for performing aggregate queries. +# Set to `false` to fall-back to the old algorithm. +# enable_parallelized_aggregation: true + +# Time for which task manager task is kept in memory after it completes. +# task_ttl_in_seconds: 0 + +# In materialized views, restrictions are allowed only on the view's primary key columns. +# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part +# of the view's primary key. These invalid restrictions were ignored. +# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions. +# +# Can be true, false, or warn. +# * `true`: IS NOT NULL is allowed only on the view's primary key columns, +# trying to use it on other columns will cause an error, as it should. +# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored. +# It's useful for backwards compatibility. +# * `warn`: The same as false, but there's a warning about invalid view restrictions. +# +# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`. +# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`) +# to make sure that trying to create an invalid view causes an error. +strict_is_not_null_in_views: true + +# The Unix Domain Socket the node uses for maintenance socket. 
+# The possible options are: +# * ignore: the node will not open the maintenance socket, +# * workdir: the node will open the maintenance socket on the path /cql.m, +# where is a path defined by the workdir configuration option, +# * : the node will open the maintenance socket on the path . +maintenance_socket: ignore + +# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL +# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime +# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating +# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false +# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers. +# live_updatable_config_params_changeable_via_cql: true + +# **************** +# * GUARDRAILS * +# **************** + +# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold. +# Please note that the value of 0 is always allowed, +# which means that having no replication at all, i.e. RF = 0, is always valid. +# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled. +# Commenting out a guardrail also means it is disabled. +# minimum_replication_factor_fail_threshold: -1 +# minimum_replication_factor_warn_threshold: 3 +# maximum_replication_factor_warn_threshold: -1 +# maximum_replication_factor_fail_threshold: -1 + +# Guardrails to warn about or disallow creating a keyspace with specific replication strategy. +# Each of these 2 settings is a list storing replication strategies considered harmful. +# The replication strategies to choose from are: +# 1) SimpleStrategy, +# 2) NetworkTopologyStrategy, +# 3) LocalStrategy, +# 4) EverywhereStrategy +# +# replication_strategy_warn_list: +# - SimpleStrategy +# replication_strategy_fail_list: + +# This enables tablets on newly created keyspaces +enable_tablets: true +api_ui_dir: /opt/scylladb/swagger-ui/dist/ +api_doc_dir: /opt/scylladb/api/api-doc/ diff --git a/utilities/local-scylla/node4-scylla.yaml b/utilities/local-scylla/node4-scylla.yaml new file mode 100644 index 00000000000..4bf06b2f620 --- /dev/null +++ b/utilities/local-scylla/node4-scylla.yaml @@ -0,0 +1,624 @@ +# Scylla storage config YAML + +# cspell: words fsyncs rackdc partitioner mbean certficate degraade defragment + +####################################### +# This file is split to two sections: +# 1. Supported parameters +# 2. Unsupported parameters: reserved for future use or backwards +# compatibility. +# Scylla will only read and use the first segment +####################################### + +### Supported Parameters + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +# It is recommended to change the default value when creating a new cluster. +# You can NOT modify this value for an existing cluster +#cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +num_tokens: 256 + +# Directory where Scylla should store all its files, which are commitlog, +# data, hints, view_hints and saved_caches subdirectories. 
All of these +# subs can be overridden by the respective options below. +# If unset, the value defaults to /var/lib/scylla +# workdir: /var/lib/scylla + +# Directory where Scylla should store data on disk. +# data_file_directories: +# - /var/lib/scylla/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# commitlog_directory: /var/lib/scylla/commitlog + +# schema commit log. A special commitlog instance +# used for schema and system tables. +# When running on magnetic HDD, this should be a +# separate spindle than the data directories. +# schema_commitlog_directory: /var/lib/scylla/commitlog/schema + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Scylla won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# The size of the individual schema commitlog file segments. +# +# The default size is 128, which is 4 times larger than the default +# size of the data commitlog. It's because the segment size puts +# a limit on the mutation size that can be written at once, and some +# schema mutation writes are much larger than average. +schema_commitlog_segment_size_in_mb: 128 + +# seed_provider class_name is saved for future use. +# A seed address is mandatory. +seed_provider: + # The addresses of hosts that will serve as contact points for the joining node. + # It allows the node to discover the cluster ring topology on startup (when + # joining the cluster). + # Once the node has joined the cluster, the seed list has no function. + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # In a new cluster, provide the address of the first node. + # In an existing cluster, specify the address of at least one existing node. + # If you specify addresses of more than one node, use a comma to separate them. + # For example: ",," + - seeds: "127.0.0.1" + +# Address to bind to and tell other Scylla nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# If you leave broadcast_address (below) empty, then setting listen_address +# to 0.0.0.0 is wrong as other nodes will not know how to reach this node. +# If you set broadcast_address, then you can set listen_address to 0.0.0.0. 
+listen_address: localhost + +# Address to broadcast to other Scylla nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this to true to listen on broadcast_address +# in addition to the listen_address, allowing nodes to communicate in both interfaces. +# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. +# +# listen_on_broadcast_address: false + +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. +native_transport_port: 9045 + +# Like native_transport_port, but clients are forwarded to specific shards, based on the +# client-side port numbers. +native_shard_aware_transport_port: 19045 + +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +#native_transport_port_ssl: 9142 + +# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the +# client-side port numbers. +#native_shard_aware_transport_port_ssl: 19142 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 + +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# how long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# IEndpointSnitch. The snitch has two functions: +# - it teaches Scylla enough about your network topology to route +# requests efficiently +# - it allows Scylla to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Scylla will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Scylla provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. 
+# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Scylla will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# port for REST API server +api_port: 10000 + +# IP for the REST API server +api_address: 127.0.0.1 + +# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 128 + +# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. +batch_size_fail_threshold_in_kb: 1024 + +# Authentication backend, identifying users +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair +# to authenticate in the same manner as PasswordAuthenticator, but improper credentials +# result in being logged in as an anonymous user. Use for upgrading clusters' auth. 
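If the authenticator below is switched from the default AllowAllAuthenticator to PasswordAuthenticator (or the transitional variant), the client must present credentials when it opens the session. A hedged sketch building on the connection example above; the role name and password are placeholders, not values from this patch:

    use scylla::{Session, SessionBuilder};

    // Sketch: same connection as before, but with credentials for
    // PasswordAuthenticator. SessionBuilder::user() sets the username/password
    // pair the driver presents during authentication.
    async fn connect_with_auth() -> Result<Session, Box<dyn std::error::Error>> {
        let session = SessionBuilder::new()
            .known_node("127.0.0.1:9045")
            // Placeholder credentials; a real deployment would load these from
            // configuration or a secret store, and would also raise the
            // replication factor of the system_auth keyspace as advised above.
            .user("catalyst", "example-password")
            .build()
            .await?;
        Ok(session)
    }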
+# authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for +# authorizing permission management. Otherwise, it allows all. Use for upgrading +# clusters' auth. +# authorizer: AllowAllAuthorizer + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# RPC address to broadcast to drivers and other Scylla nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +# broadcast_rpc_address: 1.2.3.4 + +# Uncomment to enable experimental features +# experimental_features: +# - udf +# - alternator-streams +# - broadcast-tables +# - keyspace-storage-options +# - tablets + +# The directory where hints files are stored if hinted handoff is enabled. +# hints_directory: /var/lib/scylla/hints + +# The directory where hints files are stored for materialized-view updates +# view_hints_directory: /var/lib/scylla/view_hints + +# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +# hinted_handoff_enabled: true + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +# max_hint_window_in_ms: 10800000 # 3 hours + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 10000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 10000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this also must have +# a non-zero value. Defaults to 2000. It's recommended to set this value to +# be at least 3 times smaller than the permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Murmur3Partitioner is currently the only supported partitioner, +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Total space to use for commitlogs. 
+# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Scylla will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# +# A value of -1 (default) will automatically equate it to the total amount of memory +# available for Scylla. +commitlog_total_space_in_mb: -1 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# ssl_storage_port: 7001 + +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +# start_native_transport: true + +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# Whether to start the thrift rpc server. +# start_rpc: true + +# enable or disable keepalive on rpc/native connections +# rpc_keepalive: true + +# Set to true to have Scylla create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +# incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Scylla won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +# snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +# auto_snapshot: true + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +# tombstone_warn_threshold: 1000 +# tombstone_failure_threshold: 100000 + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. 
The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Scylla will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +# column_index_size_in_kb: 64 + +# Auto-scaling of the promoted index prevents running out of memory +# when the promoted index grows too large (due to partitions with many rows +# vs. too small column_index_size_in_kb). When the serialized representation +# of the promoted index grows by this threshold, the desired block size +# for this partition (initialized to column_index_size_in_kb) +# is doubled, to decrease the sampling resolution by half. +# +# To disable promoted index auto-scaling, set the threshold to 0. +# column_index_auto_scale_threshold_in_kb: 10240 + +# Log a warning when writing partitions larger than this value +# compaction_large_partition_warning_threshold_mb: 1000 + +# Log a warning when writing rows larger than this value +# compaction_large_row_warning_threshold_mb: 10 + +# Log a warning when writing cells larger than this value +# compaction_large_cell_warning_threshold_mb: 1 + +# Log a warning when row number is larger than this value +# compaction_rows_count_warning_threshold: 100000 + +# Log a warning when writing a collection containing more elements than this value +# compaction_collection_elements_count_warning_threshold: 10000 + +# How long the coordinator should wait for seq or index scans to complete +# range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +# counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +# cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +# truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +# request_timeout_in_ms: 10000 + +# Enable or disable inter-node encryption. +# You must also generate keys and provide the appropriate key and trust store locations and passwords. +# +# The available internode options are : all, none, dc, rack +# If set to dc scylla will encrypt the traffic between the DCs +# If set to rack scylla will encrypt the traffic between the racks +# +# SSL/TLS algorithm and ciphers used can be controlled by +# the priority_string parameter. Info on priority string +# syntax and values is available at: +# https://gnutls.org/manual/html_node/Priority-Strings.html +# +# The require_client_auth parameter allows you to +# restrict access to service based on certificate +# validation. Client must provide a certificate +# accepted by the used trust store to connect. +# +# server_encryption_options: +# internode_encryption: none +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# enable or disable client/server encryption. 
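When encryption is enabled, either node-to-node as above or through the client_encryption_options block that follows, the client needs a TLS context whose trust store matches the certificates configured in this file. A sketch only, assuming the scylla crate is built with its optional `ssl` feature and linked against the openssl crate; the CA path is a placeholder:

    use openssl::ssl::{SslContextBuilder, SslMethod, SslVerifyMode};
    use scylla::{Session, SessionBuilder};

    // Sketch: connect over TLS to the encrypted native transport port
    // (native_transport_port_ssl, 9142 in the commented defaults above).
    async fn connect_tls() -> Result<Session, Box<dyn std::error::Error>> {
        let mut ctx = SslContextBuilder::new(SslMethod::tls())?;
        // Trust the CA that signed the node certificate (placeholder path).
        ctx.set_ca_file("/etc/scylla/certs/ca.crt")?;
        ctx.set_verify(SslVerifyMode::PEER);

        let session = SessionBuilder::new()
            .known_node("127.0.0.1:9142")
            .ssl_context(Some(ctx.build()))
            .build()
            .await?;
        Ok(session)
    }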
+# client_encryption_options: +# enabled: false +# certificate: conf/scylla.crt +# keyfile: conf/scylla.key +# truststore: +# certficate_revocation_list: +# require_client_auth: False +# priority_string: + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +# internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +# inter_dc_tcp_nodelay: false + +# Relaxation of environment checks. +# +# Scylla places certain requirements on its environment. If these requirements are +# not met, performance and reliability can be degraded. +# +# These requirements include: +# - A filesystem with good support for asynchronous I/O (AIO). Currently, +# this means XFS. +# +# false: strict environment checks are in place; do not start if they are not met. +# true: relaxed environment checks; performance and reliability may degraade. +# +# developer_mode: false + +# Idle-time background processing +# +# Scylla can perform certain jobs in the background while the system is otherwise idle, +# freeing processor resources when there is other work to be done. +# +# defragment_memory_on_idle: true +# +# prometheus port +# By default, Scylla opens prometheus API port on port 9180 +# setting the port to 0 will disable the prometheus API. +# prometheus_port: 9180 +# +# prometheus address +# Leaving this blank will set it to the same value as listen_address. +# This means that by default, Scylla listens to the prometheus API on the same +# listening address (and therefore network interface) used to listen for +# internal communication. If the monitoring node is not in this internal +# network, you can override prometheus_address explicitly - e.g., setting +# it to 0.0.0.0 to listen on all interfaces. +# prometheus_address: 1.2.3.4 + +# Distribution of data among cores (shards) within a node +# +# Scylla distributes data within a node among shards, using a round-robin +# strategy: +# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ... +# +# Scylla versions 1.6 and below used just one repetition of the pattern; +# this interfered with data placement among nodes (vnodes). +# +# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this +# provides for better data distribution. +# +# the value below is log (base 2) of the number of repetitions. +# +# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and +# below. +# +# Keep at 12 for new clusters. +murmur3_partitioner_ignore_msb_bits: 12 + +# Bypass in-memory data cache (the row cache) when performing reversed queries. +# reversed_reads_auto_bypass_cache: false + +# Use a new optimized algorithm for performing reversed reads. +# Set to `false` to fall-back to the old algorithm. +# enable_optimized_reversed_reads: true + +# Use on a new, parallel algorithm for performing aggregate queries. +# Set to `false` to fall-back to the old algorithm. +# enable_parallelized_aggregation: true + +# Time for which task manager task is kept in memory after it completes. +# task_ttl_in_seconds: 0 + +# In materialized views, restrictions are allowed only on the view's primary key columns. 
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part +# of the view's primary key. These invalid restrictions were ignored. +# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions. +# +# Can be true, false, or warn. +# * `true`: IS NOT NULL is allowed only on the view's primary key columns, +# trying to use it on other columns will cause an error, as it should. +# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored. +# It's useful for backwards compatibility. +# * `warn`: The same as false, but there's a warning about invalid view restrictions. +# +# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`. +# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`) +# to make sure that trying to create an invalid view causes an error. +strict_is_not_null_in_views: true + +# The Unix Domain Socket the node uses for maintenance socket. +# The possible options are: +# * ignore: the node will not open the maintenance socket, +# * workdir: the node will open the maintenance socket on the path /cql.m, +# where is a path defined by the workdir configuration option, +# * : the node will open the maintenance socket on the path . +maintenance_socket: ignore + +# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL +# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime +# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating +# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false +# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers. +# live_updatable_config_params_changeable_via_cql: true + +# **************** +# * GUARDRAILS * +# **************** + +# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold. +# Please note that the value of 0 is always allowed, +# which means that having no replication at all, i.e. RF = 0, is always valid. +# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled. +# Commenting out a guardrail also means it is disabled. +# minimum_replication_factor_fail_threshold: -1 +# minimum_replication_factor_warn_threshold: 3 +# maximum_replication_factor_warn_threshold: -1 +# maximum_replication_factor_fail_threshold: -1 + +# Guardrails to warn about or disallow creating a keyspace with specific replication strategy. +# Each of these 2 settings is a list storing replication strategies considered harmful. 
+# The replication strategies to choose from are: +# 1) SimpleStrategy, +# 2) NetworkTopologyStrategy, +# 3) LocalStrategy, +# 4) EverywhereStrategy +# +# replication_strategy_warn_list: +# - SimpleStrategy +# replication_strategy_fail_list: + +# This enables tablets on newly created keyspaces +enable_tablets: true +api_ui_dir: /opt/scylladb/swagger-ui/dist/ +api_doc_dir: /opt/scylladb/api/api-doc/ diff --git a/utilities/wallet-tester/src/common/components/TxBuilder.tsx b/utilities/wallet-tester/src/common/components/TxBuilder.tsx index fe29b8f31bd..94617a45d52 100644 --- a/utilities/wallet-tester/src/common/components/TxBuilder.tsx +++ b/utilities/wallet-tester/src/common/components/TxBuilder.tsx @@ -200,8 +200,8 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { certificateFields.fields[i]?.type === value ? null : certificateFields.replace({ - type: value as any /* TODO: support default values for each type */, - }) + type: value as any /* TODO: support default values for each type */, + }) } /> {certificateFields.fields[i]?.type === CertificateType.StakeDelegation ? ( @@ -212,7 +212,7 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { className={twMerge( "w-full rounded px-1 border border-solid border-black", (certificateFields.fields[i] as any)?.hashType === "addr_keyhash" && - "bg-black text-white" + "bg-black text-white" )} onClick={() => certificateFields.update(i, { @@ -228,7 +228,7 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { className={twMerge( "w-full rounded px-1 border border-solid border-black", (certificateFields.fields[i] as any)?.hashType === "scripthash" && - "bg-black text-white" + "bg-black text-white" )} onClick={() => certificateFields.update(i, { @@ -409,7 +409,7 @@ function TxBuilder({ utxos, addresses, onSubmit: onPropSubmit = noop }: Props) { {({ open }) => ( <>

                    {open ? : }
-                   Auxillary Metadata
+                   Auxiliary Metadata
)} diff --git a/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts b/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts index 59f981467be..a863556d4b0 100644 --- a/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts +++ b/utilities/wallet-tester/src/common/helpers/buildUnsignedTx.ts @@ -149,7 +149,7 @@ export default async function buildUnsignedTx( // #7 add auxiliary data hash if (builder.auxiliaryDataHash) { - // note: the hash will be set after building auxillary data + // note: the hash will be set after building auxiliary data } // #8 add validity interval start @@ -219,7 +219,7 @@ export default async function buildUnsignedTx( // build a full transaction, passing in empty witness set const txBody = txBuilder.build(); - + // #15 add network id if (builder.networkId && [0, 1].includes(Number(builder.networkId))) { const networkId = Number(builder.networkId) === 0 ? NetworkId.testnet() : NetworkId.mainnet() From 2d3295166abd52709987467f2c47cf96d9885b8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Damian=20Moli=C5=84ski?= <47773413+damian-molinski@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:10:35 +0200 Subject: [PATCH 2/2] fix(cat-voices): spaces navigation behaviour (#835) * feat: interactive drawer nav tiles * feat: remove drawer space header arrow * fix: update spaces order * fix: generate loc files * feat: KeyboardKeyButton, VoicesPlainTooltip leading and trailing * feat: spaces shortcuts --- catalyst_voices/lib/common/ext/space_ext.dart | 12 +++--- .../spaces_overview_list_view.dart | 2 +- .../pages/spaces/drawer/discovery_menu.dart | 4 ++ .../lib/pages/spaces/drawer/guest_menu.dart | 4 ++ .../drawer/individual_private_campaigns.dart | 15 ++++--- .../spaces/drawer/my_private_proposals.dart | 15 ++++--- .../lib/pages/spaces/drawer/space_header.dart | 4 -- .../pages/spaces/drawer/voting_rounds.dart | 8 +++- .../lib/pages/workspace/workspace_page.dart | 19 ++++++++- .../lib/routes/routing/spaces_route.dart | 22 +++++----- .../lib/routes/routing/spaces_route.g.dart | 42 +++++++++---------- .../catalyst_voices_localizations.dart | 4 +- .../catalyst_voices_localizations_en.dart | 4 +- .../catalyst_voices_localizations_es.dart | 4 +- .../lib/l10n/intl_en.arb | 4 +- .../catalyst_voices_models/lib/src/space.dart | 4 +- 16 files changed, 98 insertions(+), 69 deletions(-) diff --git a/catalyst_voices/lib/common/ext/space_ext.dart b/catalyst_voices/lib/common/ext/space_ext.dart index 3d234f86fd9..b71cfe26f93 100644 --- a/catalyst_voices/lib/common/ext/space_ext.dart +++ b/catalyst_voices/lib/common/ext/space_ext.dart @@ -8,8 +8,6 @@ import 'package:flutter/material.dart'; extension SpaceExt on Space { void go(BuildContext context) { switch (this) { - case Space.treasury: - const TreasuryRoute().go(context); case Space.discovery: const DiscoveryRoute().go(context); case Space.workspace: @@ -18,42 +16,44 @@ extension SpaceExt on Space { const VotingRoute().go(context); case Space.fundedProjects: const FundedProjectsRoute().go(context); + case Space.treasury: + const TreasuryRoute().go(context); } } String localizedName(VoicesLocalizations localizations) { return switch (this) { - Space.treasury => localizations.spaceTreasuryName, Space.discovery => localizations.spaceDiscoveryName, Space.workspace => localizations.spaceWorkspaceName, Space.voting => localizations.spaceVotingName, Space.fundedProjects => localizations.spaceFundedProjects, + Space.treasury => localizations.spaceTreasuryName, }; } SvgGenImage get icon => switch (this) { - Space.treasury => 
VoicesAssets.icons.fund, Space.discovery => VoicesAssets.icons.lightBulb, Space.workspace => VoicesAssets.icons.briefcase, Space.voting => VoicesAssets.icons.vote, Space.fundedProjects => VoicesAssets.icons.flag, + Space.treasury => VoicesAssets.icons.fund, }; Color backgroundColor(BuildContext context) => switch (this) { - Space.treasury => Theme.of(context).colors.successContainer!, Space.discovery => Theme.of(context).colors.iconsSecondary!.withOpacity(0.16), Space.workspace => Theme.of(context).colorScheme.primaryContainer, Space.voting => Theme.of(context).colors.warningContainer!, Space.fundedProjects => Theme.of(context).colors.iconsSecondary!.withOpacity(0.16), + Space.treasury => Theme.of(context).colors.successContainer!, }; Color foregroundColor(BuildContext context) => switch (this) { - Space.treasury => Theme.of(context).colors.iconsSuccess!, Space.discovery => Theme.of(context).colors.iconsSecondary!, Space.workspace => Theme.of(context).colorScheme.primary, Space.voting => Theme.of(context).colors.iconsWarning!, Space.fundedProjects => Theme.of(context).colors.iconsSecondary!, + Space.treasury => Theme.of(context).colors.iconsSuccess!, }; } diff --git a/catalyst_voices/lib/pages/overall_spaces/spaces_overview_list_view.dart b/catalyst_voices/lib/pages/overall_spaces/spaces_overview_list_view.dart index 8f3814ade5b..4dbe45c741e 100644 --- a/catalyst_voices/lib/pages/overall_spaces/spaces_overview_list_view.dart +++ b/catalyst_voices/lib/pages/overall_spaces/spaces_overview_list_view.dart @@ -38,12 +38,12 @@ class _SpacesListViewState extends State { itemBuilder: (context, index) { final space = Space.values[index]; return switch (space) { - Space.treasury => TreasuryOverview(key: ObjectKey(space)), Space.discovery => DiscoveryOverview(key: ObjectKey(space)), Space.workspace => WorkspaceOverview(key: ObjectKey(space)), Space.voting => VotingOverview(key: ObjectKey(space)), Space.fundedProjects => FundedProjectsOverview(key: ObjectKey(space)), + Space.treasury => TreasuryOverview(key: ObjectKey(space)), }; }, separatorBuilder: (context, index) => const SizedBox(width: 16), diff --git a/catalyst_voices/lib/pages/spaces/drawer/discovery_menu.dart b/catalyst_voices/lib/pages/spaces/drawer/discovery_menu.dart index 39e5b356e79..fa588ba1df4 100644 --- a/catalyst_voices/lib/pages/spaces/drawer/discovery_menu.dart +++ b/catalyst_voices/lib/pages/spaces/drawer/discovery_menu.dart @@ -18,20 +18,24 @@ class DiscoveryDrawerMenu extends StatelessWidget { leading: VoicesAssets.icons.home.buildIcon(), name: 'Discovery Dashboard', backgroundColor: Space.discovery.backgroundColor(context), + onTap: () => Scaffold.of(context).closeDrawer(), ), const VoicesDivider(), VoicesNavTile( leading: VoicesAssets.icons.user.buildIcon(), name: 'Catalyst Roles', + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( leading: VoicesAssets.icons.annotation.buildIcon(), name: 'Feedback', + onTap: () => Scaffold.of(context).closeDrawer(), ), const VoicesDivider(), VoicesNavTile( leading: VoicesAssets.icons.arrowRight.buildIcon(), name: 'Catalyst Gitbook documentation', + onTap: () => Scaffold.of(context).closeDrawer(), ), ], ); diff --git a/catalyst_voices/lib/pages/spaces/drawer/guest_menu.dart b/catalyst_voices/lib/pages/spaces/drawer/guest_menu.dart index 99d937c50f8..bc2df6ec182 100644 --- a/catalyst_voices/lib/pages/spaces/drawer/guest_menu.dart +++ b/catalyst_voices/lib/pages/spaces/drawer/guest_menu.dart @@ -21,18 +21,22 @@ class GuestMenu extends StatelessWidget { leading: 
VoicesAssets.icons.home.buildIcon(), name: 'Home', backgroundColor: space.backgroundColor(context), + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( leading: VoicesAssets.icons.calendar.buildIcon(), name: 'Discover ideas', + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( leading: VoicesAssets.icons.clipboardCheck.buildIcon(), name: 'Learn about Keychain', + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( leading: VoicesAssets.icons.questionMarkCircle.buildIcon(), name: 'FAQ', + onTap: () => Scaffold.of(context).closeDrawer(), ), ], ); diff --git a/catalyst_voices/lib/pages/spaces/drawer/individual_private_campaigns.dart b/catalyst_voices/lib/pages/spaces/drawer/individual_private_campaigns.dart index 641cc9cb0bc..37b49509d90 100644 --- a/catalyst_voices/lib/pages/spaces/drawer/individual_private_campaigns.dart +++ b/catalyst_voices/lib/pages/spaces/drawer/individual_private_campaigns.dart @@ -8,28 +8,31 @@ class IndividualPrivateCampaigns extends StatelessWidget { @override Widget build(BuildContext context) { - return const Column( + return Column( mainAxisSize: MainAxisSize.min, children: [ - SpaceHeader(Space.treasury), - SectionHeader( + const SpaceHeader(Space.treasury), + const SectionHeader( leading: SizedBox(width: 12), title: Text('Individual private campaigns'), ), VoicesNavTile( name: 'Fund name 1', status: ProposalStatus.ready, - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( name: 'Campaign 1', status: ProposalStatus.draft, - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( name: 'What happens with a campaign title that is longer that', status: ProposalStatus.draft, - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), ], ); diff --git a/catalyst_voices/lib/pages/spaces/drawer/my_private_proposals.dart b/catalyst_voices/lib/pages/spaces/drawer/my_private_proposals.dart index a7a5f93af02..39813bf2fa3 100644 --- a/catalyst_voices/lib/pages/spaces/drawer/my_private_proposals.dart +++ b/catalyst_voices/lib/pages/spaces/drawer/my_private_proposals.dart @@ -8,28 +8,31 @@ class MyPrivateProposals extends StatelessWidget { @override Widget build(BuildContext context) { - return const Column( + return Column( mainAxisSize: MainAxisSize.min, children: [ - SpaceHeader(Space.workspace), - SectionHeader( + const SpaceHeader(Space.workspace), + const SectionHeader( leading: SizedBox(width: 12), title: Text('My private proposals (3/5)'), ), VoicesNavTile( name: 'My first proposal', status: ProposalStatus.draft, - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( name: 'My second proposal', status: ProposalStatus.inProgress, - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( name: 'My third proposal', status: ProposalStatus.inProgress, - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), ], ); diff --git a/catalyst_voices/lib/pages/spaces/drawer/space_header.dart b/catalyst_voices/lib/pages/spaces/drawer/space_header.dart index 595f0f9db67..724c6d72db3 100644 --- a/catalyst_voices/lib/pages/spaces/drawer/space_header.dart +++ 
b/catalyst_voices/lib/pages/spaces/drawer/space_header.dart @@ -37,10 +37,6 @@ class SpaceHeader extends StatelessWidget { ?.copyWith(color: theme.colors.textPrimary), ), ), - ChevronExpandButton( - isExpanded: false, - onTap: () {}, - ), ], ), ); diff --git a/catalyst_voices/lib/pages/spaces/drawer/voting_rounds.dart b/catalyst_voices/lib/pages/spaces/drawer/voting_rounds.dart index da89cae0b26..598cedd01ab 100644 --- a/catalyst_voices/lib/pages/spaces/drawer/voting_rounds.dart +++ b/catalyst_voices/lib/pages/spaces/drawer/voting_rounds.dart @@ -21,14 +21,16 @@ class VotingRounds extends StatelessWidget { name: 'Voting round 14', status: ProposalStatus.open, leading: VoicesAssets.icons.vote.buildIcon(), + onTap: () => Scaffold.of(context).closeDrawer(), ), const SectionHeader( leading: SizedBox(width: 12), title: Text('Funding tracks / Categories'), ), - const VoicesNavTile( + VoicesNavTile( name: 'My first proposal', - trailing: MoreOptionsButton(), + trailing: const MoreOptionsButton(), + onTap: () => Scaffold.of(context).closeDrawer(), ), const VoicesDivider(), const SectionHeader( @@ -38,10 +40,12 @@ class VotingRounds extends StatelessWidget { VoicesNavTile( name: 'Drep signup', leading: VoicesAssets.icons.user.buildIcon(), + onTap: () => Scaffold.of(context).closeDrawer(), ), VoicesNavTile( name: 'Drep delegation', leading: VoicesAssets.icons.user.buildIcon(), + onTap: () => Scaffold.of(context).closeDrawer(), ), const VoicesDivider(), ], diff --git a/catalyst_voices/lib/pages/workspace/workspace_page.dart b/catalyst_voices/lib/pages/workspace/workspace_page.dart index a788f9bab8b..ae19d067832 100644 --- a/catalyst_voices/lib/pages/workspace/workspace_page.dart +++ b/catalyst_voices/lib/pages/workspace/workspace_page.dart @@ -57,6 +57,12 @@ class WorkspacePage extends StatefulWidget { } class _WorkspacePageState extends State { + // This future is here only because we're loading too much at once + // and drawer animation hangs for sec. 
+ // + // Should be deleted later with normal data source + final _delayFuture = Future.delayed(const Duration(milliseconds: 500)); + @override Widget build(BuildContext context) { return ProposalControllerScope( @@ -66,8 +72,17 @@ class _WorkspacePageState extends State { navigation: _proposalNavigation, ), right: const ProposalSetupPanel(), - child: ProposalDetails( - navigation: _proposalNavigation, + child: FutureBuilder( + future: _delayFuture, + builder: (context, snapshot) { + if (snapshot.connectionState != ConnectionState.done) { + return const SizedBox.shrink(); + } + + return ProposalDetails( + navigation: _proposalNavigation, + ); + }, ), ), ); diff --git a/catalyst_voices/lib/routes/routing/spaces_route.dart b/catalyst_voices/lib/routes/routing/spaces_route.dart index ee13a93ba20..8d9ca25a7ac 100644 --- a/catalyst_voices/lib/routes/routing/spaces_route.dart +++ b/catalyst_voices/lib/routes/routing/spaces_route.dart @@ -14,22 +14,22 @@ part 'spaces_route.g.dart'; @TypedShellRoute( routes: >[ - TypedGoRoute(path: '/${Routes.currentMilestone}/treasury'), TypedGoRoute(path: '/${Routes.currentMilestone}/discovery'), TypedGoRoute(path: '/${Routes.currentMilestone}/workspace'), TypedGoRoute(path: '/${Routes.currentMilestone}/voting'), TypedGoRoute( path: '/${Routes.currentMilestone}/funded_projects', ), + TypedGoRoute(path: '/${Routes.currentMilestone}/treasury'), ], ) final class SpacesShellRouteData extends ShellRouteData { static const _spacePathMapping = { - 'treasury': Space.treasury, 'discovery': Space.discovery, 'workspace': Space.workspace, 'voting': Space.voting, 'funded_projects': Space.fundedProjects, + 'treasury': Space.treasury, }; const SpacesShellRouteData(); @@ -59,15 +59,6 @@ final class SpacesShellRouteData extends ShellRouteData { } } -final class TreasuryRoute extends GoRouteData with FadePageTransitionMixin { - const TreasuryRoute(); - - @override - Widget build(BuildContext context, GoRouterState state) { - return const TreasuryPage(); - } -} - final class DiscoveryRoute extends GoRouteData with FadePageTransitionMixin { const DiscoveryRoute(); @@ -104,3 +95,12 @@ final class FundedProjectsRoute extends GoRouteData return const FundedProjectsPage(); } } + +final class TreasuryRoute extends GoRouteData with FadePageTransitionMixin { + const TreasuryRoute(); + + @override + Widget build(BuildContext context, GoRouterState state) { + return const TreasuryPage(); + } +} diff --git a/catalyst_voices/lib/routes/routing/spaces_route.g.dart b/catalyst_voices/lib/routes/routing/spaces_route.g.dart index dc54a81a2a8..a3ae841792e 100644 --- a/catalyst_voices/lib/routes/routing/spaces_route.g.dart +++ b/catalyst_voices/lib/routes/routing/spaces_route.g.dart @@ -13,10 +13,6 @@ List get $appRoutes => [ RouteBase get $spacesShellRouteData => ShellRouteData.$route( factory: $SpacesShellRouteDataExtension._fromState, routes: [ - GoRouteData.$route( - path: '/m4/treasury', - factory: $TreasuryRouteExtension._fromState, - ), GoRouteData.$route( path: '/m4/discovery', factory: $DiscoveryRouteExtension._fromState, @@ -33,6 +29,10 @@ RouteBase get $spacesShellRouteData => ShellRouteData.$route( path: '/m4/funded_projects', factory: $FundedProjectsRouteExtension._fromState, ), + GoRouteData.$route( + path: '/m4/treasury', + factory: $TreasuryRouteExtension._fromState, + ), ], ); @@ -41,23 +41,6 @@ extension $SpacesShellRouteDataExtension on SpacesShellRouteData { const SpacesShellRouteData(); } -extension $TreasuryRouteExtension on TreasuryRoute { - static TreasuryRoute 
_fromState(GoRouterState state) => const TreasuryRoute(); - - String get location => GoRouteData.$location( - '/m4/treasury', - ); - - void go(BuildContext context) => context.go(location); - - Future push(BuildContext context) => context.push(location); - - void pushReplacement(BuildContext context) => - context.pushReplacement(location); - - void replace(BuildContext context) => context.replace(location); -} - extension $DiscoveryRouteExtension on DiscoveryRoute { static DiscoveryRoute _fromState(GoRouterState state) => const DiscoveryRoute(); @@ -128,3 +111,20 @@ extension $FundedProjectsRouteExtension on FundedProjectsRoute { void replace(BuildContext context) => context.replace(location); } + +extension $TreasuryRouteExtension on TreasuryRoute { + static TreasuryRoute _fromState(GoRouterState state) => const TreasuryRoute(); + + String get location => GoRouteData.$location( + '/m4/treasury', + ); + + void go(BuildContext context) => context.go(location); + + Future push(BuildContext context) => context.push(location); + + void pushReplacement(BuildContext context) => + context.pushReplacement(location); + + void replace(BuildContext context) => context.replace(location); +} diff --git a/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations.dart b/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations.dart index 68db51b6101..2ae03bd9b8a 100644 --- a/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations.dart +++ b/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations.dart @@ -571,7 +571,7 @@ abstract class VoicesLocalizations { /// Name of space shown in different spaces that indicates its origin /// /// In en, this message translates to: - /// **'Treasury'** + /// **'Treasury space'** String get spaceTreasuryName; /// Name of space shown in different spaces that indicates its origin @@ -595,7 +595,7 @@ abstract class VoicesLocalizations { /// Name of space shown in different spaces that indicates its origin /// /// In en, this message translates to: - /// **'Funded Projects'** + /// **'Funded project space'** String get spaceFundedProjects; /// Refers to a lock action, i.e. to lock the session. 
diff --git a/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_en.dart b/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_en.dart index be2c03cdb54..967640ee62b 100644 --- a/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_en.dart +++ b/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_en.dart @@ -299,7 +299,7 @@ class VoicesLocalizationsEn extends VoicesLocalizations { String get clickToRestart => 'Click to restart'; @override - String get spaceTreasuryName => 'Treasury'; + String get spaceTreasuryName => 'Treasury space'; @override String get spaceDiscoveryName => 'Discovery space'; @@ -311,7 +311,7 @@ class VoicesLocalizationsEn extends VoicesLocalizations { String get spaceVotingName => 'Voting space'; @override - String get spaceFundedProjects => 'Funded Projects'; + String get spaceFundedProjects => 'Funded project space'; @override String get lock => 'Lock'; diff --git a/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_es.dart b/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_es.dart index 55c10668621..33e8773d20f 100644 --- a/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_es.dart +++ b/catalyst_voices/packages/catalyst_voices_localization/lib/generated/catalyst_voices_localizations_es.dart @@ -299,7 +299,7 @@ class VoicesLocalizationsEs extends VoicesLocalizations { String get clickToRestart => 'Click to restart'; @override - String get spaceTreasuryName => 'Treasury'; + String get spaceTreasuryName => 'Treasury space'; @override String get spaceDiscoveryName => 'Discovery space'; @@ -311,7 +311,7 @@ class VoicesLocalizationsEs extends VoicesLocalizations { String get spaceVotingName => 'Voting space'; @override - String get spaceFundedProjects => 'Funded Projects'; + String get spaceFundedProjects => 'Funded project space'; @override String get lock => 'Lock'; diff --git a/catalyst_voices/packages/catalyst_voices_localization/lib/l10n/intl_en.arb b/catalyst_voices/packages/catalyst_voices_localization/lib/l10n/intl_en.arb index faf1ba26304..215e697aaf0 100644 --- a/catalyst_voices/packages/catalyst_voices_localization/lib/l10n/intl_en.arb +++ b/catalyst_voices/packages/catalyst_voices_localization/lib/l10n/intl_en.arb @@ -362,7 +362,7 @@ "@clickToRestart": { "description": "In different places update popup body" }, - "spaceTreasuryName": "Treasury", + "spaceTreasuryName": "Treasury space", "@spaceTreasuryName": { "description": "Name of space shown in different spaces that indicates its origin" }, @@ -378,7 +378,7 @@ "@spaceVotingName": { "description": "Name of space shown in different spaces that indicates its origin" }, - "spaceFundedProjects": "Funded Projects", + "spaceFundedProjects": "Funded project space", "@spaceFundedProjects": { "description": "Name of space shown in different spaces that indicates its origin" }, diff --git a/catalyst_voices/packages/catalyst_voices_models/lib/src/space.dart b/catalyst_voices/packages/catalyst_voices_models/lib/src/space.dart index ebe3214a430..ecb202ad14f 100644 --- a/catalyst_voices/packages/catalyst_voices_models/lib/src/space.dart +++ b/catalyst_voices/packages/catalyst_voices_models/lib/src/space.dart @@ -1,8 +1,8 @@ /// Main spaces between which user can navigate. 
enum Space { - treasury, discovery, workspace, voting, - fundedProjects; + fundedProjects, + treasury; }