diff --git a/.github/scripts/downstream-project-spl-common.sh b/.github/scripts/downstream-project-spl-common.sh index c6dcfaca007867..861be12c7d1a45 100644 --- a/.github/scripts/downstream-project-spl-common.sh +++ b/.github/scripts/downstream-project-spl-common.sh @@ -22,3 +22,6 @@ if semverGT "$project_used_solana_version" "$SOLANA_VER"; then fi ./patch.crates-io.sh "$SOLANA_DIR" + +# anza migration stopgap. can be removed when agave is fully recommended for public usage. +sed -i 's/solana-geyser-plugin-interface/agave-geyser-plugin-interface/g' ./Cargo.toml diff --git a/.github/workflows/release-artifacts-auto.yml b/.github/workflows/release-artifacts-auto.yml index a8309cdffc8a72..0cdd176e04396c 100644 --- a/.github/workflows/release-artifacts-auto.yml +++ b/.github/workflows/release-artifacts-auto.yml @@ -14,14 +14,12 @@ concurrency: jobs: release-artifacts: - if: github.repository == 'solana-labs/solana' + if: github.repository == 'anza-xyz/agave' uses: ./.github/workflows/release-artifacts.yml with: commit: ${{ github.sha }} secrets: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL: ${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }} error_reporting: needs: diff --git a/.github/workflows/release-artifacts-manually.yml b/.github/workflows/release-artifacts-manually.yml index 35de72922c32c8..fe5c1b03b638b3 100644 --- a/.github/workflows/release-artifacts-manually.yml +++ b/.github/workflows/release-artifacts-manually.yml @@ -14,6 +14,4 @@ jobs: with: commit: ${{ github.event.inputs.commit }} secrets: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL: ${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }} diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 98dc697920262c..7aec77f0dac45f 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -7,11 +7,7 @@ on: required: false type: string secrets: - AWS_ACCESS_KEY_ID: - required: true - AWS_SECRET_ACCESS_KEY: - required: true - AWS_S3_BUCKET: + GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL: required: true jobs: @@ -47,17 +43,8 @@ jobs: id: build shell: bash run: | - choco install openssl --version=3.1.1 - if [[ -d "C:\Program Files\OpenSSL" ]]; then - echo "OPENSSL_DIR: C:\Program Files\OpenSSL" - export OPENSSL_DIR="C:\Program Files\OpenSSL" - elif [[ -d "C:\Program Files\OpenSSL-Win64" ]]; then - echo "OPENSSL_DIR: C:\Program Files\OpenSSL-Win64" - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" - else - echo "can't determine OPENSSL_DIR" - exit 1 - fi + vcpkg install openssl:x64-windows-static-md + vcpkg integrate install choco install protoc export PROTOC="C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe" source /tmp/env.sh @@ -71,19 +58,19 @@ jobs: shell: bash run: | FOLDER_NAME=${{ steps.build.outputs.tag || steps.build.outputs.channel }} - mkdir -p "github-action-s3-upload/$FOLDER_NAME" - cp -v "solana-release-x86_64-pc-windows-msvc.tar.bz2" "github-action-s3-upload/$FOLDER_NAME/" - cp -v "solana-release-x86_64-pc-windows-msvc.yml" "github-action-s3-upload/$FOLDER_NAME/" - cp -v "solana-install-init-x86_64-pc-windows-msvc"* "github-action-s3-upload/$FOLDER_NAME" + mkdir -p "windows-release/$FOLDER_NAME" + cp -v 
"solana-release-x86_64-pc-windows-msvc.tar.bz2" "windows-release/$FOLDER_NAME/" + cp -v "solana-release-x86_64-pc-windows-msvc.yml" "windows-release/$FOLDER_NAME/" + cp -v "agave-install-init-x86_64-pc-windows-msvc"* "windows-release/$FOLDER_NAME" - name: Upload Artifacts if: ${{ steps.build.outputs.channel != '' || steps.build.outputs.tag != '' }} uses: actions/upload-artifact@v3 with: name: windows-artifact - path: github-action-s3-upload/ + path: windows-release/ - windows-s3-upload: + windows-gcs-upload: if: ${{ needs.windows-build.outputs.channel != '' || needs.windows-build.outputs.tag != '' }} needs: [windows-build] runs-on: ubuntu-20.04 @@ -92,18 +79,16 @@ jobs: uses: actions/download-artifact@v3 with: name: windows-artifact - path: ./github-action-s3-upload + path: ./windows-release - - name: Upload - uses: jakejarvis/s3-sync-action@master + - name: Setup crediential + uses: "google-github-actions/auth@v2" with: - args: --acl public-read --follow-symlinks - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} - AWS_REGION: "us-west-1" - SOURCE_DIR: "github-action-s3-upload" + credentials_json: "${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }}" + + - name: Upload files to GCS + run: | + gcloud storage cp --recursive windows-release/* gs://anza-release/ windows-gh-release: if: ${{ needs.windows-build.outputs.tag != '' }} @@ -114,7 +99,7 @@ jobs: uses: actions/download-artifact@v3 with: name: windows-artifact - path: ./github-action-s3-upload + path: .windows-release/ - name: Release uses: softprops/action-gh-release@v1 @@ -122,4 +107,4 @@ jobs: tag_name: ${{ needs.windows-build.outputs.tag }} draft: true files: | - github-action-s3-upload/${{ needs.windows-build.outputs.tag }}/* + windows-release/${{ needs.windows-build.outputs.tag }}/* diff --git a/.mergify.yml b/.mergify.yml index ef576943d5d635..19f9b8f116a78a 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -33,7 +33,7 @@ pull_request_rules: actions: request_reviews: teams: - - "@solana-labs/community-pr-subscribers" + - "@anza-xyz/community-pr-subscribers" - name: label changes from monorepo-triage conditions: - author≠@core-contributors @@ -50,7 +50,7 @@ pull_request_rules: - name: automatic merge (squash) on CI success conditions: - and: - - status-success=buildkite/solana + - status-success=buildkite/agave - status-success=ci-gate - label=automerge - label!=no-automerge @@ -102,7 +102,7 @@ pull_request_rules: actions: backport: assignees: &BackportAssignee - - "{{ merged_by|replace('mergify[bot]', label|select('equalto', 'community')|first|default(author)|replace('community', '@solana-labs/community-pr-subscribers')) }}" + - "{{ merged_by|replace('mergify[bot]', label|select('equalto', 'community')|first|default(author)|replace('community', '@anza-xyz/community-pr-subscribers')) }}" title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true labels: diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index c2dd13e32551ba..00000000000000 --- a/.travis.yml +++ /dev/null @@ -1,94 +0,0 @@ -branches: - only: - - master - - /^v\d+\.\d+/ - -notifications: - email: false - slack: - on_success: change - if: NOT type = pull_request - secure: 
F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU= - -os: linux -dist: bionic -language: minimal - -jobs: - include: - - &release-artifacts - if: type IN (api, cron) OR tag IS present - name: "macOS release artifacts" - os: osx - osx_image: xcode12 - language: rust - rust: - - stable - install: - - source ci/rust-version.sh - - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" - - readlink -f . - - brew install gnu-tar - - PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH" - - tar --version - script: - - source ci/env.sh - - rustup set profile default - - ci/publish-tarball.sh - deploy: - - provider: s3 - access_key_id: $AWS_ACCESS_KEY_ID - secret_access_key: $AWS_SECRET_ACCESS_KEY - bucket: release.solana.com - region: us-west-1 - skip_cleanup: true - acl: public_read - local_dir: travis-s3-upload - on: - all_branches: true - - provider: releases - token: $GITHUB_TOKEN - skip_cleanup: true - file_glob: true - file: travis-release-upload/* - on: - tags: true - - <<: *release-artifacts - name: "Windows release artifacts" - os: windows - install: - - choco install openssl - - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" - - source ci/rust-version.sh - - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" - - readlink -f . 
- # Linux release artifacts are still built by ci/buildkite-secondary.yml - #- <<: *release-artifacts - # name: "Linux release artifacts" - # os: linux - # before_install: - # - sudo apt-get install libssl-dev libudev-dev - - # docs pull request - - name: "docs" - if: type IN (push, pull_request) OR tag IS present - language: node_js - node_js: - - "lts/*" - - services: - - docker - - cache: - directories: - - ~/.npm - - before_install: - - source ci/env.sh - - .travis/channel_restriction.sh edge beta || travis_terminate 0 - - .travis/affects.sh docs/ .travis || travis_terminate 0 - - cd docs/ - - source .travis/before_install.sh - - script: - - source .travis/script.sh diff --git a/Cargo.lock b/Cargo.lock index 650b369d205c5c..7c18c3a1dbcc93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -62,6 +62,217 @@ dependencies = [ "zeroize", ] +[[package]] +name = "agave-cargo-registry" +version = "1.19.0" +dependencies = [ + "clap 2.33.3", + "flate2", + "hex", + "hyper", + "log", + "rustc_version 0.4.0", + "serde", + "serde_json", + "sha2 0.10.8", + "solana-clap-utils", + "solana-cli", + "solana-cli-config", + "solana-cli-output", + "solana-logger", + "solana-remote-wallet", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-version", + "tar", + "tempfile", + "tokio", + "toml 0.8.10", +] + +[[package]] +name = "agave-geyser-plugin-interface" +version = "1.19.0" +dependencies = [ + "log", + "solana-sdk", + "solana-transaction-status", + "thiserror", +] + +[[package]] +name = "agave-install" +version = "1.19.0" +dependencies = [ + "atty", + "bincode", + "bzip2", + "chrono", + "clap 2.33.3", + "console", + "crossbeam-channel", + "ctrlc", + "dirs-next", + "indicatif", + "lazy_static", + "nix 0.26.4", + "reqwest", + "scopeguard", + "semver 1.0.22", + "serde", + "serde_yaml 0.8.26", + "serde_yaml 0.9.32", + "solana-clap-utils", + "solana-config-program", + "solana-logger", + "solana-rpc-client", + "solana-sdk", + "solana-version", + "tar", + "tempfile", + "url 2.5.0", + "winapi 0.3.9", + "winreg", +] + +[[package]] +name = "agave-ledger-tool" +version = "1.19.0" +dependencies = [ + "assert_cmd", + "bs58", + "bytecount", + "chrono", + "clap 2.33.3", + "crossbeam-channel", + "csv", + "dashmap", + "futures 0.3.30", + "histogram", + "itertools", + "log", + "num_cpus", + "regex", + "serde", + "serde_json", + "signal-hook", + "solana-account-decoder", + "solana-accounts-db", + "solana-bpf-loader-program", + "solana-clap-utils", + "solana-cli-output", + "solana-core", + "solana-cost-model", + "solana-entry", + "solana-geyser-plugin-manager", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-measure", + "solana-program-runtime", + "solana-rpc", + "solana-runtime", + "solana-sdk", + "solana-stake-program", + "solana-storage-bigtable", + "solana-streamer", + "solana-svm", + "solana-transaction-status", + "solana-unified-scheduler-pool", + "solana-version", + "solana-vote-program", + "solana_rbpf", + "thiserror", + "tikv-jemallocator", + "tokio", +] + +[[package]] +name = "agave-validator" +version = "1.19.0" +dependencies = [ + "agave-geyser-plugin-interface", + "chrono", + "clap 2.33.3", + "console", + "core_affinity", + "crossbeam-channel", + "fd-lock", + "indicatif", + "itertools", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "lazy_static", + "libc", + "libloading", + "log", + "num_cpus", + "rand 0.8.5", + "rayon", + "serde", + "serde_json", + "serde_yaml 0.9.32", + "signal-hook", + "solana-account-decoder", + 
"solana-accounts-db", + "solana-clap-utils", + "solana-cli-config", + "solana-core", + "solana-download-utils", + "solana-entry", + "solana-faucet", + "solana-genesis-utils", + "solana-geyser-plugin-manager", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-metrics", + "solana-net-utils", + "solana-perf", + "solana-poh", + "solana-rpc", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-runtime", + "solana-sdk", + "solana-send-transaction-service", + "solana-storage-bigtable", + "solana-streamer", + "solana-svm", + "solana-test-validator", + "solana-tpu-client", + "solana-unified-scheduler-pool", + "solana-version", + "solana-vote-program", + "spl-token-2022", + "symlink", + "thiserror", + "tikv-jemallocator", + "tokio", +] + +[[package]] +name = "agave-watchtower" +version = "1.19.0" +dependencies = [ + "clap 2.33.3", + "humantime", + "log", + "solana-clap-utils", + "solana-cli-config", + "solana-cli-output", + "solana-logger", + "solana-metrics", + "solana-notifier", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-version", +] + [[package]] name = "ahash" version = "0.7.6" @@ -3183,9 +3394,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -5179,6 +5390,7 @@ dependencies = [ "bytemuck", "byteorder", "bzip2", + "criterion", "crossbeam-channel", "dashmap", "ed25519-dalek", @@ -5482,35 +5694,6 @@ dependencies = [ "tar", ] -[[package]] -name = "solana-cargo-registry" -version = "1.19.0" -dependencies = [ - "clap 2.33.3", - "flate2", - "hex", - "hyper", - "log", - "rustc_version 0.4.0", - "serde", - "serde_json", - "sha2 0.10.8", - "solana-clap-utils", - "solana-cli", - "solana-cli-config", - "solana-cli-output", - "solana-logger", - "solana-remote-wallet", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-sdk", - "solana-version", - "tar", - "tempfile", - "tokio", - "toml 0.8.10", -] - [[package]] name = "solana-cargo-test-bpf" version = "1.19.0" @@ -6040,31 +6223,20 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "solana-geyser-plugin-interface" -version = "1.19.0" -dependencies = [ - "log", - "solana-sdk", - "solana-transaction-status", - "thiserror", -] - [[package]] name = "solana-geyser-plugin-manager" version = "1.19.0" dependencies = [ + "agave-geyser-plugin-interface", "bs58", "crossbeam-channel", "json5", "jsonrpc-core", - "jsonrpc-server-utils", "libloading", "log", "serde_json", "solana-accounts-db", "solana-entry", - "solana-geyser-plugin-interface", "solana-ledger", "solana-measure", "solana-metrics", @@ -6073,6 +6245,7 @@ dependencies = [ "solana-sdk", "solana-transaction-status", "thiserror", + "tokio", ] [[package]] @@ -6126,41 +6299,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-install" -version = "1.19.0" -dependencies = [ - "atty", - "bincode", - "bzip2", - "chrono", - "clap 2.33.3", - "console", - "crossbeam-channel", - "ctrlc", - "dirs-next", - "indicatif", - "lazy_static", - "nix 0.26.4", - "reqwest", - "scopeguard", - "semver 1.0.22", - "serde", - "serde_yaml 0.8.26", - "serde_yaml 0.9.32", - "solana-clap-utils", - "solana-config-program", - "solana-logger", - "solana-rpc-client", - "solana-sdk", - "solana-version", - "tar", - "tempfile", - "url 2.5.0", - 
"winapi 0.3.9", - "winreg", -] - [[package]] name = "solana-keygen" version = "1.19.0" @@ -6248,58 +6386,6 @@ dependencies = [ "trees", ] -[[package]] -name = "solana-ledger-tool" -version = "1.19.0" -dependencies = [ - "assert_cmd", - "bs58", - "bytecount", - "chrono", - "clap 2.33.3", - "crossbeam-channel", - "csv", - "dashmap", - "futures 0.3.30", - "histogram", - "itertools", - "log", - "num_cpus", - "regex", - "serde", - "serde_json", - "signal-hook", - "solana-account-decoder", - "solana-accounts-db", - "solana-bpf-loader-program", - "solana-clap-utils", - "solana-cli-output", - "solana-core", - "solana-cost-model", - "solana-entry", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-ledger", - "solana-logger", - "solana-measure", - "solana-program-runtime", - "solana-rpc", - "solana-runtime", - "solana-sdk", - "solana-stake-program", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - "solana-transaction-status", - "solana-unified-scheduler-pool", - "solana-version", - "solana-vote-program", - "solana_rbpf", - "thiserror", - "tikv-jemallocator", - "tokio", -] - [[package]] name = "solana-loader-v4-program" version = "1.19.0" @@ -7435,7 +7521,9 @@ dependencies = [ name = "solana-unified-scheduler-logic" version = "1.19.0" dependencies = [ + "assert_matches", "solana-sdk", + "static_assertions", ] [[package]] @@ -7444,6 +7532,7 @@ version = "1.19.0" dependencies = [ "assert_matches", "crossbeam-channel", + "dashmap", "derivative", "log", "solana-ledger", @@ -7463,72 +7552,6 @@ dependencies = [ "solana-metrics", ] -[[package]] -name = "solana-validator" -version = "1.19.0" -dependencies = [ - "chrono", - "clap 2.33.3", - "console", - "core_affinity", - "crossbeam-channel", - "fd-lock", - "indicatif", - "itertools", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-ipc-server", - "jsonrpc-server-utils", - "lazy_static", - "libc", - "libloading", - "log", - "num_cpus", - "rand 0.8.5", - "rayon", - "serde", - "serde_json", - "serde_yaml 0.9.32", - "signal-hook", - "solana-account-decoder", - "solana-accounts-db", - "solana-clap-utils", - "solana-cli-config", - "solana-core", - "solana-download-utils", - "solana-entry", - "solana-faucet", - "solana-genesis-utils", - "solana-geyser-plugin-interface", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-ledger", - "solana-logger", - "solana-metrics", - "solana-net-utils", - "solana-perf", - "solana-poh", - "solana-rpc", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-runtime", - "solana-sdk", - "solana-send-transaction-service", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - "solana-test-validator", - "solana-tpu-client", - "solana-unified-scheduler-pool", - "solana-version", - "solana-vote-program", - "spl-token-2022", - "symlink", - "thiserror", - "tikv-jemallocator", -] - [[package]] name = "solana-version" version = "1.19.0" @@ -7585,25 +7608,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-watchtower" -version = "1.19.0" -dependencies = [ - "clap 2.33.3", - "humantime", - "log", - "solana-clap-utils", - "solana-cli-config", - "solana-cli-output", - "solana-logger", - "solana-metrics", - "solana-notifier", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-sdk", - "solana-version", -] - [[package]] name = "solana-wen-restart" version = "1.19.0" diff --git a/Cargo.toml b/Cargo.toml index 66436c9cfb3fd8..4b8ae12dab0078 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -236,7 +236,6 @@ jsonrpc-derive = "18.0.0" 
jsonrpc-http-server = "18.0.0" jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" -jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" libc = "0.2.153" libloading = "0.7.4" @@ -317,7 +316,7 @@ solana-bench-tps = { path = "bench-tps", version = "=1.19.0" } solana-bloom = { path = "bloom", version = "=1.19.0" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.19.0" } solana-bucket-map = { path = "bucket_map", version = "=1.19.0" } -solana-cargo-registry = { path = "cargo-registry", version = "=1.19.0" } +agave-cargo-registry = { path = "cargo-registry", version = "=1.19.0" } solana-clap-utils = { path = "clap-utils", version = "=1.19.0" } solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.19.0" } solana-cli = { path = "cli", version = "=1.19.0" } @@ -336,7 +335,7 @@ solana-frozen-abi = { path = "frozen-abi", version = "=1.19.0" } solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=1.19.0" } solana-genesis = { path = "genesis", version = "=1.19.0" } solana-genesis-utils = { path = "genesis-utils", version = "=1.19.0" } -solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.19.0" } +agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.19.0" } solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.19.0" } solana-gossip = { path = "gossip", version = "=1.19.0" } solana-ledger = { path = "ledger", version = "=1.19.0" } diff --git a/README.md b/README.md index c6183f6ab6183e..bbaeb3d019a658 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

-    <img alt="Solana" src="…" />
+    <img alt="Solana" src="…" />

@@ -113,35 +113,3 @@ problem is solved by this code?" On the other hand, if a test does fail and you better way to solve the same problem, a Pull Request with your solution would most certainly be welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please send us that patch! - -# Disclaimer - -All claims, content, designs, algorithms, estimates, roadmaps, -specifications, and performance measurements described in this project -are done with the Solana Labs, Inc. (“SL”) good faith efforts. It is up to -the reader to check and validate their accuracy and truthfulness. -Furthermore, nothing in this project constitutes a solicitation for -investment. - -Any content produced by SL or developer resources that SL provides are -for educational and inspirational purposes only. SL does not encourage, -induce or sanction the deployment, integration or use of any such -applications (including the code comprising the Solana blockchain -protocol) in violation of applicable laws or regulations and hereby -prohibits any such deployment, integration or use. This includes the use of -any such applications by the reader (a) in violation of export control -or sanctions laws of the United States or any other applicable -jurisdiction, (b) if the reader is located in or ordinarily resident in -a country or territory subject to comprehensive sanctions administered -by the U.S. Office of Foreign Assets Control (OFAC), or (c) if the -reader is or is working on behalf of a Specially Designated National -(SDN) or a person subject to similar blocking or denied party -prohibitions. - -The reader should be aware that U.S. export control and sanctions laws prohibit -U.S. persons (and other persons that are subject to such laws) from transacting -with persons in certain countries and territories or that are on the SDN list. -Accordingly, there is a risk to individuals that other persons using any of the -code contained in this repo, or a derivation thereof, may be sanctioned persons -and that transactions with such persons would be a violation of U.S. export -controls and sanctions law. 
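The solana-to-agave crate renames in this patch (agave-cargo-registry, agave-geyser-plugin-interface, agave-install, agave-ledger-tool, agave-validator, agave-watchtower) are package renames only; the library APIs are unchanged. As a hedged illustration of what the rename means for a downstream geyser plugin, here is a minimal sketch — the `NoopPlugin` type is hypothetical, and it assumes the renamed crate keeps its `geyser_plugin_interface` module and `GeyserPlugin` trait as-is:

```rust
// Hypothetical minimal plugin: after this migration, a downstream Cargo.toml
// depends on `agave-geyser-plugin-interface` (the `sed` stopgap in
// downstream-project-spl-common.sh performs exactly this rewrite), and the
// import uses the renamed crate. The trait itself is unchanged.
use agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin;

#[derive(Debug)]
struct NoopPlugin;

impl GeyserPlugin for NoopPlugin {
    // `name` is the only trait method without a default implementation.
    fn name(&self) -> &'static str {
        "noop-plugin"
    }
}
```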
diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index b986c17de0636b..0fc5a381fbda5e 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -42,7 +42,7 @@ regex = { workspace = true } seqlock = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } -smallvec = { workspace = true } +smallvec = { workspace = true, features = ["const_generics"] } solana-bucket-map = { workspace = true } solana-config-program = { workspace = true } solana-frozen-abi = { workspace = true } @@ -70,6 +70,7 @@ name = "solana_accounts_db" [dev-dependencies] assert_matches = { workspace = true } +criterion = { workspace = true } ed25519-dalek = { workspace = true } libsecp256k1 = { workspace = true } memoffset = { workspace = true } @@ -89,3 +90,7 @@ rustc_version = { workspace = true } [features] dev-context-only-utils = [] + +[[bench]] +name = "bench_hashing" +harness = false diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs new file mode 100644 index 00000000000000..3158f78c7a938f --- /dev/null +++ b/accounts-db/benches/bench_hashing.rs @@ -0,0 +1,43 @@ +use { + criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}, + solana_accounts_db::accounts_db::AccountsDb, + solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, +}; + +const KB: usize = 1024; +const MB: usize = KB * KB; + +const DATA_SIZES: [usize; 6] = [ + 0, // the smallest account + 165, // the size of an spl token account + 200, // the size of a stake account + KB, // a medium sized account + MB, // a large sized account + 10 * MB, // the largest account +]; + +/// The number of bytes of *non account data* that are also hashed as +/// part of computing an account's hash. +/// +/// Ensure this constant stays in sync with the value of `META_SIZE` in +/// AccountsDb::hash_account_data(). 
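+/// That value is the sum of the serialized meta fields hashed alongside the
+/// account data: 8 (lamports) + 8 (rent_epoch) + 1 (executable) + 32 (owner)
+/// + 32 (pubkey) = 81 bytes.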
+const META_SIZE: usize = 81;
+
+fn bench_hash_account(c: &mut Criterion) {
+    let lamports = 123_456_789;
+    let owner = Pubkey::default();
+    let address = Pubkey::default();
+
+    let mut group = c.benchmark_group("hash_account");
+    for data_size in DATA_SIZES {
+        let num_bytes = META_SIZE.checked_add(data_size).unwrap();
+        group.throughput(Throughput::Bytes(num_bytes as u64));
+        let account = AccountSharedData::new(lamports, data_size, &owner);
+        group.bench_function(BenchmarkId::new("data_size", data_size), |b| {
+            b.iter(|| AccountsDb::hash_account(&account, &address));
+        });
+    }
+}
+
+criterion_group!(benches, bench_hash_account,);
+criterion_main!(benches);
diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs
index 69c24d7be75f7d..b6c8d72042097a 100644
--- a/accounts-db/src/account_storage/meta.rs
+++ b/accounts-db/src/account_storage/meta.rs
@@ -128,7 +128,8 @@ impl<'storage> StoredAccountMeta<'storage> {
     pub fn hash(&self) -> &'storage AccountHash {
         match self {
             Self::AppendVec(av) => av.hash(),
-            Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH),
+            // tiered-storage has deprecated the use of AccountHash
+            Self::Hot(_) => &DEFAULT_ACCOUNT_HASH,
         }
     }

diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index 371db9eb08c095..33a57d56461c78 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -80,11 +80,20 @@ impl AccountLocks {
             if *count == 0 {
                 occupied_entry.remove_entry();
             }
+        } else {
+            debug_assert!(
+                false,
+                "Attempted to remove a read-lock for a key that wasn't read-locked"
+            );
         }
     }

     fn unlock_write(&mut self, key: &Pubkey) {
-        self.write_locks.remove(key);
+        let removed = self.write_locks.remove(key);
+        debug_assert!(
+            removed,
+            "Attempted to remove a write-lock for a key that wasn't write-locked"
+        );
     }
 }

@@ -618,14 +627,16 @@ impl Accounts {
     #[allow(clippy::needless_collect)]
     pub fn unlock_accounts<'a>(
         &self,
-        txs: impl Iterator<Item = &'a SanitizedTransaction>,
-        results: &[Result<()>],
+        txs_and_results: impl Iterator<Item = (&'a SanitizedTransaction, &'a Result<()>)>,
     ) {
-        let keys: Vec<_> = txs
-            .zip(results)
+        let keys: Vec<_> = txs_and_results
             .filter(|(_, res)| res.is_ok())
             .map(|(tx, _)| tx.get_account_locks_unchecked())
             .collect();
+        if keys.is_empty() {
+            return;
+        }
+
         let mut account_locks = self.account_locks.lock().unwrap();
         debug!("bank unlock accounts");
         keys.into_iter().for_each(|keys| {
@@ -812,6 +823,7 @@ mod tests {
         },
         std::{
             borrow::Cow,
+            iter,
             sync::atomic::{AtomicBool, AtomicU64, Ordering},
             thread, time,
         },
@@ -1099,8 +1111,8 @@ mod tests {
         let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())];
         let results = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS);
-        assert_eq!(results[0], Ok(()));
-        accounts.unlock_accounts(txs.iter(), &results);
+        assert_eq!(results, vec![Ok(())]);
+        accounts.unlock_accounts(txs.iter().zip(&results));
     }

     // Disallow over MAX_TX_ACCOUNT_LOCKS
@@ -1156,7 +1168,7 @@ mod tests {
         let tx = new_sanitized_tx(&[&keypair0], message, Hash::default());
         let results0 = accounts.lock_accounts([tx.clone()].iter(), MAX_TX_ACCOUNT_LOCKS);

-        assert!(results0[0].is_ok());
+        assert_eq!(results0, vec![Ok(())]);
         assert_eq!(
             *accounts
                 .account_locks
@@ -1190,9 +1202,13 @@ mod tests {
         let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default());
         let txs = vec![tx0, tx1];
         let results1 = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS);
-
-        assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times
-        assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable
+        assert_eq!(
+            results1,
+            vec![
+                Ok(()), // Read-only account (keypair1) can be referenced multiple times
+                Err(TransactionError::AccountInUse), // Read-only account (keypair1) cannot also be locked as writable
+            ],
+        );
         assert_eq!(
             *accounts
                 .account_locks
@@ -1204,8 +1220,8 @@ mod tests {
             2
         );

-        accounts.unlock_accounts([tx].iter(), &results0);
-        accounts.unlock_accounts(txs.iter(), &results1);
+        accounts.unlock_accounts(iter::once(&tx).zip(&results0));
+        accounts.unlock_accounts(txs.iter().zip(&results1));
         let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
         let message = Message::new_with_compiled_instructions(
             1,
             0,
             2,
@@ -1217,7 +1233,10 @@ mod tests {
         );
         let tx = new_sanitized_tx(&[&keypair1], message, Hash::default());
         let results2 = accounts.lock_accounts([tx].iter(), MAX_TX_ACCOUNT_LOCKS);
-        assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable
+        assert_eq!(
+            results2,
+            vec![Ok(())] // Now keypair1 account can be locked as writable
+        );

         // Check that read-only lock with zero references is deleted
         assert!(accounts
@@ -1285,7 +1304,7 @@ mod tests {
                         counter_clone.clone().fetch_add(1, Ordering::SeqCst);
                     }
                 }
-                accounts_clone.unlock_accounts(txs.iter(), &results);
+                accounts_clone.unlock_accounts(txs.iter().zip(&results));
                 if exit_clone.clone().load(Ordering::Relaxed) {
                     break;
                 }
@@ -1301,7 +1320,7 @@ mod tests {
                 thread::sleep(time::Duration::from_millis(50));
                 assert_eq!(counter_value, counter_clone.clone().load(Ordering::SeqCst));
             }
-            accounts_arc.unlock_accounts(txs.iter(), &results);
+            accounts_arc.unlock_accounts(txs.iter().zip(&results));
             thread::sleep(time::Duration::from_millis(50));
         }
         exit.store(true, Ordering::Relaxed);
@@ -1442,9 +1461,14 @@ mod tests {
             MAX_TX_ACCOUNT_LOCKS,
         );

-        assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times
-        assert!(results[1].is_err()); // is not locked due to !qos_results[1].is_ok()
-        assert!(results[2].is_ok()); // Read-only account (keypair0) can be referenced multiple times
+        assert_eq!(
+            results,
+            vec![
+                Ok(()), // Read-only account (keypair0) can be referenced multiple times
+                Err(TransactionError::WouldExceedMaxBlockCostLimit), // is not locked due to !qos_results[1].is_ok()
+                Ok(()), // Read-only account (keypair0) can be referenced multiple times
+            ],
+        );

         // verify that keypair0 read-only lock twice (for tx0 and tx2)
         assert_eq!(
@@ -1466,7 +1490,7 @@ mod tests {
             .get(&keypair2.pubkey())
             .is_none());

-        accounts.unlock_accounts(txs.iter(), &results);
+        accounts.unlock_accounts(txs.iter().zip(&results));

         // check all locks to be removed
         assert!(accounts
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 1f3c36876f4531..cf4d17745b1b73 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -6119,17 +6119,18 @@ impl AccountsDb {
         }

         let mut hasher = blake3::Hasher::new();

-        // allocate 128 bytes buffer on the stack
-        const BUFFER_SIZE: usize = 128;
-        const METADATA_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */;
-        const REMAINING_SIZE: usize = BUFFER_SIZE - METADATA_SIZE;
+        // allocate a buffer on the stack that's big enough
+        // to hold a token account or a stake account
+        const META_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */;
+        const DATA_SIZE: usize = 200; // stake accounts are 200 B and token accounts are 165-182ish B
+        const BUFFER_SIZE: usize = META_SIZE + DATA_SIZE;
         let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new();

         // collect lamports, rent_epoch into buffer to hash
         buffer.extend_from_slice(&lamports.to_le_bytes());
         buffer.extend_from_slice(&rent_epoch.to_le_bytes());

-        if data.len() > REMAINING_SIZE {
+        if data.len() > DATA_SIZE {
             // For larger accounts whose data can't fit into the buffer, update the hash now.
             hasher.update(&buffer);
             buffer.clear();
diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs
index 7883f852d1e3f2..b7994fe4354118 100644
--- a/accounts-db/src/lib.rs
+++ b/accounts-db/src/lib.rs
@@ -37,6 +37,7 @@ pub mod secondary_index;
 pub mod shared_buffer_reader;
 pub mod sorted_storages;
 pub mod stake_rewards;
+pub mod starting_snapshot_storages;
 pub mod storable_accounts;
 pub mod tiered_storage;
 pub mod utils;
diff --git a/accounts-db/src/starting_snapshot_storages.rs b/accounts-db/src/starting_snapshot_storages.rs
new file mode 100644
index 00000000000000..cc5e26c61872b7
--- /dev/null
+++ b/accounts-db/src/starting_snapshot_storages.rs
@@ -0,0 +1,19 @@
+use {crate::accounts_db::AccountStorageEntry, std::sync::Arc};
+
+/// Snapshot storages that the node loaded from
+///
+/// This is used to support fastboot. Since fastboot reuses existing storages, we must carefully
+/// handle the storages used to load at startup. If we do not handle these storages properly,
+/// restarting from the same local state (i.e. bank snapshot) may fail.
+#[derive(Debug)]
+pub enum StartingSnapshotStorages {
+    /// Starting from genesis has no storages yet
+    Genesis,
+    /// Starting from a snapshot archive always extracts the storages from the archive, so no
+    /// special handling is necessary to preserve them.
+    Archive,
+    /// Starting from local state must preserve the loaded storages. These storages must *not* be
+    /// recycled or removed prior to taking the next snapshot, otherwise restarting from the same
+    /// bank snapshot may fail.
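+    /// (Holding `Arc`s here keeps the storage entries alive for as long as this value lives.)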
+    Fastboot(Vec<Arc<AccountStorageEntry>>),
+}
diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs
index a6f4ea89428bf9..2f8ebac65e3b57 100644
--- a/accounts-db/src/tiered_storage.rs
+++ b/accounts-db/src/tiered_storage.rs
@@ -362,15 +362,15 @@ mod tests {
         let mut expected_accounts_map = HashMap::new();
         for i in 0..num_accounts {
-            let (account, address, account_hash, _write_version) = storable_accounts.get(i);
-            expected_accounts_map.insert(address, (account, account_hash));
+            let (account, address, _account_hash, _write_version) = storable_accounts.get(i);
+            expected_accounts_map.insert(address, account);
         }

         let mut index_offset = IndexOffset(0);
         let mut verified_accounts = HashSet::new();

         while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() {
-            if let Some((account, account_hash)) = expected_accounts_map.get(stored_meta.pubkey()) {
-                verify_test_account(&stored_meta, *account, stored_meta.pubkey(), account_hash);
+            if let Some(account) = expected_accounts_map.get(stored_meta.pubkey()) {
+                verify_test_account(&stored_meta, *account, stored_meta.pubkey());
                 verified_accounts.insert(stored_meta.pubkey());
             }
             index_offset = next;
diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs
index 1cd80add0c2307..6fc7dec611e9a9 100644
--- a/accounts-db/src/tiered_storage/byte_block.rs
+++ b/accounts-db/src/tiered_storage/byte_block.rs
@@ -95,9 +95,6 @@ impl ByteBlockWriter {
         if let Some(rent_epoch) = opt_fields.rent_epoch {
             size += self.write_pod(&rent_epoch)?;
         }
-        if let Some(hash) = opt_fields.account_hash {
-            size += self.write_pod(hash)?;
-        }

         debug_assert_eq!(size, opt_fields.size());

@@ -191,11 +188,7 @@ impl ByteBlockReader {

 #[cfg(test)]
 mod tests {
-    use {
-        super::*,
-        crate::accounts_hash::AccountHash,
-        solana_sdk::{hash::Hash, stake_history::Epoch},
-    };
+    use {super::*, solana_sdk::stake_history::Epoch};

     fn read_type_unaligned<T>(buffer: &[u8], offset: usize) -> (T, usize) {
         let size = std::mem::size_of::<T>();
@@ -352,19 +345,13 @@ mod tests {
         let mut writer = ByteBlockWriter::new(format);
         let mut opt_fields_vec = vec![];
         let mut some_count = 0;
-        let acc_hash = AccountHash(Hash::new_unique());

         // prepare a vector of optional fields that contains all combinations
         // of Some and None.
         for rent_epoch in [None, Some(test_epoch)] {
-            for account_hash in [None, Some(&acc_hash)] {
-                some_count += rent_epoch.iter().count() + account_hash.iter().count();
+            some_count += rent_epoch.iter().count();

-                opt_fields_vec.push(AccountMetaOptionalFields {
-                    rent_epoch,
-                    account_hash,
-                });
-            }
+            opt_fields_vec.push(AccountMetaOptionalFields { rent_epoch });
             test_epoch += 1;
         }

@@ -396,12 +383,6 @@ mod tests {
                 verified_count += 1;
                 offset += std::mem::size_of::<Epoch>();
             }
-            if let Some(expected_hash) = opt_fields.account_hash {
-                let hash = read_pod::<AccountHash>(&decoded_buffer, offset).unwrap();
-                assert_eq!(hash, expected_hash);
-                verified_count += 1;
-                offset += std::mem::size_of::<AccountHash>();
-            }
         }

         // make sure the number of Some fields matches the number of fields we
diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs
index f662c2e062ee11..34f7915186ba9b 100644
--- a/accounts-db/src/tiered_storage/hot.rs
+++ b/accounts-db/src/tiered_storage/hot.rs
@@ -242,19 +242,6 @@ impl TieredAccountMeta for HotAccountMeta {
             .flatten()
     }

-    /// Returns the account hash by parsing the specified account block. None
-    /// will be returned if this account does not persist this optional field.
-    fn account_hash<'a>(&self, account_block: &'a [u8]) -> Option<&'a AccountHash> {
-        self.flags()
-            .has_account_hash()
-            .then(|| {
-                let offset = self.optional_fields_offset(account_block)
-                    + AccountMetaOptionalFields::account_hash_offset(self.flags());
-                byte_block::read_pod::<AccountHash>(account_block, offset)
-            })
-            .flatten()
-    }
-
     /// Returns the offset of the optional fields based on the specified account
     /// block.
     fn optional_fields_offset(&self, account_block: &[u8]) -> usize {
@@ -488,9 +475,6 @@ fn write_optional_fields(
     if let Some(rent_epoch) = opt_fields.rent_epoch {
         size += file.write_pod(&rent_epoch)?;
     }
-    if let Some(hash) = opt_fields.account_hash {
-        size += file.write_pod(hash)?;
-    }

     debug_assert_eq!(size, opt_fields.size());

@@ -520,12 +504,8 @@ impl HotStorageWriter {
         account_data: &[u8],
         executable: bool,
         rent_epoch: Option<Epoch>,
-        account_hash: Option<&AccountHash>,
     ) -> TieredStorageResult<usize> {
-        let optional_fields = AccountMetaOptionalFields {
-            rent_epoch,
-            account_hash,
-        };
+        let optional_fields = AccountMetaOptionalFields { rent_epoch };

         let mut flags = AccountMetaFlags::new_from(&optional_fields);
         flags.set_executable(executable);
@@ -574,7 +554,7 @@ impl HotStorageWriter {
         let total_input_accounts = len - skip;
         let mut stored_infos = Vec::with_capacity(total_input_accounts);
         for i in skip..len {
-            let (account, address, account_hash, _write_version) = accounts.get(i);
+            let (account, address, _account_hash, _write_version) = accounts.get(i);
             let index_entry = AccountIndexWriterEntry {
                 address,
                 offset: HotAccountOffset::new(cursor)?,
@@ -582,7 +562,7 @@ impl HotStorageWriter {

             // Obtain necessary fields from the account, or default fields
             // for a zero-lamport account in the None case.
-            let (lamports, owner, data, executable, rent_epoch, account_hash) = account
+            let (lamports, owner, data, executable, rent_epoch) = account
                 .map(|acc| {
                     (
                         acc.lamports(),
                         acc.owner(),
                         acc.data(),
                         acc.executable(),
                         // only persist rent_epoch for those rent-paying accounts
                         (acc.rent_epoch() != RENT_EXEMPT_RENT_EPOCH).then_some(acc.rent_epoch()),
-                        Some(account_hash),
                     )
                 })
-                .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None, None));
+                .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None));
             let owner_offset = owners_table.insert(owner);
-            let stored_size = self.write_account(
-                lamports,
-                owner_offset,
-                data,
-                executable,
-                rent_epoch,
-                account_hash,
-            )?;
+            let stored_size =
+                self.write_account(lamports, owner_offset, data, executable, rent_epoch)?;
             cursor += stored_size;

             stored_infos.push(StoredAccountInfo {
@@ -755,11 +728,9 @@ pub mod tests {
         const TEST_PADDING: u8 = 5;
         const TEST_OWNER_OFFSET: OwnerOffset = OwnerOffset(0x1fef_1234);
         const TEST_RENT_EPOCH: Epoch = 7;
-        let acc_hash = AccountHash(Hash::new_unique());

         let optional_fields = AccountMetaOptionalFields {
             rent_epoch: Some(TEST_RENT_EPOCH),
-            account_hash: Some(&acc_hash),
         };

         let flags = AccountMetaFlags::new_from(&optional_fields);
@@ -779,7 +750,6 @@ pub mod tests {
     fn test_hot_account_meta_full() {
         let account_data = [11u8; 83];
         let padding = [0u8; 5];
-        let acc_hash = AccountHash(Hash::new_unique());

         const TEST_LAMPORT: u64 = 2314232137;
         const OWNER_OFFSET: u32 = 0x1fef_1234;
@@ -787,7 +757,6 @@ pub mod tests {

         let optional_fields = AccountMetaOptionalFields {
             rent_epoch: Some(TEST_RENT_EPOCH),
-            account_hash: Some(&acc_hash),
         };

         let flags = AccountMetaFlags::new_from(&optional_fields);
@@ -810,7 +779,6 @@ pub mod tests {
         let meta = byte_block::read_pod::<HotAccountMeta>(&buffer, 0).unwrap();
         assert_eq!(expected_meta, *meta);
         assert!(meta.flags().has_rent_epoch());
-        assert!(meta.flags().has_account_hash());
         assert_eq!(meta.account_data_padding() as usize, padding.len());

         let account_block = &buffer[std::mem::size_of::<HotAccountMeta>()..];
@@ -823,10 +791,6 @@ pub mod tests {
         assert_eq!(account_data.len(), meta.account_data_size(account_block));
         assert_eq!(account_data, meta.account_data(account_block));
         assert_eq!(meta.rent_epoch(account_block), optional_fields.rent_epoch);
-        assert_eq!(
-            (meta.account_hash(account_block).unwrap()),
-            optional_fields.account_hash.unwrap()
-        );
     }

     #[test]
@@ -1334,8 +1298,8 @@ pub mod tests {
                 .unwrap()
                 .unwrap();

-            let (account, address, account_hash, _write_version) = storable_accounts.get(i);
-            verify_test_account(&stored_meta, account, address, account_hash);
+            let (account, address, _account_hash, _write_version) = storable_accounts.get(i);
+            verify_test_account(&stored_meta, account, address);

             assert_eq!(i + 1, next.0 as usize);
         }
@@ -1352,9 +1316,9 @@ pub mod tests {
                 .unwrap()
                 .unwrap();

-            let (account, address, account_hash, _write_version) =
+            let (account, address, _account_hash, _write_version) =
                 storable_accounts.get(stored_info.offset);
-            verify_test_account(&stored_meta, account, address, account_hash);
+            verify_test_account(&stored_meta, account, address);
         }

         // verify get_accounts
@@ -1362,8 +1326,8 @@ pub mod tests {

         // first, we verify everything
         for (i, stored_meta) in accounts.iter().enumerate() {
-            let (account, address, account_hash, _write_version) = storable_accounts.get(i);
-            verify_test_account(stored_meta, account, address, account_hash);
+            let (account, address, _account_hash, _write_version) = storable_accounts.get(i);
+            verify_test_account(stored_meta, account, address);
         }

         // second, we verify various initial position
diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs
index 4e2bb0d95041ca..2aa53e5a4de1ed 100644
--- a/accounts-db/src/tiered_storage/meta.rs
+++ b/accounts-db/src/tiered_storage/meta.rs
@@ -1,7 +1,7 @@
 //! The account meta and related structs for the tiered storage.

 use {
-    crate::{accounts_hash::AccountHash, tiered_storage::owners::OwnerOffset},
+    crate::tiered_storage::owners::OwnerOffset,
     bytemuck::{Pod, Zeroable},
     modular_bitfield::prelude::*,
     solana_sdk::stake_history::Epoch,
@@ -14,12 +14,10 @@ use {
 pub struct AccountMetaFlags {
     /// whether the account meta has rent epoch
     pub has_rent_epoch: bool,
-    /// whether the account meta has account hash
-    pub has_account_hash: bool,
     /// whether the account is executable
     pub executable: bool,
     /// the reserved bits.
-    reserved: B29,
+    reserved: B30,
 }

 // Ensure there are no implicit padding bytes
@@ -70,10 +68,6 @@ pub trait TieredAccountMeta: Sized {
     /// does not persist this optional field.
     fn rent_epoch(&self, _account_block: &[u8]) -> Option<Epoch>;

-    /// Returns the account hash by parsing the specified account block. None
-    /// will be returned if this account does not persist this optional field.
-    fn account_hash<'a>(&self, _account_block: &'a [u8]) -> Option<&'a AccountHash>;
-
     /// Returns the offset of the optional fields based on the specified account
     /// block.
     fn optional_fields_offset(&self, _account_block: &[u8]) -> usize;
@@ -91,7 +85,6 @@ impl AccountMetaFlags {
     pub fn new_from(optional_fields: &AccountMetaOptionalFields) -> Self {
         let mut flags = AccountMetaFlags::default();
         flags.set_has_rent_epoch(optional_fields.rent_epoch.is_some());
-        flags.set_has_account_hash(optional_fields.account_hash.is_some());
         flags.set_executable(false);
         flags
     }
@@ -102,20 +95,15 @@ impl AccountMetaFlags {
 /// Note that the storage representation of the optional fields might be
 /// different from its in-memory representation.
 #[derive(Debug, PartialEq, Eq, Clone)]
-pub struct AccountMetaOptionalFields<'a> {
+pub struct AccountMetaOptionalFields {
     /// the epoch at which its associated account will next owe rent
     pub rent_epoch: Option<Epoch>,
-    /// the hash of its associated account
-    pub account_hash: Option<&'a AccountHash>,
 }

-impl<'a> AccountMetaOptionalFields<'a> {
+impl AccountMetaOptionalFields {
     /// The size of the optional fields in bytes (excluding the boolean flags).
     pub fn size(&self) -> usize {
         self.rent_epoch.map_or(0, |_| std::mem::size_of::<Epoch>())
-            + self
-                .account_hash
-                .map_or(0, |_| std::mem::size_of::<AccountHash>())
     }

     /// Given the specified AccountMetaFlags, returns the size of its
@@ -125,9 +113,6 @@ impl AccountMetaOptionalFields {
         if flags.has_rent_epoch() {
             fields_size += std::mem::size_of::<Epoch>();
         }
-        if flags.has_account_hash() {
-            fields_size += std::mem::size_of::<AccountHash>();
-        }

         fields_size
     }
@@ -137,29 +122,17 @@ impl AccountMetaOptionalFields {
     pub fn rent_epoch_offset(_flags: &AccountMetaFlags) -> usize {
         0
     }
-
-    /// Given the specified AccountMetaFlags, returns the relative offset
-    /// of its account_hash field to the offset of its optional fields entry.
-    pub fn account_hash_offset(flags: &AccountMetaFlags) -> usize {
-        let mut offset = Self::rent_epoch_offset(flags);
-        // rent_epoch is the previous field to account hash
-        if flags.has_rent_epoch() {
-            offset += std::mem::size_of::<Epoch>();
-        }
-        offset
-    }
 }

 #[cfg(test)]
 pub mod tests {
-    use {super::*, solana_sdk::hash::Hash};
+    use super::*;

     #[test]
     fn test_account_meta_flags_new() {
         let flags = AccountMetaFlags::new();

         assert!(!flags.has_rent_epoch());
-        assert!(!flags.has_account_hash());
         assert_eq!(flags.reserved(), 0u32);

         assert_eq!(
@@ -179,20 +152,11 @@ pub mod tests {
         flags.set_has_rent_epoch(true);

         assert!(flags.has_rent_epoch());
-        assert!(!flags.has_account_hash());
-        assert!(!flags.executable());
-        verify_flags_serialization(&flags);
-
-        flags.set_has_account_hash(true);
-
-        assert!(flags.has_rent_epoch());
-        assert!(flags.has_account_hash());
         assert!(!flags.executable());
         verify_flags_serialization(&flags);

         flags.set_executable(true);
         assert!(flags.has_rent_epoch());
-        assert!(flags.has_account_hash());
         assert!(flags.executable());
         verify_flags_serialization(&flags);

@@ -203,84 +167,58 @@ pub mod tests {
     fn update_and_verify_flags(opt_fields: &AccountMetaOptionalFields) {
         let flags: AccountMetaFlags = AccountMetaFlags::new_from(opt_fields);
         assert_eq!(flags.has_rent_epoch(), opt_fields.rent_epoch.is_some());
-        assert_eq!(flags.has_account_hash(), opt_fields.account_hash.is_some());
         assert_eq!(flags.reserved(), 0u32);
     }

     #[test]
     fn test_optional_fields_update_flags() {
         let test_epoch = 5432312;
-        let acc_hash = AccountHash(Hash::new_unique());

         for rent_epoch in [None, Some(test_epoch)] {
-            for account_hash in [None, Some(&acc_hash)] {
-                update_and_verify_flags(&AccountMetaOptionalFields {
-                    rent_epoch,
-                    account_hash,
-                });
-            }
+            update_and_verify_flags(&AccountMetaOptionalFields { rent_epoch });
         }
     }

     #[test]
     fn test_optional_fields_size() {
         let test_epoch = 5432312;
-        let acc_hash = AccountHash(Hash::new_unique());

         for rent_epoch in [None, Some(test_epoch)] {
-            for account_hash in [None, Some(&acc_hash)] {
-                let opt_fields = AccountMetaOptionalFields {
-                    rent_epoch,
-                    account_hash,
-                };
-                assert_eq!(
-                    opt_fields.size(),
-                    rent_epoch.map_or(0, |_| std::mem::size_of::<Epoch>())
-                        + account_hash.map_or(0, |_| std::mem::size_of::<AccountHash>())
-                );
-                assert_eq!(
-                    opt_fields.size(),
-                    AccountMetaOptionalFields::size_from_flags(&AccountMetaFlags::new_from(
-                        &opt_fields
-                    ))
-                );
-            }
+            let opt_fields = AccountMetaOptionalFields { rent_epoch };
+            assert_eq!(
+                opt_fields.size(),
+                rent_epoch.map_or(0, |_| std::mem::size_of::<Epoch>()),
+            );
+            assert_eq!(
+                opt_fields.size(),
+                AccountMetaOptionalFields::size_from_flags(&AccountMetaFlags::new_from(
+                    &opt_fields
+                ))
+            );
         }
     }

     #[test]
     fn test_optional_fields_offset() {
         let test_epoch = 5432312;
-        let acc_hash = AccountHash(Hash::new_unique());

         for rent_epoch in [None, Some(test_epoch)] {
-            for account_hash in [None, Some(&acc_hash)] {
-                let rent_epoch_offset = 0;
-                let account_hash_offset =
-                    rent_epoch_offset + rent_epoch.as_ref().map(std::mem::size_of_val).unwrap_or(0);
-                let derived_size = account_hash_offset
-                    + account_hash
-                        .as_ref()
-                        .map(|acc_hash| std::mem::size_of_val(*acc_hash))
-                        .unwrap_or(0);
-                let opt_fields = AccountMetaOptionalFields {
-                    rent_epoch,
-                    account_hash,
-                };
-                let flags = AccountMetaFlags::new_from(&opt_fields);
-                assert_eq!(
-                    AccountMetaOptionalFields::rent_epoch_offset(&flags),
-                    rent_epoch_offset
-                );
-                assert_eq!(
-                    AccountMetaOptionalFields::account_hash_offset(&flags),
-                    account_hash_offset
-                );
-                assert_eq!(
-                    AccountMetaOptionalFields::size_from_flags(&flags),
-                    derived_size
-                );
-            }
+            let rent_epoch_offset = 0;
+            let derived_size = if rent_epoch.is_some() {
+                std::mem::size_of::<Epoch>()
+            } else {
+                0
+            };
+            let opt_fields = AccountMetaOptionalFields { rent_epoch };
+            let flags = AccountMetaFlags::new_from(&opt_fields);
+            assert_eq!(
+                AccountMetaOptionalFields::rent_epoch_offset(&flags),
+                rent_epoch_offset
+            );
+            assert_eq!(
+                AccountMetaOptionalFields::size_from_flags(&flags),
+                derived_size
+            );
         }
     }
 }
diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs
index 1801b04fcecd80..8f1d2007182a5b 100644
--- a/accounts-db/src/tiered_storage/readable.rs
+++ b/accounts-db/src/tiered_storage/readable.rs
@@ -2,7 +2,6 @@ use {
     crate::{
         account_storage::meta::StoredAccountMeta,
         accounts_file::MatchAccountOwnerError,
-        accounts_hash::AccountHash,
         tiered_storage::{
             footer::{AccountMetaFormat, TieredStorageFooter},
             hot::HotStorageReader,
@@ -40,11 +39,6 @@ impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, M> {
         self.address
     }

-    /// Returns the hash of this account.
-    pub fn hash(&self) -> Option<&'accounts_file AccountHash> {
-        self.meta.account_hash(self.account_block)
-    }
-
     /// Returns the index to this account in its AccountsFile.
pub fn index(&self) -> IndexOffset { self.index diff --git a/accounts-db/src/tiered_storage/test_utils.rs b/accounts-db/src/tiered_storage/test_utils.rs index 2ed2399f30fbaa..f44f20f77cc5dd 100644 --- a/accounts-db/src/tiered_storage/test_utils.rs +++ b/accounts-db/src/tiered_storage/test_utils.rs @@ -48,20 +48,10 @@ pub(super) fn verify_test_account( stored_meta: &StoredAccountMeta<'_>, account: Option<&impl ReadableAccount>, address: &Pubkey, - account_hash: &AccountHash, ) { - let (lamports, owner, data, executable, account_hash) = account - .map(|acc| { - ( - acc.lamports(), - acc.owner(), - acc.data(), - acc.executable(), - // only persist rent_epoch for those rent-paying accounts - Some(*account_hash), - ) - }) - .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); + let (lamports, owner, data, executable) = account + .map(|acc| (acc.lamports(), acc.owner(), acc.data(), acc.executable())) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false)); assert_eq!(stored_meta.lamports(), lamports); assert_eq!(stored_meta.data().len(), data.len()); @@ -69,8 +59,5 @@ pub(super) fn verify_test_account( assert_eq!(stored_meta.executable(), executable); assert_eq!(stored_meta.owner(), owner); assert_eq!(stored_meta.pubkey(), address); - assert_eq!( - *stored_meta.hash(), - account_hash.unwrap_or(AccountHash(Hash::default())) - ); + assert_eq!(*stored_meta.hash(), AccountHash(Hash::default())); } diff --git a/cargo-registry/Cargo.toml b/cargo-registry/Cargo.toml index 4e13f477ee5e2d..395493a8e85f00 100644 --- a/cargo-registry/Cargo.toml +++ b/cargo-registry/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-cargo-registry" +name = "agave-cargo-registry" description = "Solana cargo registry" -documentation = "https://docs.rs/solana-cargo-registry" +documentation = "https://docs.rs/agave-cargo-registry" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index ad12e1fc000a89..2ce8af0432106b 100755 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -289,7 +289,7 @@ if [[ -n $BUILDKITE_TAG ]]; then start_pipeline "Tag pipeline for $BUILDKITE_TAG" annotate --style info --context release-tag \ - "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" + "https://github.com/anza-xyz/agave/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly trigger_secondary_step @@ -307,7 +307,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/anza-xyz/agave/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index d40273863cc7a3..4ae00c9feab586 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -121,8 +121,8 @@ EOF trigger_secondary_step() { cat >> "$output_file" <<"EOF" - - name: "Trigger Build on solana-secondary" - trigger: "solana-secondary" + - name: "Trigger Build on agave-secondary" + trigger: "agave-secondary" branches: "!pull/*" async: true soft_fail: true @@ -315,7 +315,7 @@ if [[ -n $BUILDKITE_TAG ]]; then start_pipeline "Tag pipeline for $BUILDKITE_TAG" annotate --style info --context release-tag \ - 
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" + "https://github.com/anza-xyz/agave/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly trigger_secondary_step @@ -333,7 +333,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/anza-xyz/agave/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 70d8e4bfe4f59f..e5886a314eb27c 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -287,7 +287,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/anza-xyz/agave/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/channel-info.sh b/ci/channel-info.sh index c82806454d012c..2bb808365653c6 100755 --- a/ci/channel-info.sh +++ b/ci/channel-info.sh @@ -11,7 +11,7 @@ here="$(dirname "$0")" # shellcheck source=ci/semver_bash/semver.sh source "$here"/semver_bash/semver.sh -remote=https://github.com/solana-labs/solana.git +remote=https://github.com/anza-xyz/agave.git # Fetch all vX.Y.Z tags # diff --git a/ci/dependabot-pr.sh b/ci/dependabot-pr.sh index 91ecd5948c9a43..bb019001a0bcfa 100755 --- a/ci/dependabot-pr.sh +++ b/ci/dependabot-pr.sh @@ -21,7 +21,7 @@ fi echo --- "(FAILING) Backpropagating dependabot-triggered Cargo.lock updates" name="dependabot-buildkite" -api_base="https://api.github.com/repos/solana-labs/solana/pulls" +api_base="https://api.github.com/repos/anza-xyz/agave/pulls" pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+') branch=$(curl -s "$api_base/$pr_num" | python3 -c 'import json,sys;print(json.load(sys.stdin)["head"]["ref"])') diff --git a/ci/localnet-sanity.sh b/ci/localnet-sanity.sh index e6734e180aa2da..b01eca31d50d81 100755 --- a/ci/localnet-sanity.sh +++ b/ci/localnet-sanity.sh @@ -202,8 +202,8 @@ killNodes() { # Try to use the RPC exit API to cleanly exit the first two nodes # (dynamic nodes, -x, are just killed) echo "--- RPC exit" - $solana_validator --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator exit --force || true - $solana_validator --ledger "$SOLANA_CONFIG_DIR"/validator exit --force || true + $agave_validator --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator exit --force || true + $agave_validator --ledger "$SOLANA_CONFIG_DIR"/validator exit --force || true # Give the nodes a splash of time to cleanly exit before killing them sleep 2 diff --git a/ci/publish-installer.sh b/ci/publish-installer.sh index 4b5345ae0d26fe..f7d98ffd5ddcf9 100755 --- a/ci/publish-installer.sh +++ b/ci/publish-installer.sh @@ -26,14 +26,14 @@ fi # upload install script source ci/upload-ci-artifact.sh -cat >release.solana.com-install <release.anza.xyz-install <>release.solana.com-install +cat install/agave-install-init.sh >>release.anza.xyz-install -echo --- AWS S3 Store: "install" -upload-s3-artifact "/solana/release.solana.com-install" "s3://release.solana.com/$CHANNEL_OR_TAG/install" +echo --- GCS: "install" 
+upload-gcs-artifact "/solana/release.anza.xyz-install" "gs://anza-release/$CHANNEL_OR_TAG/install" echo Published to: -ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install +ci/format-url.sh https://release.anza.xyz/"$CHANNEL_OR_TAG"/install diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index ff72bb7da2d066..da5862fb3de1d2 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -93,7 +93,7 @@ echo --- Creating release tarball tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}" bzip2 "${TARBALL_BASENAME}"-$TARGET.tar - cp "${RELEASE_BASENAME}"/bin/solana-install-init solana-install-init-$TARGET + cp "${RELEASE_BASENAME}"/bin/agave-install-init agave-install-init-$TARGET cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml ) @@ -110,7 +110,7 @@ fi source ci/upload-ci-artifact.sh -for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do +for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.yml agave-install-init-"$TARGET"* $MAYBE_TARBALLS; do if [[ -n $DO_NOT_PUBLISH_TAR ]]; then upload-ci-artifact "$file" echo "Skipped $file due to DO_NOT_PUBLISH_TAR" @@ -118,11 +118,11 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET. fi if [[ -n $BUILDKITE ]]; then - echo --- AWS S3 Store: "$file" - upload-s3-artifact "/solana/$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file" + echo --- GCS Store: "$file" + upload-gcs-artifact "/solana/$file" gs://anza-release/"$CHANNEL_OR_TAG"/"$file" echo Published to: - $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file" + $DRYRUN ci/format-url.sh https://release.anza.xyz/"$CHANNEL_OR_TAG"/"$file" if [[ -n $TAG ]]; then ci/upload-github-release-asset.sh "$file" diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh index 8108d13a061fd5..88a6f40b1adf28 100755 --- a/ci/run-sanity.sh +++ b/ci/run-sanity.sh @@ -31,7 +31,7 @@ while [[ $latest_slot -le $((snapshot_slot + 1)) ]]; do latest_slot=$($solana_cli --url http://localhost:8899 slot --commitment processed) done -$solana_validator --ledger config/ledger exit --force || true +$agave_validator --ledger config/ledger exit --force || true wait $pid diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 3321f1d5ecb6a1..97ebb1c7935006 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -37,7 +37,7 @@ export rust_stable="$stable_version" export rust_nightly=nightly-"$nightly_version" -export ci_docker_image="solanalabs/ci:rust_${rust_stable}_${rust_nightly}" +export ci_docker_image="anzaxyz/ci:rust_${rust_stable}_${rust_nightly}" [[ -z $1 ]] || ( diff --git a/ci/test-coverage.sh b/ci/test-coverage.sh index 44231cd338a13e..ffd362acd287b8 100755 --- a/ci/test-coverage.sh +++ b/ci/test-coverage.sh @@ -32,5 +32,5 @@ else codecov -t "${CODECOV_TOKEN}" annotate --style success --context codecov.io \ - "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}" + "CodeCov report: https://codecov.io/github/anza-xyz/agave/commit/${CI_COMMIT:0:9}" fi diff --git a/ci/upload-ci-artifact.sh b/ci/upload-ci-artifact.sh index 1236da9f27114a..e7cc34ab2b2d8c 100644 --- a/ci/upload-ci-artifact.sh +++ b/ci/upload-ci-artifact.sh @@ -40,3 +40,13 @@ upload-s3-artifact() { docker run "${args[@]}" ) } + +upload-gcs-artifact() { + echo "--- artifact: $1 to $2" + docker run --rm \ + -v "$GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL:/application_default_credentials.json" \ + -v 
"$PWD:/solana" \ + -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=/application_default_credentials.json \ + gcr.io/google.com/cloudsdktool/google-cloud-cli:latest \ + gcloud storage cp "$1" "$2" +} diff --git a/ci/upload-github-release-asset.sh b/ci/upload-github-release-asset.sh index ca2ae2a8f60443..229fb8993edafd 100755 --- a/ci/upload-github-release-asset.sh +++ b/ci/upload-github-release-asset.sh @@ -26,7 +26,7 @@ fi # Force CI_REPO_SLUG since sometimes # BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the # artifact upload to fail -CI_REPO_SLUG=solana-labs/solana +CI_REPO_SLUG=anza-xyz/agave #if [[ -z $CI_REPO_SLUG ]]; then # echo Error: CI_REPO_SLUG not defined # exit 1 diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 0eca9cde5c1a52..62b66eddf27eb0 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -1648,7 +1648,23 @@ impl VerboseDisplay for CliAuthorizedVoters {} impl fmt::Display for CliAuthorizedVoters { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.authorized_voters) + if let Some((_epoch, current_authorized_voter)) = self.authorized_voters.first_key_value() { + write!(f, "{current_authorized_voter}")?; + } else { + write!(f, "None")?; + } + if self.authorized_voters.len() > 1 { + let (epoch, upcoming_authorized_voter) = self + .authorized_voters + .last_key_value() + .expect("CliAuthorizedVoters::authorized_voters.len() > 1"); + writeln!(f)?; + write!( + f, + " New Vote Authority as of Epoch {epoch}: {upcoming_authorized_voter}" + )?; + } + Ok(()) } } @@ -3379,12 +3395,12 @@ mod tests { ..CliVoteAccount::default() }; let s = format!("{c}"); - assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: {}\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\n Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n 1 100 1970-01-01 00:00:00 UTC ◎0.000000010 ◎0.000000100 11.000% 10.00% 1%\n 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.000000100 11.000% 13.00% 1%\n"); + assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: None\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\n Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n 1 100 1970-01-01 00:00:00 UTC ◎0.000000010 ◎0.000000100 11.000% 10.00% 1%\n 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.000000100 11.000% 13.00% 1%\n"); println!("{s}"); c.use_csv = true; let s = format!("{c}"); - assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: {}\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\nEpoch,Reward Slot,Time,Amount,New Balance,Percent Change,APR,Commission\n1,100,1970-01-01 00:00:00 UTC,0.00000001,0.0000001,11%,10.00%,1%\n2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"); + assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: None\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\nEpoch,Reward Slot,Time,Amount,New Balance,Percent Change,APR,Commission\n1,100,1970-01-01 00:00:00 
UTC,0.00000001,0.0000001,11%,10.00%,1%\n2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"); println!("{s}"); } } diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index b53b66b155e719..a94bc7cd3d8ca8 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -260,6 +260,7 @@ mod tests { thread: response_recv_thread, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index 70f33020dd3e70..7013f718e4ab2e 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -160,7 +160,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { let (packet_s, packet_r) = unbounded(); let (verified_s, verified_r) = BankingTracer::channel_for_test(); let verifier = TransactionSigVerifier::new(verified_s); - let stage = SigVerifyStage::new(packet_r, verifier, "bench"); + let stage = SigVerifyStage::new(packet_r, verifier, "solSigVerBench", "bench"); bencher.iter(move || { let now = Instant::now(); diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 0e427d0675a2b1..f5572d94a3c7d1 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -9,6 +9,7 @@ use { IncrementalAccountsHash, }, sorted_storages::SortedStorages, + starting_snapshot_storages::StartingSnapshotStorages, }, solana_measure::measure_us, solana_runtime::{ @@ -42,6 +43,7 @@ impl AccountsHashVerifier { accounts_package_sender: Sender, accounts_package_receiver: Receiver, snapshot_package_sender: Option>, + starting_snapshot_storages: StartingSnapshotStorages, exit: Arc, snapshot_config: SnapshotConfig, ) -> Self { @@ -54,7 +56,11 @@ impl AccountsHashVerifier { // To support fastboot, we must ensure the storages used in the latest POST snapshot are // not recycled nor removed early. Hold an Arc of their AppendVecs to prevent them from // expiring. 
- let mut fastboot_storages = None; + let mut fastboot_storages = match starting_snapshot_storages { + StartingSnapshotStorages::Genesis => None, + StartingSnapshotStorages::Archive => None, + StartingSnapshotStorages::Fastboot(storages) => Some(storages), + }; loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 39cc193adad96e..84f1520e649822 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -148,6 +148,7 @@ impl ShredFetchStage { #[allow(clippy::too_many_arguments)] fn packet_modifier( receiver_thread_name: &'static str, + modifier_thread_name: &'static str, sockets: Vec>, exit: Arc, sender: Sender, @@ -178,7 +179,7 @@ impl ShredFetchStage { }) .collect(); let modifier_hdl = Builder::new() - .name("solTvuFetchPMod".to_string()) + .name(modifier_thread_name.to_string()) .spawn(move || { let repair_context = repair_context .as_ref() @@ -215,6 +216,7 @@ impl ShredFetchStage { let (mut tvu_threads, tvu_filter) = Self::packet_modifier( "solRcvrShred", + "solTvuPktMod", sockets, exit.clone(), sender.clone(), @@ -229,6 +231,7 @@ impl ShredFetchStage { let (repair_receiver, repair_handler) = Self::packet_modifier( "solRcvrShredRep", + "solTvuRepPktMod", vec![repair_socket.clone()], exit.clone(), sender.clone(), diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index e5e06a3bc701c9..cde1735611c0d0 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -238,9 +238,11 @@ impl SigVerifyStage { pub fn new( packet_receiver: Receiver, verifier: T, - name: &'static str, + thread_name: &'static str, + metrics_name: &'static str, ) -> Self { - let thread_hdl = Self::verifier_services(packet_receiver, verifier, name); + let thread_hdl = + Self::verifier_service(packet_receiver, verifier, thread_name, metrics_name); Self { thread_hdl } } @@ -407,7 +409,8 @@ impl SigVerifyStage { fn verifier_service( packet_receiver: Receiver, mut verifier: T, - name: &'static str, + thread_name: &'static str, + metrics_name: &'static str, ) -> JoinHandle<()> { let mut stats = SigVerifierStats::default(); let mut last_print = Instant::now(); @@ -415,7 +418,7 @@ impl SigVerifyStage { const DEDUPER_FALSE_POSITIVE_RATE: f64 = 0.001; const DEDUPER_NUM_BITS: u64 = 63_999_979; Builder::new() - .name("solSigVerifier".to_string()) + .name(thread_name.to_string()) .spawn(move || { let mut rng = rand::thread_rng(); let mut deduper = Deduper::<2, [u8]>::new(&mut rng, DEDUPER_NUM_BITS); @@ -440,7 +443,7 @@ impl SigVerifyStage { } } if last_print.elapsed().as_secs() > 2 { - stats.report(name); + stats.report(metrics_name); stats = SigVerifierStats::default(); last_print = Instant::now(); } @@ -449,14 +452,6 @@ impl SigVerifyStage { .unwrap() } - fn verifier_services( - packet_receiver: Receiver, - verifier: T, - name: &'static str, - ) -> JoinHandle<()> { - Self::verifier_service(packet_receiver, verifier, name) - } - pub fn join(self) -> thread::Result<()> { self.thread_hdl.join() } @@ -552,7 +547,7 @@ mod tests { let (packet_s, packet_r) = unbounded(); let (verified_s, verified_r) = BankingTracer::channel_for_test(); let verifier = TransactionSigVerifier::new(verified_s); - let stage = SigVerifyStage::new(packet_r, verifier, "test"); + let stage = SigVerifyStage::new(packet_r, verifier, "solSigVerTest", "test"); let now = Instant::now(); let packets_per_batch = 128; diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 548b299148d935..640caf64544d45 100644 --- a/core/src/tpu.rs +++ 
b/core/src/tpu.rs @@ -153,6 +153,7 @@ impl Tpu { thread: tpu_quic_t, key_updater, } = spawn_server( + "solQuicTpu", "quic_streamer_tpu", transactions_quic_sockets, keypair, @@ -172,6 +173,7 @@ impl Tpu { thread: tpu_forwards_quic_t, key_updater: forwards_key_updater, } = spawn_server( + "solQuicTpuFwd", "quic_streamer_tpu_forwards", transactions_forwards_quic_sockets, keypair, @@ -188,14 +190,19 @@ impl Tpu { let sigverify_stage = { let verifier = TransactionSigVerifier::new(non_vote_sender); - SigVerifyStage::new(packet_receiver, verifier, "tpu-verifier") + SigVerifyStage::new(packet_receiver, verifier, "solSigVerTpu", "tpu-verifier") }; let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); let vote_sigverify_stage = { let verifier = TransactionSigVerifier::new_reject_non_vote(tpu_vote_sender); - SigVerifyStage::new(vote_packet_receiver, verifier, "tpu-vote-verifier") + SigVerifyStage::new( + vote_packet_receiver, + verifier, + "solSigVerTpuVot", + "tpu-vote-verifier", + ) }; let (gossip_vote_sender, gossip_vote_receiver) = diff --git a/core/src/validator.rs b/core/src/validator.rs index a6d5921bcef5c9..196dad5f25d17a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -35,6 +35,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + starting_snapshot_storages::StartingSnapshotStorages, utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, @@ -690,6 +691,7 @@ impl Validator { completed_slots_receiver, leader_schedule_cache, starting_snapshot_hashes, + starting_snapshot_storages, TransactionHistoryServices { transaction_status_sender, transaction_status_service, @@ -779,6 +781,7 @@ impl Validator { accounts_package_sender.clone(), accounts_package_receiver, snapshot_package_sender, + starting_snapshot_storages, exit.clone(), config.snapshot_config.clone(), ); @@ -1767,6 +1770,7 @@ fn load_blockstore( CompletedSlotsReceiver, LeaderScheduleCache, Option, + StartingSnapshotStorages, TransactionHistoryServices, blockstore_processor::ProcessOptions, BlockstoreRootScan, @@ -1856,23 +1860,27 @@ fn load_blockstore( let entry_notifier_service = entry_notifier .map(|entry_notifier| EntryNotifierService::new(entry_notifier, exit.clone())); - let (bank_forks, mut leader_schedule_cache, starting_snapshot_hashes) = - bank_forks_utils::load_bank_forks( - &genesis_config, - &blockstore, - config.account_paths.clone(), - Some(&config.snapshot_config), - &process_options, - transaction_history_services - .cache_block_meta_sender - .as_ref(), - entry_notifier_service - .as_ref() - .map(|service| service.sender()), - accounts_update_notifier, - exit, - ) - .map_err(|err| err.to_string())?; + let ( + bank_forks, + mut leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + ) = bank_forks_utils::load_bank_forks( + &genesis_config, + &blockstore, + config.account_paths.clone(), + Some(&config.snapshot_config), + &process_options, + transaction_history_services + .cache_block_meta_sender + .as_ref(), + entry_notifier_service + .as_ref() + .map(|service| service.sender()), + accounts_update_notifier, + exit, + ) + .map_err(|err| err.to_string())?; // Before replay starts, set the callbacks in each of the banks in BankForks so that // all dropped banks come through the `pruned_banks_receiver` channel. 
This way all bank @@ -1898,6 +1906,7 @@ fn load_blockstore( completed_slots_receiver, leader_schedule_cache, starting_snapshot_hashes, + starting_snapshot_storages, transaction_history_services, process_options, blockstore_root_scan, diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index b0dd111676af79..62e31f0a88b766 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -9,6 +9,7 @@ use { accounts_hash::CalcAccountsHashConfig, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, + starting_snapshot_storages::StartingSnapshotStorages, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, @@ -196,6 +197,7 @@ impl BackgroundServices { accounts_package_sender.clone(), accounts_package_receiver, Some(snapshot_package_sender), + StartingSnapshotStorages::Genesis, exit.clone(), snapshot_config.clone(), ); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 2694f7294a7217..e67c942f07ab0b 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -11,6 +11,7 @@ use { accounts_hash::AccountsHash, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, + starting_snapshot_storages::StartingSnapshotStorages, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, @@ -1043,6 +1044,7 @@ fn test_snapshots_with_background_services( accounts_package_sender, accounts_package_receiver, Some(snapshot_package_sender), + StartingSnapshotStorages::Genesis, exit.clone(), snapshot_test_config.snapshot_config.clone(), ); diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index e765eee3bc7038..76865fff30fd57 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -18,8 +18,19 @@ pub enum TransactionCost { impl TransactionCost { pub fn sum(&self) -> u64 { + #![allow(clippy::assertions_on_constants)] match self { - Self::SimpleVote { .. } => SIMPLE_VOTE_USAGE_COST, + Self::SimpleVote { .. } => { + const _: () = assert!( + SIMPLE_VOTE_USAGE_COST + == solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS + + block_cost_limits::SIGNATURE_COST + + 2 * block_cost_limits::WRITE_LOCK_UNITS + + 8 + ); + + SIMPLE_VOTE_USAGE_COST + } Self::Transaction(usage_cost) => usage_cost.sum(), } } diff --git a/docs/src/backwards-compatibility.md b/docs/src/backwards-compatibility.md index 4a3c60b8e129bd..0fdc388ea2dbae 100644 --- a/docs/src/backwards-compatibility.md +++ b/docs/src/backwards-compatibility.md @@ -76,7 +76,7 @@ Major releases: - [`solana-program`](https://docs.rs/solana-program/) - Rust SDK for writing programs - [`solana-client`](https://docs.rs/solana-client/) - Rust client for connecting to RPC API - [`solana-cli-config`](https://docs.rs/solana-cli-config/) - Rust client for managing Solana CLI config files -- [`solana-geyser-plugin-interface`](https://docs.rs/solana-geyser-plugin-interface/) - Rust interface for developing Solana Geyser plugins. +- [`agave-geyser-plugin-interface`](https://docs.rs/agave-geyser-plugin-interface/) - Rust interface for developing Solana Geyser plugins. 
Patch releases: diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index 3667c733e3f4d4..20f6516314fb02 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -56,7 +56,7 @@ Please update your PATH environment variable to include the solana programs: solana --version ``` -- After a successful install, `solana-install update` may be used to easily +- After a successful install, `agave-install update` may be used to easily update the Solana software to a newer version at any time. --- @@ -74,7 +74,7 @@ solana --version installer into a temporary directory: ```bash -cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs" +cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/agave-install-init-x86_64-pc-windows-msvc.exe --output C:\agave-install-tmp\agave-install-init.exe --create-dirs" ``` - Copy and paste the following command, then press Enter to install the latest @@ -82,7 +82,7 @@ cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-ins to allow the program to run. ```bash -C:\solana-install-tmp\solana-install-init.exe LATEST_SOLANA_RELEASE_VERSION +C:\agave-install-tmp\agave-install-init.exe LATEST_SOLANA_RELEASE_VERSION ``` - When the installer is finished, press Enter. @@ -97,12 +97,12 @@ C:\solana-install-tmp\solana-install-init.exe LATEST_SOLANA_RELEASE_VERSION solana --version ``` -- After a successful install, `solana-install update` may be used to easily +- After a successful install, `agave-install update` may be used to easily update the Solana software to a newer version at any time. ## Download Prebuilt Binaries -If you would rather not use `solana-install` to manage the install, you can +If you would rather not use `agave-install` to manage the install, you can manually download and install the binaries. 
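For the manual route just described, the publishing scripts elsewhere in this diff upload release artifacts to `https://release.anza.xyz/<channel-or-tag>/<file>`. Below is a minimal sketch of a manual Linux install against that layout; the release tag, target triple, and install location are placeholders, not values taken from this diff:

```bash
#!/usr/bin/env bash
set -euo pipefail

RELEASE="v1.18.0"                  # placeholder tag; pick the version you want
TARGET="x86_64-unknown-linux-gnu"  # placeholder target triple

# ci/publish-tarball.sh uploads solana-release-<target>.tar.bz2 under the
# channel-or-tag prefix, so the download URL follows the same layout.
curl -fsSL -o "solana-release-${TARGET}.tar.bz2" \
  "https://release.anza.xyz/${RELEASE}/solana-release-${TARGET}.tar.bz2"

tar -xjf "solana-release-${TARGET}.tar.bz2"
export PATH="${PWD}/solana-release/bin:${PATH}"
solana --version
```

The extracted `solana-release/` directory name is assumed from the artifact naming used by `ci/publish-tarball.sh` above; adjust it if the archive layout differs.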
### Linux @@ -255,7 +255,7 @@ You can then run the following command to obtain the same result as with prebuilt binaries: ```bash -solana-install init +agave-install init ``` ## Use Homebrew diff --git a/docs/src/clusters/available.md b/docs/src/clusters/available.md index dfbca41672b499..52a7d469ad0cc5 100644 --- a/docs/src/clusters/available.md +++ b/docs/src/clusters/available.md @@ -41,10 +41,10 @@ export SOLANA_METRICS_CONFIG="host=https://metrics.solana.com:8086,db=devnet,u=s solana config set --url https://api.devnet.solana.com ``` -##### Example `solana-validator` command-line +##### Example `agave-validator` command-line ```bash -$ solana-validator \ +$ agave-validator \ --identity validator-keypair.json \ --vote-account vote-account-keypair.json \ --known-validator dv1ZAGvdsz5hHLwWXsVnM94hWf1pjbKVau1QVkaMJ92 \ @@ -93,10 +93,10 @@ export SOLANA_METRICS_CONFIG="host=https://metrics.solana.com:8086,db=tds,u=test solana config set --url https://api.testnet.solana.com ``` -##### Example `solana-validator` command-line +##### Example `agave-validator` command-line ```bash -$ solana-validator \ +$ agave-validator \ --identity validator-keypair.json \ --vote-account vote-account-keypair.json \ --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \ @@ -145,10 +145,10 @@ export SOLANA_METRICS_CONFIG="host=https://metrics.solana.com:8086,db=mainnet-be solana config set --url https://api.mainnet-beta.solana.com ``` -##### Example `solana-validator` command-line +##### Example `agave-validator` command-line ```bash -$ solana-validator \ +$ agave-validator \ --identity ~/validator-keypair.json \ --vote-account ~/vote-account-keypair.json \ --known-validator 7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2 \ diff --git a/docs/src/clusters/benchmark.md b/docs/src/clusters/benchmark.md index d913f9e5f16392..35978cdd0967dd 100644 --- a/docs/src/clusters/benchmark.md +++ b/docs/src/clusters/benchmark.md @@ -108,7 +108,7 @@ For example Generally we are using `debug` for infrequent debug messages, `trace` for potentially frequent messages and `info` for performance-related logging. -You can also attach to a running process with GDB. The leader's process is named _solana-validator_: +You can also attach to a running process with GDB. The leader's process is named _agave-validator_: ```bash sudo gdb diff --git a/docs/src/implemented-proposals/installer.md b/docs/src/implemented-proposals/installer.md index a3ad797171c5b8..c052aa7b4e54e5 100644 --- a/docs/src/implemented-proposals/installer.md +++ b/docs/src/implemented-proposals/installer.md @@ -13,16 +13,16 @@ This document proposes an easy to use software install and updater that can be u The easiest install method for supported platforms: ```bash -$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/solana-install-init.sh | sh +$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/agave-install-init.sh | sh ``` -This script will check github for the latest tagged release and download and run the `solana-install-init` binary from there. +This script will check github for the latest tagged release and download and run the `agave-install-init` binary from there. If additional arguments need to be specified during the installation, the following shell syntax is used: ```bash -$ init_args=.... # arguments for `solana-install-init ...` -$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/solana-install-init.sh | sh -s - ${init_args} +$ init_args=.... 
# arguments for `agave-install-init ...` +$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/agave-install-init.sh | sh -s - ${init_args} ``` ### Fetch and run a pre-built installer from a Github release @@ -30,9 +30,9 @@ $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/ With a well-known release URL, a pre-built binary can be obtained for supported platforms: ```bash -$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v1.0.0/solana-install-init-x86_64-apple-darwin -$ chmod +x ./solana-install-init -$ ./solana-install-init --help +$ curl -o agave-install-init https://github.com/solana-labs/solana/releases/download/v1.0.0/agave-install-init-x86_64-apple-darwin +$ chmod +x ./agave-install-init +$ ./agave-install-init --help ``` ### Build and run the installer from source @@ -51,16 +51,16 @@ Given a solana release tarball \(as created by `ci/publish-tarball.sh`\) that ha ```bash $ solana-keygen new -o update-manifest.json # <-- only generated once, the public key is shared with users -$ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json +$ agave-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json ``` ### Run a validator node that auto updates itself ```bash -$ solana-install init --pubkey 92DMonmBYXwEMHJ99c9ceRSpAmk9v6i3RdvDdXaVcrfj # <-- pubkey is obtained from whoever is deploying the updates -$ export PATH=~/.local/share/solana-install/bin:$PATH +$ agave-install init --pubkey 92DMonmBYXwEMHJ99c9ceRSpAmk9v6i3RdvDdXaVcrfj # <-- pubkey is obtained from whoever is deploying the updates +$ export PATH=~/.local/share/agave-install/bin:$PATH $ solana-keygen ... # <-- runs the latest solana-keygen -$ solana-install run solana-validator ... # <-- runs a validator, restarting it as necessary when an update is applied +$ agave-install run agave-validator ... # <-- runs a validator, restarting it as necessary when an update is applied ``` ## On-chain Update Manifest @@ -87,9 +87,9 @@ pub struct SignedUpdateManifest { } ``` -Note that the `manifest` field itself contains a corresponding signature \(`manifest_signature`\) to guard against man-in-the-middle attacks between the `solana-install` tool and the solana cluster RPC API. +Note that the `manifest` field itself contains a corresponding signature \(`manifest_signature`\) to guard against man-in-the-middle attacks between the `agave-install` tool and the solana cluster RPC API. -To guard against rollback attacks, `solana-install` will refuse to install an update with an older `timestamp_secs` than what is currently installed. +To guard against rollback attacks, `agave-install` will refuse to install an update with an older `timestamp_secs` than what is currently installed. ## Release Archive Contents @@ -101,17 +101,17 @@ A release archive is expected to be a tar file compressed with bzip2 with the fo - `/bin/` -- directory containing available programs in the release. - `solana-install` will symlink this directory to + `agave-install` will symlink this directory to - `~/.local/share/solana-install/bin` for use by the `PATH` environment + `~/.local/share/agave-install/bin` for use by the `PATH` environment variable. - `...` -- any additional files and directories are permitted -## solana-install Tool +## agave-install Tool -The `solana-install` tool is used by the user to install and update their cluster software. 
+The `agave-install` tool is used by the user to install and update their cluster software. It manages the following files and directories in the user's home directory: @@ -122,11 +122,11 @@ It manages the following files and directories in the user's home directory: ### Command-line Interface ```text -solana-install 0.16.0 +agave-install 0.16.0 The solana cluster software installer USAGE: - solana-install [OPTIONS] + agave-install [OPTIONS] FLAGS: -h, --help Prints help information @@ -145,11 +145,11 @@ SUBCOMMANDS: ``` ```text -solana-install-init +agave-install-init initializes a new installation USAGE: - solana-install init [OPTIONS] + agave-install init [OPTIONS] FLAGS: -h, --help Prints help information @@ -161,11 +161,11 @@ OPTIONS: ``` ```text -solana-install info +agave-install info displays information about the current installation USAGE: - solana-install info [FLAGS] + agave-install info [FLAGS] FLAGS: -h, --help Prints help information @@ -173,11 +173,11 @@ FLAGS: ``` ```text -solana-install deploy +agave-install deploy deploys a new update USAGE: - solana-install deploy + agave-install deploy FLAGS: -h, --help Prints help information @@ -188,22 +188,22 @@ ARGS: ``` ```text -solana-install update +agave-install update checks for an update, and if available downloads and applies it USAGE: - solana-install update + agave-install update FLAGS: -h, --help Prints help information ``` ```text -solana-install run +agave-install run Runs a program while periodically checking and applying software updates USAGE: - solana-install run [program_arguments]... + agave-install run [program_arguments]... FLAGS: -h, --help Prints help information diff --git a/docs/src/implemented-proposals/rpc-transaction-history.md b/docs/src/implemented-proposals/rpc-transaction-history.md index 54288ad9659bd7..607a79ce658b98 100644 --- a/docs/src/implemented-proposals/rpc-transaction-history.md +++ b/docs/src/implemented-proposals/rpc-transaction-history.md @@ -68,7 +68,7 @@ the results of BigTable queries more complicated but is not a significant issue. ## Data Population The ongoing population of instance data will occur on an epoch cadence through -the use of a new `solana-ledger-tool` command that will convert rocksdb data for +the use of a new `agave-ledger-tool` command that will convert rocksdb data for a given slot range into the instance schema. The same process will be run once, manually, to backfill the existing ledger diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md index 29ef42c81b7f5f..3e531b0160c571 100644 --- a/docs/src/operations/best-practices/general.md +++ b/docs/src/operations/best-practices/general.md @@ -23,12 +23,12 @@ watch past workshops through the ## Help with the validator command line -From within the Solana CLI, you can execute the `solana-validator` command with +From within the Solana CLI, you can execute the `agave-validator` command with the `--help` flag to get a better understanding of the flags and sub commands available. ``` -solana-validator --help +agave-validator --help ``` ## Restarting your validator @@ -49,14 +49,14 @@ solana leader-schedule Based on the current slot and the leader schedule, you can calculate open time windows where your validator is not expected to produce blocks. -Assuming you are ready to restart, you may use the `solana-validator exit` +Assuming you are ready to restart, you may use the `agave-validator exit` command. 
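For example, a minimal sketch of that exit-and-restart flow, assuming systemd supervision and a placeholder ledger path; the `--min-idle-time` and `--max-delinquent-stake` flags are assumed from the validator CLI rather than shown in this diff:

```bash
# Block until a window with no upcoming leader slots, then ask the running
# process to exit; systemd is assumed to restart it automatically.
agave-validator --ledger /mnt/ledger wait-for-restart-window \
  --min-idle-time 10 \
  --max-delinquent-stake 5 \
&& agave-validator --ledger /mnt/ledger exit
```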
The command exits your validator process when an appropriate idle time window is reached. Assuming that you have systemd implemented for your validator process, the validator should restart automatically after the exit. See the below help command for details: ``` -solana-validator exit --help +agave-validator exit --help ``` ## Upgrading @@ -75,28 +75,28 @@ process. It is a best practice to always build your Solana binaries from source. If you build from source, you are certain that the code you are building has not been tampered with before the binary was created. You may also be able to optimize -your `solana-validator` binary to your specific hardware. +your `agave-validator` binary to your specific hardware. If you build from source on the validator machine (or a machine with the same CPU), you can target your specific architecture using the `-march` flag. Refer to the following doc for [instructions on building from source](../../cli/install.md#build-from-source). -### solana-install +### agave-install If you are not comfortable building from source, or you need to quickly install a new version to test something out, you could instead try using the -`solana-install` command. +`agave-install` command. Assuming you want to install Solana version `1.14.17`, you would execute the following: ``` -solana-install init 1.14.17 +agave-install init 1.14.17 ``` This command downloads the executable for `1.14.17` and installs it into a -`.local` directory. You can also look at `solana-install --help` for more +`.local` directory. You can also look at `agave-install --help` for more options. > **Note** this command only works if you already have the solana cli installed. @@ -106,7 +106,7 @@ options. ### Restart For all install methods, the validator process will need to be restarted before -the newly installed version is in use. Use `solana-validator exit` to restart +the newly installed version is in use. Use `agave-validator exit` to restart your validator process. ### Verifying version @@ -132,13 +132,13 @@ have state locally. In other cases such as restarts for upgrades, a snapshot download should be avoided. To avoid downloading a snapshot on restart, add the following flag to the -`solana-validator` command: +`agave-validator` command: ``` --no-snapshot-fetch ``` -If you use this flag with the `solana-validator` command, make sure that you run +If you use this flag with the `agave-validator` command, make sure that you run `solana catchup ` after your validator starts to make sure that the validator is catching up in a reasonable time. After some time (potentially a few hours), if it appears that your validator continues to fall behind, then you @@ -199,7 +199,7 @@ It is important that you do not accidentally run out of funds in your identity account, as your node will stop voting. It is also important to note that this account keypair is the most vulnerable of the three keypairs in a vote account because the keypair for the identity account is stored on your validator when -running the `solana-validator` software. How much SOL you should store there is +running the `agave-validator` software. How much SOL you should store there is up to you. As a best practice, make sure to check the account regularly and refill or deduct from it as needed. To check the account balance do: @@ -207,7 +207,7 @@ refill or deduct from it as needed. 
To check the account balance do: solana balance validator-keypair.json ``` -> **Note** `solana-watchtower` can monitor for a minimum validator identity +> **Note** `agave-watchtower` can monitor for a minimum validator identity > balance. See [monitoring best practices](./monitoring.md) for details. ## Withdrawing From The Vote Account diff --git a/docs/src/operations/best-practices/monitoring.md b/docs/src/operations/best-practices/monitoring.md index 6d04fc38487be7..a0f2ef9df9fa22 100644 --- a/docs/src/operations/best-practices/monitoring.md +++ b/docs/src/operations/best-practices/monitoring.md @@ -4,34 +4,34 @@ sidebar_label: Monitoring pagination_label: "Best Practices: Validator Monitoring" --- -It is essential that you have monitoring in place on your validator. In the event that your validator is delinquent (behind the rest of the network) you want to respond immediately to fix the issue. One very useful tool to monitor your validator is [`solana-watchtower`](#solana-watchtower). +It is essential that you have monitoring in place on your validator. In the event that your validator is delinquent (behind the rest of the network) you want to respond immediately to fix the issue. One very useful tool to monitor your validator is [`agave-watchtower`](#agave-watchtower). ## Solana Watchtower -Solana Watchtower is an extremely useful monitoring tool that will regularly monitor the health of your validator. It can monitor your validator for delinquency then notify you on your application of choice: Slack, Discord, Telegram or Twilio. Additionally, `solana-watchtower` has the ability to monitor the health of the entire cluster so that you can be aware of any cluster wide problems. +Solana Watchtower is an extremely useful monitoring tool that will regularly monitor the health of your validator. It can monitor your validator for delinquency then notify you on your application of choice: Slack, Discord, Telegram or Twilio. Additionally, `agave-watchtower` has the ability to monitor the health of the entire cluster so that you can be aware of any cluster wide problems. ### Getting Started -To get started with Solana Watchtower, run `solana-watchtower --help`. From the help menu, you can see the optional flags and an explanation of the command. +To get started with Solana Watchtower, run `agave-watchtower --help`. From the help menu, you can see the optional flags and an explanation of the command. Here is a sample command that will monitor a validator node with an identity public key of `2uTk98rqqwENevkPH2AHHzGHXgeGc1h6ku8hQUqWeXZp`: ``` -solana-watchtower --monitor-active-stake --validator-identity \ +agave-watchtower --monitor-active-stake --validator-identity \ 2uTk98rqqwENevkPH2AHHzGHXgeGc1h6ku8hQUqWeXZp ``` -The command will monitor your validator, but you will not get notifications unless you added the environment variables mentioned in `solana-watchtower --help`. Since getting each of these services setup for notifications is not straight forward, the next section will walk through [setting up watchtower notifications on Telegram](#setup-telegram-notifications). +The command will monitor your validator, but you will not get notifications unless you added the environment variables mentioned in `agave-watchtower --help`. Since getting each of these services setup for notifications is not straight forward, the next section will walk through [setting up watchtower notifications on Telegram](#setup-telegram-notifications). 
 ### Best Practices
 
-It is a best practice to run the `solana-watchtower` command on a separate server from your validator.
+It is a best practice to run the `agave-watchtower` command on a separate server from your validator.
 
-In the case that you run `solana-watchtower` on the same computer as your `solana-validator` process, then during catastrophic events like a power outage, you will not be aware of the issue, because your `solana-watchtower` process will stop at the same time as your `solana-validator` process.
+In the case that you run `agave-watchtower` on the same computer as your `agave-validator` process, then during catastrophic events like a power outage, you will not be aware of the issue, because your `agave-watchtower` process will stop at the same time as your `agave-validator` process.
 
-Additionally, while running the `solana-watchtower` process manually with environment variables set in the terminal is a good way to test out the command, it is not operationally sound because the process will not be restarted when the terminal closes or during a system restart.
+Additionally, while running the `agave-watchtower` process manually with environment variables set in the terminal is a good way to test out the command, it is not operationally sound because the process will not be restarted when the terminal closes or during a system restart.
 
-Instead, you could run your `solana-watchtower` command as a system process similar to `solana-validator`. In the system process file, you can specify the environment variables for your bot.
+Instead, you could run your `agave-watchtower` command as a system process similar to `agave-validator`. In the system process file, you can specify the environment variables for your bot.
 
 ### Setup Telegram Notifications
 
@@ -41,7 +41,7 @@ To send validator health notifications to your Telegram account, we are going to
 2. Send a message to the bot
 3. Create a Telegram group that will get the watchtower notifications
 4. Add the environment variables to your command line environment
-5. Restart the `solana-watchtower` command
+5. Restart the `agave-watchtower` command
 
 #### Create a Bot Using BotFather
 
@@ -61,7 +61,7 @@ In Telegram, click on the new message icon and then select new group. Find your
 
 Now that you have a bot setup, you will need to set the environment variables for the bot so that watchtower can send notifications.
 
-First, recall the chat message that you got from _@BotFather_. In the message, there was an HTTP API token for your bot. The token will have this format: `389178471:MMTKMrnZB4ErUzJmuFIXTKE6DupLSgoa7h4o`. You will use that token to set the `TELEGRAM_BOT_TOKEN` environment variable. In the terminal where you plan to run `solana-watchtower`, run the following:
+First, recall the chat message that you got from _@BotFather_. In the message, there was an HTTP API token for your bot. The token will have this format: `389178471:MMTKMrnZB4ErUzJmuFIXTKE6DupLSgoa7h4o`. You will use that token to set the `TELEGRAM_BOT_TOKEN` environment variable. In the terminal where you plan to run `agave-watchtower`, run the following:
 
 ```
 export TELEGRAM_BOT_TOKEN=<YOUR_BOT_TOKEN>
 ```
 
@@ -73,14 +73,14 @@ Next, in your browser, go to `https://api.telegram.org/bot<YOUR_BOT_TOKEN>/getUp
 The response should be in JSON. Search for the string `"chat":` in the JSON. The `id` value of that chat is your `TELEGRAM_CHAT_ID`. It will be a negative number like: `-781559558`. Remember to include the negative sign!
If you cannot find `"chat":` in the JSON, then you may have to remove the bot from your chat group and add it again. -With your Telegram chat id in hand, export the environment variable where you plan to run `solana-watchtower`: +With your Telegram chat id in hand, export the environment variable where you plan to run `agave-watchtower`: ``` export TELEGRAM_CHAT_ID= ``` -#### Restart solana-watchtower +#### Restart agave-watchtower -Once your environment variables are set, restart `solana-watchtower`. You should see output about your validator. +Once your environment variables are set, restart `agave-watchtower`. You should see output about your validator. To test that your Telegram configuration is working properly, you could stop your validator briefly until it is labeled as delinquent. Up to a minute after the validator is delinquent, you should receive a message in the Telegram group from your bot. Start the validator again and verify that you get another message in your Telegram group from the bot. The message should say `all clear`. \ No newline at end of file diff --git a/docs/src/operations/guides/restart-cluster.md b/docs/src/operations/guides/restart-cluster.md index 85d4731d604c65..cda3f30a5a016d 100644 --- a/docs/src/operations/guides/restart-cluster.md +++ b/docs/src/operations/guides/restart-cluster.md @@ -11,7 +11,7 @@ pagination_label: "Validator Guides: Restart a Cluster" In Solana 1.14 or greater, run the following command to output the latest optimistically confirmed slot your validator observed: ```bash -solana-ledger-tool -l ledger latest-optimistic-slots +agave-ledger-tool -l ledger latest-optimistic-slots ``` In Solana 1.13 or less, the latest optimistically confirmed can be found by looking for the more recent occurrence of @@ -34,11 +34,11 @@ instead. ### Step 4. Create a new snapshot for slot `SLOT_X` with a hard fork at slot `SLOT_X` ```bash -$ solana-ledger-tool -l --snapshot-archive-path --incremental-snapshot-archive-path create-snapshot SLOT_X --hard-fork SLOT_X +$ agave-ledger-tool -l --snapshot-archive-path --incremental-snapshot-archive-path create-snapshot SLOT_X --hard-fork SLOT_X ``` The snapshots directory should now contain the new snapshot. -`solana-ledger-tool create-snapshot` will also output the new shred version, and bank hash value, +`agave-ledger-tool create-snapshot` will also output the new shred version, and bank hash value, call this NEW_SHRED_VERSION and NEW_BANK_HASH respectively. Adjust your validator's arguments: @@ -68,7 +68,7 @@ Post something like the following to #announcements (adjusting the text as appro > 2. a. Preferred method, start from your local ledger with: > > ```bash -> solana-validator +> agave-validator > --wait-for-supermajority SLOT_X # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --expected-bank-hash NEW_BANK_HASH # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --hard-fork SLOT_X # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART @@ -84,7 +84,7 @@ Post something like the following to #announcements (adjusting the text as appro > b. If your validator doesn't have ledger up to slot SLOT_X or if you have deleted your ledger, have it instead download a snapshot with: > > ```bash -> solana-validator +> agave-validator > --wait-for-supermajority SLOT_X # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --expected-bank-hash NEW_BANK_HASH # <-- NEW! IMPORTANT! 
REMOVE AFTER THIS RESTART > --entrypoint entrypoint.testnet.solana.com:8001 @@ -95,7 +95,7 @@ Post something like the following to #announcements (adjusting the text as appro > ... # <-- your other --identity/--vote-account/etc arguments > ``` > -> You can check for which slots your ledger has with: `solana-ledger-tool -l path/to/ledger bounds` +> You can check for which slots your ledger has with: `agave-ledger-tool -l path/to/ledger bounds` > > 3. Wait until 80% of the stake comes online > @@ -122,7 +122,7 @@ and create a new snapshot with additional `--destake-vote-account ` arguments for each of the non-responsive validator's vote account address ```bash -$ solana-ledger-tool -l ledger create-snapshot SLOT_X ledger --hard-fork SLOT_X \ +$ agave-ledger-tool -l ledger create-snapshot SLOT_X ledger --hard-fork SLOT_X \ --destake-vote-account \ --destake-vote-account \ . diff --git a/docs/src/operations/guides/validator-failover.md b/docs/src/operations/guides/validator-failover.md index 168a1a4312cec0..b7b3fea568194b 100644 --- a/docs/src/operations/guides/validator-failover.md +++ b/docs/src/operations/guides/validator-failover.md @@ -85,11 +85,11 @@ For more information on etcd TLS setup, please refer to https://etcd.io/docs/v3.5/op-guide/security/#example-2-client-to-server-authentication-with-https-client-certificates ### Primary Validator -The following additional `solana-validator` parameters are required to enable +The following additional `agave-validator` parameters are required to enable tower storage into etcd: ``` -solana-validator ... \ +agave-validator ... \ --tower-storage etcd \ --etcd-cacert-file certs/etcd-ca.pem \ --etcd-cert-file certs/validator.pem \ @@ -103,7 +103,7 @@ that your etcd endpoint remain accessible at all times. ### Secondary Validator Configure the secondary validator like the primary with the exception of the -following `solana-validator` command-line argument changes: +following `agave-validator` command-line argument changes: * Generate and use a secondary validator identity: `--identity secondary-validator-keypair.json` * Add `--no-check-vote-account` * Add `--authorized-voter validator-keypair.json` (where @@ -114,8 +114,8 @@ When both validators are running normally and caught up to the cluster, a failover from primary to secondary can be triggered by running the following command on the secondary validator: ```bash -$ solana-validator wait-for-restart-window --identity validator-keypair.json \ - && solana-validator set-identity validator-keypair.json +$ agave-validator wait-for-restart-window --identity validator-keypair.json \ + && agave-validator set-identity validator-keypair.json ``` The secondary validator will acquire a lock on the tower in etcd to ensure @@ -131,7 +131,7 @@ exit. However if/when the secondary validator restarts, it will do so using the secondary validator identity and thus the restart cycle is broken. ## Triggering a failover via monitoring -Monitoring of your choosing can invoke the `solana-validator set-identity +Monitoring of your choosing can invoke the `agave-validator set-identity validator-keypair.json` command mentioned in the previous section. 
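As one concrete shape for that monitoring, a hedged sketch that promotes the secondary when the primary stops answering its RPC health check; the health URL, ledger path, and poll interval are placeholders, and only the two `agave-validator` commands come from this guide:

```bash
#!/usr/bin/env bash
set -euo pipefail

PRIMARY_HEALTH="http://primary.internal:8899/health"  # placeholder endpoint
LEDGER="/mnt/ledger"                                  # placeholder ledger path

while true; do
  if ! curl -fsS --max-time 5 "$PRIMARY_HEALTH" >/dev/null; then
    echo "primary unhealthy; promoting secondary"
    # Same promotion pair as above; the etcd tower lock still ensures
    # only one node votes with this identity at a time.
    agave-validator --ledger "$LEDGER" wait-for-restart-window \
      --identity validator-keypair.json
    agave-validator --ledger "$LEDGER" set-identity validator-keypair.json
    break
  fi
  sleep 10
done
```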
It is not necessary to guarantee the primary validator has halted before failing diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md index 378783798b3ce8..d86c714be4e6a6 100644 --- a/docs/src/operations/guides/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -32,7 +32,7 @@ detail on cluster activity. ## Enabling CUDA If your machine has a GPU with CUDA installed \(Linux-only currently\), include -the `--cuda` argument to `solana-validator`. +the `--cuda` argument to `agave-validator`. When your validator is started look for the following log message to indicate that CUDA is enabled: `"[ solana::validator] CUDA is enabled"` @@ -47,7 +47,7 @@ the following commands. #### **Optimize sysctl knobs** ```bash -sudo bash -c "cat >/etc/sysctl.d/21-solana-validator.conf </etc/sysctl.d/21-agave-validator.conf <` -argument to `solana-validator`. You can specify multiple ones by repeating the argument `--known-validator --known-validator `. +argument to `agave-validator`. You can specify multiple ones by repeating the argument `--known-validator --known-validator `. This has two effects, one is when the validator is booting with `--only-known-rpc`, it will only ask that set of known nodes for downloading genesis and snapshot data. Another is that in combination with the `--halt-on-known-validators-accounts-hash-mismatch` option, it will monitor the merkle root hash of the entire accounts state of other known nodes on gossip and if the hashes produce any mismatch, @@ -277,13 +277,13 @@ account state divergence. Connect to the cluster by running: ```bash -solana-validator \ +agave-validator \ --identity ~/validator-keypair.json \ --vote-account ~/vote-account-keypair.json \ --rpc-port 8899 \ --entrypoint entrypoint.devnet.solana.com:8001 \ --limit-ledger-size \ - --log ~/solana-validator.log + --log ~/agave-validator.log ``` To force validator logging to the console add a `--log -` argument, otherwise @@ -296,7 +296,7 @@ The ledger will be placed in the `ledger/` directory by default, use the > [paper wallet seed phrase](../../cli/wallets/paper.md) > for your `--identity` and/or > `--authorized-voter` keypairs. To use these, pass the respective argument as -> `solana-validator --identity ASK ... --authorized-voter ASK ...` +> `agave-validator --identity ASK ... --authorized-voter ASK ...` > and you will be prompted to enter your seed phrases and optional passphrase. Confirm your validator is connected to the network by opening a new terminal and @@ -312,7 +312,7 @@ If your validator is connected, its public key and IP address will appear in the By default the validator will dynamically select available network ports in the 8000-10000 range, and may be overridden with `--dynamic-port-range`. For -example, `solana-validator --dynamic-port-range 11000-11020 ...` will restrict +example, `agave-validator --dynamic-port-range 11000-11020 ...` will restrict the validator to ports 11000-11020. ### Limiting ledger size to conserve disk space @@ -366,8 +366,8 @@ WantedBy=multi-user.target ``` Now create `/home/sol/bin/validator.sh` to include the desired -`solana-validator` command-line. Ensure that the 'exec' command is used to -start the validator process (i.e. "exec solana-validator ..."). This is +`agave-validator` command-line. Ensure that the 'exec' command is used to +start the validator process (i.e. "exec agave-validator ..."). 
This is important because without it, logrotate will end up killing the validator every time the logs are rotated. @@ -394,14 +394,14 @@ to be reverted and the issue reproduced before help can be provided. #### Log rotation -The validator log file, as specified by `--log ~/solana-validator.log`, can get +The validator log file, as specified by `--log ~/agave-validator.log`, can get very large over time and it's recommended that log rotation be configured. The validator will re-open its log file when it receives the `USR1` signal, which is the basic primitive that enables log rotation. If the validator is being started by a wrapper shell script, it is important to -launch the process with `exec` (`exec solana-validator ...`) when using logrotate. +launch the process with `exec` (`exec agave-validator ...`) when using logrotate. This will prevent the `USR1` signal from being sent to the script's process instead of the validator's, which will kill them both. @@ -409,13 +409,13 @@ instead of the validator's, which will kill them both. An example setup for the `logrotate`, which assumes that the validator is running as a systemd service called `sol.service` and writes a log file at -/home/sol/solana-validator.log: +/home/sol/agave-validator.log: ```bash # Setup log rotation cat > logrotate.sol </etc/sysctl.d/21-solana-validator.conf </etc/sysctl.d/21-agave-validator.conf < For more explanation on the flags used in the command, refer to the `solana-validator --help` command +> For more explanation on the flags used in the command, refer to the `agave-validator --help` command ``` #!/bin/bash -exec solana-validator \ +exec agave-validator \ --identity /home/sol/validator-keypair.json \ --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \ --known-validator dDzy5SR3AXdYWVqbDEkVFdvSPCtS9ihF5kJkHCtXoFs \ diff --git a/docs/src/validator/geyser.md b/docs/src/validator/geyser.md index 769856303767d6..efea2e18e30269 100644 --- a/docs/src/validator/geyser.md +++ b/docs/src/validator/geyser.md @@ -24,20 +24,20 @@ implementation for the PostgreSQL database. ### Important Crates: -- [`solana-geyser-plugin-interface`] — This crate defines the plugin +- [`agave-geyser-plugin-interface`] — This crate defines the plugin interfaces. - [`solana-accountsdb-plugin-postgres`] — The crate for the referential plugin implementation for the PostgreSQL database. -[`solana-geyser-plugin-interface`]: https://docs.rs/solana-geyser-plugin-interface +[`agave-geyser-plugin-interface`]: https://docs.rs/agave-geyser-plugin-interface [`solana-accountsdb-plugin-postgres`]: https://docs.rs/solana-accountsdb-plugin-postgres [`solana-sdk`]: https://docs.rs/solana-sdk [`solana-transaction-status`]: https://docs.rs/solana-transaction-status ## The Plugin Interface -The Plugin interface is declared in [`solana-geyser-plugin-interface`]. It +The Plugin interface is declared in [`agave-geyser-plugin-interface`]. It is defined by the trait `GeyserPlugin`. The plugin should implement the trait and expose a "C" function `_create_plugin` to return the pointer to this trait. For example, in the referential implementation, the following code @@ -166,7 +166,7 @@ please refer to [`solana-sdk`] and [`solana-transaction-status`] The `slot` points to the slot the transaction is executed at. For more details, please refer to the Rust documentation in -[`solana-geyser-plugin-interface`]. +[`agave-geyser-plugin-interface`]. 
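To make the loading mechanics above concrete, a short sketch of a plugin config file and the validator invocation that loads it; the `--geyser-plugin-config` flag and the `libpath` key are assumed from the existing plugin tooling rather than shown in this diff, and all paths are placeholders:

```bash
# The config file points at the shared library that exports _create_plugin.
cat > geyser-config.json <<'EOF'
{
  "libpath": "/opt/plugins/libmy_geyser_plugin.so"
}
EOF

# The validator loads the library at startup and calls _create_plugin to
# obtain the GeyserPlugin trait object described above.
agave-validator \
  --identity validator-keypair.json \
  --ledger /mnt/ledger \
  --geyser-plugin-config geyser-config.json
```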
## Example PostgreSQL Plugin diff --git a/frozen-abi/build.rs b/frozen-abi/build.rs index c9550c1c5c4f22..e17ca70cb4718b 100644 --- a/frozen-abi/build.rs +++ b/frozen-abi/build.rs @@ -17,11 +17,6 @@ fn main() { } Channel::Dev => { println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. - println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); } } } diff --git a/frozen-abi/src/lib.rs b/frozen-abi/src/lib.rs index 189535ccddaa74..4747cf64b9e50f 100644 --- a/frozen-abi/src/lib.rs +++ b/frozen-abi/src/lib.rs @@ -1,6 +1,5 @@ #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] -#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] // Allows macro expansion of `use ::solana_frozen_abi::*` to work within this crate extern crate self as solana_frozen_abi; diff --git a/geyser-plugin-interface/Cargo.toml b/geyser-plugin-interface/Cargo.toml index af99758b47d630..56f42fd4612cec 100644 --- a/geyser-plugin-interface/Cargo.toml +++ b/geyser-plugin-interface/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-geyser-plugin-interface" +name = "agave-geyser-plugin-interface" description = "The Solana Geyser plugin interface." -documentation = "https://docs.rs/solana-geyser-plugin-interface" +documentation = "https://docs.rs/agave-geyser-plugin-interface" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 037aedf8b87e89..d9a3b00f8dc4c8 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -327,7 +327,7 @@ pub trait GeyserPlugin: Any + Send + Sync + std::fmt::Debug { /// # Examples /// /// ``` - /// use solana_geyser_plugin_interface::geyser_plugin_interface::{GeyserPlugin, + /// use agave_geyser_plugin_interface::geyser_plugin_interface::{GeyserPlugin, /// GeyserPluginError, Result}; /// /// #[derive(Debug)] diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index d905248150b717..a7b02f8d593a8d 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -10,17 +10,16 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +agave-geyser-plugin-interface = { workspace = true } bs58 = { workspace = true } crossbeam-channel = { workspace = true } json5 = { workspace = true } jsonrpc-core = { workspace = true } -jsonrpc-server-utils = { workspace = true } libloading = { workspace = true } log = { workspace = true } serde_json = { workspace = true } solana-accounts-db = { workspace = true } solana-entry = { workspace = true } -solana-geyser-plugin-interface = { workspace = true } solana-ledger = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } @@ -29,6 +28,7 @@ solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-transaction-status = { workspace = true } thiserror = { workspace = true } +tokio = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/geyser-plugin-manager/src/accounts_update_notifier.rs b/geyser-plugin-manager/src/accounts_update_notifier.rs index 7c7e3370fc00eb..90ab0b7998a35c 100644 
--- a/geyser-plugin-manager/src/accounts_update_notifier.rs +++ b/geyser-plugin-manager/src/accounts_update_notifier.rs @@ -1,14 +1,14 @@ /// Module responsible for notifying plugins of account updates use { crate::geyser_plugin_manager::GeyserPluginManager, + agave_geyser_plugin_interface::geyser_plugin_interface::{ + ReplicaAccountInfoV3, ReplicaAccountInfoVersions, + }, log::*, solana_accounts_db::{ account_storage::meta::StoredAccountMeta, accounts_update_notifier_interface::AccountsUpdateNotifierInterface, }, - solana_geyser_plugin_interface::geyser_plugin_interface::{ - ReplicaAccountInfoV3, ReplicaAccountInfoVersions, - }, solana_measure::measure::Measure, solana_metrics::*, solana_sdk::{ diff --git a/geyser-plugin-manager/src/block_metadata_notifier.rs b/geyser-plugin-manager/src/block_metadata_notifier.rs index 76d203c5e0ed44..87f15f41fc0ae0 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier.rs @@ -3,10 +3,10 @@ use { block_metadata_notifier_interface::BlockMetadataNotifier, geyser_plugin_manager::GeyserPluginManager, }, - log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::{ + agave_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaBlockInfoV3, ReplicaBlockInfoVersions, }, + log::*, solana_measure::measure::Measure, solana_metrics::*, solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo}, diff --git a/geyser-plugin-manager/src/entry_notifier.rs b/geyser-plugin-manager/src/entry_notifier.rs index ea14592b615db8..da9a9698ed1540 100644 --- a/geyser-plugin-manager/src/entry_notifier.rs +++ b/geyser-plugin-manager/src/entry_notifier.rs @@ -1,11 +1,11 @@ /// Module responsible for notifying plugins about entries use { crate::geyser_plugin_manager::GeyserPluginManager, - log::*, - solana_entry::entry::EntrySummary, - solana_geyser_plugin_interface::geyser_plugin_interface::{ + agave_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaEntryInfoV2, ReplicaEntryInfoVersions, }, + log::*, + solana_entry::entry::EntrySummary, solana_ledger::entry_notifier_interface::EntryNotifier, solana_measure::measure::Measure, solana_metrics::*, diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index a15f9e1318075d..d88814d88e9470 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -1,13 +1,13 @@ use { + agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, jsonrpc_core::{ErrorCode, Result as JsonRpcResult}, - jsonrpc_server_utils::tokio::sync::oneshot::Sender as OneShotSender, libloading::Library, log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, std::{ ops::{Deref, DerefMut}, path::Path, }, + tokio::sync::oneshot::Sender as OneShotSender, }; #[derive(Debug)] @@ -442,8 +442,8 @@ mod tests { crate::geyser_plugin_manager::{ GeyserPluginManager, LoadedGeyserPlugin, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, }, + agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, libloading::Library, - solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, std::sync::{Arc, RwLock}, }; diff --git a/geyser-plugin-manager/src/slot_status_notifier.rs b/geyser-plugin-manager/src/slot_status_notifier.rs index 587abe2f79d4de..1557bb2d4d8c36 100644 --- a/geyser-plugin-manager/src/slot_status_notifier.rs +++ b/geyser-plugin-manager/src/slot_status_notifier.rs @@ -1,7 +1,7 @@ use { 
crate::geyser_plugin_manager::GeyserPluginManager, + agave_geyser_plugin_interface::geyser_plugin_interface::SlotStatus, log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::SlotStatus, solana_measure::measure::Measure, solana_metrics::*, solana_sdk::clock::Slot, diff --git a/geyser-plugin-manager/src/transaction_notifier.rs b/geyser-plugin-manager/src/transaction_notifier.rs index ab821e811047d2..b757c1202b377d 100644 --- a/geyser-plugin-manager/src/transaction_notifier.rs +++ b/geyser-plugin-manager/src/transaction_notifier.rs @@ -1,10 +1,10 @@ /// Module responsible for notifying plugins of transactions use { crate::geyser_plugin_manager::GeyserPluginManager, - log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::{ + agave_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaTransactionInfoV2, ReplicaTransactionInfoVersions, }, + log::*, solana_measure::measure::Measure, solana_metrics::*, solana_rpc::transaction_notifier_interface::TransactionNotifier, diff --git a/install/Cargo.toml b/install/Cargo.toml index 588d4315df5f35..c40a0ee6e9eee3 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-install" +name = "agave-install" description = "The solana cluster software installer" -documentation = "https://docs.rs/solana-install" +documentation = "https://docs.rs/agave-install" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/install/solana-install-init.sh b/install/agave-install-init.sh similarity index 86% rename from install/solana-install-init.sh rename to install/agave-install-init.sh index db36dc61e2ff30..cf2d1babf3c306 100755 --- a/install/solana-install-init.sh +++ b/install/agave-install-init.sh @@ -10,25 +10,25 @@ # except according to those terms. # This is just a little script that can be downloaded from the internet to -# install solana-install. It just does platform detection, downloads the installer +# install agave-install. It just does platform detection, downloads the installer # and runs it. { # this ensures the entire script is downloaded # if [ -z "$SOLANA_DOWNLOAD_ROOT" ]; then - SOLANA_DOWNLOAD_ROOT="https://github.com/solana-labs/solana/releases/download/" + SOLANA_DOWNLOAD_ROOT="https://github.com/anza-xyz/agave/releases/download/" fi -GH_LATEST_RELEASE="https://api.github.com/repos/solana-labs/solana/releases/latest" +GH_LATEST_RELEASE="https://api.github.com/repos/anza-xyz/agave/releases/latest" set -e usage() { cat 1>&2 < --pubkey + agave-install-init [FLAGS] [OPTIONS] --data_dir --pubkey FLAGS: -h, --help Prints help information @@ -81,7 +81,7 @@ main() { esac TARGET="${_cputype}-${_ostype}" - temp_dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t solana-install-init)" + temp_dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t agave-install-init)" ensure mkdir -p "$temp_dir" # Check for SOLANA_RELEASE environment variable override. Otherwise fetch @@ -101,8 +101,8 @@ main() { fi fi - download_url="$SOLANA_DOWNLOAD_ROOT/$release/solana-install-init-$TARGET" - solana_install_init="$temp_dir/solana-install-init" + download_url="$SOLANA_DOWNLOAD_ROOT/$release/agave-install-init-$TARGET" + solana_install_init="$temp_dir/agave-install-init" printf 'downloading %s installer\n' "$release" 1>&2 @@ -111,7 +111,7 @@ main() { ensure chmod u+x "$solana_install_init" if [ ! -x "$solana_install_init" ]; then printf '%s\n' "Cannot execute $solana_install_init (likely because of mounting /tmp as noexec)." 
1>&2 - printf '%s\n' "Please copy the file to a location where you can execute binaries and run ./solana-install-init." 1>&2 + printf '%s\n' "Please copy the file to a location where you can execute binaries and run ./agave-install-init." 1>&2 exit 1 fi @@ -130,7 +130,7 @@ main() { } err() { - printf 'solana-install-init: %s\n' "$1" >&2 + printf 'agave-install-init: %s\n' "$1" >&2 exit 1 } diff --git a/install/install-help.sh b/install/install-help.sh index 9fb08afa6d14c9..7604777e378677 100755 --- a/install/install-help.sh +++ b/install/install-help.sh @@ -4,11 +4,11 @@ set -e cd "$(dirname "$0")"/.. cargo="$(readlink -f "./cargo")" -"$cargo" build --package solana-install +"$cargo" build --package agave-install export PATH=$PWD/target/debug:$PATH echo "\`\`\`manpage" -solana-install --help +agave-install --help echo "\`\`\`" echo "" @@ -16,7 +16,7 @@ commands=(init info deploy update run) for x in "${commands[@]}"; do echo "\`\`\`manpage" - solana-install "${x}" --help + agave-install "${x}" --help echo "\`\`\`" echo "" done diff --git a/install/src/bin/solana-install-init.rs b/install/src/bin/agave-install-init.rs similarity index 92% rename from install/src/bin/solana-install-init.rs rename to install/src/bin/agave-install-init.rs index ec888d8f452090..84c154ac12b35e 100644 --- a/install/src/bin/solana-install-init.rs +++ b/install/src/bin/agave-install-init.rs @@ -16,7 +16,7 @@ fn press_enter() { } fn main() { - solana_install::main_init().unwrap_or_else(|err| { + agave_install::main_init().unwrap_or_else(|err| { println!("Error: {err}"); press_enter(); exit(1); diff --git a/install/src/command.rs b/install/src/command.rs index d7b92c17690bda..4ae9e7ee38cedd 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -540,7 +540,7 @@ pub fn init( explicit_release: Option, ) -> Result<(), String> { let config = { - // Write new config file only if different, so that running |solana-install init| + // Write new config file only if different, so that running |agave-install init| // repeatedly doesn't unnecessarily re-download let mut current_config = Config::load(config_file).unwrap_or_default(); current_config.current_update_manifest = None; @@ -572,7 +572,7 @@ pub fn init( fn github_release_download_url(release_semver: &str) -> String { format!( - "https://github.com/solana-labs/solana/releases/download/v{}/solana-release-{}.tar.bz2", + "https://github.com/anza-xyz/agave/releases/download/v{}/solana-release-{}.tar.bz2", release_semver, crate::build_env::TARGET ) @@ -580,7 +580,7 @@ fn github_release_download_url(release_semver: &str) -> String { fn release_channel_download_url(release_channel: &str) -> String { format!( - "https://release.solana.com/{}/solana-release-{}.tar.bz2", + "https://release.anza.xyz/{}/solana-release-{}.tar.bz2", release_channel, crate::build_env::TARGET ) @@ -588,7 +588,7 @@ fn release_channel_download_url(release_channel: &str) -> String { fn release_channel_version_url(release_channel: &str) -> String { format!( - "https://release.solana.com/{}/solana-release-{}.yml", + "https://release.anza.xyz/{}/solana-release-{}.yml", release_channel, crate::build_env::TARGET ) @@ -870,7 +870,7 @@ fn check_for_newer_github_release( prerelease_allowed: bool, ) -> Result, String> { let client = reqwest::blocking::Client::builder() - .user_agent("solana-install") + .user_agent("agave-install") .build() .map_err(|err| err.to_string())?; @@ -905,7 +905,7 @@ fn check_for_newer_github_release( while page == 1 || releases.len() == PER_PAGE { let url = 
reqwest::Url::parse_with_params( - "https://api.github.com/repos/solana-labs/solana/releases", + "https://api.github.com/repos/anza-xyz/agave/releases", &[ ("per_page", &format!("{PER_PAGE}")), ("page", &format!("{page}")), diff --git a/install/src/lib.rs b/install/src/lib.rs index 159317edd2e5a8..a28b963d65f825 100644 --- a/install/src/lib.rs +++ b/install/src/lib.rs @@ -281,7 +281,7 @@ pub fn main() -> Result<(), String> { pub fn main_init() -> Result<(), String> { solana_logger::setup(); - let matches = App::new("solana-install-init") + let matches = App::new("agave-install-init") .about("Initializes a new installation") .version(solana_version::version!()) .arg({ diff --git a/install/src/main.rs b/install/src/main.rs index c7b15aa6a67206..245f09825ddc6a 100644 --- a/install/src/main.rs +++ b/install/src/main.rs @@ -1,3 +1,3 @@ fn main() -> Result<(), String> { - solana_install::main() + agave_install::main() } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 6da42940a4ba7f..cb87a0e16f4a36 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-ledger-tool" +name = "agave-ledger-tool" description = "Blockchain, Rebuilt for Scale" -documentation = "https://docs.rs/solana-ledger-tool" +documentation = "https://docs.rs/agave-ledger-tool" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs index 453a801702f864..fed6abde2f2d08 100644 --- a/ledger-tool/src/blockstore.rs +++ b/ledger-tool/src/blockstore.rs @@ -359,7 +359,7 @@ pub fn blockstore_subcommands<'a, 'b>(hidden: bool) -> Vec> { and timestamps.", ) // This command is important in cluster restart scenarios, so do not hide it ever - // such that the subcommand will be visible as the top level of solana-ledger-tool + // such that the subcommand will be visible as the top level of agave-ledger-tool .arg( Arg::with_name("num_slots") .long("num-slots") diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 116b21527ae4d8..8a8302d7e4e94b 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -187,14 +187,14 @@ pub fn load_and_process_ledger( } let account_paths = if let Some(account_paths) = arg_matches.value_of("account_paths") { - // If this blockstore access is Primary, no other process (solana-validator) can hold + // If this blockstore access is Primary, no other process (agave-validator) can hold // Primary access. So, allow a custom accounts path without worry of wiping the accounts - // of solana-validator. + // of agave-validator. if !blockstore.is_primary_access() { // Attempt to open the Blockstore in Primary access; if successful, no other process // was holding Primary so allow things to proceed with custom accounts path. Release - // the Primary access instead of holding it to give priority to solana-validator over - // solana-ledger-tool should solana-validator start before we've finished. + // the Primary access instead of holding it to give priority to agave-validator over + // agave-ledger-tool should agave-validator start before we've finished. info!( "Checking if another process currently holding Primary access to {:?}", blockstore.ledger_path() @@ -268,19 +268,24 @@ pub fn load_and_process_ledger( }; let exit = Arc::new(AtomicBool::new(false)); - let (bank_forks, leader_schedule_cache, starting_snapshot_hashes, ..) 
= - bank_forks_utils::load_bank_forks( - genesis_config, - blockstore.as_ref(), - account_paths, - snapshot_config.as_ref(), - &process_options, - None, - None, // Maybe support this later, though - accounts_update_notifier, - exit.clone(), - ) - .map_err(LoadAndProcessLedgerError::LoadBankForks)?; + let ( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + .., + ) = bank_forks_utils::load_bank_forks( + genesis_config, + blockstore.as_ref(), + account_paths, + snapshot_config.as_ref(), + &process_options, + None, + None, // Maybe support this later, though + accounts_update_notifier, + exit.clone(), + ) + .map_err(LoadAndProcessLedgerError::LoadBankForks)?; let block_verification_method = value_t!( arg_matches, "block_verification_method", @@ -325,6 +330,7 @@ pub fn load_and_process_ledger( accounts_package_sender.clone(), accounts_package_receiver, None, + starting_snapshot_storages, exit.clone(), SnapshotConfig::new_load_only(), ); diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 17412c1801ac68..b30f90986bb9c2 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -10,7 +10,10 @@ use { use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, log::*, - solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier, + solana_accounts_db::{ + accounts_update_notifier_interface::AccountsUpdateNotifier, + starting_snapshot_storages::StartingSnapshotStorages, + }, solana_runtime::{ accounts_background_service::AbsRequestSender, bank_forks::BankForks, @@ -67,6 +70,7 @@ pub type LoadResult = result::Result< Arc>, LeaderScheduleCache, Option, + StartingSnapshotStorages, ), BankForksUtilsError, >; @@ -88,7 +92,13 @@ pub fn load( accounts_update_notifier: Option, exit: Arc, ) -> LoadResult { - let (bank_forks, leader_schedule_cache, starting_snapshot_hashes, ..) 
= load_bank_forks( + let ( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + .., + ) = load_bank_forks( genesis_config, blockstore, account_paths, @@ -111,7 +121,12 @@ pub fn load( ) .map_err(BankForksUtilsError::ProcessBlockstoreFromRoot)?; - Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) + Ok(( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + )) } #[allow(clippy::too_many_arguments)] @@ -161,7 +176,7 @@ pub fn load_bank_forks( )) } - let (bank_forks, starting_snapshot_hashes) = + let (bank_forks, starting_snapshot_hashes, starting_snapshot_storages) = if let Some((full_snapshot_archive_info, incremental_snapshot_archive_info)) = get_snapshots_to_load(snapshot_config) { @@ -173,17 +188,22 @@ pub fn load_bank_forks( ); std::fs::create_dir_all(&snapshot_config.bank_snapshots_dir) .expect("create bank snapshots dir"); - let (bank_forks, starting_snapshot_hashes) = bank_forks_from_snapshot( - full_snapshot_archive_info, - incremental_snapshot_archive_info, - genesis_config, - account_paths, - snapshot_config, - process_options, - accounts_update_notifier, - exit, - )?; - (bank_forks, Some(starting_snapshot_hashes)) + let (bank_forks, starting_snapshot_hashes, starting_snapshot_storages) = + bank_forks_from_snapshot( + full_snapshot_archive_info, + incremental_snapshot_archive_info, + genesis_config, + account_paths, + snapshot_config, + process_options, + accounts_update_notifier, + exit, + )?; + ( + bank_forks, + Some(starting_snapshot_hashes), + starting_snapshot_storages, + ) } else { info!("Processing ledger from genesis"); let bank_forks = blockstore_processor::process_blockstore_for_bank_0( @@ -202,7 +222,7 @@ pub fn load_bank_forks( .root_bank() .set_startup_verification_complete(); - (bank_forks, None) + (bank_forks, None, StartingSnapshotStorages::Genesis) }; let mut leader_schedule_cache = @@ -218,7 +238,12 @@ pub fn load_bank_forks( .for_each(|hard_fork_slot| root_bank.register_hard_fork(*hard_fork_slot)); } - Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) + Ok(( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + )) } #[allow(clippy::too_many_arguments)] @@ -231,7 +256,14 @@ fn bank_forks_from_snapshot( process_options: &ProcessOptions, accounts_update_notifier: Option, exit: Arc, -) -> Result<(Arc>, StartingSnapshotHashes), BankForksUtilsError> { +) -> Result< + ( + Arc>, + StartingSnapshotHashes, + StartingSnapshotStorages, + ), + BankForksUtilsError, +> { // Fail hard here if snapshot fails to load, don't silently continue if account_paths.is_empty() { return Err(BankForksUtilsError::AccountPathsNotPresent); @@ -257,7 +289,7 @@ fn bank_forks_from_snapshot( .unwrap_or(true), }; - let bank = if will_startup_from_snapshot_archives { + let (bank, starting_snapshot_storages) = if will_startup_from_snapshot_archives { // Given that we are going to boot from an archive, the append vecs held in the snapshot dirs for fast-boot should // be released. They will be released by the account_background_service anyway. 
But in the case of the account_paths // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from @@ -292,7 +324,7 @@ fn bank_forks_from_snapshot( .map(|archive| archive.path().display().to_string()) .unwrap_or("none".to_string()), })?; - bank + (bank, StartingSnapshotStorages::Archive) } else { let bank_snapshot = latest_bank_snapshot.ok_or_else(|| BankForksUtilsError::NoBankSnapshotDirectory { @@ -346,7 +378,8 @@ fn bank_forks_from_snapshot( // snapshot archive next time, which is safe. snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); - bank + let storages = bank.get_snapshot_storages(None); + (bank, StartingSnapshotStorages::Fastboot(storages)) }; let full_snapshot_hash = FullSnapshotHash(( @@ -365,5 +398,9 @@ fn bank_forks_from_snapshot( incremental: incremental_snapshot_hash, }; - Ok((BankForks::new_rw_arc(bank), starting_snapshot_hashes)) + Ok(( + BankForks::new_rw_arc(bank), + starting_snapshot_hashes, + starting_snapshot_storages, + )) } diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 15a5c4890e9f05..d8b4c7424cd8c1 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -213,7 +213,7 @@ impl Blockstore { delete_range_timer.stop(); let mut write_timer = Measure::start("write_batch"); - self.db.write(write_batch).inspect(|e| { + self.db.write(write_batch).inspect_err(|e| { error!( "Error: {:?} while submitting write batch for purge from_slot {} to_slot {}", e, from_slot, to_slot diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 18ba491ea34bd1..8b6b44edae61f6 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -431,7 +431,7 @@ impl Rocks { info!( "Opening Rocks with secondary (read only) access at: {secondary_path:?}. \ This secondary access could temporarily degrade other accesses, such as \ - by solana-validator" + by agave-validator" ); DB::open_cf_descriptors_as_secondary( &db_options, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 3b18ba44bf2d03..20eef0bb0e3e2d 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2321,13 +2321,13 @@ fn test_hard_fork_with_gap_in_roots() { ); // create hard-forked snapshot only for validator a, emulating the manual cluster restart - // procedure with `solana-ledger-tool create-snapshot` + // procedure with `agave-ledger-tool create-snapshot` let genesis_slot = 0; { let blockstore_a = Blockstore::open(&val_a_ledger_path).unwrap(); create_snapshot_to_hard_fork(&blockstore_a, hard_fork_slot, vec![hard_fork_slot]); - // Intentionally make solana-validator unbootable by replaying blocks from the genesis to + // Intentionally make agave-validator unbootable by replaying blocks from the genesis to // ensure the hard-forked snapshot is used always. Otherwise, we couldn't create a gap // in the ledger roots column family reliably. 
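
One hunk back, `blockstore_purge.rs` gets a one-word fix worth pausing on: `Result::inspect` runs its closure on the `Ok` value, so the old purge path logged its "Error" message on every *successful* write batch and stayed silent on real failures; `Result::inspect_err` runs on the `Err` value instead. A minimal, self-contained sketch of the difference (both are stable std `Result` methods; the error string here is made up for illustration):

```rust
fn main() {
    let write_result: Result<(), &str> = Err("RocksDB write batch failed");

    // `inspect` fires only for Ok, so this prints nothing for an error result
    // (and on success it would print with a bogus "error" message).
    let _ = write_result.inspect(|ok| eprintln!("unexpectedly ran on: {ok:?}"));

    // `inspect_err` fires only for Err, which is what the purge path needs.
    let _ = write_result.inspect_err(|e| eprintln!("error while purging: {e:?}"));
}
```
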
// There was a bug which caused the hard-forked snapshot at an unrooted slot to forget diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 5afc543b2f0032..2872af5cc426af 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -14,9 +14,9 @@ if [[ "$SOLANA_GPU_MISSING" -eq 1 ]]; then fi if [[ -n $SOLANA_CUDA ]]; then - program=$solana_validator_cuda + program=$agave_validator_cuda else - program=$solana_validator + program=$agave_validator fi no_restart=0 diff --git a/multinode-demo/common.sh b/multinode-demo/common.sh index 9ae9331cb7a11d..1643208947b643 100644 --- a/multinode-demo/common.sh +++ b/multinode-demo/common.sh @@ -40,6 +40,8 @@ else if [[ -z $program ]]; then crate="cli" program="solana" + elif [[ $program == "validator" || $program == "ledger-tool" || $program == "watchtower" || $program == "install" ]]; then + program="agave-$program" else program="solana-$program" fi @@ -63,8 +65,8 @@ fi solana_bench_tps=$(solana_program bench-tps) solana_faucet=$(solana_program faucet) -solana_validator=$(solana_program validator) -solana_validator_cuda="$solana_validator --cuda" +agave_validator=$(solana_program validator) +agave_validator_cuda="$agave_validator --cuda" solana_genesis=$(solana_program genesis) solana_gossip=$(solana_program gossip) solana_keygen=$(solana_program keygen) diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 487154101ac979..efb7a6afd56ea0 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -64,7 +64,7 @@ while [[ -n $1 ]]; do elif [[ $1 = --no-airdrop ]]; then airdrops_enabled=0 shift - # solana-validator options + # agave-validator options elif [[ $1 = --expected-genesis-hash ]]; then args+=("$1" "$2") shift 2 @@ -270,9 +270,9 @@ if [[ $maybeRequireTower = true ]]; then fi if [[ -n $SOLANA_CUDA ]]; then - program=$solana_validator_cuda + program=$agave_validator_cuda else - program=$solana_validator + program=$agave_validator fi set -e diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs index 7d4186ccb6a810..64fbedadc7acf9 100644 --- a/net-utils/src/ip_echo_server.rs +++ b/net-utils/src/ip_echo_server.rs @@ -173,7 +173,11 @@ pub fn ip_echo_server( ) -> IpEchoServer { tcp_listener.set_nonblocking(true).unwrap(); - let runtime = Runtime::new().expect("Failed to create Runtime"); + let runtime = tokio::runtime::Builder::new_multi_thread() + .thread_name("solIpEchoSrvrRt") + .enable_all() + .build() + .expect("new tokio runtime"); runtime.spawn(run_echo_server(tcp_listener, shred_version)); runtime } diff --git a/net/net.sh b/net/net.sh index a2d16cef20f417..36bc48efdb7861 100755 --- a/net/net.sh +++ b/net/net.sh @@ -122,7 +122,7 @@ Operate a configured testnet sanity/start-specific options: -F - Discard validator nodes that didn't bootup successfully - -o noInstallCheck - Skip solana-install sanity + -o noInstallCheck - Skip agave-install sanity -o rejectExtraNodes - Require the exact number of nodes stop-specific options: @@ -138,7 +138,7 @@ Operate a configured testnet --netem-cmd - Optional command argument to netem. Default is "add". Use "cleanup" to remove rules. update-specific options: - --platform linux|osx|windows - Deploy the tarball using 'solana-install deploy ...' for the + --platform linux|osx|windows - Deploy the tarball using 'agave-install deploy ...' 
for the given platform (multiple platforms may be specified) (-t option must be supplied as well) @@ -514,11 +514,11 @@ deployUpdate() { declare bootstrapLeader=${validatorIpList[0]} for updatePlatform in $updatePlatforms; do - echo "--- Deploying solana-install update: $updatePlatform" + echo "--- Deploying agave-install update: $updatePlatform" ( set -x - scripts/solana-install-update-manifest-keypair.sh "$updatePlatform" + scripts/agave-install-update-manifest-keypair.sh "$updatePlatform" timeout 30s scp "${sshOptions[@]}" \ update_manifest_keypair.json "$bootstrapLeader:solana/update_manifest_keypair.json" diff --git a/net/remote/remote-deploy-update.sh b/net/remote/remote-deploy-update.sh index dd772927c0e119..3a71cf5725123e 100755 --- a/net/remote/remote-deploy-update.sh +++ b/net/remote/remote-deploy-update.sh @@ -35,6 +35,6 @@ loadConfigFile PATH="$HOME"/.cargo/bin:"$PATH" set -x -scripts/solana-install-deploy.sh \ +scripts/agave-install-deploy.sh \ --keypair config/faucet.json \ localhost "$releaseChannel" "$updatePlatform" diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index aeb920bd50bab0..b7d224088da9f9 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -121,7 +121,7 @@ cat >> ~/solana/on-reboot < system-stats.pid if ${GPU_CUDA_OK} && [[ -e /dev/nvidia0 ]]; then - echo Selecting solana-validator-cuda + echo Selecting agave-validator-cuda export SOLANA_CUDA=1 elif ${GPU_FAIL_IF_NONE} ; then echo "Expected GPU, found none!" @@ -257,13 +257,13 @@ EOF if [[ -n "$maybeWarpSlot" ]]; then # shellcheck disable=SC2086 # Do not want to quote $maybeWarSlot - solana-ledger-tool -l config/bootstrap-validator create-snapshot 0 config/bootstrap-validator $maybeWarpSlot + agave-ledger-tool -l config/bootstrap-validator create-snapshot 0 config/bootstrap-validator $maybeWarpSlot fi - solana-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version + agave-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version if [[ -n "$maybeWaitForSupermajority" ]]; then - bankHash=$(solana-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) + bankHash=$(agave-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash" echo "$bankHash" > config/bank-hash fi diff --git a/net/remote/remote-sanity.sh b/net/remote/remote-sanity.sh index 8c36e99ffdf936..91dae4b57336fa 100755 --- a/net/remote/remote-sanity.sh +++ b/net/remote/remote-sanity.sh @@ -65,7 +65,7 @@ local|tar|skip) export USE_INSTALL=1 solana_cli=solana solana_gossip=solana-gossip - solana_install=solana-install + solana_install=agave-install ;; *) echo "Unknown deployment method: $deployMethod" @@ -122,7 +122,7 @@ else fi if $installCheck && [[ -r update_manifest_keypair.json ]]; then - echo "--- $sanityTargetIp: solana-install test" + echo "--- $sanityTargetIp: agave-install test" ( set -x diff --git a/notifier/src/lib.rs b/notifier/src/lib.rs index a369225772492c..75406d2fbdae33 100644 --- a/notifier/src/lib.rs +++ b/notifier/src/lib.rs @@ -19,7 +19,7 @@ /// /// To receive a Twilio SMS notification on failure, having a Twilio account, /// and a sending number owned by that account, -/// define environment variable before running `solana-watchtower`: +/// define environment variable before running `agave-watchtower`: /// ```bash /// export 
TWILIO_CONFIG='ACCOUNT=,TOKEN=,TO=,FROM=' /// ``` @@ -208,7 +208,7 @@ impl Notifier { NotificationType::Resolve { ref incident } => incident.clone().to_string(), }; - let data = json!({"payload":{"summary":msg,"source":"solana-watchtower","severity":"critical"},"routing_key":routing_key,"event_action":event_action,"dedup_key":dedup_key}); + let data = json!({"payload":{"summary":msg,"source":"agave-watchtower","severity":"critical"},"routing_key":routing_key,"event_action":event_action,"dedup_key":dedup_key}); let url = "https://events.pagerduty.com/v2/enqueue"; if let Err(err) = self.client.post(url).json(&data).send() { diff --git a/perf/build.rs b/perf/build.rs index 4925ee898eb612..eef20dd887bc42 100644 --- a/perf/build.rs +++ b/perf/build.rs @@ -27,11 +27,6 @@ fn main() { } Channel::Dev => { println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. - println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); } } } diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 926d1179837380..e8a691c537934f 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -67,7 +67,7 @@ pub enum LoadedProgramType { /// /// These can potentially come back alive if the environment changes. FailedVerification(ProgramRuntimeEnvironment), - /// Tombstone for programs which were explicitly undeployoed / closed. + /// Tombstone for programs which were explicitly undeployed / closed. #[default] Closed, /// Tombstone for programs which have recently been modified but the new version is not visible yet. 
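
The `perf/build.rs` hunk above mirrors the `frozen-abi` one earlier: the `RUSTC_NEEDS_PROC_MACRO_HYGIENE` cfg existed solely for the retired `rust-bpf-builder` toolchain, so both the build-script emission and the crate-root `cfg_attr` gate go away (the `address-lookup-table` hunk just below drops its gate too), while `RUSTC_WITH_SPECIALIZATION` survives. What remains is the custom-cfg pattern itself. A trimmed sketch, with the channel check reduced to a placeholder boolean (an assumption for brevity; the real scripts derive the channel from the rustc version):

```rust
// build.rs: sketch of the surviving pattern. Emit a custom cfg on channels
// that can use unstable features, and let the crate root gate
// `#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))]` on it.
fn main() {
    // Placeholder: the real code inspects `rustc --version` to detect a
    // dev or nightly channel before emitting the cfg.
    let channel_allows_unstable = true;
    if channel_allows_unstable {
        println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION");
    }
}
```
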
diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index 737ec32c8f6782..737c35e4c4b2f4 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -1,6 +1,5 @@ #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] -#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] #[cfg(not(target_os = "solana"))] pub mod processor; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2829cf27b6da6f..2ff22f3264aa41 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -63,6 +63,80 @@ dependencies = [ "zeroize", ] +[[package]] +name = "agave-geyser-plugin-interface" +version = "1.19.0" +dependencies = [ + "log", + "solana-sdk", + "solana-transaction-status", + "thiserror", +] + +[[package]] +name = "agave-validator" +version = "1.19.0" +dependencies = [ + "agave-geyser-plugin-interface", + "chrono", + "clap 2.33.3", + "console", + "core_affinity", + "crossbeam-channel", + "fd-lock", + "indicatif", + "itertools", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "lazy_static", + "libc", + "libloading", + "log", + "num_cpus", + "rand 0.8.5", + "rayon", + "serde", + "serde_json", + "serde_yaml", + "signal-hook", + "solana-accounts-db", + "solana-clap-utils", + "solana-cli-config", + "solana-core", + "solana-download-utils", + "solana-entry", + "solana-faucet", + "solana-genesis-utils", + "solana-geyser-plugin-manager", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-metrics", + "solana-net-utils", + "solana-perf", + "solana-poh", + "solana-rpc", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-runtime", + "solana-sdk", + "solana-send-transaction-service", + "solana-storage-bigtable", + "solana-streamer", + "solana-svm", + "solana-test-validator", + "solana-tpu-client", + "solana-unified-scheduler-pool", + "solana-version", + "solana-vote-program", + "symlink", + "thiserror", + "tikv-jemallocator", + "tokio", +] + [[package]] name = "ahash" version = "0.7.6" @@ -2818,9 +2892,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -5044,31 +5118,20 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "solana-geyser-plugin-interface" -version = "1.19.0" -dependencies = [ - "log", - "solana-sdk", - "solana-transaction-status", - "thiserror", -] - [[package]] name = "solana-geyser-plugin-manager" version = "1.19.0" dependencies = [ + "agave-geyser-plugin-interface", "bs58", "crossbeam-channel", "json5", "jsonrpc-core", - "jsonrpc-server-utils", "libloading", "log", "serde_json", "solana-accounts-db", "solana-entry", - "solana-geyser-plugin-interface", "solana-ledger", "solana-measure", "solana-metrics", @@ -5077,6 +5140,7 @@ dependencies = [ "solana-sdk", "solana-transaction-status", "thiserror", + "tokio", ] [[package]] @@ -6055,11 +6119,11 @@ dependencies = [ name = "solana-sbf-rust-simulation" version = "1.19.0" dependencies = [ + "agave-validator", "solana-logger", "solana-program", "solana-program-test", "solana-sdk", - "solana-validator", ] [[package]] @@ -6443,7 +6507,9 @@ dependencies = [ name = "solana-unified-scheduler-logic" version = 
"1.19.0" dependencies = [ + "assert_matches", "solana-sdk", + "static_assertions", ] [[package]] @@ -6452,6 +6518,7 @@ version = "1.19.0" dependencies = [ "assert_matches", "crossbeam-channel", + "dashmap", "derivative", "log", "solana-ledger", @@ -6462,70 +6529,6 @@ dependencies = [ "solana-vote", ] -[[package]] -name = "solana-validator" -version = "1.19.0" -dependencies = [ - "chrono", - "clap 2.33.3", - "console", - "core_affinity", - "crossbeam-channel", - "fd-lock", - "indicatif", - "itertools", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-ipc-server", - "jsonrpc-server-utils", - "lazy_static", - "libc", - "libloading", - "log", - "num_cpus", - "rand 0.8.5", - "rayon", - "serde", - "serde_json", - "serde_yaml", - "signal-hook", - "solana-accounts-db", - "solana-clap-utils", - "solana-cli-config", - "solana-core", - "solana-download-utils", - "solana-entry", - "solana-faucet", - "solana-genesis-utils", - "solana-geyser-plugin-interface", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-ledger", - "solana-logger", - "solana-metrics", - "solana-net-utils", - "solana-perf", - "solana-poh", - "solana-rpc", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-runtime", - "solana-sdk", - "solana-send-transaction-service", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - "solana-test-validator", - "solana-tpu-client", - "solana-unified-scheduler-pool", - "solana-version", - "solana-vote-program", - "symlink", - "thiserror", - "tikv-jemallocator", -] - [[package]] name = "solana-version" version = "1.19.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 8a99a0f005471a..dee6a947b1965d 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -46,7 +46,7 @@ solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.19.0", default- solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.19.0" } solana-sdk = { path = "../../sdk", version = "=1.19.0" } solana-transaction-status = { path = "../../transaction-status", version = "=1.19.0" } -solana-validator = { path = "../../validator", version = "=1.19.0" } +agave-validator = { path = "../../validator", version = "=1.19.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.19.0" } solana-svm = { path = "../../svm", version = "=1.19.0" } solana_rbpf = "=0.8.0" diff --git a/programs/sbf/rust/simulation/Cargo.toml b/programs/sbf/rust/simulation/Cargo.toml index 7091ef9d5ade0c..e9728e5916b801 100644 --- a/programs/sbf/rust/simulation/Cargo.toml +++ b/programs/sbf/rust/simulation/Cargo.toml @@ -16,10 +16,10 @@ test-bpf = [] solana-program = { workspace = true } [dev-dependencies] +agave-validator = { workspace = true } solana-logger = { workspace = true } solana-program-test = { workspace = true } solana-sdk = { workspace = true } -solana-validator = { workspace = true } [lib] crate-type = ["cdylib", "lib"] diff --git a/programs/sbf/rust/simulation/tests/validator.rs b/programs/sbf/rust/simulation/tests/validator.rs index 3044ad9a642629..17de51e665e3ec 100644 --- a/programs/sbf/rust/simulation/tests/validator.rs +++ b/programs/sbf/rust/simulation/tests/validator.rs @@ -1,13 +1,13 @@ #![cfg(feature = "test-bpf")] use { + agave_validator::test_validator::*, solana_program::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, sysvar, }, solana_sdk::{signature::Signer, transaction::Transaction}, - solana_validator::test_validator::*, }; #[test] diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs 
b/pubsub-client/src/nonblocking/pubsub_client.rs index 408df60454e4e1..b79e91f681b97f 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -33,7 +33,7 @@ //! By default the [`block_subscribe`] and [`vote_subscribe`] events are //! disabled on RPC nodes. They can be enabled by passing //! `--rpc-pubsub-enable-block-subscription` and -//! `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. When these +//! `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. When these //! methods are disabled, the RPC server will return a "Method not found" error //! message. //! @@ -381,7 +381,7 @@ impl PubsubClient { /// Receives messages of type [`RpcBlockUpdate`] when a block is confirmed or finalized. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-block-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-block-subscription` to `agave-validator`. /// /// # RPC Reference /// @@ -452,7 +452,7 @@ impl PubsubClient { /// votes are observed prior to confirmation and may never be confirmed. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. /// /// # RPC Reference /// diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index e1a2dd34546528..70769619db1f4d 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -32,7 +32,7 @@ //! By default the [`block_subscribe`] and [`vote_subscribe`] events are //! disabled on RPC nodes. They can be enabled by passing //! `--rpc-pubsub-enable-block-subscription` and -//! `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. When these +//! `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. When these //! methods are disabled, the RPC server will return a "Method not found" error //! message. //! @@ -416,7 +416,7 @@ impl PubsubClient { /// Receives messages of type [`RpcBlockUpdate`] when a block is confirmed or finalized. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-block-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-block-subscription` to `agave-validator`. /// /// # RPC Reference /// @@ -578,7 +578,7 @@ impl PubsubClient { /// votes are observed prior to confirmation and may never be confirmed. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. /// /// # RPC Reference /// diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs index f057980c79fe06..8c8e8e5338993f 100644 --- a/quic-client/src/quic_client.rs +++ b/quic-client/src/quic_client.rs @@ -69,7 +69,7 @@ lazy_static! 
{ static ref ASYNC_TASK_SEMAPHORE: AsyncTaskSemaphore = AsyncTaskSemaphore::new(MAX_OUTSTANDING_TASK); static ref RUNTIME: Runtime = tokio::runtime::Builder::new_multi_thread() - .thread_name("quic-client") + .thread_name("solQuicClientRt") .enable_all() .build() .unwrap(); diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index 658ee6a57d672d..0237fc21d098dc 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -72,6 +72,7 @@ mod tests { thread: t, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", s.try_clone().unwrap(), &keypair, @@ -212,6 +213,7 @@ mod tests { thread: request_recv_thread, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", request_recv_socket.try_clone().unwrap(), &keypair, @@ -239,6 +241,7 @@ mod tests { thread: response_recv_thread, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index e7db982026f82a..9e243f0836aa0f 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -1,6 +1,6 @@ fn main() { println!( - r##"rbpf-cli is replaced by solana-ledger-tool program run subcommand. -Please, use 'solana-ledger-tool program run --help' for more information."## + r##"rbpf-cli is replaced by agave-ledger-tool program run subcommand. +Please, use 'agave-ledger-tool program run --help' for more information."## ); } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index caeb0953109fbb..41b26e5fa1e2c2 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1786,16 +1786,10 @@ impl JsonRpcRequestProcessor { } else { StakeActivationState::Inactive }; - let inactive_stake = match stake_activation_state { - StakeActivationState::Activating => activating, - StakeActivationState::Active => 0, - StakeActivationState::Deactivating => stake_account - .lamports() - .saturating_sub(effective + rent_exempt_reserve), - StakeActivationState::Inactive => { - stake_account.lamports().saturating_sub(rent_exempt_reserve) - } - }; + let inactive_stake = stake_account + .lamports() + .saturating_sub(effective) + .saturating_sub(rent_exempt_reserve); Ok(RpcStakeActivation { state: stake_activation_state, active: effective, @@ -2561,7 +2555,7 @@ pub mod rpc_minimal { #[rpc(meta, name = "getVersion")] fn get_version(&self, meta: Self::Metadata) -> Result; - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getVoteAccounts")] fn get_vote_accounts( @@ -2570,7 +2564,7 @@ pub mod rpc_minimal { config: Option, ) -> Result; - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getLeaderSchedule")] fn get_leader_schedule( @@ -2696,7 +2690,7 @@ pub mod rpc_minimal { }) } - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_vote_accounts( &self, @@ -2707,7 +2701,7 @@ pub mod rpc_minimal { meta.get_vote_accounts(config) } - // TODO: Refactor `solana-validator 
wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_leader_schedule( &self, @@ -2991,14 +2985,6 @@ pub mod rpc_accounts { block: Slot, ) -> Result>; - #[rpc(meta, name = "getStakeActivation")] - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result; - // SPL Token-specific RPC endpoints // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for // program details @@ -3071,20 +3057,6 @@ pub mod rpc_accounts { Ok(meta.get_block_commitment(block)) } - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result { - debug!( - "get_stake_activation rpc request received: {:?}", - pubkey_str - ); - let pubkey = verify_pubkey(&pubkey_str)?; - meta.get_stake_activation(&pubkey, config) - } - fn get_token_account_balance( &self, meta: Self::Metadata, @@ -4091,7 +4063,43 @@ fn rpc_perf_sample_from_perf_sample(slot: u64, sample: PerfSample) -> RpcPerfSam } } -// RPC methods deprecated in v1.8 +pub mod rpc_deprecated_v1_18 { + use super::*; + #[rpc] + pub trait DeprecatedV1_18 { + type Metadata; + + // DEPRECATED + #[rpc(meta, name = "getStakeActivation")] + fn get_stake_activation( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result; + } + + pub struct DeprecatedV1_18Impl; + impl DeprecatedV1_18 for DeprecatedV1_18Impl { + type Metadata = JsonRpcRequestProcessor; + + fn get_stake_activation( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result { + debug!( + "get_stake_activation rpc request received: {:?}", + pubkey_str + ); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_stake_activation(&pubkey, config) + } + } +} + +// RPC methods deprecated in v1.9 pub mod rpc_deprecated_v1_9 { #![allow(deprecated)] use super::*; diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 8597394f102325..d8791ab6c3bf6b 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -6,8 +6,9 @@ use { max_slots::MaxSlots, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, rpc::{ - rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_deprecated_v1_7::*, - rpc_deprecated_v1_9::*, rpc_full::*, rpc_minimal::*, rpc_obsolete_v1_7::*, *, + rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_deprecated_v1_18::*, + rpc_deprecated_v1_7::*, rpc_deprecated_v1_9::*, rpc_full::*, rpc_minimal::*, + rpc_obsolete_v1_7::*, *, }, rpc_cache::LargestAccountsCache, rpc_health::*, @@ -510,6 +511,7 @@ impl JsonRpcService { io.extend_with(rpc_full::FullImpl.to_delegate()); io.extend_with(rpc_deprecated_v1_7::DeprecatedV1_7Impl.to_delegate()); io.extend_with(rpc_deprecated_v1_9::DeprecatedV1_9Impl.to_delegate()); + io.extend_with(rpc_deprecated_v1_18::DeprecatedV1_18Impl.to_delegate()); } if obsolete_v1_7_api { io.extend_with(rpc_obsolete_v1_7::ObsoleteV1_7Impl.to_delegate()); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 3ea316f857a2bc..04c72d2f138289 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1483,7 +1483,10 @@ impl Bank { let epoch = self.epoch(); let slot = self.slot(); let (thread_pool, thread_pool_time) = measure!( - ThreadPoolBuilder::new().build().unwrap(), + ThreadPoolBuilder::new() + .thread_name(|i| format!("solBnkNewEpch{i:02}")) + .build() + .expect("new rayon threadpool"), "thread_pool_creation", ); @@ 
-4248,6 +4251,9 @@ impl Bank { transaction: &'a SanitizedTransaction, ) -> TransactionBatch<'_, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + // Note that switching this to .get_account_locks_unchecked() is unacceptable currently. + // The unified scheduler relies on the checks enforced here. + // See a comment in SchedulingStateMachine::create_task(). let lock_result = transaction .get_account_locks(tx_account_lock_limit) .map(|_| ()); @@ -4376,13 +4382,11 @@ impl Bank { account_overrides } - pub fn unlock_accounts(&self, batch: &mut TransactionBatch) { - if batch.needs_unlock() { - batch.set_needs_unlock(false); - self.rc - .accounts - .unlock_accounts(batch.sanitized_transactions().iter(), batch.lock_results()) - } + pub fn unlock_accounts<'a>( + &self, + txs_and_results: impl Iterator)>, + ) { + self.rc.accounts.unlock_accounts(txs_and_results) } pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) { diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 0c6116274b1cb1..30cbbd4afd5970 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -418,9 +418,10 @@ impl SnapshotStorageRebuilder { /// Builds thread pool to rebuild with fn build_thread_pool(&self) -> ThreadPool { ThreadPoolBuilder::default() + .thread_name(|i| format!("solRbuildSnap{i:02}")) .num_threads(self.num_threads) .build() - .unwrap() + .expect("new rayon threadpool") } } diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs index 66711fd5a1acd5..ecec27e02e93aa 100644 --- a/runtime/src/transaction_batch.rs +++ b/runtime/src/transaction_batch.rs @@ -46,12 +46,52 @@ impl<'a, 'b> TransactionBatch<'a, 'b> { pub fn needs_unlock(&self) -> bool { self.needs_unlock } + + /// For every error result, if the corresponding transaction is + /// still locked, unlock the transaction and then record the new error. + pub fn unlock_failures(&mut self, transaction_results: Vec>) { + assert_eq!(self.lock_results.len(), transaction_results.len()); + // Shouldn't happen but if a batch was marked as not needing an unlock, + // don't unlock failures. + if !self.needs_unlock() { + return; + } + + let txs_and_results = transaction_results + .iter() + .enumerate() + .inspect(|(index, result)| { + // It's not valid to update a previously recorded lock error to + // become an "ok" result because this could lead to serious + // account lock violations where accounts are later unlocked + // when they were not currently locked. + assert!(!(result.is_ok() && self.lock_results[*index].is_err())) + }) + .filter(|(index, result)| result.is_err() && self.lock_results[*index].is_ok()) + .map(|(index, _)| (&self.sanitized_txs[index], &self.lock_results[index])); + + // Unlock the accounts for all transactions which will be updated to an + // lock error below. + self.bank.unlock_accounts(txs_and_results); + + // Record all new errors by overwriting lock results. Note that it's + // not valid to update from err -> ok and the assertion above enforces + // that validity constraint. + self.lock_results = transaction_results; + } } // Unlock all locked accounts in destructor. 
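
Before the `Drop` impl that follows, the core of `unlock_failures` above is its index selection: an entry is unlocked only when it was locked successfully (`Ok` in `lock_results`) and the new result demotes it to an error; `Err` to `Ok` transitions are asserted away. A standalone sketch of just that rule, using plain string errors instead of `TransactionError` (function and variable names here are illustrative, not the real types):

```rust
fn indexes_to_unlock<E>(
    lock_results: &[Result<(), E>],
    new_results: &[Result<(), E>],
) -> Vec<usize> {
    assert_eq!(lock_results.len(), new_results.len());
    new_results
        .iter()
        .enumerate()
        // Only Ok -> Err transitions release a lock; entries that never
        // held a lock (Err in lock_results) are skipped.
        .filter(|(i, new)| new.is_err() && lock_results[*i].is_ok())
        .map(|(i, _)| i)
        .collect()
}

fn main() {
    let locks: Vec<Result<(), &str>> = vec![Ok(()), Err("AccountInUse"), Ok(())];
    let qos: Vec<Result<(), &str>> = vec![
        Ok(()),
        Err("AccountInUse"),
        Err("WouldExceedMaxBlockCostLimit"),
    ];
    // Index 1 was never locked, so only index 2 (Ok -> Err) is unlocked,
    // mirroring the `test_unlock_failures` case in the hunk below.
    assert_eq!(indexes_to_unlock(&locks, &qos), vec![2]);
}
```
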
impl<'a, 'b> Drop for TransactionBatch<'a, 'b> { fn drop(&mut self) { - self.bank.unlock_accounts(self) + if self.needs_unlock() { + self.set_needs_unlock(false); + self.bank.unlock_accounts( + self.sanitized_transactions() + .iter() + .zip(self.lock_results()), + ) + } } } @@ -60,12 +100,12 @@ mod tests { use { super::*, crate::genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - solana_sdk::{signature::Keypair, system_transaction}, + solana_sdk::{signature::Keypair, system_transaction, transaction::TransactionError}, }; #[test] fn test_transaction_batch() { - let (bank, txs) = setup(); + let (bank, txs) = setup(false); // Test getting locked accounts let batch = bank.prepare_sanitized_batch(&txs); @@ -87,7 +127,7 @@ mod tests { #[test] fn test_simulation_batch() { - let (bank, txs) = setup(); + let (bank, txs) = setup(false); // Prepare batch without locks let batch = bank.prepare_unlocked_batch_from_single_tx(&txs[0]); @@ -102,7 +142,37 @@ mod tests { assert!(batch3.lock_results().iter().all(|x| x.is_ok())); } - fn setup() -> (Bank, Vec) { + #[test] + fn test_unlock_failures() { + let (bank, txs) = setup(true); + + // Test getting locked accounts + let mut batch = bank.prepare_sanitized_batch(&txs); + assert_eq!( + batch.lock_results, + vec![Ok(()), Err(TransactionError::AccountInUse), Ok(())] + ); + + let qos_results = vec![ + Ok(()), + Err(TransactionError::AccountInUse), + Err(TransactionError::WouldExceedMaxBlockCostLimit), + ]; + batch.unlock_failures(qos_results.clone()); + assert_eq!(batch.lock_results, qos_results); + + // Dropping the batch should unlock remaining locked transactions + drop(batch); + + // The next batch should be able to lock all but the conflicting tx + let batch2 = bank.prepare_sanitized_batch(&txs); + assert_eq!( + batch2.lock_results, + vec![Ok(()), Err(TransactionError::AccountInUse), Ok(())] + ); + } + + fn setup(insert_conflicting_tx: bool) -> (Bank, Vec) { let dummy_leader_pubkey = solana_sdk::pubkey::new_rand(); let GenesisConfigInfo { genesis_config, @@ -115,20 +185,17 @@ mod tests { let keypair2 = Keypair::new(); let pubkey2 = solana_sdk::pubkey::new_rand(); - let txs = vec![ - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &pubkey, - 1, - genesis_config.hash(), - )), - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &keypair2, - &pubkey2, - 1, - genesis_config.hash(), - )), - ]; + let mut txs = vec![SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()), + )]; + if insert_conflicting_tx { + txs.push(SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()), + )); + } + txs.push(SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()), + )); (bank, txs) } diff --git a/scripts/solana-install-deploy.sh b/scripts/agave-install-deploy.sh similarity index 90% rename from scripts/solana-install-deploy.sh rename to scripts/agave-install-deploy.sh index ea77ca34bc9ea3..a8f8eeb65b3857 100755 --- a/scripts/solana-install-deploy.sh +++ b/scripts/agave-install-deploy.sh @@ -26,7 +26,7 @@ if [[ -z $URL || -z $TAG ]]; then fi if [[ ! 
-f update_manifest_keypair.json ]]; then - "$SOLANA_ROOT"/scripts/solana-install-update-manifest-keypair.sh "$OS" + "$SOLANA_ROOT"/scripts/agave-install-update-manifest-keypair.sh "$OS" fi case "$OS" in @@ -76,4 +76,4 @@ if [[ $balance = "0 lamports" ]]; then fi # shellcheck disable=SC2086 # Don't want to double quote $maybeKeypair -solana-install deploy $maybeKeypair --url "$URL" "$DOWNLOAD_URL" update_manifest_keypair.json +agave-install deploy $maybeKeypair --url "$URL" "$DOWNLOAD_URL" update_manifest_keypair.json diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index cdfa0bae10addb..7d75ccc08ab0e2 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -8,6 +8,7 @@ cd "$(dirname "$0")"/.. source ci/_ source scripts/patch-crates.sh source scripts/read-cargo-variable.sh +source scripts/patch-spl-crates-for-anchor.sh anchor_version=$1 solana_ver=$(readCargoVariable version Cargo.toml) @@ -43,6 +44,14 @@ EOF # NOTE This isn't run in a subshell to get $anchor_dir and $anchor_ver anchor() { set -x + + rm -rf spl + git clone https://github.com/solana-labs/solana-program-library.git spl + cd spl || exit 1 + spl_dir=$PWD + get_spl_versions "$spl_dir" + cd .. + rm -rf anchor git clone https://github.com/coral-xyz/anchor.git cd anchor || exit 1 @@ -57,9 +66,13 @@ anchor() { update_solana_dependencies . "$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" + patch_spl_crates . Cargo.toml "$spl_dir" $cargo test - (cd spl && $cargo_build_sbf --features dex metadata stake) + # serum_dex and mpl-token-metadata are using caret versions of solana and SPL dependencies + # rather pull and patch those as well, ignore for now + # (cd spl && $cargo_build_sbf --features dex metadata stake) + (cd spl && $cargo_build_sbf --features stake) (cd client && $cargo test --all-features) anchor_dir=$PWD diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 549aa15550b0eb..029b1fbf27943d 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -91,8 +91,8 @@ if [[ $CI_OS_NAME = windows ]]; then cargo-test-bpf cargo-test-sbf solana - solana-install - solana-install-init + agave-install + agave-install-init solana-keygen solana-stake-accounts solana-test-validator @@ -106,12 +106,12 @@ else solana-bench-tps solana-faucet solana-gossip - solana-install + agave-install solana-keygen - solana-ledger-tool + agave-ledger-tool solana-log-analyzer solana-net-shaper - solana-validator + agave-validator rbpf-cli ) @@ -123,11 +123,11 @@ else cargo-test-bpf cargo-test-sbf solana-dos - solana-install-init + agave-install-init solana-stake-accounts solana-test-validator solana-tokens - solana-watchtower + agave-watchtower ) fi diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 8719af96a212e4..6a4f798c633e26 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -31,7 +31,7 @@ source ci/rust-version.sh nightly declare tainted_packages=( solana-accounts-bench solana-banking-bench - solana-ledger-tool + agave-ledger-tool ) # convert to comma separeted (ref: https://stackoverflow.com/a/53839433) diff --git a/scripts/patch-spl-crates-for-anchor.sh b/scripts/patch-spl-crates-for-anchor.sh new file mode 100644 index 00000000000000..93ea67b8fceb20 --- /dev/null +++ b/scripts/patch-spl-crates-for-anchor.sh @@ -0,0 +1,55 @@ +spl_memo_version= +spl_token_version= +spl_token_2022_version= 
+spl_tlv_account_resolution_version=
+spl_transfer_hook_interface_version=
+
+get_spl_versions() {
+  declare spl_dir="$1"
+  spl_memo_version=$(readCargoVariable version "$spl_dir/memo/program/Cargo.toml")
+  spl_token_version=$(readCargoVariable version "$spl_dir/token/program/Cargo.toml")
+  spl_token_2022_version=$(readCargoVariable version "$spl_dir/token/program-2022/Cargo.toml" | head -c1) # only use the major version for convenience
+  spl_tlv_account_resolution_version=$(readCargoVariable version "$spl_dir/libraries/tlv-account-resolution/Cargo.toml")
+  spl_transfer_hook_interface_version=$(readCargoVariable version "$spl_dir/token/transfer-hook/interface/Cargo.toml")
+}
+
+patch_spl_crates() {
+  declare project_root="$1"
+  declare Cargo_toml="$2"
+  declare spl_dir="$3"
+  update_spl_dependencies "$project_root"
+  patch_crates_io "$Cargo_toml" "$spl_dir"
+}
+
+update_spl_dependencies() {
+  declare project_root="$1"
+  declare tomls=()
+  while IFS='' read -r line; do tomls+=("$line"); done < <(find "$project_root" -name Cargo.toml)
+
+  sed -i -e "s#\(spl-memo = \"\)[^\"]*\(\"\)#\1$spl_memo_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-memo = { version = \"\)[^\"]*\(\"\)#\1$spl_memo_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-token = \"\)[^\"]*\(\"\)#\1$spl_token_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-token = { version = \"\)[^\"]*\(\"\)#\1$spl_token_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-token-2022 = \"\).*\(\"\)#\1$spl_token_2022_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-token-2022 = { version = \"\)[^\"]*\(\"\)#\1$spl_token_2022_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-tlv-account-resolution = \"\)[^\"]*\(\"\)#\1=$spl_tlv_account_resolution_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-tlv-account-resolution = { version = \"\)[^\"]*\(\"\)#\1=$spl_tlv_account_resolution_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-transfer-hook-interface = \"\)[^\"]*\(\"\)#\1=$spl_transfer_hook_interface_version\2#g" "${tomls[@]}" || return $?
+  sed -i -e "s#\(spl-transfer-hook-interface = { version = \"\)[^\"]*\(\"\)#\1=$spl_transfer_hook_interface_version\2#g" "${tomls[@]}" || return $?
+
+  # patch ahash. This is super brittle; putting it here for convenience, since we are already iterating through the tomls
+  ahash_minor_version="0.8"
+  sed -i -e "s#\(ahash = \"\)[^\"]*\(\"\)#\1$ahash_minor_version\2#g" "${tomls[@]}" || return $?
+}
+
+patch_crates_io() {
+  declare Cargo_toml="$1"
+  declare spl_dir="$2"
+  cat >> "$Cargo_toml" <
 Runtime {
+fn rt(name: String) -> Runtime {
     tokio::runtime::Builder::new_multi_thread()
-        .thread_name("quic-server")
+        .thread_name(name)
         .enable_all()
         .build()
         .unwrap()
@@ -431,7 +431,8 @@ impl StreamStats {
 #[allow(clippy::too_many_arguments)]
 pub fn spawn_server(
-    name: &'static str,
+    thread_name: &'static str,
+    metrics_name: &'static str,
     sock: UdpSocket,
     keypair: &Keypair,
     packet_sender: Sender<PacketBatch>,
@@ -443,11 +444,11 @@ pub fn spawn_server(
     wait_for_chunk_timeout: Duration,
     coalesce: Duration,
 ) -> Result<SpawnServerResult> {
-    let runtime = rt();
+    let runtime = rt(format!("{thread_name}Rt"));
     let (endpoint, _stats, task) = {
         let _guard = runtime.enter();
         crate::nonblocking::quic::spawn_server(
-            name,
+            metrics_name,
             sock,
             keypair,
             packet_sender,
@@ -461,7 +462,7 @@ pub fn spawn_server(
         )
     }?;
     let handle = thread::Builder::new()
-        .name("solQuicServer".into())
+        .name(thread_name.into())
         .spawn(move || {
             if let Err(e) = runtime.block_on(task) {
                 warn!("error from runtime.block_on: {:?}", e);
@@ -505,6 +506,7 @@ mod test {
             thread: t,
             key_updater: _,
         } = spawn_server(
+            "solQuicTest",
             "quic_streamer_test",
             s,
             &keypair,
@@ -532,7 +534,7 @@
     fn test_quic_timeout() {
         solana_logger::setup();
         let (t, exit, receiver, server_address) = setup_quic_server();
-        let runtime = rt();
+        let runtime = rt("solQuicTestRt".to_string());
         runtime.block_on(check_timeout(receiver, server_address));
         exit.store(true, Ordering::Relaxed);
         t.join().unwrap();
@@ -543,7 +545,7 @@
         solana_logger::setup();
         let (t, exit, _receiver, server_address) = setup_quic_server();

-        let runtime = rt();
+        let runtime = rt("solQuicTestRt".to_string());
         runtime.block_on(check_block_multiple_connections(server_address));
         exit.store(true, Ordering::Relaxed);
         t.join().unwrap();
@@ -563,6 +565,7 @@
             thread: t,
             key_updater: _,
         } = spawn_server(
+            "solQuicTest",
             "quic_streamer_test",
             s,
             &keypair,
@@ -577,7 +580,7 @@
         )
         .unwrap();

-        let runtime = rt();
+        let runtime = rt("solQuicTestRt".to_string());
         runtime.block_on(check_multiple_streams(receiver, server_address));
         exit.store(true, Ordering::Relaxed);
         t.join().unwrap();
@@ -588,7 +591,7 @@
         solana_logger::setup();
         let (t, exit, receiver, server_address) = setup_quic_server();

-        let runtime = rt();
+        let runtime = rt("solQuicTestRt".to_string());
         runtime.block_on(check_multiple_writes(receiver, server_address, None));
         exit.store(true, Ordering::Relaxed);
         t.join().unwrap();
@@ -608,6 +611,7 @@
             thread: t,
             key_updater: _,
         } = spawn_server(
+            "solQuicTest",
             "quic_streamer_test",
             s,
             &keypair,
@@ -622,7 +626,7 @@
         )
         .unwrap();

-        let runtime = rt();
+        let runtime = rt("solQuicTestRt".to_string());
         runtime.block_on(check_unstaked_node_connect_failure(server_address));
         exit.store(true, Ordering::Relaxed);
         t.join().unwrap();
diff --git a/system-test/abi-testcases/mixed-validator-test.sh b/system-test/abi-testcases/mixed-validator-test.sh
index 8ab673b26a3d21..c0400560dc519e 100755
--- a/system-test/abi-testcases/mixed-validator-test.sh
+++ b/system-test/abi-testcases/mixed-validator-test.sh
@@ -30,14 +30,14 @@ solanaInstallGlobalOpts=(
 bootstrapInstall() {
   declare v=$1
   if [[ ! -h $solanaInstallDataDir/active_release ]]; then
-    sh "$SOLANA_ROOT"/install/solana-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}"
+    sh "$SOLANA_ROOT"/install/agave-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}"
   fi
   export PATH="$solanaInstallDataDir/active_release/bin/:$PATH"
 }

 bootstrapInstall "$baselineVersion"

 for v in "${otherVersions[@]}"; do
-  solana-install-init "${solanaInstallGlobalOpts[@]}" "$v"
+  agave-install-init "${solanaInstallGlobalOpts[@]}" "$v"
   solana -V
 done
@@ -113,7 +113,7 @@ for v in "${otherVersions[@]}"; do
   (
     set -x
     tmux new-window -t abi -n "$v" " \
-      $SOLANA_BIN/solana-validator \
+      $SOLANA_BIN/agave-validator \
       --ledger $ledger \
       --no-snapshot-fetch \
       --entrypoint 127.0.0.1:8001 \
diff --git a/system-test/stability-testcases/gossip-dos-test.sh b/system-test/stability-testcases/gossip-dos-test.sh
index f8afade75dc847..68c3c540d5948c 100755
--- a/system-test/stability-testcases/gossip-dos-test.sh
+++ b/system-test/stability-testcases/gossip-dos-test.sh
@@ -19,14 +19,14 @@ solanaInstallGlobalOpts=(
 bootstrapInstall() {
   declare v=$1
   if [[ ! -h $solanaInstallDataDir/active_release ]]; then
-    sh "$SOLANA_ROOT"/install/solana-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}"
+    sh "$SOLANA_ROOT"/install/agave-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}"
   fi
   export PATH="$solanaInstallDataDir/active_release/bin/:$PATH"
 }

 bootstrapInstall "edge"

-solana-install-init --version
-solana-install-init edge
+agave-install-init --version
+agave-install-init edge
 solana-gossip --version
 solana-dos --version
diff --git a/unified-scheduler-logic/Cargo.toml b/unified-scheduler-logic/Cargo.toml
index b2e80c79c7a08f..b05cec41a7c862 100644
--- a/unified-scheduler-logic/Cargo.toml
+++ b/unified-scheduler-logic/Cargo.toml
@@ -10,4 +10,6 @@ license = { workspace = true }
 edition = { workspace = true }

 [dependencies]
+assert_matches = { workspace = true }
 solana-sdk = { workspace = true }
+static_assertions = { workspace = true }
diff --git a/unified-scheduler-logic/src/lib.rs b/unified-scheduler-logic/src/lib.rs
index 997c6c1745a7c9..2c155cc8534434 100644
--- a/unified-scheduler-logic/src/lib.rs
+++ b/unified-scheduler-logic/src/lib.rs
@@ -1,15 +1,325 @@
-use solana_sdk::transaction::SanitizedTransaction;
+#![allow(rustdoc::private_intra_doc_links)]
+//! The task (transaction) scheduling code for the unified scheduler
+//!
+//! ### High-level API and design
+//!
+//! The most important type is [`SchedulingStateMachine`]. It takes new tasks (= transactions) and
+//! may return them, if runnable, via
+//! [`::schedule_task()`](SchedulingStateMachine::schedule_task) while maintaining the account
+//! readonly/writable lock rules. Those returned runnable tasks are guaranteed to be safe to
+//! execute in parallel. Lastly, `SchedulingStateMachine` should be notified about the completion
+//! of the execution via [`::deschedule_task()`](SchedulingStateMachine::deschedule_task), so that
+//! conflicting tasks can be returned from
+//! [`::schedule_unblocked_task()`](SchedulingStateMachine::schedule_unblocked_task) as
+//! newly-unblocked runnable ones.
+//!
+//! The design principle of this crate (`solana-unified-scheduler-logic`) is simplicity for the
+//! separation of concerns. It is interacted with by `solana-unified-scheduler-pool` through only
+//! a few of its public APIs. This crate doesn't know about banks, slots, solana-runtime, threads,
+//! or crossbeam-channel at all. Because of this, it's deterministic, easy to unit-test, and its
+//! perf footprint is well understood. It really focuses on its single job: sorting transactions
+//! into executable order.
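+//!
+//! As a minimal usage sketch (hypothetical driver code, condensed from this crate's own unit
+//! tests below; `transaction` is some `SanitizedTransaction`, and error handling and actual task
+//! execution are elided):
+//!
+//! ```ignore
+//! use solana_unified_scheduler_logic::{SchedulingStateMachine, UsageQueue};
+//!
+//! // Safety: must be called at most once per thread; see TokenCell below for details.
+//! let mut state_machine = unsafe {
+//!     SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+//! };
+//! let task = SchedulingStateMachine::create_task(transaction, 0, &mut |_| UsageQueue::default());
+//! if let Some(task) = state_machine.schedule_task(task) {
+//!     // ... execute the runnable task, then:
+//!     state_machine.deschedule_task(&task);
+//! }
+//! // descheduling may have unblocked previously conflicting tasks:
+//! while let Some(task) = state_machine.schedule_unblocked_task() {
+//!     // ... execute it, then:
+//!     state_machine.deschedule_task(&task);
+//! }
+//! ```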
+//!
+//! ### Algorithm
+//!
+//! The algorithm can be said to be based on per-address FIFO queues, which are updated both every
+//! time a new task arrives (= called _scheduling_) and every time a runnable (= _post-scheduling_)
+//! task finishes (= called _descheduling_).
+//!
+//! For the _non-conflicting scheduling_ case, the story is very simple; it just remembers that
+//! all of the accessed addresses are write-locked or read-locked, along with the number of active
+//! (= _currently-scheduled-and-not-descheduled-yet_) tasks. Correspondingly, descheduling does
+//! the opposite book-keeping process, regardless of whether the finished task was conflicted or
+//! not.
+//!
+//! For the _conflicting scheduling_ case, it records each of the **non-conflicting addresses**
+//! like in the non-conflicting case above. As for the **conflicting addresses**, the task is
+//! recorded in the respective FIFO queues attached to those (conflicting) addresses. Importantly,
+//! the number of conflicting addresses of the conflicting task is also remembered.
+//!
+//! The last missing piece is that the scheduler actually tries to reschedule previously blocked
+//! tasks while descheduling, in addition to the above-mentioned book-keeping processing. Namely,
+//! when a given address becomes ready for fresh locking as a result of descheduling a task (i.e.
+//! a write lock is released or a read lock count has reached zero), it pops out the first element
+//! of the FIFO blocked-task queue of the address. Then, it immediately marks the address as
+//! relocked. It also decrements the number of conflicting addresses of the popped-out task. As
+//! the final step, if that number reaches zero, it means the task has fully finished locking all
+//! of its addresses and is directly routed to be runnable. Lastly, if the next first element of
+//! the blocked-task queue is trying to read-lock the address like the popped-out one, this
+//! rescheduling is repeated as an optimization to increase the parallelism of task execution.
+//!
+//! Put differently, this algorithm tries to gradually lock all of the addresses of tasks at
+//! different timings while not deviating the execution order from the original task ingestion
+//! order. This implies there are no locking retries in general, which are the primary source of
+//! non-linear perf. degradation.
+//!
+//! As a ballpark number from a synthesized micro benchmark on a usual CPU for `mainnet-beta`
+//! validators, it takes roughly 100ns to schedule and deschedule a transaction with 10 accounts,
+//! and 1us for a transaction with 100 accounts. Note that this entirely excludes crossbeam
+//! communication overhead. That said, it's not unrealistic to say the whole unified scheduler
+//! can attain 100k-1m tps overall, assuming those transaction executions aren't bottlenecked.
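+//!
+//! As a small worked sketch of the conflicting case (mirroring `test_readonly_then_writable` in
+//! the unit tests below; task creation is elided):
+//!
+//! ```ignore
+//! // task1 read-locks address A; task2 write-locks the same address A.
+//! assert!(state_machine.schedule_task(task1.clone()).is_some()); // runnable right away
+//! assert!(state_machine.schedule_task(task2.clone()).is_none()); // queued behind task1 on A
+//! state_machine.deschedule_task(&task1); // releases A's read lock; task2 relocks A
+//! assert!(state_machine.schedule_unblocked_task().is_some());    // task2 is now runnable
+//! state_machine.deschedule_task(&task2);
+//! ```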
+//!
+//! ### Runtime performance characteristics and data structure arrangement
+//!
+//! Its algorithm is very fast for high throughput and real-time for low latency. The whole
+//! unified-scheduler architecture is designed from the ground up to support the fastest execution
+//! of this scheduling code. To that end, the unified scheduler pre-loads the address-specific
+//! locking state data structures (called [`UsageQueue`]) for all of a transaction's accounts, in
+//! order to offload that job from the scheduler thread to other threads. This preloading is done
+//! inside [`create_task()`](SchedulingStateMachine::create_task). In this way, task scheduling
+//! computational complexity is basically reduced to several word-sized loads and stores in the
+//! scheduler thread (i.e. constant; no allocations nor syscalls), while being proportional to the
+//! number of addresses in a given transaction. Note that this statement holds true regardless of
+//! conflicts. This is because the preloading also pre-allocates some scratch-pad area
+//! ([`blocked_usages_from_tasks`](UsageQueueInner::blocked_usages_from_tasks)) to stash blocked
+//! ones. So, a conflict only incurs some additional fixed number of mem stores, within the error
+//! margin of the constant complexity. And the additional memory allocation for the scratchpad
+//! can be said to be amortized, if such an unusual event should occur.
+//!
+//! [`Arc`] is used to implement this preloading mechanism, because `UsageQueue`s are shared
+//! across tasks accessing the same account, and among threads due to the preloading. Also,
+//! interior mutability is needed. However, `SchedulingStateMachine` doesn't use conventional
+//! locks like RwLock. Leveraging the fact that it's the only state-mutating exclusive thread, it
+//! instead uses `UnsafeCell`, which is sugar-coated by a tailored wrapper called [`TokenCell`].
+//! `TokenCell` imposes an overly restrictive aliasing rule via the Rust type system to maintain
+//! memory safety. By localizing any synchronization to the message passing, the scheduling code
+//! itself attains the maximum possible single-threaded execution without stalling cpu pipelines
+//! at all, constrained only by mem access latency, while efficiently utilizing the L1-L3 cpu
+//! cache, full of `UsageQueue`s.
+//!
+//! ### Buffer bloat insignificance
+//!
+//! The scheduler code itself doesn't care about the buffer bloat problem, which can occur in the
+//! unified scheduler, where a run of heavily linearized and blocked tasks could be severely
+//! hampered by a very large number of interleaved runnable tasks alongside. The reason, again,
+//! is separation of concerns. This is acceptable because the scheduling code itself isn't
+//! susceptible to the buffer bloat problem by itself, as explained in the description above and
+//! validated by the mentioned benchmark. Thus, this should be solved elsewhere, specifically at
+//! the scheduler pool.
+use {
+    crate::utils::{ShortCounter, Token, TokenCell},
+    solana_sdk::{pubkey::Pubkey, transaction::SanitizedTransaction},
+    static_assertions::const_assert_eq,
+    std::{collections::VecDeque, mem, sync::Arc},
+};

-pub struct Task {
+/// Internal utilities. Namely this contains [`ShortCounter`] and [`TokenCell`].
+mod utils {
+    use std::{
+        any::{self, TypeId},
+        cell::{RefCell, UnsafeCell},
+        collections::BTreeSet,
+        marker::PhantomData,
+        thread,
+    };
+
+    /// A really tiny counter to hide `.checked_{add,sub}` all over the place.
+    ///
+    /// It's the caller's responsibility to ensure this (backed by [`u32`]) never overflows.
+    #[derive(Debug, Clone, Copy)]
+    pub(super) struct ShortCounter(u32);
+
+    impl ShortCounter {
+        pub(super) fn zero() -> Self {
+            Self(0)
+        }
+
+        pub(super) fn one() -> Self {
+            Self(1)
+        }
+
+        pub(super) fn is_one(&self) -> bool {
+            self.0 == 1
+        }
+
+        pub(super) fn is_zero(&self) -> bool {
+            self.0 == 0
+        }
+
+        pub(super) fn current(&self) -> u32 {
+            self.0
+        }
+
+        #[must_use]
+        pub(super) fn increment(self) -> Self {
+            Self(self.0.checked_add(1).unwrap())
+        }
+
+        #[must_use]
+        pub(super) fn decrement(self) -> Self {
+            Self(self.0.checked_sub(1).unwrap())
+        }
+
+        pub(super) fn increment_self(&mut self) -> &mut Self {
+            *self = self.increment();
+            self
+        }
+
+        pub(super) fn decrement_self(&mut self) -> &mut Self {
+            *self = self.decrement();
+            self
+        }
+
+        pub(super) fn reset_to_zero(&mut self) -> &mut Self {
+            self.0 = 0;
+            self
+        }
+    }
+
+    /// A conditionally [`Send`]-able and [`Sync`]-able cell leveraging the scheduler's one-by-one
+    /// data access pattern with zero runtime synchronization cost.
+    ///
+    /// To comply with Rust's aliasing rules, these cells require a carefully-created [`Token`] to
+    /// be passed around to access the inner values. The token is a special-purpose phantom object
+    /// to get rid of the inherent `unsafe`-ness of [`UnsafeCell`], which is internally used for
+    /// the interior mutability.
+    ///
+    /// The final objective of [`Token`] is to ensure there's only one mutable reference to the
+    /// [`TokenCell`] at most _at any given moment_. To that end, it's `unsafe` to create it,
+    /// shifting the responsibility of binding the only singleton instance to a particular thread
+    /// and not creating more than one, onto the API consumers. And its constructor is non-`const`,
+    /// and the type is `!Clone` (and `!Copy` as well), `!Default`, `!Send` and `!Sync` to make it
+    /// relatively hard to cross thread boundaries accidentally.
+    ///
+    /// In other words, the token semantically _owns_ all of its associated instances of
+    /// [`TokenCell`]s. And `&mut Token` is needed to access one of them, as if through one of
+    /// [`Token`]'s `*_mut()` getters. Thus, the Rust aliasing rule for `UnsafeCell` can
+    /// transitively be proven to be satisfied simply based on the usual borrow checking of the
+    /// `&mut` reference of [`Token`] itself via [`::borrow_mut()`](TokenCell::borrow_mut).
+    ///
+    /// By extension, it's allowed to create _multiple_ tokens in a _single_ process as long as no
+    /// instance of [`TokenCell`] is shared by multiple instances of [`Token`].
+    ///
+    /// Note that this is overly restrictive in that it's forbidden, yet technically possible, to
+    /// _have multiple mutable references to the inner values at the same time, if and only if the
+    /// respective cells aren't aliased to each other (i.e. different instances)_. This artificial
+    /// restriction is acceptable for its intended use by the unified scheduler's code, because its
+    /// algorithm only needs to access each instance of [`TokenCell`]-ed data once at a time.
+    /// Finally, this restriction is traded off for restoration of the Rust aliasing rule at zero
+    /// runtime cost. Without this token mechanism, there's no way to realize this.
+    #[derive(Debug, Default)]
+    pub(super) struct TokenCell<V>(UnsafeCell<V>);
+
+    impl<V> TokenCell<V> {
+        /// Creates a new `TokenCell` with the `value` typed as `V`.
+        ///
+        /// Note that this isn't parametric over the accompanying `Token`'s lifetime, to avoid
+        /// complex handling of non-`'static` heaped data in general.
+        /// Instead, it's manually required to ensure this instance is accessed only via its
+        /// associated Token for the entire lifetime.
+        ///
+        /// This is intentionally left non-`const` to forbid unprotected sharing via static
+        /// variables among threads.
+        pub(super) fn new(value: V) -> Self {
+            Self(UnsafeCell::new(value))
+        }
+
+        /// Returns a mutable reference with its lifetime bound to the mutable reference of the
+        /// given token.
+        ///
+        /// In this way, any additional reborrow can never happen at the same time across all
+        /// instances of [`TokenCell`] conceptually owned by the instance of [`Token`] (a
+        /// particular thread), unless the previous borrow is released. After the release, the
+        /// used singleton token should be free to be reused for reborrows.
+        pub(super) fn borrow_mut<'t>(&self, _token: &'t mut Token<V>) -> &'t mut V {
+            unsafe { &mut *self.0.get() }
+        }
+    }
+
+    // Safety: Once a (`Send`-able) `TokenCell` is transferred to a thread from other threads,
+    // access to the `TokenCell` is assumed to be only from that single thread, by proper use of
+    // Token. Thereby, implementing `Sync` can be thought of as safe, and doing so is needed for
+    // the particular implementation pattern in the unified scheduler (multi-threaded
+    // off-loading).
+    //
+    // In other words, TokenCell is technically still `!Sync`. But there should be no legalized
+    // usage that depends on real `Sync`, to avoid undefined behaviors.
+    unsafe impl<V> Sync for TokenCell<V> {}
+
+    /// An auxiliary zero-sized type to enforce the aliasing rule on [`TokenCell`] via the Rust
+    /// type system.
+    ///
+    /// The token semantically owns a collection of `TokenCell` objects and governs the _unique_
+    /// existence of mutable access over them by requiring the token itself to be mutably borrowed
+    /// to get a mutable reference to the internal value of a `TokenCell`.
+    // *mut is used to make this type !Send and !Sync
+    pub(super) struct Token<V: 'static>(PhantomData<*mut V>);
+
+    impl<V: 'static> Token<V> {
+        /// Returns the token to acquire a mutable reference to the inner value of [TokenCell].
+        ///
+        /// This is intentionally left non-`const` to forbid unprotected sharing via static
+        /// variables among threads.
+        ///
+        /// # Panics
+        ///
+        /// This function will `panic!()` if called multiple times with the same type `V` from
+        /// the same thread, to detect potential misuses.
+        ///
+        /// # Safety
+        ///
+        /// This method should be called at most once for each thread to avoid undefined behavior
+        /// when used with [`Token`].
+        #[must_use]
+        pub(super) unsafe fn assume_exclusive_mutating_thread() -> Self {
+            thread_local! {
+                static TOKENS: RefCell<BTreeSet<TypeId>> = const { RefCell::new(BTreeSet::new()) };
+            }
+            // TOKENS.with_borrow_mut() can't panic because it's the only non-overlapping
+            // bound-to-local-variable borrow of the _thread local_ variable.
+            assert!(
+                TOKENS.with_borrow_mut(|tokens| tokens.insert(TypeId::of::<Self>())),
+                "{:?} is wrongly initialized twice on {:?}",
+                any::type_name::<Self>(),
+                thread::current()
+            );
+
+            Self(PhantomData)
+        }
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use super::Token;
+
+        #[test]
+        #[should_panic(
+            expected = "\"solana_unified_scheduler_logic::utils::Token<usize>\" is wrongly \
+                        initialized twice on Thread"
+        )]
+        fn test_second_creation_of_tokens_in_a_thread() {
+            unsafe {
+                let _ = Token::<usize>::assume_exclusive_mutating_thread();
+                let _ = Token::<usize>::assume_exclusive_mutating_thread();
+            }
+        }
+    }
+}
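+
+// An illustrative sketch of the Token/TokenCell pattern above (a hypothetical snippet; both
+// types are crate-private, so this is not a public API example):
+//
+//     let mut token = unsafe { Token::<u32>::assume_exclusive_mutating_thread() };
+//     let cell = TokenCell::new(42u32);
+//     *cell.borrow_mut(&mut token) += 1; // the `&mut` token borrow proves exclusive access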
+
+/// [`Result`] for locking a [usage_queue](UsageQueue) with particular
+/// [current_usage](RequestedUsage).
+type LockResult = Result<Usage, ()>;
+const_assert_eq!(mem::size_of::<LockResult>(), 8);
+
+/// Something to be scheduled; usually a wrapper of [`SanitizedTransaction`].
+pub type Task = Arc<TaskInner>;
+const_assert_eq!(mem::size_of::<Task>(), 8);
+
+/// [`Token`] for [`UsageQueue`].
+type UsageQueueToken = Token<UsageQueueInner>;
+const_assert_eq!(mem::size_of::<UsageQueueToken>(), 0);
+
+/// [`Token`] for [task](Task)'s [internal mutable data](`TaskInner::blocked_usage_count`).
+type BlockedUsageCountToken = Token<ShortCounter>;
+const_assert_eq!(mem::size_of::<BlockedUsageCountToken>(), 0);
+
+/// Internal scheduling data about a particular task.
+#[derive(Debug)]
+pub struct TaskInner {
     transaction: SanitizedTransaction,
     index: usize,
+    lock_attempts: Vec<LockAttempt>,
+    blocked_usage_count: TokenCell<ShortCounter>,
 }

-impl Task {
-    pub fn create_task(transaction: SanitizedTransaction, index: usize) -> Self {
-        Task { transaction, index }
-    }
-
+impl TaskInner {
     pub fn task_index(&self) -> usize {
         self.index
     }
@@ -17,4 +327,1101 @@ impl Task {
     pub fn transaction(&self) -> &SanitizedTransaction {
         &self.transaction
     }
+
+    fn lock_attempts(&self) -> &[LockAttempt] {
+        &self.lock_attempts
+    }
+
+    fn blocked_usage_count_mut<'t>(
+        &self,
+        token: &'t mut BlockedUsageCountToken,
+    ) -> &'t mut ShortCounter {
+        self.blocked_usage_count.borrow_mut(token)
+    }
+
+    fn set_blocked_usage_count(&self, token: &mut BlockedUsageCountToken, count: ShortCounter) {
+        *self.blocked_usage_count_mut(token) = count;
+    }
+
+    #[must_use]
+    fn try_unblock(self: Task, token: &mut BlockedUsageCountToken) -> Option<Task> {
+        self.blocked_usage_count_mut(token)
+            .decrement_self()
+            .is_zero()
+            .then_some(self)
+    }
+}
+
+/// [`Task`]'s per-address attempt to use a [usage_queue](UsageQueue) with [certain kind of
+/// request](RequestedUsage).
+#[derive(Debug)]
+struct LockAttempt {
+    usage_queue: UsageQueue,
+    requested_usage: RequestedUsage,
+}
+const_assert_eq!(mem::size_of::<LockAttempt>(), 16);
+
+impl LockAttempt {
+    fn new(usage_queue: UsageQueue, requested_usage: RequestedUsage) -> Self {
+        Self {
+            usage_queue,
+            requested_usage,
+        }
+    }
+
+    fn usage_queue_mut<'t>(
+        &self,
+        usage_queue_token: &'t mut UsageQueueToken,
+    ) -> &'t mut UsageQueueInner {
+        self.usage_queue.0.borrow_mut(usage_queue_token)
+    }
+}
+
+/// Status about how the [`UsageQueue`] is used currently. Unlike [`RequestedUsage`], it has an
+/// additional variant of [`Unused`](`Usage::Unused`).
+#[derive(Copy, Clone, Debug, Default)]
+enum Usage {
+    #[default]
+    Unused,
+    Readonly(ShortCounter),
+    Writable,
+}
+const_assert_eq!(mem::size_of::<Usage>(), 8);
+
+impl From<RequestedUsage> for Usage {
+    fn from(requested_usage: RequestedUsage) -> Self {
+        match requested_usage {
+            RequestedUsage::Readonly => Usage::Readonly(ShortCounter::one()),
+            RequestedUsage::Writable => Usage::Writable,
+        }
+    }
+}
+
+/// Status about how a task is requesting to use a particular [`UsageQueue`]. Unlike [`Usage`],
+/// it has only two unit variants.
+#[derive(Clone, Copy, Debug)]
+enum RequestedUsage {
+    Readonly,
+    Writable,
+}
+
+/// Internal scheduling data about a particular address.
+///
+/// Specifically, it holds the current [`Usage`] (or no usage with [`Usage::Unused`]) and which
+/// [`Task`]s are blocked and waiting to be executed after the current task is notified as
+/// finished via [`::deschedule_task`](`SchedulingStateMachine::deschedule_task`).
+#[derive(Debug)]
+struct UsageQueueInner {
+    current_usage: Usage,
+    blocked_usages_from_tasks: VecDeque<UsageFromTask>,
+}
+
+type UsageFromTask = (RequestedUsage, Task);
+
+impl Default for UsageQueueInner {
+    fn default() -> Self {
+        Self {
+            current_usage: Usage::default(),
+            // Capacity should be configurable to create with a large capacity like 1024 inside
+            // the (multi-threaded) closures passed to create_task(). In this way, reallocs can
+            // be avoided from happening in the scheduler thread. Also, this configurability is
+            // desired for unified-scheduler-logic's motto: separation of concerns (the pure
+            // logic should be sufficiently distanced from the arbitrary knob constants needed
+            // for messy reality, as a matter of the author's preference...).
+            //
+            // Note that a large capacity should be accompanied by proper scheduler cleaning
+            // after use, which should be handled by higher layers (i.e. the scheduler pool).
+            blocked_usages_from_tasks: VecDeque::with_capacity(128),
+        }
+    }
+}
+
+impl UsageQueueInner {
+    fn push_blocked_usage_from_task(&mut self, usage_from_task: UsageFromTask) {
+        self.blocked_usages_from_tasks.push_back(usage_from_task);
+    }
+
+    #[must_use]
+    fn pop_unblocked_usage_from_task(&mut self) -> Option<UsageFromTask> {
+        self.blocked_usages_from_tasks.pop_front()
+    }
+
+    #[must_use]
+    fn pop_unblocked_readonly_usage_from_task(&mut self) -> Option<UsageFromTask> {
+        if matches!(
+            self.blocked_usages_from_tasks.front(),
+            Some((RequestedUsage::Readonly, _))
+        ) {
+            self.pop_unblocked_usage_from_task()
+        } else {
+            None
+        }
+    }
+
+    fn has_no_blocked_usage(&self) -> bool {
+        self.blocked_usages_from_tasks.is_empty()
+    }
+}
+
+const_assert_eq!(mem::size_of::<UsageQueueInner>(), 40);
+
+/// Scheduler's internal data for each address ([`Pubkey`](`solana_sdk::pubkey::Pubkey`)). A very
+/// opaque wrapper type; it has no methods, just [`::clone()`](Clone::clone) and
+/// [`::default()`](Default::default).
+#[derive(Debug, Clone, Default)]
+pub struct UsageQueue(Arc<TokenCell<UsageQueueInner>>);
+const_assert_eq!(mem::size_of::<UsageQueue>(), 8);
+
+/// A high-level `struct`, managing the overall scheduling of [tasks](Task), to be used by
+/// `solana-unified-scheduler-pool`.
+pub struct SchedulingStateMachine {
+    last_task_index: Option<usize>,
+    unblocked_task_queue: VecDeque<Task>,
+    active_task_count: ShortCounter,
+    handled_task_count: ShortCounter,
+    unblocked_task_count: ShortCounter,
+    total_task_count: ShortCounter,
+    count_token: BlockedUsageCountToken,
+    usage_queue_token: UsageQueueToken,
+}
+const_assert_eq!(mem::size_of::<SchedulingStateMachine>(), 64);
+
+impl SchedulingStateMachine {
+    pub fn has_no_active_task(&self) -> bool {
+        self.active_task_count.is_zero()
+    }
+
+    pub fn has_unblocked_task(&self) -> bool {
+        !self.unblocked_task_queue.is_empty()
+    }
+
+    pub fn unblocked_task_queue_count(&self) -> usize {
+        self.unblocked_task_queue.len()
+    }
+
+    pub fn active_task_count(&self) -> u32 {
+        self.active_task_count.current()
+    }
+
+    pub fn handled_task_count(&self) -> u32 {
+        self.handled_task_count.current()
+    }
+
+    pub fn unblocked_task_count(&self) -> u32 {
+        self.unblocked_task_count.current()
+    }
+
+    pub fn total_task_count(&self) -> u32 {
+        self.total_task_count.current()
+    }
+
+    /// Schedules the given `task`, returning it if successful.
+    ///
+    /// Returns `Some(task)` if it's immediately scheduled.
+    /// Otherwise, returns `None`, indicating the task is currently blocked.
+    ///
+    /// Note that this function's type signature is intentionally redundant to take the ownership
+    /// of the given task _conditionally_ for future optimization.
+    #[must_use]
+    pub fn schedule_task(&mut self, task: Task) -> Option<Task> {
+        let new_task_index = task.task_index();
+        if let Some(old_task_index) = self.last_task_index.replace(new_task_index) {
+            assert!(
+                new_task_index > old_task_index,
+                "bad new task index: {new_task_index} > {old_task_index}"
+            );
+        }
+        self.total_task_count.increment_self();
+        self.active_task_count.increment_self();
+        self.try_lock_for_task(task)
+    }
+
+    #[must_use]
+    pub fn schedule_unblocked_task(&mut self) -> Option<Task> {
+        self.unblocked_task_queue.pop_front().inspect(|_| {
+            self.unblocked_task_count.increment_self();
+        })
+    }
+
+    pub fn deschedule_task(&mut self, task: &Task) {
+        let descheduled_task_index = task.task_index();
+        let largest_task_index = self
+            .last_task_index
+            .expect("task should have been scheduled");
+        assert!(
+            descheduled_task_index <= largest_task_index,
+            "bad descheduled task index: {descheduled_task_index} <= {largest_task_index}"
+        );
+        self.active_task_count.decrement_self();
+        self.handled_task_count.increment_self();
+        self.unlock_for_task(task);
+    }
+
+    fn try_lock_usage_queue(
+        usage_queue: &UsageQueueInner,
+        requested_usage: RequestedUsage,
+    ) -> LockResult {
+        match usage_queue.current_usage {
+            Usage::Unused => LockResult::Ok(Usage::from(requested_usage)),
+            Usage::Readonly(count) => match requested_usage {
+                RequestedUsage::Readonly => LockResult::Ok(Usage::Readonly(count.increment())),
+                RequestedUsage::Writable => LockResult::Err(()),
+            },
+            Usage::Writable => LockResult::Err(()),
+        }
+    }
+
+    #[must_use]
+    fn unlock_usage_queue(
+        usage_queue: &mut UsageQueueInner,
+        attempt: &LockAttempt,
+    ) -> Option<(RequestedUsage, Task)> {
+        let mut is_unused_now = false;
+        match &mut usage_queue.current_usage {
+            Usage::Readonly(ref mut count) => match attempt.requested_usage {
+                RequestedUsage::Readonly => {
+                    if count.is_one() {
+                        is_unused_now = true;
+                    } else {
+                        count.decrement_self();
+                    }
+                }
+                RequestedUsage::Writable => unreachable!(),
+            },
+            Usage::Writable => match attempt.requested_usage {
+                RequestedUsage::Writable => {
+                    is_unused_now = true;
+                }
+                RequestedUsage::Readonly => unreachable!(),
+            },
+            Usage::Unused => unreachable!(),
+        }
+
+        if is_unused_now {
+            usage_queue.current_usage = Usage::Unused;
+            usage_queue.pop_unblocked_usage_from_task()
+        } else {
+            None
+        }
+    }
+
+    #[must_use]
+    fn try_lock_for_task(&mut self, task: Task) -> Option<Task> {
+        let mut blocked_usage_count = ShortCounter::zero();
+
+        for attempt in task.lock_attempts() {
+            let usage_queue = attempt.usage_queue_mut(&mut self.usage_queue_token);
+            let lock_result = if usage_queue.has_no_blocked_usage() {
+                Self::try_lock_usage_queue(usage_queue, attempt.requested_usage)
+            } else {
+                LockResult::Err(())
+            };
+            match lock_result {
+                LockResult::Ok(Usage::Unused) => unreachable!(),
+                LockResult::Ok(new_usage) => {
+                    usage_queue.current_usage = new_usage;
+                }
+                LockResult::Err(()) => {
+                    blocked_usage_count.increment_self();
+                    let usage_from_task = (attempt.requested_usage, task.clone());
+                    usage_queue.push_blocked_usage_from_task(usage_from_task);
+                }
+            }
+        }
+
+        // no blocked usage count means success
+        if blocked_usage_count.is_zero() {
+            Some(task)
+        } else {
+            task.set_blocked_usage_count(&mut self.count_token, blocked_usage_count);
+            None
+        }
+    }
+    fn unlock_for_task(&mut self, task: &Task) {
+        for attempt in task.lock_attempts() {
+            let usage_queue = attempt.usage_queue_mut(&mut self.usage_queue_token);
+            let mut unblocked_task_from_queue = Self::unlock_usage_queue(usage_queue, attempt);
+
+            while let Some((requested_usage, task_with_unblocked_queue)) =
+                unblocked_task_from_queue
+            {
+                if let Some(task) = task_with_unblocked_queue.try_unblock(&mut self.count_token) {
+                    self.unblocked_task_queue.push_back(task);
+                }
+
+                match Self::try_lock_usage_queue(usage_queue, requested_usage) {
+                    LockResult::Ok(Usage::Unused) => unreachable!(),
+                    LockResult::Ok(new_usage) => {
+                        usage_queue.current_usage = new_usage;
+                        // Try to further schedule blocked task for parallelism in the case of
+                        // readonly usages
+                        unblocked_task_from_queue = if matches!(new_usage, Usage::Readonly(_)) {
+                            usage_queue.pop_unblocked_readonly_usage_from_task()
+                        } else {
+                            None
+                        };
+                    }
+                    LockResult::Err(_) => panic!("should never fail in this context"),
+                }
+            }
+        }
+    }
+
+    /// Creates a new task with [`SanitizedTransaction`] with all of its corresponding
+    /// [`UsageQueue`]s preloaded.
+    ///
+    /// The closure (`usage_queue_loader`) is used to delegate the (possibly multi-threaded)
+    /// implementation of [`UsageQueue`] look-up by [`pubkey`](Pubkey) to callers. It's the
+    /// caller's responsibility to ensure the same instance is returned from the closure, given a
+    /// particular pubkey.
+    ///
+    /// A closure is used here to delegate the responsibility of primary ownership of `UsageQueue`
+    /// (and caching/pruning, if any) to the caller. `SchedulingStateMachine` guarantees that all
+    /// shared ownership of `UsageQueue`s is released and the `UsageQueue` state is identical to
+    /// just after creation, if `has_no_active_task()` is `true`. Also note that this is desired
+    /// for separation of concerns.
+    pub fn create_task(
+        transaction: SanitizedTransaction,
+        index: usize,
+        usage_queue_loader: &mut impl FnMut(Pubkey) -> UsageQueue,
+    ) -> Task {
+        // Calling the _unchecked() version here is safe for faster operation, because
+        // `get_account_locks()` (the safe variant) is ensured to be called in
+        // DefaultTransactionHandler::handle() via Bank::prepare_unlocked_batch_from_single_tx().
+        //
+        // The safe variant has additional account-locking related verifications, which is
+        // crucial.
+        //
+        // Currently the replaying stage is redundantly calling `get_account_locks()` when the
+        // unified scheduler is enabled on the given transaction at the blockstore. This will be
+        // relaxed for optimization in the future. As for the banking stage with the unified
+        // scheduler, it will need to run .get_account_locks() at least once somewhere in the code
+        // path. In the distant future, this function `create_task()` should be adjusted so that
+        // both stages do the checks before calling this (say, with some ad-hoc type like
+        // `SanitizedTransactionWithCheckedAccountLocks`) or do the checks here, resulting in
+        // eliminating the redundant one in the replaying stage and in the handler.
+        let locks = transaction.get_account_locks_unchecked();
+
+        let writable_locks = locks
+            .writable
+            .iter()
+            .map(|address| (address, RequestedUsage::Writable));
+        let readonly_locks = locks
+            .readonly
+            .iter()
+            .map(|address| (address, RequestedUsage::Readonly));
+
+        let lock_attempts = writable_locks
+            .chain(readonly_locks)
+            .map(|(address, requested_usage)| {
+                LockAttempt::new(usage_queue_loader(**address), requested_usage)
+            })
+            .collect();
+
+        Task::new(TaskInner {
+            transaction,
+            index,
+            lock_attempts,
+            blocked_usage_count: TokenCell::new(ShortCounter::zero()),
+        })
+    }
+
+    /// Rewinds the inactive state machine to its initialized state.
+    ///
+    /// This isn't called _reset_ to indicate that it isn't safe to call at any given moment.
+    /// This panics if the state machine hasn't properly been finished (i.e. there should be no
+    /// active task) to uphold the invariants of [`UsageQueue`]s.
+    ///
+    /// This method is intended to reuse a SchedulingStateMachine instance (to avoid its `unsafe`
+    /// [constructor](SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling)
+    /// as much as possible) and its (possibly cached) associated [`UsageQueue`]s for processing
+    /// other slots.
+    pub fn reinitialize(&mut self) {
+        assert!(self.has_no_active_task());
+        assert_eq!(self.unblocked_task_queue.len(), 0);
+        // nice trick to ensure all fields are handled here if a new one is added.
+        let Self {
+            last_task_index,
+            unblocked_task_queue: _,
+            active_task_count,
+            handled_task_count,
+            unblocked_task_count,
+            total_task_count,
+            count_token: _,
+            usage_queue_token: _,
+            // don't add ".." here
+        } = self;
+        *last_task_index = None;
+        active_task_count.reset_to_zero();
+        handled_task_count.reset_to_zero();
+        unblocked_task_count.reset_to_zero();
+        total_task_count.reset_to_zero();
+    }
+
+    /// Creates a new instance of [`SchedulingStateMachine`] with its `unsafe` fields created as
+    /// well, thus carrying over `unsafe`.
+    ///
+    /// # Safety
+    /// Call this exactly once for each thread. See [`TokenCell`] for details.
+    #[must_use]
+    pub unsafe fn exclusively_initialize_current_thread_for_scheduling() -> Self {
+        Self {
+            last_task_index: None,
+            // It's very unlikely this is desired to be configurable, like
+            // `UsageQueueInner::blocked_usages_from_tasks`'s cap.
+            unblocked_task_queue: VecDeque::with_capacity(1024),
+            active_task_count: ShortCounter::zero(),
+            handled_task_count: ShortCounter::zero(),
+            unblocked_task_count: ShortCounter::zero(),
+            total_task_count: ShortCounter::zero(),
+            count_token: unsafe { BlockedUsageCountToken::assume_exclusive_mutating_thread() },
+            usage_queue_token: unsafe { UsageQueueToken::assume_exclusive_mutating_thread() },
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        assert_matches::assert_matches,
+        solana_sdk::{
+            instruction::{AccountMeta, Instruction},
+            message::Message,
+            pubkey::Pubkey,
+            signature::Signer,
+            signer::keypair::Keypair,
+            transaction::{SanitizedTransaction, Transaction},
+        },
+        std::{cell::RefCell, collections::HashMap, rc::Rc},
+    };
+
+    fn simplest_transaction() -> SanitizedTransaction {
+        let payer = Keypair::new();
+        let message = Message::new(&[], Some(&payer.pubkey()));
+        let unsigned = Transaction::new_unsigned(message);
+        SanitizedTransaction::from_transaction_for_tests(unsigned)
+    }
+
+    fn transaction_with_readonly_address(address: Pubkey) -> SanitizedTransaction {
+        let instruction = Instruction {
+            program_id: Pubkey::default(),
+            accounts: vec![AccountMeta::new_readonly(address, false)],
+            data: vec![],
+        };
+        let message = Message::new(&[instruction], Some(&Pubkey::new_unique()));
+        let unsigned = Transaction::new_unsigned(message);
+        SanitizedTransaction::from_transaction_for_tests(unsigned)
+    }
+
+    fn transaction_with_writable_address(address: Pubkey) -> SanitizedTransaction {
+        let instruction = Instruction {
+            program_id: Pubkey::default(),
+            accounts: vec![AccountMeta::new(address, false)],
+            data: vec![],
+        };
+        let message = Message::new(&[instruction], Some(&Pubkey::new_unique()));
+        let unsigned = Transaction::new_unsigned(message);
+        SanitizedTransaction::from_transaction_for_tests(unsigned)
+    }
+
+    fn create_address_loader(
+        usage_queues: Option<Rc<RefCell<HashMap<Pubkey, UsageQueue>>>>,
+    ) -> impl FnMut(Pubkey) -> UsageQueue {
+        let usage_queues = usage_queues.unwrap_or_default();
+        move |address| {
+            usage_queues
+                .borrow_mut()
+                .entry(address)
+                .or_default()
+                .clone()
+        }
+    }
+
+    #[test]
+    fn test_debug() {
+        // these are almost meaningless; they are just to get an eye-pleasing coverage report....
+        assert_eq!(
+            format!("{:?}", LockResult::Ok(Usage::Readonly(ShortCounter::one()))),
+            "Ok(Readonly(ShortCounter(1)))"
+        );
+        let sanitized = simplest_transaction();
+        let task =
+            SchedulingStateMachine::create_task(sanitized, 0, &mut |_| UsageQueue::default());
+        assert!(format!("{:?}", task).contains("TaskInner"));
+
+        assert_eq!(
+            format!("{:?}", UsageQueueInner::default()),
+            "UsageQueueInner { current_usage: Unused, blocked_usages_from_tasks: [] }"
+        )
+    }
+
+    #[test]
+    fn test_scheduling_state_machine_creation() {
+        let state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_eq!(state_machine.active_task_count(), 0);
+        assert_eq!(state_machine.total_task_count(), 0);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_scheduling_state_machine_reinitialization() {
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        state_machine.total_task_count.increment_self();
+        assert_eq!(state_machine.total_task_count(), 1);
+        state_machine.last_task_index = Some(1);
+        state_machine.reinitialize();
+        assert_eq!(state_machine.total_task_count(), 0);
+        assert_eq!(state_machine.last_task_index, None);
+    }
+
+    #[test]
+    fn test_create_task() {
+        let sanitized = simplest_transaction();
+        let task = SchedulingStateMachine::create_task(sanitized.clone(), 3, &mut |_| {
+            UsageQueue::default()
+        });
+        assert_eq!(task.task_index(), 3);
+        assert_eq!(task.transaction(), &sanitized);
+    }
+
+    #[test]
+    fn test_non_conflicting_task_related_counts() {
+        let sanitized = simplest_transaction();
+        let address_loader = &mut create_address_loader(None);
+        let task = SchedulingStateMachine::create_task(sanitized.clone(), 3, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let task = state_machine.schedule_task(task).unwrap();
+        assert_eq!(state_machine.active_task_count(), 1);
+        assert_eq!(state_machine.total_task_count(), 1);
+        state_machine.deschedule_task(&task);
+        assert_eq!(state_machine.active_task_count(), 0);
+        assert_eq!(state_machine.total_task_count(), 1);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_conflicting_task_related_counts() {
+        let sanitized = simplest_transaction();
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader);
+        let task3 = SchedulingStateMachine::create_task(sanitized.clone(), 103, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+
+        state_machine.deschedule_task(&task1);
+        assert!(state_machine.has_unblocked_task());
+        assert_eq!(state_machine.unblocked_task_queue_count(), 1);
+        assert_eq!(
+            state_machine
+                .schedule_unblocked_task()
+                .unwrap()
+                .task_index(),
+            task2.task_index()
+        );
+        assert!(!state_machine.has_unblocked_task());
+        assert_eq!(state_machine.unblocked_task_queue_count(), 0);
+        state_machine.deschedule_task(&task2);
+
+        assert_matches!(
+            state_machine
+                .schedule_task(task3.clone())
+                .map(|task| task.task_index()),
+            Some(103)
+        );
+        state_machine.deschedule_task(&task3);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_unblocked_task_related_counts() {
+        let sanitized = simplest_transaction();
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+
+        state_machine.deschedule_task(&task1);
+
+        assert_eq!(state_machine.unblocked_task_count(), 0);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        assert_eq!(state_machine.unblocked_task_count(), 1);
+        // there's no blocked task anymore; calling schedule_unblocked_task should be a no-op and
+        // shouldn't increment the unblocked_task_count().
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+        assert_eq!(state_machine.unblocked_task_count(), 1);
+
+        state_machine.deschedule_task(&task2);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_existing_blocking_task_then_newly_scheduled_task() {
+        let sanitized = simplest_transaction();
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader);
+        let task3 = SchedulingStateMachine::create_task(sanitized.clone(), 103, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+
+        assert_eq!(state_machine.unblocked_task_queue_count(), 0);
+        state_machine.deschedule_task(&task1);
+        assert_eq!(state_machine.unblocked_task_queue_count(), 1);
+
+        // new task is arriving after task1 is already descheduled and task2 got unblocked
+        assert_matches!(state_machine.schedule_task(task3.clone()), None);
+
+        assert_eq!(state_machine.unblocked_task_count(), 0);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        assert_eq!(state_machine.unblocked_task_count(), 1);
+
+        state_machine.deschedule_task(&task2);
+
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(103)
+        );
+        assert_eq!(state_machine.unblocked_task_count(), 2);
+
+        state_machine.deschedule_task(&task3);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_multiple_readonly_task_and_counts() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized1 = transaction_with_readonly_address(conflicting_address);
+        let sanitized2 = transaction_with_readonly_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        // both read-only tasks should be immediately runnable
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(
+            state_machine
+                .schedule_task(task2.clone())
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+
+        assert_eq!(state_machine.active_task_count(), 2);
+        assert_eq!(state_machine.handled_task_count(), 0);
+        assert_eq!(state_machine.unblocked_task_queue_count(), 0);
+        state_machine.deschedule_task(&task1);
+        assert_eq!(state_machine.active_task_count(), 1);
+        assert_eq!(state_machine.handled_task_count(), 1);
+        assert_eq!(state_machine.unblocked_task_queue_count(), 0);
+        state_machine.deschedule_task(&task2);
+        assert_eq!(state_machine.active_task_count(), 0);
+        assert_eq!(state_machine.handled_task_count(), 2);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_all_blocking_readable_tasks_block_writable_task() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized1 = transaction_with_readonly_address(conflicting_address);
+        let sanitized2 = transaction_with_readonly_address(conflicting_address);
+        let sanitized3 = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader);
+        let task3 = SchedulingStateMachine::create_task(sanitized3, 103, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(
+            state_machine
+                .schedule_task(task2.clone())
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        assert_matches!(state_machine.schedule_task(task3.clone()), None);
+
+        assert_eq!(state_machine.active_task_count(), 3);
+        assert_eq!(state_machine.handled_task_count(), 0);
+        assert_eq!(state_machine.unblocked_task_queue_count(), 0);
+        state_machine.deschedule_task(&task1);
+        assert_eq!(state_machine.active_task_count(), 2);
+        assert_eq!(state_machine.handled_task_count(), 1);
+        assert_eq!(state_machine.unblocked_task_queue_count(), 0);
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+        state_machine.deschedule_task(&task2);
+        assert_eq!(state_machine.active_task_count(), 1);
+        assert_eq!(state_machine.handled_task_count(), 2);
+        assert_eq!(state_machine.unblocked_task_queue_count(), 1);
+        // task3 is finally unblocked after all readable tasks (task1 and task2) are finished.
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(103)
+        );
+        state_machine.deschedule_task(&task3);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_readonly_then_writable_then_readonly_linearized() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized1 = transaction_with_readonly_address(conflicting_address);
+        let sanitized2 = transaction_with_writable_address(conflicting_address);
+        let sanitized3 = transaction_with_readonly_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader);
+        let task3 = SchedulingStateMachine::create_task(sanitized3, 103, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+        assert_matches!(state_machine.schedule_task(task3.clone()), None);
+
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+        state_machine.deschedule_task(&task1);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+        state_machine.deschedule_task(&task2);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(103)
+        );
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+        state_machine.deschedule_task(&task3);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_readonly_then_writable() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized1 = transaction_with_readonly_address(conflicting_address);
+        let sanitized2 = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+
+        // descheduling read-locking task1 should equate to unblocking write-locking task2
+        state_machine.deschedule_task(&task1);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        state_machine.deschedule_task(&task2);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_blocked_tasks_writable_2_readonly_then_writable() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized1 = transaction_with_writable_address(conflicting_address);
+        let sanitized2 = transaction_with_readonly_address(conflicting_address);
+        let sanitized3 = transaction_with_readonly_address(conflicting_address);
+        let sanitized4 = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader);
+        let task3 = SchedulingStateMachine::create_task(sanitized3, 103, address_loader);
+        let task4 = SchedulingStateMachine::create_task(sanitized4, 104, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+        assert_matches!(state_machine.schedule_task(task3.clone()), None);
+        assert_matches!(state_machine.schedule_task(task4.clone()), None);
+
+        state_machine.deschedule_task(&task1);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(103)
+        );
+        // the above deschedule_task(task1) call should only unblock task2 and task3 because
+        // these are read-locking. And it shouldn't unblock task4 because it's write-locking
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+
+        state_machine.deschedule_task(&task2);
+        // task4 is still blocked...
+        assert_matches!(state_machine.schedule_unblocked_task(), None);
+
+        state_machine.deschedule_task(&task3);
+        // finally task4 should be unblocked
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(104)
+        );
+        state_machine.deschedule_task(&task4);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    fn test_gradual_locking() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized1 = transaction_with_writable_address(conflicting_address);
+        let sanitized2 = transaction_with_writable_address(conflicting_address);
+        let usage_queues = Rc::new(RefCell::new(HashMap::new()));
+        let address_loader = &mut create_address_loader(Some(usage_queues.clone()));
+        let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        assert_matches!(
+            state_machine
+                .schedule_task(task1.clone())
+                .map(|t| t.task_index()),
+            Some(101)
+        );
+        assert_matches!(state_machine.schedule_task(task2.clone()), None);
+        let usage_queues = usage_queues.borrow_mut();
+        let usage_queue = usage_queues.get(&conflicting_address).unwrap();
+        assert_matches!(
+            usage_queue
+                .0
+                .borrow_mut(&mut state_machine.usage_queue_token)
+                .current_usage,
+            Usage::Writable
+        );
+        // task2's fee payer should have been locked already even though task2 itself is still
+        // blocked, via the schedule_task(task2) call above
+        let fee_payer = task2.transaction().message().fee_payer();
+        let usage_queue = usage_queues.get(fee_payer).unwrap();
+        assert_matches!(
+            usage_queue
+                .0
+                .borrow_mut(&mut state_machine.usage_queue_token)
+                .current_usage,
+            Usage::Writable
+        );
+        state_machine.deschedule_task(&task1);
+        assert_matches!(
+            state_machine
+                .schedule_unblocked_task()
+                .map(|t| t.task_index()),
+            Some(102)
+        );
+        state_machine.deschedule_task(&task2);
+        assert!(state_machine.has_no_active_task());
+    }
+
+    #[test]
+    #[should_panic(expected = "internal error: entered unreachable code")]
+    fn test_unreachable_unlock_conditions1() {
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let usage_queue = UsageQueue::default();
+        let _ = SchedulingStateMachine::unlock_usage_queue(
+            usage_queue
+                .0
+                .borrow_mut(&mut state_machine.usage_queue_token),
+            &LockAttempt::new(usage_queue, RequestedUsage::Writable),
+        );
+    }
+
+    #[test]
+    #[should_panic(expected = "internal error: entered unreachable code")]
+    fn test_unreachable_unlock_conditions2() {
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let usage_queue = UsageQueue::default();
+        usage_queue
+            .0
+            .borrow_mut(&mut state_machine.usage_queue_token)
+            .current_usage = Usage::Writable;
+        let _ = SchedulingStateMachine::unlock_usage_queue(
+            usage_queue
+                .0
+                .borrow_mut(&mut state_machine.usage_queue_token),
+            &LockAttempt::new(usage_queue, RequestedUsage::Readonly),
+        );
+    }
+
+    #[test]
+    #[should_panic(expected = "internal error: entered unreachable code")]
+    fn test_unreachable_unlock_conditions3() {
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let usage_queue = UsageQueue::default();
+        usage_queue
+            .0
+            .borrow_mut(&mut state_machine.usage_queue_token)
+            .current_usage = Usage::Readonly(ShortCounter::one());
+        let _ = SchedulingStateMachine::unlock_usage_queue(
+            usage_queue
+                .0
+                .borrow_mut(&mut state_machine.usage_queue_token),
+            &LockAttempt::new(usage_queue, RequestedUsage::Writable),
+        );
+    }
+
+    #[test]
+    #[should_panic(expected = "bad new task index: 101 > 101")]
+    fn test_schedule_same_task() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task = SchedulingStateMachine::create_task(sanitized, 101, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let _ = state_machine.schedule_task(task.clone());
+        let _ = state_machine.schedule_task(task.clone());
+    }
+
+    #[test]
+    #[should_panic(expected = "bad new task index: 101 > 102")]
+    fn test_schedule_task_out_of_order() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let _ = state_machine.schedule_task(task2.clone());
+        let _ = state_machine.schedule_task(task1.clone());
+    }
+
+    #[test]
+    #[should_panic(expected = "task should have been scheduled")]
+    fn test_deschedule_new_task_without_scheduling() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        state_machine.deschedule_task(&task);
+    }
+
+    #[test]
+    #[should_panic(expected = "bad descheduled task index: 102 <= 101")]
+    fn test_deschedule_new_task_out_of_order() {
+        let conflicting_address = Pubkey::new_unique();
+        let sanitized = transaction_with_writable_address(conflicting_address);
+        let address_loader = &mut create_address_loader(None);
+        let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader);
+        let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader);
+
+        let mut state_machine = unsafe {
+            SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+        };
+        let _ = state_machine.schedule_task(task1.clone());
+        state_machine.deschedule_task(&task2);
+    }
+}
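Taken together, the tests above pin down the reader-writer rule at a single usage queue: concurrent read locks share an address, a write lock is exclusive, and an unlock must mirror the lock that was actually taken. A minimal standalone model of just that counting rule is sketched below; the names are illustrative rather than the crate's real API, and it deliberately omits the blocked-task queue that enforces task-index order (the linearization test above shows that even a reader must wait behind an already-blocked writer):

```rust
// Toy model of the per-address usage counting exercised by the tests above.
// Names are illustrative; the real crate tracks this inside UsageQueue with a
// ShortCounter and additionally queues blocked tasks in task-index order.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Usage {
    Unused,
    Readonly(u32), // number of concurrent readers
    Writable,
}

#[derive(Clone, Copy)]
enum Requested {
    Readonly,
    Writable,
}

fn try_lock(current: &mut Usage, requested: Requested) -> bool {
    *current = match (*current, requested) {
        (Usage::Unused, Requested::Readonly) => Usage::Readonly(1),
        (Usage::Unused, Requested::Writable) => Usage::Writable,
        // readers can share the address; just bump the counter
        (Usage::Readonly(n), Requested::Readonly) => Usage::Readonly(n + 1),
        // everything else conflicts; the task would be queued as blocked
        _ => return false,
    };
    true
}

fn unlock(current: &mut Usage, held: Requested) {
    *current = match (*current, held) {
        (Usage::Readonly(1), Requested::Readonly) => Usage::Unused,
        (Usage::Readonly(n), Requested::Readonly) => Usage::Readonly(n - 1),
        (Usage::Writable, Requested::Writable) => Usage::Unused,
        // unlocking something never locked that way is a logic bug, which is
        // exactly what test_unreachable_unlock_conditions{1,2,3} assert
        _ => unreachable!(),
    };
}

fn main() {
    let mut usage = Usage::Unused;
    assert!(try_lock(&mut usage, Requested::Readonly)); // reader A
    assert!(try_lock(&mut usage, Requested::Readonly)); // reader B shares
    assert!(!try_lock(&mut usage, Requested::Writable)); // writer conflicts
    unlock(&mut usage, Requested::Readonly);
    unlock(&mut usage, Requested::Readonly);
    assert!(try_lock(&mut usage, Requested::Writable)); // now exclusive
}
```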
diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml
index 7626215b1e1126..9bd668f2799ab0 100644
--- a/unified-scheduler-pool/Cargo.toml
+++ b/unified-scheduler-pool/Cargo.toml
@@ -12,6 +12,7 @@ edition = { workspace = true }
 [dependencies]
 assert_matches = { workspace = true }
 crossbeam-channel = { workspace = true }
+dashmap = { workspace = true }
 derivative = { workspace = true }
 log = { workspace = true }
 solana-ledger = { workspace = true }
diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs
index 09ded82ee88e7d..4eace6d7a93792 100644
--- a/unified-scheduler-pool/src/lib.rs
+++ b/unified-scheduler-pool/src/lib.rs
@@ -1,3 +1,8 @@
+//! NOTE: While the unified scheduler is fully functional and moderately performant even with
+//! mainnet-beta, it has known resource-exhaustion related security issues for replaying
+//! specially-crafted blocks produced by malicious leaders. Thus, this experimental and
+//! nondefault functionality is exempt from the bug bounty program for now.
+//!
 //! Transaction scheduling code.
 //!
 //! This crate implements 3 solana-runtime traits (`InstalledScheduler`, `UninstalledScheduler` and
@@ -10,7 +15,8 @@
 use {
     assert_matches::assert_matches,
-    crossbeam_channel::{select, unbounded, Receiver, SendError, Sender},
+    crossbeam_channel::{never, select, unbounded, Receiver, RecvError, SendError, Sender},
+    dashmap::DashMap,
     derivative::Derivative,
     log::*,
     solana_ledger::blockstore_processor::{
@@ -26,8 +32,11 @@
         },
         prioritization_fee_cache::PrioritizationFeeCache,
     },
-    solana_sdk::transaction::{Result, SanitizedTransaction},
-    solana_unified_scheduler_logic::Task,
+    solana_sdk::{
+        pubkey::Pubkey,
+        transaction::{Result, SanitizedTransaction},
+    },
+    solana_unified_scheduler_logic::{SchedulingStateMachine, Task, UsageQueue},
     solana_vote::vote_sender_types::ReplayVoteSender,
     std::{
         fmt::Debug,
@@ -90,10 +99,8 @@ where
         replay_vote_sender: Option<ReplayVoteSender>,
         prioritization_fee_cache: Arc<PrioritizationFeeCache>,
     ) -> Arc<Self> {
-        let handler_count = handler_count.unwrap_or(1);
-        // we're hard-coding the number of handler thread to 1, meaning this impl is currently
-        // single-threaded still.
-        assert_eq!(handler_count, 1); // replace this with assert!(handler_count >= 1) later
+        let handler_count = handler_count.unwrap_or(Self::default_handler_count());
+        assert!(handler_count >= 1);
 
         Arc::new_cyclic(|weak_self| Self {
             scheduler_inners: Mutex::default(),
@@ -386,13 +393,35 @@ mod chained_channel {
     }
 }
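The dashmap dependency added above backs the per-address cache introduced just below. The point of the pattern is that every task touching a given address receives a clone of one shared queue handle, so lock state converges on a single object. A self-contained sketch of that behavior, assuming the workspace's dashmap 5.x API and with a hypothetical `Queue` standing in for the real `UsageQueue`:

```rust
// Self-contained sketch of the per-address cache pattern; `Queue` and
// `Loader` are stand-ins for the real UsageQueue and UsageQueueLoader.
use {dashmap::DashMap, solana_sdk::pubkey::Pubkey, std::sync::Arc};

#[derive(Debug, Default, Clone)]
struct Queue(Arc<()>); // cheaply clonable shared handle

#[derive(Default)]
struct Loader {
    queues: DashMap<Pubkey, Queue>,
}

impl Loader {
    fn load(&self, address: Pubkey) -> Queue {
        // or_default() inserts an empty queue on first sight of an address;
        // clone() hands back a shared handle to the same underlying queue.
        self.queues.entry(address).or_default().clone()
    }
}

fn main() {
    let loader = Loader::default();
    let address = Pubkey::new_unique();
    let (a, b) = (loader.load(address), loader.load(address));
    // both callers observe the very same queue object
    assert!(Arc::ptr_eq(&a.0, &b.0));
}
```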
+/// The primary owner of all [`UsageQueue`]s used for a particular [`PooledScheduler`].
+///
+/// Currently, this is the simplest possible implementation: it grows memory usage in an
+/// unbounded way. Cleaning will be added later. This struct is kept outside
+/// `solana-unified-scheduler-logic` to honor that crate's original intent (separation of logic
+/// from this crate). Some practical and mundane pruning will be implemented in this type.
+#[derive(Default, Debug)]
+pub struct UsageQueueLoader {
+    usage_queues: DashMap<Pubkey, UsageQueue>,
+}
+
+impl UsageQueueLoader {
+    pub fn load(&self, address: Pubkey) -> UsageQueue {
+        self.usage_queues.entry(address).or_default().clone()
+    }
+}
+
+// (this is slow, needing atomic memory reads. However, it can be turned into a much faster,
+// optimizer-friendly version as shown in this crossbeam pr:
+// https://github.com/crossbeam-rs/crossbeam/pull/1047)
+fn disconnected<T>() -> Receiver<T> {
+    // drop the sender residing at .0, returning an always-disconnected receiver.
+    unbounded().1
+}
+
 fn initialized_result_with_timings() -> ResultWithTimings {
     (Ok(()), ExecuteTimings::default())
 }
 
-// Currently, simplest possible implementation (i.e. single-threaded)
-// this will be replaced with more proper implementation...
-// not usable at all, especially for mainnet-beta
 #[derive(Debug)]
 pub struct PooledScheduler<TH: TaskHandler> {
     inner: PooledSchedulerInner<Self, TH>,
@@ -402,6 +431,7 @@ pub struct PooledScheduler<TH: TaskHandler> {
 #[derive(Debug)]
 pub struct PooledSchedulerInner<S: SpawnableScheduler<TH>, TH: TaskHandler> {
     thread_manager: ThreadManager<S, TH>,
+    usage_queue_loader: UsageQueueLoader,
 }
 
 // This type manages the OS threads for scheduling and executing transactions. The term
@@ -427,6 +457,7 @@ impl<TH: TaskHandler> PooledScheduler<TH> {
         Self::from_inner(
             PooledSchedulerInner::<Self, TH> {
                 thread_manager: ThreadManager::new(pool),
+                usage_queue_loader: UsageQueueLoader::default(),
             },
             initial_context,
         )
@@ -518,7 +549,6 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
         let new_task_receiver = self.new_task_receiver.clone();
 
         let mut session_ending = false;
-        let mut active_task_count: usize = 0;
 
         // Now, this is the main loop for the scheduler thread, which is a special beast.
         //
         // cycles out of the scheduler thread. Thus, any kinds of unessential overhead sources
         // like syscalls, VDSO, and even memory (de)allocation should be avoided at all costs
         // by design or by means of offloading at the last resort.
-        move || loop {
-            let mut is_finished = false;
-            while !is_finished {
-                select! {
-                    recv(finished_task_receiver) -> executed_task => {
-                        let executed_task = executed_task.unwrap();
-
-                        active_task_count = active_task_count.checked_sub(1).unwrap();
-                        let result_with_timings = result_with_timings.as_mut().unwrap();
-                        Self::accumulate_result_with_timings(result_with_timings, executed_task);
-                    },
-                    recv(new_task_receiver) -> message => {
-                        assert!(!session_ending);
-
-                        match message.unwrap() {
-                            NewTaskPayload::Payload(task) => {
-                                // so, we're NOT scheduling at all here; rather, just execute
-                                // tx straight off. the inter-tx locking deps aren't needed to
-                                // be resolved in the case of single-threaded FIFO like this.
-                                runnable_task_sender
-                                    .send_payload(task)
-                                    .unwrap();
-                                active_task_count = active_task_count.checked_add(1).unwrap();
-                            }
-                            NewTaskPayload::OpenSubchannel(context) => {
-                                // signal about new SchedulingContext to handler threads
-                                runnable_task_sender
-                                    .send_chained_channel(context, handler_count)
-                                    .unwrap();
-                                assert_matches!(
-                                    result_with_timings.replace(initialized_result_with_timings()),
-                                    None
-                                );
-                            }
-                            NewTaskPayload::CloseSubchannel => {
-                                session_ending = true;
-                            }
-                        }
-                    },
-                };
+        move || {
+            let (do_now, dont_now) = (&disconnected::<()>(), &never::<()>());
+            let dummy_receiver = |trigger| {
+                if trigger {
+                    do_now
+                } else {
+                    dont_now
+                }
+            };
 
-                // a really simplistic termination condition, which only works under the
-                // assumption of a single handler thread...
-                is_finished = session_ending && active_task_count == 0;
-            }
+            let mut state_machine = unsafe {
+                SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling()
+            };
 
-            if session_ending {
-                session_result_sender
-                    .send(Some(
-                        result_with_timings
-                            .take()
-                            .unwrap_or_else(initialized_result_with_timings),
-                    ))
-                    .unwrap();
-                session_ending = false;
+            loop {
+                let mut is_finished = false;
+                while !is_finished {
+                    // ALL recv selectors are eager-evaluated ALWAYS by the current crossbeam
+                    // impl, which isn't great and is inconsistent with `if`s in Rust's match
+                    // arms. So, eagerly binding the result to a variable unconditionally here
+                    // makes no perf. difference...
+                    let dummy_unblocked_task_receiver =
+                        dummy_receiver(state_machine.has_unblocked_task());
+
+                    // (Assume this is biased; i.e. select_biased! in this futures-rs pr:
+                    // https://github.com/rust-lang/futures-rs/pull/1976)
+                    //
+                    // There's something special called dummy_unblocked_task_receiver here.
+                    // This odd pattern was needed to react to newly unblocked tasks from
+                    // _not-crossbeam-channel_ event sources, precisely at the specified
+                    // precedence among other selectors, while delegating the control flow to
+                    // select_biased!.
+                    //
+                    // In this way, hot looping is avoided and the overall control flow is much
+                    // more consistent. Note that the unified scheduler will eventually go into
+                    // busy looping to seek the lowest latency. However, not now, so that the
+                    // _actual_ cpu usage can be measured easily with the select approach.
+                    select! {
+                        recv(finished_task_receiver) -> executed_task => {
+                            let executed_task = executed_task.unwrap();
+
+                            state_machine.deschedule_task(&executed_task.task);
+                            let result_with_timings = result_with_timings.as_mut().unwrap();
+                            Self::accumulate_result_with_timings(result_with_timings, executed_task);
+                        },
+                        recv(dummy_unblocked_task_receiver) -> dummy => {
+                            assert_matches!(dummy, Err(RecvError));
+
+                            let task = state_machine.schedule_unblocked_task().expect("unblocked task");
+                            runnable_task_sender.send_payload(task).unwrap();
+                        },
+                        recv(new_task_receiver) -> message => {
+                            assert!(!session_ending);
+
+                            match message.unwrap() {
+                                NewTaskPayload::Payload(task) => {
+                                    if let Some(task) = state_machine.schedule_task(task) {
+                                        runnable_task_sender.send_payload(task).unwrap();
+                                    }
+                                }
+                                NewTaskPayload::OpenSubchannel(context) => {
+                                    // signal about new SchedulingContext to handler threads
+                                    runnable_task_sender
+                                        .send_chained_channel(context, handler_count)
+                                        .unwrap();
+                                    assert_matches!(
+                                        result_with_timings.replace(initialized_result_with_timings()),
+                                        None
+                                    );
+                                }
+                                NewTaskPayload::CloseSubchannel => {
+                                    session_ending = true;
+                                }
+                            }
+                        },
+                    };
+
+                    is_finished = session_ending && state_machine.has_no_active_task();
+                }
+
+                if session_ending {
+                    state_machine.reinitialize();
+                    session_result_sender
+                        .send(Some(
+                            result_with_timings
+                                .take()
+                                .unwrap_or_else(initialized_result_with_timings),
+                        ))
+                        .unwrap();
+                    session_ending = false;
+                }
+            }
         }
     };
@@ -741,7 +807,9 @@ impl<TH: TaskHandler> InstalledScheduler for PooledScheduler<TH> {
     }
 
     fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) {
-        let task = Task::create_task(transaction.clone(), index);
+        let task = SchedulingStateMachine::create_task(transaction.clone(), index, &mut |pubkey| {
+            self.inner.usage_queue_loader.load(pubkey)
+        });
         self.inner.thread_manager.send_task(task);
     }
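The select! rework above leans on one trick worth isolating: a readiness condition that lives outside any channel (state_machine.has_unblocked_task()) is surfaced to select! by choosing between an always-disconnected receiver and a never-ready one. A stripped-down sketch of just that mechanism, with a plain counter as a stand-in for the state machine's condition:

```rust
// Stripped-down sketch of the dummy-receiver trick used in the scheduler
// loop above; `unblocked_tasks` stands in for has_unblocked_task().
use crossbeam_channel::{never, select, unbounded, Receiver, RecvError};

// Dropping the sender at .0 leaves a receiver that is permanently ready,
// always yielding Err(RecvError).
fn disconnected<T>() -> Receiver<T> {
    unbounded().1
}

fn main() {
    let (do_now, dont_now) = (disconnected::<()>(), never::<()>());
    let mut unblocked_tasks = 3_u32;

    loop {
        // re-pick the dummy receiver each iteration, based on current state
        let dummy = if unblocked_tasks > 0 { &do_now } else { &dont_now };
        select! {
            recv(dummy) -> msg => {
                // no data ever flows; readiness itself is the signal
                assert_eq!(msg, Err(RecvError));
                unblocked_tasks -= 1;
            }
            default => break, // nothing ready: never<()> blocks forever
        }
    }
    assert_eq!(unblocked_tasks, 0);
}
```

The real loop has no `default` arm because its other receivers are genuine channels, so select! simply blocks until any event source, real or dummy, becomes ready.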
@@ -1023,7 +1091,7 @@
                 .result,
             Ok(_)
         );
-        scheduler.schedule_execution(&(good_tx_after_bad_tx, 0));
+        scheduler.schedule_execution(&(good_tx_after_bad_tx, 1));
         scheduler.pause_for_recent_blockhash();
         // transaction_count should remain same as scheduler should be bailing out.
         // That's because we're testing the serialized failing execution case in this test.
@@ -1247,4 +1315,42 @@
             4
         );
     }
+
+    // See comment in SchedulingStateMachine::create_task() for the justification of this test
+    #[test]
+    fn test_enforced_get_account_locks_validation() {
+        solana_logger::setup();
+
+        let GenesisConfigInfo {
+            genesis_config,
+            ref mint_keypair,
+            ..
+        } = create_genesis_config(10_000);
+        let bank = Bank::new_for_tests(&genesis_config);
+        let bank = &setup_dummy_fork_graph(bank);
+
+        let mut tx = system_transaction::transfer(
+            mint_keypair,
+            &solana_sdk::pubkey::new_rand(),
+            2,
+            genesis_config.hash(),
+        );
+        // mangle the transfer tx to try to lock the fee_payer (= mint_keypair) address twice!
+        tx.message.account_keys.push(tx.message.account_keys[0]);
+        let tx = &SanitizedTransaction::from_transaction_for_tests(tx);
+
+        // this internally should call SanitizedTransaction::get_account_locks().
+        let result = &mut Ok(());
+        let timings = &mut ExecuteTimings::default();
+        let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
+        let handler_context = &HandlerContext {
+            log_messages_bytes_limit: None,
+            transaction_status_sender: None,
+            replay_vote_sender: None,
+            prioritization_fee_cache,
+        };
+
+        DefaultTaskHandler::handle(result, timings, bank, tx, 0, handler_context);
+        assert_matches!(result, Err(TransactionError::AccountLoadedTwice));
+    }
 }
diff --git a/validator/Cargo.toml b/validator/Cargo.toml
index 5cc76a810116b3..844a2bca9aa97f 100644
--- a/validator/Cargo.toml
+++ b/validator/Cargo.toml
@@ -1,8 +1,8 @@
 [package]
-name = "solana-validator"
+name = "agave-validator"
 description = "Blockchain, Rebuilt for Scale"
-documentation = "https://docs.rs/solana-validator"
-default-run = "solana-validator"
+documentation = "https://docs.rs/agave-validator"
+default-run = "agave-validator"
 version = { workspace = true }
 authors = { workspace = true }
 repository = { workspace = true }
@@ -11,6 +11,7 @@ license = { workspace = true }
 edition = { workspace = true }
 
 [dependencies]
+agave-geyser-plugin-interface = { workspace = true }
 chrono = { workspace = true, features = ["default", "serde"] }
 clap = { workspace = true }
 console = { workspace = true }
@@ -23,7 +24,6 @@ jsonrpc-core = { workspace = true }
 jsonrpc-core-client = { workspace = true, features = ["ipc"] }
 jsonrpc-derive = { workspace = true }
 jsonrpc-ipc-server = { workspace = true }
-jsonrpc-server-utils = { workspace = true }
 lazy_static = { workspace = true }
 libloading = { workspace = true }
 log = { workspace = true }
@@ -41,7 +41,6 @@ solana-download-utils = { workspace = true }
 solana-entry = { workspace = true }
 solana-faucet = { workspace = true }
 solana-genesis-utils = { workspace = true }
-solana-geyser-plugin-interface = { workspace = true }
 solana-geyser-plugin-manager = { workspace = true }
 solana-gossip = { workspace = true }
 solana-ledger = { workspace = true }
@@ -66,6 +65,7 @@ solana-version = { workspace = true }
 solana-vote-program = { workspace = true }
 symlink = { workspace = true }
 thiserror = { workspace = true }
+tokio = { workspace = true }
 
 [dev-dependencies]
 solana-account-decoder = { workspace = true }
diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs
index 57be4cf488865d..b6d65e3ec4a4df 100644
--- a/validator/src/admin_rpc_service.rs
+++ b/validator/src/admin_rpc_service.rs
@@ -6,7 +6,6 @@ use {
     jsonrpc_ipc_server::{
         tokio::sync::oneshot::channel as oneshot_channel, RequestContext, ServerBuilder,
     },
-    jsonrpc_server_utils::tokio,
     log::*,
     serde::{de::Deserializer, Deserialize, Serialize},
     solana_accounts_db::accounts_index::AccountIndex,
@@ -35,6 +34,7 @@ use {
         thread::{self, Builder},
         time::{Duration, SystemTime},
     },
+    tokio::runtime::Runtime,
 };
 
 #[derive(Clone)]
@@ -815,8 +815,12 @@ pub async fn connect(ledger_path: &Path) -> std::result::Result<gen_client::Client, RpcError> {
 
-pub fn runtime() -> jsonrpc_server_utils::tokio::runtime::Runtime {
-    jsonrpc_server_utils::tokio::runtime::Runtime::new().expect("new tokio runtime")
+pub fn runtime() -> Runtime {
+    tokio::runtime::Builder::new_multi_thread()
+        .thread_name("solAdminRpcRt")
+        .enable_all()
+        .build()
+        .expect("new tokio runtime")
 }
 
 #[derive(Default, Deserialize, Clone)]
diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs
index 42f5a0634c0cfa..68e6bcca4fd96f 100644
--- a/validator/src/bin/solana-test-validator.rs
+++ b/validator/src/bin/solana-test-validator.rs
@@ -1,4 +1,8 @@
 use {
+    agave_validator::{
+        admin_rpc_service, cli, dashboard::Dashboard, ledger_lockfile, lock_ledger,
+        println_name_value, redirect_stderr_to_file,
+    },
     clap::{crate_name, value_t, value_t_or_exit, values_t_or_exit},
     crossbeam_channel::unbounded,
     itertools::Itertools,
@@ -28,10 +32,6 @@ use {
     },
     solana_streamer::socket::SocketAddrSpace,
     solana_test_validator::*,
-    solana_validator::{
-        admin_rpc_service, cli, dashboard::Dashboard, ledger_lockfile, lock_ledger,
-        println_name_value, redirect_stderr_to_file,
-    },
     std::{
         collections::HashSet,
         fs, io,
diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs
index 8d5457744a23b8..12bbd0b21001c9 100644
--- a/validator/src/bootstrap.rs
+++ b/validator/src/bootstrap.rs
@@ -447,7 +447,7 @@ pub fn attempt_download_genesis_and_snapshot(
     )
     .unwrap_or_else(|err| {
         // Consider failures here to be more likely due to user error (eg,
-        // incorrect `solana-validator` command-line arguments) rather than the
+        // incorrect `agave-validator` command-line arguments) rather than the
         // RPC node failing.
// // Power users can always use the `--no-check-vote-account` option to diff --git a/validator/src/main.rs b/validator/src/main.rs index ec70796130e7d2..9741a2aecd68a8 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -2,6 +2,15 @@ #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; use { + agave_validator::{ + admin_rpc_service, + admin_rpc_service::{load_staked_nodes_overrides, StakedNodesOverrides}, + bootstrap, + cli::{app, warn_for_deprecated_arguments, DefaultArgs}, + dashboard::Dashboard, + ledger_lockfile, lock_ledger, new_spinner_progress_bar, println_name_value, + redirect_stderr_to_file, + }, clap::{crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, ArgMatches}, console::style, crossbeam_channel::unbounded, @@ -60,15 +69,6 @@ use { solana_streamer::socket::SocketAddrSpace, solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, - solana_validator::{ - admin_rpc_service, - admin_rpc_service::{load_staked_nodes_overrides, StakedNodesOverrides}, - bootstrap, - cli::{app, warn_for_deprecated_arguments, DefaultArgs}, - dashboard::Dashboard, - ledger_lockfile, lock_ledger, new_spinner_progress_bar, println_name_value, - redirect_stderr_to_file, - }, std::{ collections::{HashSet, VecDeque}, env, @@ -917,7 +917,7 @@ pub fn main() { let logfile = matches .value_of("logfile") .map(|s| s.into()) - .unwrap_or_else(|| format!("solana-validator-{}.log", identity_keypair.pubkey())); + .unwrap_or_else(|| format!("agave-validator-{}.log", identity_keypair.pubkey())); if logfile == "-" { None diff --git a/watchtower/Cargo.toml b/watchtower/Cargo.toml index d8bad3cf4d18f0..4088ee7d9b51ab 100644 --- a/watchtower/Cargo.toml +++ b/watchtower/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-watchtower" +name = "agave-watchtower" description = "Blockchain, Rebuilt for Scale" -documentation = "https://docs.rs/solana-watchtower" +documentation = "https://docs.rs/agave-watchtower" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/watchtower/README.md b/watchtower/README.md index 33a13939cd260c..ab219be67575eb 100644 --- a/watchtower/README.md +++ b/watchtower/README.md @@ -1,4 +1,4 @@ -The `solana-watchtower` program is used to monitor the health of a cluster. It +The `agave-watchtower` program is used to monitor the health of a cluster. It periodically polls the cluster over an RPC API to confirm that the transaction count is advancing, new blockhashes are available, and no validators are delinquent. Results are reported as InfluxDB metrics, with an optional push diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs index f42acdaadaabc6..341b7903c0a33e 100644 --- a/watchtower/src/main.rs +++ b/watchtower/src/main.rs @@ -47,7 +47,7 @@ fn get_config() -> Config { .version(solana_version::version!()) .after_help("ADDITIONAL HELP: To receive a Slack, Discord, PagerDuty and/or Telegram notification on sanity failure, - define environment variables before running `solana-watchtower`: + define environment variables before running `agave-watchtower`: export SLACK_WEBHOOK=... export DISCORD_WEBHOOK=... 
@@ -63,7 +63,7 @@ fn get_config() -> Config {
     To receive a Twilio SMS notification on failure, having a Twilio account,
       and a sending number owned by that account,
-      define environment variable before running `solana-watchtower`:
+      define an environment variable before running `agave-watchtower`:
         export TWILIO_CONFIG='ACCOUNT=<account>,TOKEN=<token>,TO=<to>,FROM=<from>'")
         .arg({
@@ -166,7 +166,7 @@ fn get_config() -> Config {
                 .value_name("SUFFIX")
                 .takes_value(true)
                 .default_value("")
-                .help("Add this string into all notification messages after \"solana-watchtower\"")
+                .help("Add this string into all notification messages after \"agave-watchtower\"")
         )
         .get_matches();
@@ -381,7 +381,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     if let Some((failure_test_name, failure_error_message)) = &failure {
         let notification_msg = format!(
-            "solana-watchtower{}: Error: {}: {}",
+            "agave-watchtower{}: Error: {}: {}",
             config.name_suffix, failure_test_name, failure_error_message
         );
         num_consecutive_failures += 1;
@@ -415,7 +415,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
             );
             info!("{}", all_clear_msg);
             notifier.send(
                &format!("solana-watchtower{}: {}", config.name_suffix, all_clear_msg),
-                &format!("solana-watchtower{}: {}", config.name_suffix, all_clear_msg),
+                &format!("agave-watchtower{}: {}", config.name_suffix, all_clear_msg),
                 &NotificationType::Resolve { incident },
             );
         }
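As a closing note on the admin RPC change earlier in this patch: dropping jsonrpc_server_utils's bundled tokio in favor of the workspace tokio is what makes a named, multi-threaded runtime possible. A minimal sketch of that builder pattern, assuming a tokio 1.x dependency with the `rt-multi-thread` feature; the thread name is the one from the diff, everything else is stock tokio API:

```rust
// Sketch of the named multi-threaded runtime the patch builds for the admin
// RPC service.
use tokio::runtime::Runtime;

fn runtime() -> Runtime {
    tokio::runtime::Builder::new_multi_thread()
        .thread_name("solAdminRpcRt") // visible in debuggers and thread lists
        .enable_all() // turn on both the I/O and time drivers
        .build()
        .expect("new tokio runtime")
}

fn main() {
    let rt = runtime();
    rt.block_on(async {
        let name = tokio::spawn(async {
            // spawned tasks run on worker threads carrying the configured name
            std::thread::current().name().map(str::to_owned)
        })
        .await
        .unwrap();
        assert_eq!(name.as_deref(), Some("solAdminRpcRt"));
    });
}
```

Naming the workers is a small but practical win over the old `Runtime::new()` default, since admin RPC threads become identifiable in thread dumps of a busy validator process.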