diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index e5f7c85bcb8..8628c222dd7 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,5 +1,5 @@ { - "image": "ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45", + "image": "ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d", "remoteUser": "ubuntu", "privileged": true, "runArgs": [ diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8ae0fbacec3..cf8eaa46260 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -50,6 +50,7 @@ go_deps.bzl @dfinity/idx /packages/ic-ledger-hash-of/ @dfinity/finint /packages/pocket-ic/ @dfinity/pocket-ic /packages/ic-ethereum-types/ @dfinity/cross-chain-team +/packages/ic-metrics-assert/ @dfinity/cross-chain-team /packages/ic-sha3/ @dfinity/crypto-team /packages/ic-signature-verification/ @dfinity/crypto-team /packages/ic-vetkd-utils/ @dfinity/crypto-team diff --git a/.github/workflows-source/ci-main.yml b/.github/workflows-source/ci-main.yml index ae371889aee..ff9a337cc5f 100644 --- a/.github/workflows-source/ci-main.yml +++ b/.github/workflows-source/ci-main.yml @@ -32,7 +32,7 @@ env: anchors: image: &image - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d dind-large-setup: &dind-large-setup runs-on: labels: dind-large @@ -49,13 +49,12 @@ anchors: container: <<: *image timeout-minutes: 30 - docker-login: &docker-login - name: Login to Dockerhub + before-script: &before-script + name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" checkout: &checkout 
name: Checkout uses: actions/checkout@v4 @@ -105,7 +104,7 @@ jobs: RUN_ON_DIFF_ONLY: ${{ github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'CI_ALL_BAZEL_TARGETS') }} steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Set BAZEL_EXTRA_ARGS shell: bash run: | @@ -163,7 +162,7 @@ jobs: if: ${{ contains(github.event.pull_request.labels.*.name, 'CI_BUILD_CHECK') }} steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run bazel build --config=check //rs/... id: bazel-build-config-check uses: ./.github/actions/bazel-test-all/ @@ -190,7 +189,8 @@ jobs: run: | echo "/usr/local/bin" >> $GITHUB_PATH echo "$HOME/.cargo/bin:" >> $GITHUB_PATH - - <<: *docker-login + # use llvm-clang instead of apple's + echo "CC=/usr/local/opt/llvm/bin/clang" >> "$GITHUB_ENV" - name: Run Bazel Test Darwin x86-64 id: bazel-test-darwin-x86-64 uses: ./.github/actions/bazel-test-all/ @@ -273,7 +273,7 @@ jobs: if: ${{ github.event_name != 'merge_group' }} steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Build IC id: build-ic shell: bash diff --git a/.github/workflows-source/ci-pr-only.yml b/.github/workflows-source/ci-pr-only.yml index 6d13482cbb7..10a556e4a67 100644 --- a/.github/workflows-source/ci-pr-only.yml +++ b/.github/workflows-source/ci-pr-only.yml @@ -17,7 +17,7 @@ env: anchors: image: &image - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d dind-small-setup: &dind-small-setup timeout-minutes: 30 runs-on: diff --git a/.github/workflows-source/release-testing.yml b/.github/workflows-source/release-testing.yml index 9b7c1c854d7..a2a6c2d66ff 100644 --- a/.github/workflows-source/release-testing.yml +++ b/.github/workflows-source/release-testing.yml @@ -23,7 +23,7 @@ env: anchors: image: &image - image: 
ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d dind-large-setup: &dind-large-setup runs-on: group: zh1 @@ -40,13 +40,12 @@ anchors: uses: actions/checkout@v4 with: ref: ${{ github.event.workflow_run.head_branch }} - docker-login: &docker-login - name: Login to Dockerhub + before-script: &before-script + name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" bazel-bep: &bazel-bep name: Upload bazel-bep # runs only if previous step succeeded or failed; @@ -73,7 +72,7 @@ jobs: <<: *dind-large-setup steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Bazel System Test Nightly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -90,7 +89,7 @@ jobs: <<: *dind-large-setup steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Bazel System Test Staging id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -108,7 +107,7 @@ jobs: timeout-minutes: 90 steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Bazel Test All id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -135,7 +134,7 @@ jobs: REPO_NAME: ${{ github.repository }} steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Set up Python uses: actions/setup-python@v5 with: @@ -182,7 +181,7 @@ jobs: <<: *dind-large-setup steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run qualification for version ${{ matrix.version }} from the tip of the branch uses: ./.github/actions/bazel-test-all/ with: diff --git a/.github/workflows-source/schedule-daily.yml b/.github/workflows-source/schedule-daily.yml index 
d8546002961..a5eb842d9e4 100644 --- a/.github/workflows-source/schedule-daily.yml +++ b/.github/workflows-source/schedule-daily.yml @@ -16,7 +16,7 @@ env: anchors: image: &image - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d dind-large-setup: &dind-large-setup runs-on: group: zh1 @@ -31,13 +31,12 @@ anchors: checkout: &checkout name: Checkout uses: actions/checkout@v4 - docker-login: &docker-login - name: Login to Dockerhub + before-script: &before-script + name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" bazel-bep: &bazel-bep name: Upload bazel-bep # runs only if previous step succeeded or failed; @@ -64,7 +63,7 @@ jobs: labels: dind-large steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Bazel Launch Bare Metal shell: bash run: | @@ -108,7 +107,7 @@ jobs: timeout-minutes: 720 # 12 hours steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run FI Tests Nightly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -127,7 +126,7 @@ jobs: timeout-minutes: 20 steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run NNS Tests Nightly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -146,7 +145,7 @@ jobs: timeout-minutes: 480 steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Set Benchmark Targets shell: bash run: | diff --git a/.github/workflows-source/schedule-hourly.yml b/.github/workflows-source/schedule-hourly.yml index 580333253f4..0ed1834aeb2 100644 --- a/.github/workflows-source/schedule-hourly.yml +++ b/.github/workflows-source/schedule-hourly.yml @@ -16,7 +16,7 @@ 
env: anchors: image: &image - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d dind-large-setup: &dind-large-setup runs-on: labels: dind-large @@ -30,13 +30,12 @@ anchors: checkout: &checkout name: Checkout uses: actions/checkout@v4 - docker-login: &docker-login - name: Login to Dockerhub + before-script: &before-script + name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" bazel-bep: &bazel-bep name: Upload bazel-bep # runs only if previous step succeeded or failed; @@ -58,7 +57,7 @@ jobs: <<: *dind-large-setup steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Bazel Build All No Cache uses: ./.github/actions/bazel-test-all/ env: @@ -77,7 +76,7 @@ jobs: labels: dind-large steps: - <<: *checkout - - <<: *docker-login + - <<: *before-script - name: Run Bazel System Test Hourly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ diff --git a/.github/workflows/anonymization-backend-release.yml b/.github/workflows/anonymization-backend-release.yml index 6cb04a8c820..3f44f4cd37f 100644 --- a/.github/workflows/anonymization-backend-release.yml +++ b/.github/workflows/anonymization-backend-release.yml @@ -33,7 +33,7 @@ jobs: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info diff --git a/.github/workflows/ci-main.yml 
b/.github/workflows/ci-main.yml index e69dad8e241..5b140e1467f 100644 --- a/.github/workflows/ci-main.yml +++ b/.github/workflows/ci-main.yml @@ -30,7 +30,7 @@ jobs: bazel-test-all: name: Bazel Test All container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info timeout-minutes: 90 @@ -48,12 +48,11 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: ${{ github.event_name == 'pull_request' && 256 || 0 }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Set BAZEL_EXTRA_ARGS shell: bash run: | @@ -127,7 +126,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info timeout-minutes: 90 @@ -138,12 +137,11 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: ${{ github.event_name == 'pull_request' && 256 || 0 }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run bazel build --config=check 
//rs/... id: bazel-build-config-check uses: ./.github/actions/bazel-test-all/ @@ -183,12 +181,8 @@ jobs: run: | echo "/usr/local/bin" >> $GITHUB_PATH echo "$HOME/.cargo/bin:" >> $GITHUB_PATH - - name: Login to Dockerhub - shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + # use llvm-clang instead of apple's + echo "CC=/usr/local/opt/llvm/bin/clang" >> "$GITHUB_ENV" - name: Run Bazel Test Darwin x86-64 id: bazel-test-darwin-x86-64 uses: ./.github/actions/bazel-test-all/ @@ -224,7 +218,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info timeout-minutes: 90 @@ -259,7 +253,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info timeout-minutes: 90 @@ -294,7 +288,7 @@ jobs: runs-on: labels: dind-small container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d timeout-minutes: 30 steps: - name: Checkout @@ -324,7 +318,7 @@ jobs: build-ic: name: Build IC container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: 
ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info timeout-minutes: 90 @@ -338,12 +332,11 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: ${{ github.event_name == 'pull_request' && 256 || 0 }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Build IC id: build-ic shell: bash @@ -427,7 +420,7 @@ jobs: cargo-clippy-linux: name: Cargo Clippy Linux container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d timeout-minutes: 30 runs-on: group: ch1 @@ -464,7 +457,7 @@ jobs: cargo-build-release-linux: name: Cargo Build Release Linux container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d timeout-minutes: 30 runs-on: group: ch1 diff --git a/.github/workflows/ci-pr-only.yml b/.github/workflows/ci-pr-only.yml index 1d9a94645d2..870eef04fd0 100644 --- a/.github/workflows/ci-pr-only.yml +++ b/.github/workflows/ci-pr-only.yml @@ -20,7 +20,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME if: ${{ github.event_name != 'merge_group' }} @@ -61,7 +61,7 @@ jobs: 
runs-on: labels: dind-small container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME if: ${{ github.event_name != 'merge_group' }} @@ -102,7 +102,7 @@ jobs: runs-on: labels: dind-small container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME if: ${{ github.event_name != 'merge_group' }} @@ -150,7 +150,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME if: contains(github.event.pull_request.labels.*.name, 'CI_COVERAGE') diff --git a/.github/workflows/rate-limits-backend-release.yml b/.github/workflows/rate-limits-backend-release.yml index b17b9c10ffb..02948d0370f 100644 --- a/.github/workflows/rate-limits-backend-release.yml +++ b/.github/workflows/rate-limits-backend-release.yml @@ -33,7 +33,7 @@ jobs: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info diff --git a/.github/workflows/release-testing.yml b/.github/workflows/release-testing.yml index f6cdc4c7500..f21715b6e63 100644 --- a/.github/workflows/release-testing.yml +++ b/.github/workflows/release-testing.yml @@ -28,7 +28,7 @@ jobs: group: zh1 labels: dind-large 
container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 180 # 3 hours @@ -37,12 +37,11 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ github.event.workflow_run.head_branch }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Bazel System Test Nightly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -71,7 +70,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 180 # 3 hours @@ -80,12 +79,11 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ github.event.workflow_run.head_branch }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Bazel System Test Staging id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -114,7 +112,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + 
image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 90 @@ -123,12 +121,11 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ github.event.workflow_run.head_branch }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Bazel Test All id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -157,7 +154,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 60 @@ -175,12 +172,11 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ github.event.workflow_run.head_branch }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Set up Python uses: actions/setup-python@v5 with: @@ -205,7 +201,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v 
/var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 180 # 3 hours @@ -233,7 +229,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 180 # 3 hours @@ -242,12 +238,11 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ github.event.workflow_run.head_branch }} - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run qualification for version ${{ matrix.version }} from the tip of the branch uses: ./.github/actions/bazel-test-all/ with: diff --git a/.github/workflows/rosetta-release.yml b/.github/workflows/rosetta-release.yml index 2da864a410d..cd99e734580 100644 --- a/.github/workflows/rosetta-release.yml +++ b/.github/workflows/rosetta-release.yml @@ -22,7 +22,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/schedule-daily.yml b/.github/workflows/schedule-daily.yml index 833d90f273b..771bc20432e 100644 --- a/.github/workflows/schedule-daily.yml +++ b/.github/workflows/schedule-daily.yml @@ -15,7 +15,7 @@ jobs: bazel-test-bare-metal: name: Bazel Test Bare Metal container: - image: 
ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 120 @@ -25,12 +25,11 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Bazel Launch Bare Metal shell: bash run: | @@ -73,19 +72,18 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 720 # 12 hours steps: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run FI Tests Nightly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -115,19 +113,18 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v 
/cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 20 steps: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run NNS Tests Nightly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -157,19 +154,18 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 480 steps: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Set Benchmark Targets shell: bash run: | @@ -212,7 +208,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 60 @@ -264,7 +260,7 @@ jobs: group: zh1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: 
ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 120 diff --git a/.github/workflows/schedule-hourly.yml b/.github/workflows/schedule-hourly.yml index f374fb886c1..99c579508e3 100644 --- a/.github/workflows/schedule-hourly.yml +++ b/.github/workflows/schedule-hourly.yml @@ -17,19 +17,18 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 120 steps: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Bazel Build All No Cache uses: ./.github/actions/bazel-test-all/ env: @@ -54,7 +53,7 @@ jobs: bazel-system-test-hourly: name: Bazel System Tests Hourly container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 120 @@ -64,12 +63,11 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ 
vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Run Bazel System Test Hourly id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -97,7 +95,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp timeout-minutes: 120 diff --git a/.github/workflows/schedule-rust-bench.yml b/.github/workflows/schedule-rust-bench.yml index b95f77f7bc5..eb8e515e004 100644 --- a/.github/workflows/schedule-rust-bench.yml +++ b/.github/workflows/schedule-rust-bench.yml @@ -20,7 +20,7 @@ jobs: # see linux-x86-64 runner group labels: rust-benchmarks container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d # running on bare metal machine using ubuntu user options: --user ubuntu -v /cache:/cache timeout-minutes: 720 # 12 hours diff --git a/.github/workflows/schedule-weekly.yml b/.github/workflows/schedule-weekly.yml index 918fe9b4864..e0ac84c0026 100644 --- a/.github/workflows/schedule-weekly.yml +++ b/.github/workflows/schedule-weekly.yml @@ -14,7 +14,7 @@ jobs: runs-on: labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME -v /cache:/cache diff --git a/.github/workflows/system-tests-k8s.yml b/.github/workflows/system-tests-k8s.yml index 5544f6a0994..7eab15df6d5 100644 --- 
a/.github/workflows/system-tests-k8s.yml +++ b/.github/workflows/system-tests-k8s.yml @@ -19,9 +19,9 @@ on: required: false default: '//rs/tests/nns:node_removal_from_registry_test' jobs: - description: 'Concurrent Bazel Jobs' + description: 'Concurrent Bazel Test Jobs' required: false - default: '32' + default: '10' env: TARGETS: | @@ -29,9 +29,9 @@ env: github.event_name == 'workflow_dispatch' && github.event.inputs.targets || '//rs/tests/nns:node_removal_from_registry_test' }} JOBS: | - ${{ github.event_name == 'schedule' && '12' || + ${{ github.event_name == 'schedule' && '10' || github.event_name == 'workflow_dispatch' && github.event.inputs.jobs || - '32' }} + '10' }} BRANCH_NAME: ${{ github.head_ref || github.ref_name }} CI_COMMIT_SHA: ${{ github.sha }} CI_JOB_NAME: ${{ github.job }} @@ -48,7 +48,7 @@ jobs: group: ln1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME -e KUBECONFIG --privileged --cgroupns host @@ -58,12 +58,11 @@ jobs: - name: Checkout uses: actions/checkout@v4 - - name: Login to Dockerhub + - name: Before script + id: before-script shell: bash - run: ./ci/scripts/docker-login.sh - env: - DOCKER_HUB_USER: ${{ vars.DOCKER_HUB_USER }} - DOCKER_HUB_PASSWORD_RO: ${{ secrets.DOCKER_HUB_PASSWORD_RO }} + run: | + [ -n "${NODE_NAME:-}" ] && echo "Node: $NODE_NAME" - name: Set KUBECONFIG shell: bash @@ -82,7 +81,7 @@ jobs: BAZEL_COMMAND: "test" BAZEL_TARGETS: "${{ env.TARGETS }}" BAZEL_CI_CONFIG: "--config=ci --repository_cache=/cache/bazel" - BAZEL_EXTRA_ARGS: "--jobs=${{ env.JOBS }} --test_tag_filters=k8s,-manual,-colocated,-system_test_hourly,-system_test_nightly --k8s" + BAZEL_EXTRA_ARGS: "--local_test_jobs=${{ env.JOBS }} --test_tag_filters=k8s,-manual,-colocated,-long_test,-system_test_hourly,-system_test_nightly --k8s" 
BUILDEVENT_APIKEY: ${{ secrets.HONEYCOMB_API_TOKEN }} - name: Upload bazel-bep @@ -103,7 +102,7 @@ jobs: group: ln1 labels: dind-large container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME -e KUBECONFIG --privileged --cgroupns host @@ -131,6 +130,25 @@ jobs: env: TNET_KUBECONFIG: ${{ secrets.TNET_KUBECONFIG }} + - name: Get Bazel Target List + shell: bash + run: | + # Query Bazel for targets: system tests with k8s flag AND (long_test flag OR system_test_hourly flag) + T=$(bazel query 'attr(tags, "k8s", tests(attr(tags, "long_test|system_test_hourly", //...))) except attr(tags, "colocated|manual|system_test_nightly", //...)') + + # Handle empty target list + if [[ -z "$T" ]]; then + echo "No Bazel targets found matching the criteria." + echo "TARGETS=" >> $GITHUB_ENV + exit 1 + fi + + # Convert to space-separated list and trim + T=$(echo "$T" | tr '\n' ' ' | sed -e 's/,$//' -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//') + + # Export to GitHub environment + echo "TARGETS=$T" >> $GITHUB_ENV + - name: Run System Tests on K8s id: bazel-test-all uses: ./.github/actions/bazel-test-all/ @@ -140,7 +158,7 @@ jobs: BAZEL_COMMAND: "test" BAZEL_TARGETS: "${{ env.TARGETS }}" BAZEL_CI_CONFIG: "--config=ci --repository_cache=/cache/bazel" - BAZEL_EXTRA_ARGS: "--jobs=${{ env.JOBS }} --test_tag_filters=k8s,system_test_hourly,-manual,-colocated,-system_test_nightly --k8s" + BAZEL_EXTRA_ARGS: "--local_test_jobs=${{ env.JOBS }} --test_tag_filters=k8s --k8s" BUILDEVENT_APIKEY: ${{ secrets.HONEYCOMB_API_TOKEN }} - name: Upload bazel-bep diff --git a/.github/workflows/test-namespace-darwin.yaml b/.github/workflows/test-namespace-darwin.yaml index 69f85eb4fa8..2f89d52daca 100644 --- a/.github/workflows/test-namespace-darwin.yaml +++ b/.github/workflows/test-namespace-darwin.yaml @@ -32,6 +32,9 @@ 
jobs: # Build and test, excluding 'upload' jobs that are not required on macOS (used in reproducibility tests) - name: Test run: | + # Until we have a hermetic CC toolchain, tell bazel to use the "real" clang + # (instead of Apple's, which sometimes breaks on wasm32) + export CC=/opt/homebrew/opt/llvm/bin/clang bazel \ --noworkspace_rc \ --bazelrc=./bazel/conf/.bazelrc.build --bazelrc=/tmp/bazel-cache.bazelrc \ diff --git a/.github/workflows/update-mainnet-revisions.yaml b/.github/workflows/update-mainnet-revisions.yaml index d847894404e..605c82259e1 100644 --- a/.github/workflows/update-mainnet-revisions.yaml +++ b/.github/workflows/update-mainnet-revisions.yaml @@ -38,7 +38,7 @@ jobs: runs-on: labels: dind-small container: - image: ghcr.io/dfinity/ic-build@sha256:4fd13b47285e783c3a6f35aadd9559d097c0de162a1cf221ead66ab1598d5d45 + image: ghcr.io/dfinity/ic-build@sha256:2e8185171700872d48fdfb4b08e175fca5be27b3fbbc4d7bed681ec8486f8b1d options: >- -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info steps: diff --git a/Cargo.Bazel.Fuzzing.json.lock b/Cargo.Bazel.Fuzzing.json.lock index 3ec5130c1bf..62b92f3b732 100644 --- a/Cargo.Bazel.Fuzzing.json.lock +++ b/Cargo.Bazel.Fuzzing.json.lock @@ -1,5 +1,5 @@ { - "checksum": "ee07bd5cde20eb057c9baa703b47a96ece7ed32714378e3b62f6cb2a1bde2d94", + "checksum": "e2f0c9fb8047d4200e8e14e235a3d28792aef48b70952cf21a1f849ff9ea495d", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -5439,7 +5439,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -9614,7 +9614,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -10766,20 +10766,14 @@ ], "license_file": "LICENSE-APACHE" }, - "cc 1.0.83": { + "cc 1.1.37": { "name": "cc", - "version": "1.0.83", + "version": "1.1.37", "package_url": "https://github.com/rust-lang/cc-rs", "repository": { "Http": { - "url": 
"https://static.crates.io/crates/cc/1.0.83/download", - "sha256": "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0", - "patch_args": [ - "-p1" - ], - "patches": [ - "@@//bazel:cc_rs.patch" - ] + "url": "https://static.crates.io/crates/cc/1.1.37/download", + "sha256": "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" } }, "targets": [ @@ -10805,97 +10799,98 @@ "common": [], "selects": { "aarch64-apple-darwin": [ - "jobserver", "parallel" ], "aarch64-pc-windows-msvc": [ - "jobserver", "parallel" ], "aarch64-unknown-linux-gnu": [ - "jobserver", "parallel" ], "aarch64-unknown-nixos-gnu": [ - "jobserver", "parallel" ], "arm-unknown-linux-gnueabi": [ - "jobserver", "parallel" ], "i686-pc-windows-msvc": [ - "jobserver", "parallel" ], "i686-unknown-linux-gnu": [ - "jobserver", "parallel" ], "powerpc-unknown-linux-gnu": [ - "jobserver", "parallel" ], "s390x-unknown-linux-gnu": [ - "jobserver", "parallel" ], "x86_64-apple-darwin": [ - "jobserver", "parallel" ], "x86_64-pc-windows-msvc": [ - "jobserver", "parallel" ], "x86_64-unknown-freebsd": [ - "jobserver", "parallel" ], "x86_64-unknown-linux-gnu": [ - "jobserver", "parallel" ], "x86_64-unknown-nixos-gnu": [ - "jobserver", "parallel" ] } }, "deps": { - "common": [], + "common": [ + { + "id": "shlex 1.3.0", + "target": "shlex" + } + ], "selects": { "aarch64-apple-darwin": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "aarch64-pc-windows-msvc": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" } ], "aarch64-unknown-linux-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "aarch64-unknown-nixos-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "arm-unknown-linux-gnueabi": [ { - "id": 
"jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" - } - ], - "cfg(unix)": [ + }, { "id": "libc 0.2.158", "target": "libc" @@ -10903,62 +10898,90 @@ ], "i686-pc-windows-msvc": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" } ], "i686-unknown-linux-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "powerpc-unknown-linux-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "s390x-unknown-linux-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-apple-darwin": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-pc-windows-msvc": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" } ], "x86_64-unknown-freebsd": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-unknown-linux-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-unknown-nixos-gnu": [ { - "id": "jobserver 0.1.27", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ] } }, "edition": "2018", - "version": "1.0.83" + "version": "1.1.37" }, "license": "MIT OR Apache-2.0", "license_ids": [ @@ -11258,16 +11281,6 @@ "target": "wasm_bindgen" } ], - "wasm32-wasi": [ - { - "id": "serde-wasm-bindgen 0.5.0", - "target": "serde_wasm_bindgen" - }, - { - "id": "wasm-bindgen 0.2.95", - "target": "wasm_bindgen" - } - ], "wasm32-wasip1": [ { "id": "serde-wasm-bindgen 0.5.0", @@ -18517,7 +18530,7 @@ "target": 
"cargo_metadata" }, { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -19398,6 +19411,10 @@ "id": "syn 1.0.109", "target": "syn" }, + { + "id": "syscalls 0.6.18", + "target": "syscalls" + }, { "id": "tar 0.4.39", "target": "tar" @@ -21965,7 +21982,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -23950,7 +23967,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -29877,7 +29894,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -35819,14 +35836,14 @@ ], "license_file": "LICENSE-APACHE" }, - "jobserver 0.1.27": { + "jobserver 0.1.32": { "name": "jobserver", - "version": "0.1.27", - "package_url": "https://github.com/alexcrichton/jobserver-rs", + "version": "0.1.32", + "package_url": "https://github.com/rust-lang/jobserver-rs", "repository": { "Http": { - "url": "https://static.crates.io/crates/jobserver/0.1.27/download", - "sha256": "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" + "url": "https://static.crates.io/crates/jobserver/0.1.32/download", + "sha256": "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" } }, "targets": [ @@ -35859,10 +35876,10 @@ ] } }, - "edition": "2018", - "version": "0.1.27" + "edition": "2021", + "version": "0.1.32" }, - "license": "MIT/Apache-2.0", + "license": "MIT OR Apache-2.0", "license_ids": [ "Apache-2.0", "MIT" @@ -38250,7 +38267,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -38532,7 +38549,7 @@ "target": "bindgen" }, { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38633,7 +38650,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38739,7 +38756,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38848,7 +38865,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { 
@@ -38946,7 +38963,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -39354,7 +39371,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -40124,7 +40141,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -45196,7 +45213,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -47642,7 +47659,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -52537,7 +52554,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -58390,12 +58407,6 @@ "target": "wasm_streams" } ], - "wasm32-wasi": [ - { - "id": "wasm-streams 0.4.0", - "target": "wasm_streams" - } - ], "wasm32-wasip1": [ { "id": "wasm-streams 0.4.0", @@ -59228,7 +59239,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -59345,7 +59356,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -60951,11 +60962,6 @@ "termios", "use-libc-auxv" ], - "wasm32-wasi": [ - "default", - "termios", - "use-libc-auxv" - ], "wasm32-wasip1": [ "default", "termios", @@ -61291,17 +61297,6 @@ "target": "libc" } ], - "wasm32-wasi": [ - { - "id": "errno 0.3.8", - "target": "errno", - "alias": "libc_errno" - }, - { - "id": "libc 0.2.158", - "target": "libc" - } - ], "wasm32-wasip1": [ { "id": "errno 0.3.8", @@ -63736,7 +63731,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -63821,7 +63816,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -63910,7 +63905,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -68248,7 +68243,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -69707,6 +69702,95 @@ ], "license_file": "LICENSE" }, + "syscalls 0.6.18": { + 
"name": "syscalls", + "version": "0.6.18", + "package_url": "https://github.com/jasonwhite/syscalls", + "repository": { + "Http": { + "url": "https://static.crates.io/crates/syscalls/0.6.18/download", + "sha256": "43d0e35dc7d73976a53c7e6d7d177ef804a0c0ee774ec77bcc520c2216fd7cbe" + } + }, + "targets": [ + { + "Library": { + "crate_name": "syscalls", + "crate_root": "src/lib.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + }, + { + "BuildScript": { + "crate_name": "build_script_build", + "crate_root": "build.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + } + ], + "library_target_name": "syscalls", + "common_attrs": { + "compile_data_glob": [ + "**" + ], + "crate_features": { + "common": [ + "default", + "serde", + "serde_repr", + "std" + ], + "selects": {} + }, + "deps": { + "common": [ + { + "id": "serde 1.0.217", + "target": "serde" + }, + { + "id": "syscalls 0.6.18", + "target": "build_script_build" + } + ], + "selects": {} + }, + "edition": "2021", + "proc_macro_deps": { + "common": [ + { + "id": "serde_repr 0.1.19", + "target": "serde_repr" + } + ], + "selects": {} + }, + "version": "0.6.18" + }, + "build_script_attrs": { + "compile_data_glob": [ + "**" + ], + "data_glob": [ + "**" + ] + }, + "license": "BSD-2-Clause", + "license_ids": [ + "BSD-2-Clause" + ], + "license_file": "LICENSE" + }, "system-configuration 0.5.1": { "name": "system-configuration", "version": "0.5.1", @@ -71709,7 +71793,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -80244,7 +80328,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -80783,7 +80867,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -86202,7 +86286,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -86327,7 +86411,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", 
"wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86358,7 +86441,6 @@ "wasm32-unknown-unknown" ], "cfg(all(target_arch = \"wasm32\", target_os = \"wasi\"))": [ - "wasm32-wasi", "wasm32-wasip1" ], "cfg(all(target_arch = \"wasm32\", target_vendor = \"unknown\", target_os = \"unknown\", target_env = \"\"))": [ @@ -86641,7 +86723,6 @@ "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86768,7 +86849,6 @@ "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86831,7 +86911,6 @@ "riscv64gc-unknown-none-elf", "s390x-unknown-linux-gnu", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86866,7 +86945,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86955,7 +87033,6 @@ "s390x-unknown-linux-gnu", "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -87057,7 +87134,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -87093,7 +87169,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -87119,7 +87194,6 @@ ], "cfg(target_arch = \"wasm32\")": [ "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1" ], "cfg(target_arch = \"x86\")": [ @@ -87184,7 +87258,6 @@ ], "cfg(target_os = \"redox\")": [], "cfg(target_os = \"wasi\")": [ - "wasm32-wasi", "wasm32-wasip1" ], "cfg(target_os = \"windows\")": [ @@ -87264,9 +87337,6 @@ "wasm32-unknown-unknown": [ 
"wasm32-unknown-unknown" ], - "wasm32-wasi": [ - "wasm32-wasi" - ], "wasm32-wasip1": [ "wasm32-wasip1" ], @@ -87350,7 +87420,7 @@ "candid 0.10.10", "candid_parser 0.1.2", "cargo_metadata 0.14.2", - "cc 1.0.83", + "cc 1.1.37", "cddl 0.9.4", "cfg-if 1.0.0", "chacha20poly1305 0.10.1", @@ -87579,6 +87649,7 @@ "stubborn-io 0.3.2", "subtle 2.6.1", "syn 1.0.109", + "syscalls 0.6.18", "tar 0.4.39", "tarpc 0.34.0", "tempfile 3.12.0", @@ -87641,5 +87712,6 @@ "zeroize 1.8.1", "zstd 0.13.2" ], - "direct_dev_deps": [] + "direct_dev_deps": [], + "unused_patches": [] } diff --git a/Cargo.Bazel.Fuzzing.toml.lock b/Cargo.Bazel.Fuzzing.toml.lock index 11569e4c150..36b66b3cf23 100644 --- a/Cargo.Bazel.Fuzzing.toml.lock +++ b/Cargo.Bazel.Fuzzing.toml.lock @@ -1827,12 +1827,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -3244,6 +3245,7 @@ dependencies = [ "stubborn-io", "subtle", "syn 1.0.109", + "syscalls", "tar", "tarpc", "tempfile", @@ -6019,9 +6021,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -10892,6 +10894,16 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "syscalls" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d0e35dc7d73976a53c7e6d7d177ef804a0c0ee774ec77bcc520c2216fd7cbe" 
+dependencies = [ + "serde", + "serde_repr", +] + [[package]] name = "system-configuration" version = "0.5.1" diff --git a/Cargo.Bazel.json.lock b/Cargo.Bazel.json.lock index cd26fde8e6d..ae3a6e269f4 100644 --- a/Cargo.Bazel.json.lock +++ b/Cargo.Bazel.json.lock @@ -1,5 +1,5 @@ { - "checksum": "af5e4debd1243293865e30a9b64a67f317d54d2087da3adf9fd816ffdd8b1262", + "checksum": "c940ed0c71e779f38de47e366662d71cd883022d89016936e1952a55719fe089", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -5440,7 +5440,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -9531,7 +9531,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -10662,20 +10662,14 @@ ], "license_file": "LICENSE-APACHE" }, - "cc 1.0.83": { + "cc 1.1.37": { "name": "cc", - "version": "1.0.83", + "version": "1.1.37", "package_url": "https://github.com/rust-lang/cc-rs", "repository": { "Http": { - "url": "https://static.crates.io/crates/cc/1.0.83/download", - "sha256": "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0", - "patch_args": [ - "-p1" - ], - "patches": [ - "@@//bazel:cc_rs.patch" - ] + "url": "https://static.crates.io/crates/cc/1.1.37/download", + "sha256": "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" } }, "targets": [ @@ -10701,97 +10695,98 @@ "common": [], "selects": { "aarch64-apple-darwin": [ - "jobserver", "parallel" ], "aarch64-pc-windows-msvc": [ - "jobserver", "parallel" ], "aarch64-unknown-linux-gnu": [ - "jobserver", "parallel" ], "aarch64-unknown-nixos-gnu": [ - "jobserver", "parallel" ], "arm-unknown-linux-gnueabi": [ - "jobserver", "parallel" ], "i686-pc-windows-msvc": [ - "jobserver", "parallel" ], "i686-unknown-linux-gnu": [ - "jobserver", "parallel" ], "powerpc-unknown-linux-gnu": [ - "jobserver", "parallel" ], "s390x-unknown-linux-gnu": [ - "jobserver", "parallel" ], "x86_64-apple-darwin": [ - "jobserver", "parallel" ], "x86_64-pc-windows-msvc": [ - "jobserver", 
"parallel" ], "x86_64-unknown-freebsd": [ - "jobserver", "parallel" ], "x86_64-unknown-linux-gnu": [ - "jobserver", "parallel" ], "x86_64-unknown-nixos-gnu": [ - "jobserver", "parallel" ] } }, "deps": { - "common": [], + "common": [ + { + "id": "shlex 1.3.0", + "target": "shlex" + } + ], "selects": { "aarch64-apple-darwin": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "aarch64-pc-windows-msvc": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" } ], "aarch64-unknown-linux-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "aarch64-unknown-nixos-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "arm-unknown-linux-gnueabi": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" - } - ], - "cfg(unix)": [ + }, { "id": "libc 0.2.158", "target": "libc" @@ -10799,62 +10794,90 @@ ], "i686-pc-windows-msvc": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" } ], "i686-unknown-linux-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "powerpc-unknown-linux-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "s390x-unknown-linux-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-apple-darwin": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-pc-windows-msvc": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": 
"jobserver" } ], "x86_64-unknown-freebsd": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-unknown-linux-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "x86_64-unknown-nixos-gnu": [ { - "id": "jobserver 0.1.26", + "id": "jobserver 0.1.32", "target": "jobserver" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ] } }, "edition": "2018", - "version": "1.0.83" + "version": "1.1.37" }, "license": "MIT OR Apache-2.0", "license_ids": [ @@ -11154,16 +11177,6 @@ "target": "wasm_bindgen" } ], - "wasm32-wasi": [ - { - "id": "serde-wasm-bindgen 0.5.0", - "target": "serde_wasm_bindgen" - }, - { - "id": "wasm-bindgen 0.2.95", - "target": "wasm_bindgen" - } - ], "wasm32-wasip1": [ { "id": "serde-wasm-bindgen 0.5.0", @@ -18345,7 +18358,7 @@ "target": "cargo_metadata" }, { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -19226,6 +19239,10 @@ "id": "syn 1.0.109", "target": "syn" }, + { + "id": "syscalls 0.6.18", + "target": "syscalls" + }, { "id": "tar 0.4.39", "target": "tar" @@ -21817,7 +21834,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -23801,7 +23818,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -29732,7 +29749,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -35653,14 +35670,14 @@ ], "license_file": "LICENSE-APACHE" }, - "jobserver 0.1.26": { + "jobserver 0.1.32": { "name": "jobserver", - "version": "0.1.26", - "package_url": "https://github.com/alexcrichton/jobserver-rs", + "version": "0.1.32", + "package_url": "https://github.com/rust-lang/jobserver-rs", "repository": { "Http": { - "url": "https://static.crates.io/crates/jobserver/0.1.26/download", - "sha256": 
"936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" + "url": "https://static.crates.io/crates/jobserver/0.1.32/download", + "sha256": "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" } }, "targets": [ @@ -35693,10 +35710,10 @@ ] } }, - "edition": "2018", - "version": "0.1.26" + "edition": "2021", + "version": "0.1.32" }, - "license": "MIT/Apache-2.0", + "license": "MIT OR Apache-2.0", "license_ids": [ "Apache-2.0", "MIT" @@ -38084,7 +38101,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -38366,7 +38383,7 @@ "target": "bindgen" }, { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38467,7 +38484,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38573,7 +38590,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38682,7 +38699,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -38780,7 +38797,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -39191,7 +39208,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -39965,7 +39982,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -45003,7 +45020,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -47449,7 +47466,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -52339,7 +52356,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -58236,12 +58253,6 @@ "target": "wasm_streams" } ], - "wasm32-wasi": [ - { - "id": "wasm-streams 0.4.0", - "target": "wasm_streams" - } - ], "wasm32-wasip1": [ { "id": "wasm-streams 0.4.0", @@ -59074,7 +59085,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -59191,7 +59202,7 @@ 
"deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -60797,11 +60808,6 @@ "termios", "use-libc-auxv" ], - "wasm32-wasi": [ - "default", - "termios", - "use-libc-auxv" - ], "wasm32-wasip1": [ "default", "termios", @@ -61137,17 +61143,6 @@ "target": "libc" } ], - "wasm32-wasi": [ - { - "id": "errno 0.3.8", - "target": "errno", - "alias": "libc_errno" - }, - { - "id": "libc 0.2.158", - "target": "libc" - } - ], "wasm32-wasip1": [ { "id": "errno 0.3.8", @@ -63582,7 +63577,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -63667,7 +63662,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -63756,7 +63751,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -68094,7 +68089,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -69553,6 +69548,95 @@ ], "license_file": "LICENSE" }, + "syscalls 0.6.18": { + "name": "syscalls", + "version": "0.6.18", + "package_url": "https://github.com/jasonwhite/syscalls", + "repository": { + "Http": { + "url": "https://static.crates.io/crates/syscalls/0.6.18/download", + "sha256": "43d0e35dc7d73976a53c7e6d7d177ef804a0c0ee774ec77bcc520c2216fd7cbe" + } + }, + "targets": [ + { + "Library": { + "crate_name": "syscalls", + "crate_root": "src/lib.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + }, + { + "BuildScript": { + "crate_name": "build_script_build", + "crate_root": "build.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + } + ], + "library_target_name": "syscalls", + "common_attrs": { + "compile_data_glob": [ + "**" + ], + "crate_features": { + "common": [ + "default", + "serde", + "serde_repr", + "std" + ], + "selects": {} + }, + "deps": { + "common": [ + { + "id": "serde 1.0.217", + "target": "serde" + }, + { + "id": "syscalls 0.6.18", + "target": "build_script_build" + } + 
], + "selects": {} + }, + "edition": "2021", + "proc_macro_deps": { + "common": [ + { + "id": "serde_repr 0.1.19", + "target": "serde_repr" + } + ], + "selects": {} + }, + "version": "0.6.18" + }, + "build_script_attrs": { + "compile_data_glob": [ + "**" + ], + "data_glob": [ + "**" + ] + }, + "license": "BSD-2-Clause", + "license_ids": [ + "BSD-2-Clause" + ], + "license_file": "LICENSE" + }, "system-configuration 0.5.1": { "name": "system-configuration", "version": "0.5.1", @@ -71555,7 +71639,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -80069,7 +80153,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -80608,7 +80692,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" } ], @@ -86160,7 +86244,7 @@ "deps": { "common": [ { - "id": "cc 1.0.83", + "id": "cc 1.1.37", "target": "cc" }, { @@ -86285,7 +86369,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86316,7 +86399,6 @@ "wasm32-unknown-unknown" ], "cfg(all(target_arch = \"wasm32\", target_os = \"wasi\"))": [ - "wasm32-wasi", "wasm32-wasip1" ], "cfg(all(target_arch = \"wasm32\", target_vendor = \"unknown\", target_os = \"unknown\", target_env = \"\"))": [ @@ -86583,7 +86665,6 @@ "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86710,7 +86791,6 @@ "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86744,7 +86824,6 @@ "riscv64gc-unknown-none-elf", "s390x-unknown-linux-gnu", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86834,7 +86913,6 @@ "s390x-unknown-linux-gnu", "thumbv7em-none-eabi", 
"thumbv8m.main-none-eabi", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86936,7 +87014,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86972,7 +87049,6 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", @@ -86998,7 +87074,6 @@ ], "cfg(target_arch = \"wasm32\")": [ "wasm32-unknown-unknown", - "wasm32-wasi", "wasm32-wasip1" ], "cfg(target_arch = \"x86\")": [ @@ -87063,7 +87138,6 @@ ], "cfg(target_os = \"redox\")": [], "cfg(target_os = \"wasi\")": [ - "wasm32-wasi", "wasm32-wasip1" ], "cfg(target_os = \"windows\")": [ @@ -87143,9 +87217,6 @@ "wasm32-unknown-unknown": [ "wasm32-unknown-unknown" ], - "wasm32-wasi": [ - "wasm32-wasi" - ], "wasm32-wasip1": [ "wasm32-wasip1" ], @@ -87229,7 +87300,7 @@ "candid 0.10.10", "candid_parser 0.1.2", "cargo_metadata 0.14.2", - "cc 1.0.83", + "cc 1.1.37", "cddl 0.9.4", "cfg-if 1.0.0", "chacha20poly1305 0.10.1", @@ -87458,6 +87529,7 @@ "stubborn-io 0.3.2", "subtle 2.6.1", "syn 1.0.109", + "syscalls 0.6.18", "tar 0.4.39", "tarpc 0.34.0", "tempfile 3.12.0", @@ -87520,5 +87592,6 @@ "zeroize 1.8.1", "zstd 0.13.2" ], - "direct_dev_deps": [] + "direct_dev_deps": [], + "unused_patches": [] } diff --git a/Cargo.Bazel.toml.lock b/Cargo.Bazel.toml.lock index a41a0957dbd..a81cc6d2d52 100644 --- a/Cargo.Bazel.toml.lock +++ b/Cargo.Bazel.toml.lock @@ -1828,12 +1828,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ 
-3233,6 +3234,7 @@ dependencies = [ "stubborn-io", "subtle", "syn 1.0.109", + "syscalls", "tar", "tarpc", "tempfile", @@ -6009,9 +6011,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -10888,6 +10890,16 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "syscalls" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d0e35dc7d73976a53c7e6d7d177ef804a0c0ee774ec77bcc520c2216fd7cbe" +dependencies = [ + "serde", + "serde_repr", +] + [[package]] name = "system-configuration" version = "0.5.1" diff --git a/Cargo.lock b/Cargo.lock index 80864728a32..da683f8ccbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "abnf" @@ -39,7 +39,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "bytes", "futures-core", "futures-sink", @@ -62,7 +62,7 @@ dependencies = [ "actix-utils", "ahash 0.8.11", "base64 0.22.1", - "bitflags 2.6.0", + "bitflags 2.8.0", "brotli 6.0.0", "bytes", "bytestring", @@ -96,7 +96,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -214,7 +214,7 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -292,9 +292,9 @@ dependencies = [ [[package]] name = "aide" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0e3b97a21e41ec5c19bfd9b4fc1f7086be104f8b988681230247ffc91cc8ed" +checksum = "5678d2978845ddb4bd736a026f467dd652d831e9e6254b0e41b07f7ee7523309" dependencies = [ "axum", "bytes", @@ -375,7 +375,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "rsa", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", ] @@ -403,7 +403,7 @@ dependencies = [ "lazy_static", "prometheus", "serde", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -447,19 +447,20 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.6" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", + "once_cell", "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" @@ -525,7 +526,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -567,7 +568,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -579,7 +580,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -691,7 +692,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] @@ -747,7 +748,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -758,13 +759,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -813,13 +814,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +checksum = 
"e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1108,7 +1109,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -1119,7 +1120,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1337,9 +1338,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "bitvec" @@ -1386,9 +1387,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2506947f73ad44e344215ccd6403ac2ae18cd8e046e581a441bf8d199f257f03" +checksum = "9fb65153674e51d3a42c8f27b05b9508cea85edfaade8aa46bc8fc18cecdfef3" dependencies = [ "borsh-derive", "cfg_aliases", @@ -1396,15 +1397,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2593a3b8b938bd68373196c9832f516be11fa487ef4ae745eb282e6a56a7244" +checksum = "a396e17ad94059c650db3d253bb6e25927f1eb462eede7e7a153bb6e75dce0a7" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1451,9 +1452,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.11.1" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"786a307d683a5bf92e6fd5fd69a7eb613751668d1d8d67d802846dfe367c62c8" +checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0" dependencies = [ "memchr", "regex-automata 0.4.9", @@ -1574,9 +1575,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -1674,7 +1675,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1710,7 +1711,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bytes", - "clap 4.5.23", + "clap 4.5.26", "futures-util", "http 1.2.0", "http-body 1.0.1", @@ -1750,9 +1751,9 @@ dependencies = [ [[package]] name = "candid" -version = "0.10.11" +version = "0.10.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04aa85a9ba2542bded33d1eff0ffb17cb98b1be8117e0a25e1ad8c62bedc881" +checksum = "51e129c4051c57daf943586e01ef72faae48b04a8f692d5f646febf17a264c38" dependencies = [ "anyhow", "binread", @@ -1771,6 +1772,14 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "candid-utils" +version = "0.9.0" +dependencies = [ + "candid", + "candid_parser", +] + [[package]] name = "candid_derive" version = "0.6.6" @@ -1780,7 +1789,7 @@ dependencies = [ "lazy_static", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1888,9 +1897,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.5" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" dependencies = [ "jobserver", "libc", @@ -1940,7 
+1949,7 @@ dependencies = [ "candid", "certificate_orchestrator_interface", "chacha20poly1305", - "clap 4.5.23", + "clap 4.5.26", "cloudflare 0.12.0 (git+https://github.com/dfinity/cloudflare-rs.git?rev=a6538a036926bd756986c9c0a5de356daef48881)", "flate2", "futures", @@ -1948,7 +1957,7 @@ dependencies = [ "ic-agent", "ic-http-certification", "ic-response-verification", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "idna 1.0.3", "instant-acme", "leb128", @@ -1958,12 +1967,12 @@ dependencies = [ "pem 1.1.1", "prometheus", "rcgen", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_cbor", "serde_json", "sha2 0.10.8", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tower 0.5.2", "tracing", @@ -1995,7 +2004,7 @@ dependencies = [ "serde", "serde_cbor", "sha2 0.10.8", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -2007,7 +2016,7 @@ dependencies = [ "ic-stable-structures", "serde", "serde_bytes", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -2150,19 +2159,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", - "clap_derive 4.5.18", + "clap_derive 4.5.24", ] [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstream", "anstyle", @@ -2185,14 +2194,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = 
"54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2368,7 +2377,7 @@ name = "config" version = "1.0.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "config_types", "ic-types", "macaddr", @@ -2395,7 +2404,7 @@ dependencies = [ "serde_json", "serde_with 1.14.0", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "url", ] @@ -2504,7 +2513,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_json", "slog", "tokio", @@ -2691,18 +2700,18 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac89549be94911dd0e839b4a7db99e9ed29c17517e1c026f61066884c168aa3c" +checksum = "88c1d02b72b6c411c0a2e92b25ed791ad5d071184193c08a34aa0fdcdf000b72" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-bitset" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bd49369f76c77e34e641af85d0956869237832c118964d08bf5f51f210875a" +checksum = "720b93bd86ebbb23ebfb2db1ed44d54b2ecbdbb2d034d485bc64aa605ee787ab" dependencies = [ "serde", "serde_derive", @@ -2710,9 +2719,9 @@ dependencies = [ [[package]] name = "cranelift-codegen" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd96ce9cf8efebd7f5ab8ced5a0ce44250280bbae9f593d74a6d7effc3582a35" +checksum = "aed3d2d9914d30b460eedd7fd507720203023997bef71452ce84873f9c93537c" dependencies = [ "bumpalo", "cranelift-bforest", @@ -2734,33 +2743,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a68e358827afe4bfb6239fcbf6fbd5ac56206ece8a99c8f5f9bbd518773281a" +checksum = "888c188d32263ec9e048873ff0b68c700933600d553f4412417916828be25f8e" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e184c9767afbe73d50c55ec29abcf4c32f9baf0d9d22b86d58c4d55e06dee181" +checksum = "4ddd5f4114d04ce7e073dd74e2ad16541fc61970726fcc8b2d5644a154ee4127" [[package]] name = "cranelift-control" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7664f2a66f053e33f149e952bb5971d138e3af637f5097727ed6dc0ed95dd" +checksum = "92cc4c98d6a4256a1600d93ccd3536f3e77da9b4ca2c279de786ac22876e67d6" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "118597e3a9cf86c3556fa579a7a23b955fa18231651a52a77a2475d305a9cf84" +checksum = "760af4b5e051b5f82097a27274b917e3751736369fa73660513488248d27f23d" dependencies = [ "cranelift-bitset", "serde", @@ -2769,9 +2778,9 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7638ea1efb069a0aa18d8ee67401b6b0d19f6bfe5de5e9ede348bfc80bb0d8c7" +checksum = "c0bf77ec0f470621655ec7539860b5c620d4f91326654ab21b075b83900f8831" dependencies = [ "cranelift-codegen", "log", @@ -2781,15 +2790,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.115.0" +version = "0.115.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c53e1152a0b01c4ed2b1e0535602b8e86458777dd9d18b28732b16325c7dc0" +checksum = "4b665d0a6932c421620be184f9fc7f7adaf1b0bc2fa77bb7ac5177c49abf645b" [[package]] name = "cranelift-native" -version = "0.115.0" +version = "0.115.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7d8f895444fa52dd7bdd0bed11bf007a7fb43af65a6deac8fcc4094c6372f7" +checksum = "bb2e75d1bd43dfec10924798f15e6474f1dbf63b0024506551aa19394dbe72ab" dependencies = [ "cranelift-codegen", "libc", @@ -2814,7 +2823,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.23", + "clap 4.5.26", "criterion-plot", "futures", "is-terminal", @@ -2913,7 +2922,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "crossterm_winapi", "libc", "mio 0.8.11", @@ -2970,7 +2979,7 @@ dependencies = [ "cssparser-macros", "dtoa-short", "itoa", - "phf 0.11.2", + "phf 0.11.3", "smallvec", ] @@ -2981,7 +2990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331" dependencies = [ "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3041,7 +3050,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3178,7 +3187,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3200,7 +3209,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3238,9 +3247,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] name = "debugid" @@ -3293,7 +3302,7 @@ checksum = 
"8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3314,7 +3323,7 @@ checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3325,7 +3334,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3338,7 +3347,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3358,7 +3367,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3366,11 +3375,11 @@ name = "deterministic_ips" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "config_types", "ic-crypto-sha2", "macaddr", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -3378,7 +3387,7 @@ name = "dflate" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "libc", "tar", ] @@ -3493,7 +3502,7 @@ name = "diroid" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "walkdir", ] @@ -3526,7 +3535,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3771,7 +3780,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3784,7 +3793,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3834,7 +3843,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -3987,9 +3996,9 @@ checksum = 
"0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -4002,7 +4011,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -4017,7 +4026,7 @@ dependencies = [ "ic-cdk 0.16.0", "mockall", "serde", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", ] @@ -4100,11 +4109,11 @@ dependencies = [ "ic-types", "ic-types-test-utils", "ic-universal-canister", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "lazy_static", "rand 0.8.5", "rand_chacha 0.3.1", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_cbor", "slog", "tokio", @@ -4388,9 +4397,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ "fastrand", "futures-core", @@ -4407,7 +4416,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4417,7 +4426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-pki-types", ] @@ -4472,6 +4481,19 @@ version = 
"0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "windows", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -4551,9 +4573,9 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "governor" @@ -4591,7 +4613,7 @@ name = "guestos_tool" version = "1.0.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "config", "indoc", "itertools 0.12.1", @@ -4928,7 +4950,7 @@ name = "hostos_tool" version = "1.0.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "config", "config_types", "deterministic_ips", @@ -5028,10 +5050,10 @@ name = "httpbin-rs" version = "0.9.0" dependencies = [ "axum", - "clap 4.5.23", + "clap 4.5.26", "hyper 1.5.2", "hyper-util", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-pemfile 2.2.0", "serde_json", "tokio", @@ -5160,7 +5182,7 @@ dependencies = [ "hyper 1.5.2", "hyper-util", "log", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", @@ -5227,7 +5249,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -5288,7 +5310,7 @@ dependencies = [ "base64 0.13.1", "candid", "chrono", - "clap 4.5.23", + "clap 4.5.26", "cycles-minting-canister", "futures", "hex", @@ -5388,7 +5410,7 @@ dependencies = [ "pkcs8", "rand 0.8.5", "rangemap", - "reqwest 0.12.9", + "reqwest 
0.12.12", "ring 0.17.8", "sec1", "serde", @@ -5398,7 +5420,7 @@ dependencies = [ "sha2 0.10.8", "simple_asn1", "stop-token", - "thiserror 2.0.8", + "thiserror 2.0.11", "time", "tokio", "tower-service", @@ -5434,7 +5456,7 @@ dependencies = [ "prost 0.13.4", "rand 0.8.5", "slog", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tower 0.5.2", "tracing", @@ -5465,7 +5487,7 @@ version = "0.9.0" dependencies = [ "bincode", "byteorder", - "clap 4.5.23", + "clap 4.5.26", "criterion", "ic-config", "ic-crypto-test-utils-canister-threshold-sigs", @@ -5508,7 +5530,7 @@ version = "0.9.0" dependencies = [ "anyhow", "chrono", - "clap 4.5.23", + "clap 4.5.26", "ic-config", "ic-crypto-utils-threshold-sig-der", "ic-logger", @@ -5520,7 +5542,7 @@ dependencies = [ "ic-test-utilities-tmpdir", "ic-types", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_json", "slog", @@ -5602,8 +5624,8 @@ dependencies = [ "base64 0.22.1", "bytes", "chacha20poly1305", - "clap 4.5.23", - "clap_derive 4.5.18", + "clap 4.5.26", + "clap_derive 4.5.24", "cloudflare 0.12.0 (git+https://github.com/cloudflare/cloudflare-rs.git?rev=f14720e42184ee176a97676e85ef2d2d85bc3aae)", "derive-new", "fqdn 0.4.4", @@ -5625,8 +5647,8 @@ dependencies = [ "prost-types 0.13.4", "rand 0.8.5", "rcgen", - "reqwest 0.12.9", - "rustls 0.23.20", + "reqwest 0.12.12", + "rustls 0.23.21", "rustls-acme", "rustls-pemfile 2.2.0", "rustls-platform-verifier", @@ -5637,7 +5659,7 @@ dependencies = [ "strum", "strum_macros", "systemstat", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tokio-io-timeout", "tokio-rustls 0.26.1", @@ -5666,7 +5688,7 @@ dependencies = [ "axum-extra", "bytes", "candid", - "clap 4.5.23", + "clap 4.5.26", "criterion", "dashmap 6.1.0", "ethnum", @@ -5718,8 +5740,8 @@ dependencies = [ "ratelimit", "rcgen", "regex", - "reqwest 0.12.9", - "rustls 0.23.20", + "reqwest 0.12.12", + "rustls 0.23.21", "rustls-pemfile 2.2.0", "serde", "serde_bytes", @@ -5731,7 +5753,7 @@ dependencies = [ "slog", 
"strum", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tikv-jemalloc-ctl", "tikv-jemallocator", "tokio", @@ -5768,7 +5790,7 @@ dependencies = [ "pem 1.1.1", "rand 0.8.5", "rand_chacha 0.3.1", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_json", "slog", "tokio", @@ -5785,7 +5807,7 @@ dependencies = [ "ic-crypto-tree-hash", "ic-system-test-driver", "ic-types", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_cbor", "slog", @@ -5809,7 +5831,7 @@ dependencies = [ "ic-system-test-driver", "prost 0.13.4", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "slog", "tokio", ] @@ -5830,7 +5852,7 @@ dependencies = [ "ic-registry-subnet-type", "ic-system-test-driver", "ic-types", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "slog", "url", ] @@ -5842,7 +5864,7 @@ dependencies = [ "bitcoin 0.28.2", "bitcoincore-rpc", "bitcoind", - "clap 4.5.23", + "clap 4.5.26", "criterion", "futures", "hashlink", @@ -5871,7 +5893,7 @@ dependencies = [ "slog", "slog-async", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tokio-socks", "tonic", @@ -5930,6 +5952,7 @@ dependencies = [ "ic-canister-log 0.2.0", "ic-canisters-http-types", "ic-cdk 0.16.0", + "ic-metrics-assert", "ic-metrics-encoder", "ic-stable-structures", "ic-test-utilities-load-wasm", @@ -5937,7 +5960,6 @@ dependencies = [ "ic-universal-canister", "pocket-ic", "proptest", - "regex", "scraper", "serde", "serde_json", @@ -5977,7 +5999,7 @@ dependencies = [ "proptest", "prost 0.13.4", "slog", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -6051,7 +6073,7 @@ dependencies = [ "prost 0.13.4", "rand 0.8.5", "rand_chacha 0.3.1", - "rustls 0.23.20", + "rustls 0.23.21", "serde", "serde_cbor", "tokio", @@ -6177,7 +6199,7 @@ dependencies = [ "serde_bytes", "serde_cbor", "sha2 0.10.8", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -6244,7 +6266,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "scoped_threadpool", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ 
-6375,7 +6397,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.2", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -6389,7 +6411,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.2", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -6564,6 +6586,7 @@ dependencies = [ "ic-icrc1-ledger", "ic-ledger-core", "ic-management-canister-types", + "ic-metrics-assert", "ic-metrics-encoder", "ic-stable-structures", "ic-state-machine-tests", @@ -6580,7 +6603,6 @@ dependencies = [ "mockall", "num-traits", "proptest", - "regex", "ripemd", "scopeguard", "serde", @@ -6644,7 +6666,7 @@ dependencies = [ "strum", "strum_macros", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "thousands", "time", "tokio", @@ -6893,7 +6915,7 @@ dependencies = [ "assert_matches", "async-trait", "bincode", - "clap 4.5.23", + "clap 4.5.26", "criterion", "hex", "ic-adapter-metrics-server", @@ -6972,7 +6994,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rsa", - "rustls 0.23.20", + "rustls 0.23.21", "serde", "sha2 0.10.8", "simple_asn1", @@ -7016,7 +7038,7 @@ dependencies = [ "pem 1.1.1", "rand 0.8.5", "rand_chacha 0.3.1", - "thiserror 2.0.8", + "thiserror 2.0.11", "wycheproof", "zeroize", ] @@ -7322,7 +7344,7 @@ dependencies = [ "stubborn-io", "tarpc", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "time", "tokio", "tokio-serde", @@ -7564,7 +7586,7 @@ dependencies = [ "serde_cbor", "strum", "strum_macros", - "thiserror 2.0.8", + "thiserror 2.0.11", "zeroize", ] @@ -7733,7 +7755,7 @@ dependencies = [ "ic-types-test-utils", "rand 0.8.5", "rand_chacha 0.3.1", - "rustls 0.23.20", + "rustls 0.23.21", "tempfile", "tokio", ] @@ -7912,7 +7934,7 @@ version = "0.9.0" dependencies = [ "ic-types", "mockall", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -7933,7 +7955,7 @@ dependencies = [ "ic-types", "pkcs8", "rand 0.8.5", - "rustls 0.23.20", + "rustls 0.23.21", "signature", "time", "tokio", @@ -7969,9 +7991,9 @@ dependencies = [ "ic-types", "json5", 
"maplit", - "rustls 0.23.20", + "rustls 0.23.21", "serde", - "thiserror 2.0.8", + "thiserror 2.0.11", "x509-parser", ] @@ -7982,7 +8004,7 @@ dependencies = [ "ic-base-types", "ic-crypto-tls-interfaces", "mockall", - "rustls 0.23.20", + "rustls 0.23.21", ] [[package]] @@ -8003,7 +8025,7 @@ dependencies = [ "serde", "serde_bytes", "serde_cbor", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -8015,7 +8037,7 @@ dependencies = [ "ic-crypto-tree-hash", "proptest", "rand 0.8.5", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -8082,7 +8104,7 @@ name = "ic-crypto-utils-tls" version = "0.9.0" dependencies = [ "ic-base-types", - "thiserror 2.0.8", + "thiserror 2.0.11", "x509-parser", ] @@ -8097,7 +8119,7 @@ dependencies = [ "ic-registry-nns-data-provider", "ic-types", "prost 0.13.4", - "reqwest 0.12.9", + "reqwest 0.12.12", "tokio", ] @@ -8167,7 +8189,7 @@ dependencies = [ name = "ic-drun" version = "0.9.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "futures", "hex", "ic-canister-sandbox-backend-lib", @@ -8212,7 +8234,7 @@ dependencies = [ "bincode", "candid", "canister-test", - "clap 4.5.23", + "clap 4.5.26", "criterion", "embedders_bench", "ic-base-types", @@ -8390,7 +8412,7 @@ dependencies = [ "anyhow", "assert_cmd", "assert_matches", - "clap 4.5.23", + "clap 4.5.26", "ic-crypto-test-utils-reproducible-rng", "ic-sys", "maplit", @@ -8444,9 +8466,9 @@ dependencies = [ "ic-metrics", "ic-test-utilities-logger", "prometheus", - "reqwest 0.12.9", + "reqwest 0.12.12", "slog", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tokio-io-timeout", "tower 0.5.2", @@ -8509,7 +8531,7 @@ dependencies = [ "ic-tracing", "ic-types", "ic-validator", - "inferno 0.12.0", + "inferno 0.12.1", "maplit", "mockall", "pretty_assertions", @@ -8517,9 +8539,9 @@ dependencies = [ "proptest", "prost 0.13.4", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "rstest", - "rustls 0.23.20", + "rustls 0.23.21", "serde", "serde_bytes", "serde_cbor", @@ -8566,7 +8588,7 
@@ dependencies = [ "maplit", "prometheus", "prost 0.13.4", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_json", "slog", @@ -8591,7 +8613,7 @@ dependencies = [ "ic-agent", "ic-http-certification", "ic-response-verification", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "thiserror 1.0.69", ] @@ -8607,7 +8629,7 @@ dependencies = [ "ic-logger", "ic-test-utilities-in-memory-logger", "mockito", - "reqwest 0.12.9", + "reqwest 0.12.12", "slog", "tar", "tempfile", @@ -8622,7 +8644,7 @@ dependencies = [ "async-stream", "byte-unit", "bytes", - "clap 4.5.23", + "clap 4.5.26", "futures", "http 1.2.0", "http-body-util", @@ -8640,13 +8662,13 @@ dependencies = [ "prometheus", "rand 0.8.5", "rstest", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-pemfile 2.2.0", "serde", "serde_json", "slog", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tokio-rustls 0.26.1", "tonic", @@ -8780,7 +8802,7 @@ dependencies = [ "icrc-ledger-types", "num-bigint 0.4.6", "pocket-ic", - "reqwest 0.12.9", + "reqwest 0.12.12", "rosetta-core", "serde", "tempfile", @@ -8797,7 +8819,7 @@ dependencies = [ "ic-rosetta-test-utils", "icp-ledger", "pocket-ic", - "reqwest 0.12.9", + "reqwest 0.12.12", "tempfile", "tokio", ] @@ -8810,7 +8832,7 @@ dependencies = [ "axum", "candid", "ciborium", - "clap 4.5.23", + "clap 4.5.26", "futures", "hex", "ic-agent", @@ -8830,7 +8852,7 @@ dependencies = [ "ic-rosetta-test-utils", "ic-sys", "ic-test-utilities-load-wasm", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "icrc-ledger-agent", "icrc-ledger-types", "indicatif", @@ -8841,7 +8863,7 @@ dependencies = [ "pocket-ic", "proptest", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "rolling-file", "rosetta-core", "rusqlite", @@ -8867,7 +8889,7 @@ version = "0.1.0" dependencies = [ "anyhow", "candid", - "clap 4.5.23", + "clap 4.5.26", "hex", "ic-agent", "ic-crypto-ed25519", @@ -8878,7 +8900,7 @@ dependencies = [ "icrc-ledger-types", "num-bigint 0.4.6", "pocket-ic", - "reqwest 0.12.9", + "reqwest 
0.12.12", "rosetta-core", "serde", "tokio", @@ -8893,7 +8915,7 @@ dependencies = [ "candid", "icrc-ledger-types", "pocket-ic", - "reqwest 0.12.9", + "reqwest 0.12.12", "tempfile", "tokio", ] @@ -8931,7 +8953,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_bytes", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -9171,7 +9193,7 @@ dependencies = [ "serde", "strum", "strum_macros", - "thiserror 2.0.8", + "thiserror 2.0.11", "tower 0.5.2", ] @@ -9180,7 +9202,7 @@ name = "ic-interfaces-adapter-client" version = "0.9.0" dependencies = [ "strum_macros", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -9235,7 +9257,7 @@ dependencies = [ "ic-crypto-tree-hash", "ic-types", "phantom_newtype", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -9270,7 +9292,7 @@ dependencies = [ "icp-ledger", "on_wire", "proptest", - "reqwest 0.12.9", + "reqwest 0.12.12", "rusqlite", "serde", "tokio", @@ -9386,6 +9408,7 @@ dependencies = [ "ic-icrc1-ledger", "ic-ledger-suite-orchestrator", "ic-management-canister-types", + "ic-metrics-assert", "ic-state-machine-tests", "ic-test-utilities-load-wasm", "ic-types", @@ -9495,7 +9518,7 @@ dependencies = [ "ic-types", "ic_consensus_system_test_utils", "icp-ledger", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "slog", "url", @@ -9509,11 +9532,11 @@ dependencies = [ "assert_matches", "candid", "candid_parser", - "clap 4.5.23", + "clap 4.5.26", "futures", "hex", "maplit", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_json", "sha2 0.10.8", @@ -9653,6 +9676,17 @@ dependencies = [ "tokio-metrics", ] +[[package]] +name = "ic-metrics-assert" +version = "0.1.0" +dependencies = [ + "candid", + "pocket-ic", + "regex", + "serde", + "serde_bytes", +] + [[package]] name = "ic-metrics-encoder" version = "1.1.1" @@ -9664,7 +9698,7 @@ name = "ic-metrics-tool" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", ] [[package]] @@ -9686,7 +9720,7 @@ dependencies = [ "pocket-ic", "serde", 
"tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", ] @@ -10447,7 +10481,7 @@ version = "0.9.0" dependencies = [ "candid", "canister-test", - "clap 4.5.23", + "clap 4.5.26", "ic-base-types", "ic-canister-client", "ic-interfaces-registry", @@ -10471,7 +10505,7 @@ dependencies = [ name = "ic-nns-inspector" version = "0.1.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "csv", "hex", "ic-base-types", @@ -10706,7 +10740,7 @@ dependencies = [ "quinn", "quinn-udp", "rcgen", - "rustls 0.23.20", + "rustls 0.23.21", "serde", "slog", "tempfile", @@ -10744,7 +10778,7 @@ dependencies = [ "pprof", "prost 0.12.6", "regex", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", ] @@ -10755,7 +10789,7 @@ dependencies = [ "anyhow", "assert_matches", "base64 0.13.1", - "clap 4.5.23", + "clap 4.5.26", "fs_extra", "ic-config", "ic-crypto-node-key-generation", @@ -10786,12 +10820,12 @@ dependencies = [ "pretty_assertions", "prost 0.13.4", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_json", "slog", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "url", "x509-cert", ] @@ -10871,11 +10905,11 @@ dependencies = [ "prost 0.13.4", "quinn", "rstest", - "rustls 0.23.20", + "rustls 0.23.21", "slog", "socket2 0.5.8", "static_assertions", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tokio-metrics", "tokio-util", @@ -10903,7 +10937,7 @@ name = "ic-recovery" version = "0.9.0" dependencies = [ "base64 0.13.1", - "clap 4.5.23", + "clap 4.5.26", "futures", "hex", "ic-artifact-pool", @@ -10933,7 +10967,7 @@ dependencies = [ "ic-test-utilities-types", "ic-types", "prost 0.13.4", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_cbor", "serde_json", @@ -10953,7 +10987,7 @@ version = "0.9.0" dependencies = [ "anyhow", "base64 0.13.1", - "clap 4.5.23", + "clap 4.5.26", "ic-base-types", "ic-crypto-sha2", "ic-crypto-utils-threshold-sig-der", @@ -10971,7 +11005,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror 2.0.8", + 
"thiserror 2.0.11", "tokio", "url", ] @@ -10983,7 +11017,7 @@ dependencies = [ "candid", "ic-base-types", "serde", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -11058,7 +11092,7 @@ dependencies = [ "ic-registry-subnet-features", "ic-types", "serde_cbor", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -11107,7 +11141,7 @@ dependencies = [ "ic-registry-transport", "ic-types", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "url", ] @@ -11180,7 +11214,7 @@ dependencies = [ "ic-registry-transport", "ic-sys", "ic-types", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -11195,7 +11229,7 @@ dependencies = [ name = "ic-registry-replicator" version = "0.9.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "ic-config", "ic-crypto-utils-threshold-sig-der", "ic-http-endpoints-metrics", @@ -11280,7 +11314,7 @@ name = "ic-replay" version = "0.9.0" dependencies = [ "candid", - "clap 4.5.23", + "clap 4.5.26", "hex", "ic-artifact-pool", "ic-canister-client", @@ -11334,7 +11368,7 @@ version = "0.9.0" dependencies = [ "assert_cmd", "canister-test", - "clap 4.5.23", + "clap 4.5.26", "criterion", "hex", "ic-artifact-pool", @@ -11590,7 +11624,7 @@ dependencies = [ "async-trait", "base64 0.13.1", "candid", - "clap 4.5.23", + "clap 4.5.26", "dfn_candid", "dfn_protobuf", "futures", @@ -11637,7 +11671,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "rolling-file", "rosetta-core", "rusqlite", @@ -11670,7 +11704,7 @@ dependencies = [ "icp-ledger", "nix 0.24.3", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "rosetta-core", "serde", "serde_bytes", @@ -11751,7 +11785,7 @@ dependencies = [ "serde", "serde_json", "textplots", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", ] @@ -11762,7 +11796,7 @@ dependencies = [ "anyhow", "base64 0.13.1", "candid", - "clap 4.5.23", + "clap 4.5.26", "futures", "hex", "ic-agent", @@ -11788,7 +11822,7 @@ dependencies = [ 
"serde_json", "serde_yaml", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", ] @@ -11804,7 +11838,7 @@ dependencies = [ "canbench-rs", "candid", "candid_parser", - "clap 4.5.23", + "clap 4.5.26", "comparable", "futures", "hex", @@ -11879,7 +11913,7 @@ version = "0.9.0" dependencies = [ "bytes", "candid", - "clap 4.5.23", + "clap 4.5.26", "comparable", "ic-base-types", "ic-nervous-system-proto", @@ -12294,7 +12328,7 @@ name = "ic-starter" version = "0.9.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "ic-config", "ic-logger", "ic-management-canister-types", @@ -12346,7 +12380,7 @@ version = "0.9.0" dependencies = [ "candid", "ciborium", - "clap 4.5.23", + "clap 4.5.26", "hex", "ic-artifact-pool", "ic-base-types", @@ -12506,7 +12540,7 @@ dependencies = [ "prost 0.13.4", "rand 0.8.5", "slog", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tokio-metrics", "tokio-util", @@ -12518,7 +12552,7 @@ dependencies = [ name = "ic-state-tool" version = "0.9.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "hex", "ic-config", "ic-logger", @@ -12543,7 +12577,7 @@ dependencies = [ name = "ic-subnet-splitting" version = "0.9.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "hex", "ic-agent", "ic-base-types", @@ -12582,7 +12616,7 @@ dependencies = [ "prost 0.13.4", "rand 0.8.5", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "wsl", ] @@ -12639,7 +12673,7 @@ dependencies = [ "candid", "canister-test", "chrono", - "clap 4.5.23", + "clap 4.5.26", "config_types", "crossbeam-channel", "cycles-minting-canister", @@ -12717,7 +12751,7 @@ dependencies = [ "ic-types", "ic-types-test-utils", "ic-universal-canister", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "ic-wasm-types", "icp-ledger", "icrc-ledger-types", @@ -12745,7 +12779,7 @@ dependencies = [ "rcgen", "regex", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "ring 0.17.8", "rosetta-core", "rsa", @@ -12763,7 +12797,7 @@ dependencies = [ "strum", "strum_macros", 
"tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "time", "tokio", "tokio-util", @@ -13152,7 +13186,7 @@ dependencies = [ "ic_consensus_system_test_utils", "ic_consensus_threshold_sig_system_test_utils", "icrc-ledger-types", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_json", "slog", ] @@ -13205,7 +13239,7 @@ dependencies = [ "serde_cbor", "serde_repr", "sha2 0.10.8", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -13252,7 +13286,7 @@ dependencies = [ "serde_with 1.14.0", "strum", "strum_macros", - "thiserror 2.0.8", + "thiserror 2.0.11", "thousands", ] @@ -13293,9 +13327,9 @@ dependencies = [ [[package]] name = "ic-utils" -version = "0.39.0" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1da4a68c45146018b8496c157ad94126b9c202ab4400c6c0a9030c1ef0f0ba" +checksum = "e1fb9c35ef4976a71d37f3ebf73ee43bb52b360be60d91d3a77f74fbc875dda4" dependencies = [ "async-trait", "candid", @@ -13308,7 +13342,7 @@ dependencies = [ "sha2 0.10.8", "strum", "strum_macros", - "thiserror 1.0.69", + "thiserror 2.0.11", "time", "tokio", ] @@ -13372,7 +13406,7 @@ dependencies = [ "ic-types", "mockall", "rand 0.8.5", - "thiserror 2.0.8", + "thiserror 2.0.11", ] [[package]] @@ -13504,7 +13538,7 @@ checksum = "19fabaeecfe37f24b433c62489242fc54503d98d4cc8d0f9ef7544dfdfc0ddcb" dependencies = [ "anyhow", "candid", - "clap 4.5.23", + "clap 4.5.26", "libflate", "rustc-demangle", "serde", @@ -13544,7 +13578,7 @@ dependencies = [ "byte-unit", "candid", "chrono", - "clap 4.5.23", + "clap 4.5.26", "console 0.11.3", "futures", "hex", @@ -13636,10 +13670,10 @@ dependencies = [ "prometheus", "proptest", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "slog", "tempfile", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "url", ] @@ -13789,7 +13823,7 @@ dependencies = [ "prost 0.13.4", "rand 0.8.5", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "rsa", "serde_json", "slog", @@ -13828,7 +13862,7 @@ dependencies = [ 
"rand 0.8.5", "rand_chacha 0.3.1", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_cbor", "serde_json", "slog", @@ -13886,7 +13920,7 @@ dependencies = [ "ic_consensus_system_test_utils", "k256", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_bytes", "serde_cbor", "slog", @@ -13911,7 +13945,7 @@ dependencies = [ name = "icp-config" version = "0.9.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "eyre", "ic-config", "ic-replicated-state", @@ -14041,7 +14075,7 @@ dependencies = [ [[package]] name = "icrc-ledger-types" -version = "0.1.7" +version = "0.1.8" dependencies = [ "assert_matches", "base32", @@ -14201,7 +14235,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -14263,9 +14297,9 @@ dependencies = [ [[package]] name = "impl-more" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae21c3177a27788957044151cc2800043d127acaa460a47ebb9b84dfa2c6aa0" +checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2" [[package]] name = "impl-rlp" @@ -14293,7 +14327,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -14363,12 +14397,12 @@ dependencies = [ [[package]] name = "inferno" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a5d75fee4d36809e6b021e4b96b686e763d365ffdb03af2bd00786353f84fe" +checksum = "692eda1cc790750b9f5a5e3921ef9c117fd5498b97cfacbc910693e5b29002dc" dependencies = [ "ahash 0.8.11", - "clap 4.5.23", + "clap 4.5.26", "crossbeam-channel", "crossbeam-utils", "dashmap 6.1.0", @@ -14378,7 +14412,7 @@ dependencies = [ "log", "num-format", "once_cell", - "quick-xml 0.37.1", + "quick-xml 0.37.2", "rgb", "str_stack", ] @@ -14388,7 
+14422,7 @@ name = "inject-files" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "partition_tools", "tempfile", "tokio", @@ -14405,13 +14439,13 @@ dependencies = [ [[package]] name = "insta" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" +checksum = "6513e4067e16e69ed1db5ab56048ed65db32d10ba5fc1217f5393f8f17d8b5a5" dependencies = [ "console 0.15.10", - "lazy_static", "linked-hash-map", + "once_cell", "similar", ] @@ -14585,9 +14619,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ "once_cell", "wasm-bindgen", @@ -14722,7 +14756,7 @@ dependencies = [ "k8s-openapi", "kube-core", "pem 3.0.4", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-pemfile 2.2.0", "secrecy", "serde", @@ -14838,12 +14872,12 @@ checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" name = "launch-single-vm" version = "0.1.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "ic-prep", "ic-registry-subnet-type", "ic-system-test-driver", "ic-types", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "slog", "slog-async", @@ -15077,7 +15111,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", "redox_syscall 0.5.8", ] @@ -15135,9 +15169,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "libc", @@ -15169,9 +15203,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux_kernel_command_line" @@ -15263,9 +15297,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" [[package]] name = "logos" @@ -15296,7 +15330,7 @@ dependencies = [ "proc-macro2", "quote", "regex-syntax 0.6.29", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -15322,6 +15356,19 @@ dependencies = [ "logos-codegen", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if 1.0.0", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru" version = "0.7.8" @@ -15546,7 +15593,7 @@ dependencies = [ "ic-registry-subnet-type", "ic-system-test-driver", "ic-types", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "itertools 0.12.1", "rand 0.8.5", "rand_chacha 0.3.1", @@ -15616,9 +15663,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", ] @@ -15676,7 +15723,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -15705,25 +15752,23 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.8" +version = "0.12.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" dependencies = [ "async-lock", - "async-trait", "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", - "event-listener 5.3.1", + "event-listener 5.4.0", "futures-util", - "once_cell", + "loom", "parking_lot 0.12.3", - "quanta", + "portable-atomic", "rustc_version", "smallvec", "tagptr", "thiserror 1.0.69", - "triomphe", "uuid", ] @@ -15776,9 +15821,9 @@ checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "neli" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" +checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" dependencies = [ "byteorder", "libc", @@ -15788,9 +15833,9 @@ dependencies = [ [[package]] name = "neli-proc-macros" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" dependencies = [ "either", "proc-macro2", @@ -15860,12 +15905,12 @@ dependencies = [ "ic-test-utilities", "ic-test-utilities-types", "ic-types", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "proxy_canister", "rand 0.8.5", "rand_chacha 0.3.1", 
"registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "slog", "tokio", "url", @@ -15882,7 +15927,7 @@ name = "nft_exporter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "serde", "serde_json", ] @@ -15930,7 +15975,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if 1.0.0", "libc", "memoffset 0.9.1", @@ -15942,7 +15987,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if 1.0.0", "cfg_aliases", "libc", @@ -15992,7 +16037,7 @@ dependencies = [ "on_wire", "prost 0.13.4", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_cbor", "slog", "tokio", @@ -16221,7 +16266,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -16241,9 +16286,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.2", @@ -16513,7 +16558,7 @@ dependencies = [ "async-trait", "backoff", "candid", - "clap 4.5.23", + "clap 4.5.26", "env-file-reader", "exec", "get_if_addrs", @@ -16623,7 +16668,7 @@ dependencies = [ "ic-types", "ic_consensus_system_test_utils", "itertools 0.12.1", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "slog", "tokio", @@ -16849,7 +16894,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.8", + "thiserror 2.0.11", "ucd-trie", ] @@ -16873,7 +16918,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -16929,12 +16974,12 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_macros", - "phf_shared 0.11.2", + "phf_shared 0.11.3", ] [[package]] @@ -16959,25 +17004,25 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", "rand 0.8.5", ] [[package]] name = "phf_macros" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ - "phf_generator 0.11.2", - "phf_shared 0.11.2", + "phf_generator 0.11.3", + "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -16986,16 +17031,16 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "siphasher", + "siphasher 0.3.11", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher", + "siphasher 1.0.1", ] [[package]] @@ -17006,29 +17051,29 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -17142,7 +17187,7 @@ dependencies = [ "ic-transport-types", "k256", "lazy_static", - "reqwest 0.12.9", + "reqwest 0.12.12", "schemars", "serde", "serde_bytes", @@ -17152,7 +17197,7 @@ dependencies = [ "slog", "strum", "strum_macros", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tracing", "tracing-appender", @@ -17177,7 +17222,7 @@ dependencies = [ "bitcoincore-rpc", "bytes", "candid", - "clap 4.5.23", + "clap 4.5.26", "ctrlc", "flate2", "form_urlencoded", @@ -17226,7 +17271,7 @@ dependencies = [ "ic-test-utilities", "ic-test-utilities-registry", "ic-types", - "ic-utils 0.39.0", + "ic-utils 0.39.2", "ic-utils-thread", 
"ic-validator-ingress-message", "itertools 0.12.1", @@ -17235,7 +17280,7 @@ dependencies = [ "rand 0.8.5", "rcgen", "registry-canister", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_cbor", "serde_json", @@ -17431,12 +17476,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -17514,9 +17559,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -17541,7 +17586,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "hex", "lazy_static", "procfs-core", @@ -17554,7 +17599,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "hex", ] @@ -17595,7 +17640,7 @@ checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set 0.8.0", "bit-vec 0.8.0", - "bitflags 2.6.0", + "bitflags 2.8.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -17615,7 +17660,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + 
"syn 2.0.96", ] [[package]] @@ -17655,7 +17700,7 @@ dependencies = [ "prost 0.12.6", "prost-types 0.12.6", "regex", - "syn 2.0.90", + "syn 2.0.96", "tempfile", ] @@ -17675,7 +17720,7 @@ dependencies = [ "prost 0.13.4", "prost-types 0.13.4", "regex", - "syn 2.0.90", + "syn 2.0.96", "tempfile", ] @@ -17689,7 +17734,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -17702,7 +17747,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -17787,9 +17832,9 @@ dependencies = [ [[package]] name = "pulley-interpreter" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403a1a95f4c18a45c86c7bff13df00347afd0abcbf2e54af273c837339ffcf77" +checksum = "8324e531de91a3c25021a30fb7862d39cc516b61fbb801176acb5ff279ea887b" dependencies = [ "cranelift-bitset", "log", @@ -17798,9 +17843,9 @@ dependencies = [ [[package]] name = "quanta" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773ce68d0bb9bc7ef20be3536ffe94e223e1f365bd374108b2659fac0c65cfe6" +checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" dependencies = [ "crossbeam-utils", "libc", @@ -17828,9 +17873,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.37.1" +version = "0.37.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f22f29bdff3987b4d8632ef95fd6424ec7e4e0a57e2f4fc63e489e75357f6a03" +checksum = "165859e9e55f79d67b96c5d96f4e88b6f2695a1972849c15a6a3f5c59fc2c003" dependencies = [ "memchr", ] @@ -17846,9 +17891,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.20", + "rustls 0.23.21", "socket2 0.5.8", - "thiserror 2.0.8", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -17864,10 +17909,10 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash 2.1.0", - "rustls 
0.23.20", + "rustls 0.23.21", "rustls-pki-types", "slab", - "thiserror 2.0.8", + "thiserror 2.0.11", "tinyvec", "tracing", "web-time", @@ -17889,9 +17934,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -18113,7 +18158,7 @@ version = "0.9.0" dependencies = [ "anyhow", "candid", - "clap 4.5.23", + "clap 4.5.26", "ic-agent", "k256", "rate-limits-api", @@ -18167,7 +18212,7 @@ dependencies = [ "serde_cbor", "serde_json", "strum", - "thiserror 2.0.8", + "thiserror 2.0.11", "uuid", ] @@ -18184,11 +18229,11 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.2.0" +version = "11.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" +checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] @@ -18213,9 +18258,9 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" dependencies = [ "pem 3.0.4", "ring 0.17.8", @@ -18249,7 +18294,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] @@ -18512,9 +18557,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", @@ -18538,7 +18583,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-native-certs 0.8.1", "rustls-pemfile 2.2.0", "rustls-pki-types", @@ -18550,6 +18595,7 @@ dependencies = [ "tokio-rustls 0.26.1", "tokio-socks", "tokio-util", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -18807,7 +18853,7 @@ dependencies = [ "on_wire", "prost 0.13.4", "rand 0.8.5", - "reqwest 0.12.9", + "reqwest 0.12.12", "rosetta-core", "serde", "serde_json", @@ -18862,7 +18908,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.90", + "syn 2.0.96", "unicode-ident", ] @@ -18985,11 +19031,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "errno 0.3.10", "libc", "linux-raw-sys", @@ -19024,9 +19070,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.20" +version = "0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "brotli 7.0.0", "brotli-decompressor", @@ -19060,7 +19106,7 @@ dependencies = [ "ring 0.17.8", "serde", "serde_json", - "thiserror 2.0.8", + "thiserror 2.0.11", "webpki-roots 0.26.7", "x509-parser", ] @@ -19087,7 +19133,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.1.0", + "security-framework 3.2.0", ] 
[[package]] @@ -19128,7 +19174,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -19167,9 +19213,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -19219,7 +19265,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -19253,7 +19299,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -19312,7 +19358,7 @@ dependencies = [ "ic-system-test-driver", "ic_consensus_system_test_utils", "nns_dapp", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_json", "slog", ] @@ -19413,7 +19459,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -19423,11 +19469,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d3f8c9bfcc3cbb6b0179eb57042d75b1582bdc65c3cb95f3fa999509c03cbc" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -19436,9 +19482,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -19450,7 +19496,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb30575f3638fc8f6815f448d50cb1a2e255b0897985c8c59f4d37b72a07b06" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cssparser", "derive_more 0.99.18", "fxhash", @@ -19474,9 +19520,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -19534,13 +19580,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -19551,14 +19597,14 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "itoa", "memchr", @@ -19607,7 +19653,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -19630,7 
+19676,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -19692,7 +19738,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -19722,7 +19768,7 @@ name = "setupos-disable-checks" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "indoc", "linux_kernel_command_line", "partition_tools", @@ -19736,7 +19782,7 @@ name = "setupos-inject-configuration" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "config", "partition_tools", "serde", @@ -19752,7 +19798,7 @@ name = "setupos_tool" version = "1.0.0" dependencies = [ "anyhow", - "clap 4.5.23", + "clap 4.5.26", "config", "config_types", "deterministic_ips", @@ -19874,13 +19920,13 @@ checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "simple_asn1" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint 0.4.6", "num-traits", - "thiserror 1.0.69", + "thiserror 2.0.11", "time", ] @@ -19910,6 +19956,12 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "slab" version = "0.4.9" @@ -20039,7 +20091,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20067,7 +20119,7 @@ dependencies = [ "ic-system-test-driver", "ic-types", "ic-universal-canister", - "ic-utils 0.39.0", + "ic-utils 0.39.2", 
"ic_consensus_system_test_utils", "icp-ledger", "icrc-ledger-agent", @@ -20125,7 +20177,7 @@ dependencies = [ "ic-registry-subnet-type", "ic-system-test-driver", "ic-types", - "reqwest 0.12.9", + "reqwest 0.12.12", "slog", ] @@ -20305,7 +20357,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.2.0", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20317,7 +20369,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.3.0", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20328,7 +20380,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20339,7 +20391,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20361,7 +20413,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20389,9 +20441,9 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "symbolic-common" -version = "12.12.4" +version = "12.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd33e73f154e36ec223c18013f7064a2c120f1162fc086ac9933542def186b00" +checksum = "8150eae9699e3c73a3e6431dc1f80d87748797c0457336af23e94c1de619ed24" dependencies = [ "debugid", "memmap2", @@ -20401,9 +20453,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.4" +version = "12.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e51191290147f071777e37fe111800bb82a9059f9c95b19d2dd41bfeddf477" +checksum = "95f4a9846f7a8933b6d198c022faa2c9bd89e1a970bed9d9a98d25708bf8de17" dependencies = [ "rustc-demangle", "symbolic-common", @@ -20422,9 +20474,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.96" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", @@ -20476,7 +20528,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20507,11 +20559,11 @@ dependencies = [ "anyhow", "async-trait", "axum", - "clap 4.5.23", + "clap 4.5.26", "http 1.2.0", "itertools 0.12.1", - "reqwest 0.12.9", - "thiserror 2.0.8", + "reqwest 0.12.12", + "thiserror 2.0.11", "tokio", "url", ] @@ -20616,12 +20668,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if 1.0.0", "fastrand", + "getrandom", "once_cell", "rustix", "windows-sys 0.59.0", @@ -20692,7 +20745,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta 0.2.0", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20704,7 +20757,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta 0.3.0", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20744,7 +20797,7 @@ dependencies = [ "ic_consensus_system_test_utils", "nns_dapp", "os_qualification_utils", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_json", "slog", @@ -20779,11 +20832,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.8" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.8", + "thiserror-impl 2.0.11", ] [[package]] 
@@ -20794,18 +20847,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.8" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -20928,9 +20981,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -20981,14 +21034,14 @@ checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -21014,13 +21067,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -21062,7 +21115,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.20", + "rustls 0.23.21", "tokio", ] @@ -21205,7 +21258,7 @@ dependencies = [ "prost-build 0.13.4", "prost-types 0.13.4", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -21255,7 +21308,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "base64 0.21.7", - "bitflags 2.6.0", + "bitflags 2.8.0", "bytes", "http 1.2.0", "http-body 1.0.1", @@ -21274,7 +21327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.6.0", + "bitflags 2.8.0", "bytes", "futures-core", "http 1.2.0", @@ -21376,7 +21429,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -21516,12 +21569,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "triomphe" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" - [[package]] name = "trust-dns-proto" version = "0.22.0" @@ -21644,11 +21691,10 @@ dependencies = [ [[package]] name = "ulid" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289" +checksum = "f294bff79170ed1c5633812aff1e565c35d993a36e757f9bc0accf5eec4e6045" dependencies = [ - "getrandom", "rand 0.8.5", "web-time", ] @@ -21661,9 +21707,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-bidi" @@ -21815,9 +21861,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" dependencies = [ "getrandom", "serde", @@ -21825,9 +21871,9 @@ dependencies = [ [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -21874,7 +21920,7 @@ dependencies = [ name = "vsock_guest" version = "1.0.0" dependencies = [ - "clap 4.5.23", + "clap 4.5.26", "vsock_lib", ] @@ -21891,7 +21937,7 @@ version = "1.0.0" dependencies = [ "anyhow", "regex", - "reqwest 0.12.9", + "reqwest 0.12.12", "rusb", "serde", "serde_json", @@ -22003,34 +22049,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if 1.0.0", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" 
dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.49" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -22041,9 +22088,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -22051,22 +22098,25 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-encoder" @@ -22099,12 +22149,12 @@ dependencies = [ [[package]] name = "wasm-encoder" -version = "0.222.0" +version = "0.223.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3432682105d7e994565ef928ccf5856cf6af4ba3dddebedb737f61caed70f956" +checksum = 
"7e636076193fa68103e937ac951b5f2f587624097017d764b8984d9c0f149464" dependencies = [ "leb128", - "wasmparser 0.222.0", + "wasmparser 0.223.0", ] [[package]] @@ -22127,7 +22177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d28bc49ba1e5c5b61ffa7a2eace10820443c4b7d1c0b144109261d14570fdf8" dependencies = [ "ahash 0.8.11", - "bitflags 2.6.0", + "bitflags 2.8.0", "hashbrown 0.14.5", "indexmap 2.7.0", "semver", @@ -22141,7 +22191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca917a21307d3adf2b9857b94dd05ebf8496bdcff4437a9b9fb3899d3e6c74e7" dependencies = [ "ahash 0.8.11", - "bitflags 2.6.0", + "bitflags 2.8.0", "hashbrown 0.14.5", "indexmap 2.7.0", "semver", @@ -22154,7 +22204,7 @@ version = "0.221.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9845c470a2e10b61dd42c385839cdd6496363ed63b5c9e420b5488b77bd22083" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "hashbrown 0.15.2", "indexmap 2.7.0", "semver", @@ -22163,11 +22213,11 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.222.0" +version = "0.223.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4adf50fde1b1a49c1add6a80d47aea500c88db70551805853aa8b88f3ea27ab5" +checksum = "d5a99faceb1a5a84dd6084ec4bfa4b2ab153b5793b43fd8f58b89232634afc35" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "indexmap 2.7.0", "semver", ] @@ -22196,12 +22246,12 @@ dependencies = [ [[package]] name = "wasmtime" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639ecae347b9a2227e453a7b7671e84370a0b61f47a15e0390fe9b7725e47b3" +checksum = "edd30973c65eceb0f37dfcc430d83abd5eb24015fdfcab6912f52949287e04f0" dependencies = [ "anyhow", - "bitflags 2.6.0", + "bitflags 2.8.0", "bumpalo", "cc", "cfg-if 1.0.0", @@ -22240,23 +22290,23 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "28.0.0" +version 
= "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "882a18800471cfc063c8b3ccf75723784acc3fd534009ac09421f2fac2fcdcec" +checksum = "c6c21dd30d1f3f93ee390ac1a7ec304ecdbfdab6390e1add41a1f52727b0992b" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "wasmtime-component-macro" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5c0a77c9e1927c3d471f53cc13767c3d3438e5d5ffd394e3eb31c86445fd60" +checksum = "9f948a6ef3119d52c9f12936970de28ddf3f9bea04bc65571f4a92d2e5ab38f4" dependencies = [ "anyhow", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasmtime-component-util", "wasmtime-wit-bindgen", "wit-parser", @@ -22264,15 +22314,15 @@ dependencies = [ [[package]] name = "wasmtime-component-util" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43702ca98bf5162eca0573db691ed9ecd36d716f8c6688410fe26ec16b6f9bcb" +checksum = "b9275aa01ceaaa2fa6c0ecaa5267518d80b9d6e9ae7c7ea42f4c6e073e6a69ef" [[package]] name = "wasmtime-cranelift" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20070aa5b75080a8932ec328419faf841df2bc6ceb16b55b0df2b952098392a2" +checksum = "0701a44a323267aae4499672dae422b266cee3135a23b640972ec8c0e10a44a2" dependencies = [ "anyhow", "cfg-if 1.0.0", @@ -22295,9 +22345,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2604ddb24879d4dc1dedcb7081d7a8e017259bce916fdae097a97db52cbaab80" +checksum = "264c968c1b81d340355ece2be0bc31a10f567ccb6ce08512c3b7d10e26f3cbe5" dependencies = [ "anyhow", "cranelift-bitset", @@ -22318,9 +22368,9 @@ dependencies = [ [[package]] name = "wasmtime-fiber" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "98593412d2b167ebe2b59d4a17a184978a72f976b53b3a0ec05629451079ac1d" +checksum = "78505221fd5bd7b07b4e1fa2804edea49dc231e626ad6861adc8f531812973e6" dependencies = [ "anyhow", "cc", @@ -22333,9 +22383,9 @@ dependencies = [ [[package]] name = "wasmtime-jit-icache-coherence" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40d7722b9e1fbeae135715710a8a2570b1e6cf72b74dd653962d89831c6c70d" +checksum = "9bedb677ca1b549d98f95e9e1f9251b460090d99a2c196a0614228c064bf2e59" dependencies = [ "anyhow", "cfg-if 1.0.0", @@ -22345,26 +22395,26 @@ dependencies = [ [[package]] name = "wasmtime-slab" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8579c335220b4ece9aa490a0e8b46de78cd342b195ab21ff981d095e14b52383" +checksum = "564905638c132c275d365c1fa074f0b499790568f43148d29de84ccecfb5cb31" [[package]] name = "wasmtime-versioned-export-macros" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7de0a56fb0a69b185968f2d7a9ba54750920a806470dff7ad8de91ac06d277e" +checksum = "1e91092e6cf77390eeccee273846a9327f3e8f91c3c6280f60f37809f0e62d29" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "wasmtime-winch" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd309943c443f5590d12f9aba9ba63c481091c955a0a14de0c2a9e0e3aaeca9" +checksum = "b111d909dc604c741bd8ac2f4af373eaa5c68c34b5717271bcb687688212cef8" dependencies = [ "anyhow", "cranelift-codegen", @@ -22379,9 +22429,9 @@ dependencies = [ [[package]] name = "wasmtime-wit-bindgen" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "969f83022dac3435d6469edb582ceed04cfe32aa44dc3ef16e5cb55574633df8" +checksum = 
"5f38f7a5eb2f06f53fe943e7fb8bf4197f7cf279f1bc52c0ce56e9d3ffd750a4" dependencies = [ "anyhow", "heck 0.5.0", @@ -22404,31 +22454,31 @@ dependencies = [ [[package]] name = "wast" -version = "222.0.0" +version = "223.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce7191f4b7da0dd300cc32476abae6457154e4625d9b1bc26890828a9a26f6e" +checksum = "d59b2ba8a2ff9f06194b7be9524f92e45e70149f4dacc0d0c7ad92b59ac875e4" dependencies = [ "bumpalo", "leb128", "memchr", "unicode-width 0.2.0", - "wasm-encoder 0.222.0", + "wasm-encoder 0.223.0", ] [[package]] name = "wat" -version = "1.222.0" +version = "1.223.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fde61b4b52f9a84ae31b5e8902a2cd3162ea45d8bf564c729c3288fe52f4334" +checksum = "662786915c427e4918ff01eabb3c4756d4d947cd8f635761526b4cc9da2eaaad" dependencies = [ - "wast 222.0.0", + "wast 223.0.0", ] [[package]] name = "web-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -22549,9 +22599,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winch-codegen" -version = "28.0.0" +version = "28.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9110decc2983ed94de904804dcd979ba59cbabc78a94fec6b1d8468ec513d0f6" +checksum = "6232f40a795be2ce10fc761ed3b403825126a60d12491ac556ea104a932fd18a" dependencies = [ "anyhow", "cranelift-codegen", @@ -22564,6 +22614,16 @@ dependencies = [ "wasmtime-environ", ] +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + 
"windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -22573,6 +22633,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "windows-registry" version = "0.2.0" @@ -22753,9 +22848,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" dependencies = [ "memchr", ] @@ -22871,9 +22966,9 @@ dependencies = [ [[package]] name = "xattr" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" +checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" dependencies = [ "libc", "linux-raw-sys", @@ -22970,7 +23065,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -22992,7 +23087,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -23012,7 +23107,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -23033,7 +23128,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -23055,7 +23150,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4a3f645d2a1..e5ce89c1e91 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ "packages/icrc-ledger-client-cdk", "packages/icrc-ledger-types", "packages/ic-ethereum-types", + "packages/ic-metrics-assert", "packages/ic-sha3", "packages/ic-ledger-hash-of", "packages/ic-signature-verification", @@ -172,6 +173,7 @@ members = [ "rs/monitoring/tracing/jaeger_exporter", "rs/monitoring/tracing/logging_layer", "rs/nervous_system/agent", + "rs/nervous_system/candid_utils", "rs/nervous_system/clients", "rs/nervous_system/collections/union_multi_map", "rs/nervous_system/common", @@ -611,7 +613,7 @@ minicbor-derive = "0.13.0" mockall = "0.13.0" mockito = "1.2.0" nftables = "0.4" -nix = "0.24.3" +nix = { version = "0.24.3", features = ["ptrace"] } num_cpus = "1.16.0" num-bigint = "0.4.6" num-traits = { version = "0.2.12", features = ["libm"] } diff --git a/WORKSPACE.bazel b/WORKSPACE.bazel index 10c29ef4de2..68e854535ad 100644 --- a/WORKSPACE.bazel +++ b/WORKSPACE.bazel @@ -28,11 +28,13 @@ canisters( "ck_btc_ledger": "ic-icrc1-ledger.wasm.gz", "ck_btc_ledger_v1": 
"ic-icrc1-ledger.wasm.gz", "ck_btc_ledger_v2": "ic-icrc1-ledger.wasm.gz", + "ck_btc_ledger_v2_noledgerversion": "ic-icrc1-ledger.wasm.gz", "ck_btc_index": "ic-icrc1-index-ng.wasm.gz", "ck_eth_archive": "ic-icrc1-archive-u256.wasm.gz", "ck_eth_ledger": "ic-icrc1-ledger-u256.wasm.gz", "ck_eth_ledger_v1": "ic-icrc1-ledger-u256.wasm.gz", "ck_eth_ledger_v2": "ic-icrc1-ledger-u256.wasm.gz", + "ck_eth_ledger_v2_noledgerversion": "ic-icrc1-ledger-u256.wasm.gz", "ck_eth_index": "ic-icrc1-index-ng-u256.wasm.gz", "sns_root": "sns-root-canister.wasm.gz", "sns_governance": "sns-governance-canister.wasm.gz", @@ -58,11 +60,13 @@ canisters( "ck_btc_ledger": "mainnet_ckbtc_ic-icrc1-ledger", "ck_btc_ledger_v1": "mainnet_ckbtc_ic-icrc1-ledger-v1", "ck_btc_ledger_v2": "mainnet_ckbtc_ic-icrc1-ledger-v2", + "ck_btc_ledger_v2_noledgerversion": "mainnet_ckbtc_ic-icrc1-ledger-v2-noledgerversion", "ck_btc_index": "mainnet_ckbtc-index-ng", "ck_eth_archive": "mainnet_cketh_ic-icrc1-archive-u256", "ck_eth_ledger": "mainnet_cketh_ic-icrc1-ledger-u256", "ck_eth_ledger_v1": "mainnet_cketh_ic-icrc1-ledger-u256-v1", "ck_eth_ledger_v2": "mainnet_cketh_ic-icrc1-ledger-u256-v2", + "ck_eth_ledger_v2_noledgerversion": "mainnet_cketh_ic-icrc1-ledger-u256-v2-noledgerversion", "ck_eth_index": "mainnet_cketh-index-ng", "sns_root": "mainnet_sns-root-canister", "sns_governance": "mainnet_sns-governance-canister", @@ -96,8 +100,8 @@ sol_register_toolchains( http_archive( name = "rules_rust", - sha256 = "af4f56caae50a99a68bfce39b141b509dd68548c8204b98ab7a1cafc94d5bb02", - urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.54.1/rules_rust-v0.54.1.tar.gz"], + integrity = "sha256-8TBqrAsli3kN8BrZq8arsN8LZUFsdLTvJ/Sqsph4CmQ=", + urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.56.0/rules_rust-0.56.0.tar.gz"], ) load("@rules_rust//rust:repositories.bzl", "rules_rust_dependencies", "rust_register_toolchains") @@ -112,12 +116,12 @@ rust_register_toolchains( # The nightly version 
is required to compile fuzz tests from Bazel. # The version below is chosen so that it is in sync with the non-nightly version. versions = [ - "1.82.0", + "1.84.0", # Use the nightly version from the day before the branch. # - # NB! Due to a regression in the compiler https://github.com/rust-lang/rust/issues/128895, that's # only back-ported to stable, we use a more recent version. - "nightly/2024-09-23", + "nightly/2024-11-21", ], ) diff --git a/bazel/canisters.bzl b/bazel/canisters.bzl index 8985effd90d..2308dc7147e 100644 --- a/bazel/canisters.bzl +++ b/bazel/canisters.bzl @@ -25,6 +25,13 @@ def _wasm_rust_transition_impl(_settings, attr): "-C", "lto", "-C", + # If combined with -C lto, -C embed-bitcode=no will cause rustc to abort at start-up, + # because the combination is invalid. + # See: https://doc.rust-lang.org/rustc/codegen-options/index.html#embed-bitcode + # + # embed-bitcode is disabled by default by rules_rust. + "embed-bitcode=yes", + "-C", "target-feature=+bulk-memory", ], } diff --git a/bazel/cc_rs.patch b/bazel/cc_rs.patch deleted file mode 100644 index 94e65f91142..00000000000 --- a/bazel/cc_rs.patch +++ /dev/null @@ -1,18 +0,0 @@ -diff --git a/src/lib.rs b/src/lib.rs -index 2fe30b9..88cd566 100644 ---- a/src/lib.rs -+++ b/src/lib.rs -@@ -1121,6 +1121,13 @@ impl Build { - .ok_or_else(|| Error::new(ErrorKind::InvalidArgument, "parent() failure"))? 
- .to_string_lossy(); - let mut hasher = hash_map::DefaultHasher::new(); -+ let out_dir = self.get_out_dir().expect("Could not get out dir"); -+ -+ let prefix = out_dir.parent().expect("Could not get parent"); -+ let prefix: &str = &prefix.to_string_lossy(); -+ -+ let err = format!("could not strip prefix {prefix} from {dirname}"); -+ let dirname = dirname.strip_prefix(prefix).expect(&err); - hasher.write(dirname.to_string().as_bytes()); - dst.join(format!("{:016x}-{}", hasher.finish(), basename)) - .with_extension("o") diff --git a/bazel/external_crates.bzl b/bazel/external_crates.bzl index 9677beb8222..f25afadc154 100644 --- a/bazel/external_crates.bzl +++ b/bazel/external_crates.bzl @@ -27,12 +27,6 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable "canbench": [crate.annotation( gen_binaries = True, )], - "cc": [crate.annotation( - # Patch for determinism issues - # https://github.com/rust-lang/cc-rs/issues/1271 - patch_args = ["-p1"], - patches = ["@@//bazel:cc_rs.patch"], - )], "libssh2-sys": [crate.annotation( # Patch for determinism issues patch_args = ["-p1"], @@ -349,7 +343,7 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable version = "^0.1.2", ), "cc": crate.spec( - version = "^1.0", + version = "=1.1.37", ), "cddl": crate.spec( version = "^0.9.4", @@ -825,6 +819,9 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable ), "nix": crate.spec( version = "^0.24.3", + features = [ + "ptrace", + ], ), "num-bigint": crate.spec( version = "^0.4.6", @@ -1240,6 +1237,9 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable "full", ], ), + "syscalls": crate.spec( + version = "^0.6.18", + ), "tar": crate.spec( version = "^0.4.38", ), diff --git a/ci/container/README.md b/ci/container/README.md index eea758cd7e7..ebb11b1e490 100644 --- a/ci/container/README.md +++ b/ci/container/README.md @@ -86,7 +86,7 @@ sudo podman run --pids-limit=-1 -it 
--rm --privileged --network=host --cgroupns= --mount type=bind,source=/home/john/.local/share/fish,target=/home/ubuntu/.local/share/fish \ --mount type=bind,source=/home/john/.zsh_history,target=/home/ubuntu/.zsh_history \ -v /tmp/ssh-XXXXQAO7kF/agent.113731:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -w /ic \ - docker.io/dfinity/ic-build:221b79c4f4a966eae67a3f9ef7f20f4c5583d5bc38df17c94128804687a84c29 /usr/bin/fish + ghcr.io/dfinity/ic-build:221b79c4f4a966eae67a3f9ef7f20f4c5583d5bc38df17c94128804687a84c29 /usr/bin/fish ``` ### How to use custom config diff --git a/ci/container/TAG b/ci/container/TAG index 1b88ba01cf3..510327d7002 100644 --- a/ci/container/TAG +++ b/ci/container/TAG @@ -1 +1 @@ -7beeb6e69346fd10ff4421688aec841d072ac4f8c4c80d3b089d64aa280c0894 +b3869ba20c4e9ba2915be302f5b5a4b23da0545af2f1cb896441055372950389 diff --git a/ci/container/build-image.sh b/ci/container/build-image.sh index 401fc422196..2e4b95803d8 100755 --- a/ci/container/build-image.sh +++ b/ci/container/build-image.sh @@ -38,9 +38,8 @@ fi DOCKER_BUILDKIT=1 docker "${ARGS[@]}" build "${BUILD_ARGS[@]}" \ -t ic-build:"$DOCKER_IMG_TAG" \ - -t docker.io/dfinity/ic-build:"$DOCKER_IMG_TAG" \ - -t docker.io/dfinity/ic-build:latest \ -t ghcr.io/dfinity/ic-build:"$DOCKER_IMG_TAG" \ + -t ghcr.io/dfinity/ic-build:latest \ --build-arg RUST_VERSION="$RUST_VERSION" \ -f ci/container/Dockerfile . 
diff --git a/ci/scripts/docker-login.sh b/ci/scripts/docker-login.sh deleted file mode 100755 index 5834e932887..00000000000 --- a/ci/scripts/docker-login.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -eEuo pipefail - -# login to docker hub to avoid rate limit disruptions -if which docker 2>/dev/null; then - docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD_RO" -fi -# docker-bin used by container_pull in WORKSPACES.bazel -if which docker-bin 2>/dev/null; then - # save auth to user's .docker/config.json - docker-bin login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD_RO" - # save auth to root's .docker/config.json - sudo docker-bin login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD_RO" -fi diff --git a/ci/tools/repro-check.sh b/ci/tools/repro-check.sh index 5b9e78bcb28..06c78eb19cf 100755 --- a/ci/tools/repro-check.sh +++ b/ci/tools/repro-check.sh @@ -107,10 +107,11 @@ check_ic_repo() { git_remote="$(git config --get remote.origin.url)" log_debug "Check the repository is an IC repository" - # Possible values of `git_remote` are listed below + # Some of the possible values of `git_remote` that should be matched: # git@github.com:dfinity/ic.git, https://github.com/dfinity/ic.git # git@github.com:dfinity/ic-private.git, https://github.com/dfinity/ic-private.git - if [[ "$git_remote" == *dfinity/ic* ]]; then + # git@github.com:/ic.git, https://github.com//ic.git + if [[ "$git_remote" == */ic* ]]; then log_debug "Inside IC repository" else error "When not specifying any option please run this script inside an IC git repository" diff --git a/ic-os/boundary-guestos/context/Dockerfile b/ic-os/boundary-guestos/context/Dockerfile index 0177f381a6d..af748384095 100644 --- a/ic-os/boundary-guestos/context/Dockerfile +++ b/ic-os/boundary-guestos/context/Dockerfile @@ -2,7 +2,7 @@ # # Build step for example: # - `docker build --pull -t dfinity/boundaryos-main --build-arg BUILD_TYPE=dev \ ` -# `--build-arg 
BASE_IMAGE=docker.io/dfinity/boundaryos-base@sha256:dc1a2892b0241131dd97ddd4dce560ab274d00a90110a4b5fc4cb2245ff1f0db -f Dockerfile .` +# `--build-arg BASE_IMAGE=ghcr.io/dfinity/boundaryos-base@sha256:dc1a2892b0241131dd97ddd4dce560ab274d00a90110a4b5fc4cb2245ff1f0db -f Dockerfile .` # # # The base images are defined in docker-base.prod and docker-base.dev. Update @@ -25,8 +25,8 @@ WORKDIR /tmp # Download and verify ic-gateway RUN \ - curl -L -O https://github.com/dfinity/ic-gateway/releases/download/v0.1.63/ic-gateway_0.1.63_amd64.deb && \ - echo "f33d2bd14b7dcc9964a62e522fdf3fa6ab9360bd7bd9a87e83a8a515dd36e18b ic-gateway_0.1.63_amd64.deb" | sha256sum -c + curl -L -O https://github.com/dfinity/ic-gateway/releases/download/v0.1.64/ic-gateway_0.1.64_amd64.deb && \ + echo "386ba2466454181fa4c3e8459945bcb86ee4e741ea69fb1ae11eb7a452366331 ic-gateway_0.1.64_amd64.deb" | sha256sum -c # # Second build stage: @@ -56,9 +56,9 @@ FROM image-${BUILD_TYPE} USER root:root -COPY --from=download /tmp/ic-gateway_0.1.63_amd64.deb /tmp/ic-gateway_0.1.63_amd64.deb -RUN dpkg -i --force-confold /tmp/ic-gateway_0.1.63_amd64.deb && \ - rm /tmp/ic-gateway_0.1.63_amd64.deb +COPY --from=download /tmp/ic-gateway_0.1.64_amd64.deb /tmp/ic-gateway_0.1.64_amd64.deb +RUN dpkg -i --force-confold /tmp/ic-gateway_0.1.64_amd64.deb && \ + rm /tmp/ic-gateway_0.1.64_amd64.deb RUN mkdir -p /boot/config \ /boot/efi \ diff --git a/ic-os/boundary-guestos/context/docker-base.prod b/ic-os/boundary-guestos/context/docker-base.prod index b3c3df3acb7..22268b17839 100644 --- a/ic-os/boundary-guestos/context/docker-base.prod +++ b/ic-os/boundary-guestos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/boundaryos-base@sha256:981ae8274be35515e46c78b502f53dae2477396c4beae2ff328f608d719b1bd6 +ghcr.io/dfinity/boundaryos-base@sha256:e3b2107bb66b692f841254e87730214bb0a4081a6dae00a122947c82b9f45af8 diff --git a/ic-os/components/setupos-scripts/check-ntp.sh b/ic-os/components/setupos-scripts/check-ntp.sh new file mode 
100644 index 00000000000..fa74d8632d7 --- /dev/null +++ b/ic-os/components/setupos-scripts/check-ntp.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +set -o nounset +set -o pipefail + +SHELL="/bin/bash" +PATH="/sbin:/bin:/usr/sbin:/usr/bin" + +source /opt/ic/bin/functions.sh + +function check_ntp() { + echo "* Checking Chrony status..." + + systemctl is-active --quiet chrony + log_and_halt_installation_on_error "$?" "Chrony service not running or not active." + + retries=0 + max_retries=30 + while [ "$(timedatectl show -p NTPSynchronized --value)" != "yes" ]; do + if [ $retries -ge $max_retries ]; then + local service_logs=$(journalctl -u chrony.service --no-pager) + local log_message="System clock is not synchronized.\n\nChrony service logs:\n${service_logs}" + log_and_halt_installation_on_error 1 "${log_message}" + fi + + echo "* Chrony not yet synchronized. Waiting 2 seconds before retry..." + sleep 2 + ((retries++)) + done + + echo "* Chrony is running and time is in sync." +} + +function set_hwclock_utc() { + echo "* Setting hardware clock to UTC..." 
+ timedatectl set-local-rtc 0 +} + +main() { + log_start "$(basename $0)" + check_ntp + set_hwclock_utc + log_end "$(basename $0)" +} + +main diff --git a/ic-os/components/setupos-scripts/setupos.sh b/ic-os/components/setupos-scripts/setupos.sh index 622dc3973fd..bc4065653ff 100755 --- a/ic-os/components/setupos-scripts/setupos.sh +++ b/ic-os/components/setupos-scripts/setupos.sh @@ -42,6 +42,7 @@ main() { /opt/ic/bin/check-config.sh /opt/ic/bin/check-hardware.sh /opt/ic/bin/check-network.sh + /opt/ic/bin/check-ntp.sh if kernel_cmdline_bool_default_true ic.setupos.perform_installation; then true else diff --git a/ic-os/components/setupos.bzl b/ic-os/components/setupos.bzl index b60e576cd8c..1b5606b6a53 100644 --- a/ic-os/components/setupos.bzl +++ b/ic-os/components/setupos.bzl @@ -16,6 +16,7 @@ component_files = { Label("//ic-os/components/setupos-scripts:check-hardware.sh"): "/opt/ic/bin/check-hardware.sh", Label("//ic-os/components/setupos-scripts:install-hostos.sh"): "/opt/ic/bin/install-hostos.sh", Label("//ic-os/components/setupos-scripts:check-network.sh"): "/opt/ic/bin/check-network.sh", + Label("//ic-os/components/setupos-scripts:check-ntp.sh"): "/opt/ic/bin/check-ntp.sh", Label("//ic-os/components/setupos-scripts:output-wrapper.sh"): "/opt/ic/bin/output-wrapper.sh", Label("//ic-os/components/setupos-scripts:setupos.sh"): "/opt/ic/bin/setupos.sh", Label("//ic-os/components/setupos-scripts:config.service"): "/etc/systemd/system/config.service", diff --git a/ic-os/guestos/context/docker-base.dev b/ic-os/guestos/context/docker-base.dev index 5356587a053..6ceaed7818a 100644 --- a/ic-os/guestos/context/docker-base.dev +++ b/ic-os/guestos/context/docker-base.dev @@ -1 +1 @@ -ghcr.io/dfinity/guestos-base-dev@sha256:736c472f8c8f842cb3abd2c86e1ded12e697b1a12cb196b266c80a783b868614 +ghcr.io/dfinity/guestos-base-dev@sha256:0e1875141caa0fd2bc1337f23a64d9baf2edba16fdae16f914ad8dc86d993a3a diff --git a/ic-os/guestos/context/docker-base.prod 
b/ic-os/guestos/context/docker-base.prod index e9fffd10991..689d7cb52a0 100644 --- a/ic-os/guestos/context/docker-base.prod +++ b/ic-os/guestos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/guestos-base@sha256:8e811bcd6281315b5f5c746cf234511690824c6b2fc7e8a8884a2205666aef2c +ghcr.io/dfinity/guestos-base@sha256:e7f9a61a9359ff2757253f53145d2423fc5173ee9203a235bfa0c6157fb4bead diff --git a/ic-os/hostos/context/docker-base.dev b/ic-os/hostos/context/docker-base.dev index 6baf6043a9b..3d62df45489 100644 --- a/ic-os/hostos/context/docker-base.dev +++ b/ic-os/hostos/context/docker-base.dev @@ -1 +1 @@ -ghcr.io/dfinity/hostos-base-dev@sha256:59e414bd566abcb00bf669ffd81caf9b31b5b0c44bd200e921486ea6a85e4f2d +ghcr.io/dfinity/hostos-base-dev@sha256:353bea0b3bd70a93e93af4c1d9be6a625e92148f888d7bb355bfc9b125d3d2c0 diff --git a/ic-os/hostos/context/docker-base.prod b/ic-os/hostos/context/docker-base.prod index 4ac9ca1fba5..63520b37301 100644 --- a/ic-os/hostos/context/docker-base.prod +++ b/ic-os/hostos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/hostos-base@sha256:57d417cbd61de9e323434ef9100c724002106119aea4eb31bdc45b47f295986c +ghcr.io/dfinity/hostos-base@sha256:fc4297ac6792091b2e483b7187827e35c52f646d67ca8f0b0e776a3ca89c32d5 diff --git a/ic-os/setupos/context/docker-base.dev b/ic-os/setupos/context/docker-base.dev index 8a36bdaaf24..89f1308adda 100644 --- a/ic-os/setupos/context/docker-base.dev +++ b/ic-os/setupos/context/docker-base.dev @@ -1 +1 @@ -ghcr.io/dfinity/setupos-base-dev@sha256:2d4f7360ca0f9972fb93456ecdfe6ea9dec87f9acd4c36baa05f232515b1a0cb +ghcr.io/dfinity/setupos-base-dev@sha256:1df926805803b2dc37df20dba927ecd8206583b934295de483407067df829047 diff --git a/ic-os/setupos/context/docker-base.prod b/ic-os/setupos/context/docker-base.prod index 664f2e9b3e8..cb738d39f98 100644 --- a/ic-os/setupos/context/docker-base.prod +++ b/ic-os/setupos/context/docker-base.prod @@ -1 +1 @@ 
-ghcr.io/dfinity/setupos-base@sha256:78df2aa83c2477c3a66441297b5740b3c38c907e4815f479b618e43b7787f358 +ghcr.io/dfinity/setupos-base@sha256:33f77f8b963360c12d47dbf6ed77bcb86c4b6e8e055935f361089e28d51d145e diff --git a/ic-os/setupos/defs.bzl b/ic-os/setupos/defs.bzl index d0c6ce774c2..294c14cb3b6 100644 --- a/ic-os/setupos/defs.bzl +++ b/ic-os/setupos/defs.bzl @@ -81,11 +81,11 @@ def _custom_partitions(mode): if mode == "dev": guest_image = Label("//ic-os/guestos/envs/dev:disk-img.tar.zst") host_image = Label("//ic-os/hostos/envs/dev:disk-img.tar.zst") - nns_url = "https://wiki.internetcomputer.org" + nns_url = "https://cloudflare.com/cdn-cgi/trace" elif mode == "local-base-dev": guest_image = Label("//ic-os/guestos/envs/local-base-dev:disk-img.tar.zst") host_image = Label("//ic-os/hostos/envs/local-base-dev:disk-img.tar.zst") - nns_url = "https://wiki.internetcomputer.org" + nns_url = "https://cloudflare.com/cdn-cgi/trace" elif mode == "local-base-prod": guest_image = Label("//ic-os/guestos/envs/local-base-prod:disk-img.tar.zst") host_image = Label("//ic-os/hostos/envs/local-base-prod:disk-img.tar.zst") diff --git a/mainnet-canister-revisions.json b/mainnet-canister-revisions.json index f9d79c4935f..15a6cacb2cc 100644 --- a/mainnet-canister-revisions.json +++ b/mainnet-canister-revisions.json @@ -23,6 +23,10 @@ "rev": "e54d3fa34ded227c885d04e64505fa4b5d564743", "sha256": "3d808fa63a3d8ebd4510c0400aa078e99a31afaa0515f0b68778f929ce4b2a46" }, + "ck_btc_ledger_v2_noledgerversion": { + "rev": "aba60ffbc46acfc8990bf4d5685c1360bd7026b9", + "sha256": "67cfcbabb79e683b6fc855450d9972c9efaa7a1cd28c6387965616fbead191ea" + }, "ck_eth_archive": { "rev": "2190613d3b5bcd9b74c382b22d151580b8ac271a", "sha256": "2d25f7831894100d48aa9043c65e87c293487523f0958c15760027d004fbbda9" @@ -43,6 +47,10 @@ "rev": "e54d3fa34ded227c885d04e64505fa4b5d564743", "sha256": "98a7b7391608dc4a554d6964bad24157b6aaf890a05bbaad3fcc92033d9c7b02" }, + "ck_eth_ledger_v2_noledgerversion": { + "rev": 
"aba60ffbc46acfc8990bf4d5685c1360bd7026b9", + "sha256": "73d0c5f057aaf33004218ce588780e1b454c717c702b1cf47532f32c23515f1e" + }, "cycles-minting": { "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", "sha256": "11c8dedd11741f05990498c90f925e9e37ad60647a65ef47caa59cdba234be6f" @@ -68,8 +76,8 @@ "sha256": "8c8eb285de53ca5609abd7dc41ba3ec8eeb67708b81469311fd670e6738d7d0a" }, "registry": { - "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", - "sha256": "771041412d2af4eb681262ca525bce1a87c199b631e17b55e1d7f9abb2cde3e6" + "rev": "a5878586e47536d4cd47f0aadb66b73df8131d2b", + "sha256": "f0fb8fa545b2cc68f030b040e1182a8d004c4d4f4bb4341c9f1b432642c85bef" }, "root": { "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", @@ -84,8 +92,8 @@ "sha256": "f94cf1db965b7042197e5894fef54f5f413bb2ebc607ff0fb59c9d4dfd3babea" }, "sns_governance": { - "rev": "df7d443e6219c462b305152b63ca265171feb6ee", - "sha256": "bd936ef6bb878df87856a0b0c46034a242a88b7f1eeff5439daf6278febca6b7" + "rev": "a5878586e47536d4cd47f0aadb66b73df8131d2b", + "sha256": "5e0547725b033e689f6c7381ca9555fbf09e5ed884178c8286b72e6b2c09e863" }, "sns_index": { "rev": "2190613d3b5bcd9b74c382b22d151580b8ac271a", @@ -100,11 +108,11 @@ "sha256": "3d808fa63a3d8ebd4510c0400aa078e99a31afaa0515f0b68778f929ce4b2a46" }, "sns_root": { - "rev": "aa91ecacdf3824e193e21b70e0127e8d3edab51a", - "sha256": "431cb333feb3f762f742b0dea58745633a2a2ca41075e9933183d850b4ddb259" + "rev": "a5878586e47536d4cd47f0aadb66b73df8131d2b", + "sha256": "dc243135057d13c48f71d2f0a4b8f5fc43ed525d579d97dde23e052dca15bf96" }, "swap": { - "rev": "aa91ecacdf3824e193e21b70e0127e8d3edab51a", - "sha256": "8313ac22d2ef0a0c1290a85b47f235cfa24ca2c96d095b8dbed5502483b9cd18" + "rev": "a5878586e47536d4cd47f0aadb66b73df8131d2b", + "sha256": "45408ed654561dfb17c84b86948dda9498aa0ba8ee669ae774e5faca830c4c24" } } \ No newline at end of file diff --git a/mainnet-subnet-revisions.json b/mainnet-subnet-revisions.json index 2c5e960f9d8..40844a4c1c4 100644 --- 
a/mainnet-subnet-revisions.json +++ b/mainnet-subnet-revisions.json @@ -1,6 +1,6 @@ { "subnets": { - "tdb26-jop6k-aogll-7ltgs-eruif-6kk7m-qpktf-gdiqx-mxtrf-vb5e6-eqe": "43670245ed6919790e7858813c7e838c6fbcedf5", - "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "aa705aaa621c2e0d4f146f3a1de801edcb0fa0d5" + "tdb26-jop6k-aogll-7ltgs-eruif-6kk7m-qpktf-gdiqx-mxtrf-vb5e6-eqe": "aa705aaa621c2e0d4f146f3a1de801edcb0fa0d5", + "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "233c1ee2ef68c1c8800b8151b2b9f38e17b8440a" } } \ No newline at end of file diff --git a/packages/ic-ethereum-types/src/serde_data/mod.rs b/packages/ic-ethereum-types/src/serde_data/mod.rs index 59902e43923..50b13ef7977 100644 --- a/packages/ic-ethereum-types/src/serde_data/mod.rs +++ b/packages/ic-ethereum-types/src/serde_data/mod.rs @@ -30,7 +30,7 @@ where { struct HexStrVisitor(PhantomData); - impl<'de, T> Visitor<'de> for HexStrVisitor + impl Visitor<'_> for HexStrVisitor where T: FromHex, ::Error: fmt::Display, diff --git a/packages/ic-ledger-hash-of/src/lib.rs b/packages/ic-ledger-hash-of/src/lib.rs index 584a9830fad..03716b1577d 100644 --- a/packages/ic-ledger-hash-of/src/lib.rs +++ b/packages/ic-ledger-hash-of/src/lib.rs @@ -93,7 +93,7 @@ impl<'de, T> Deserialize<'de> for HashOf { phantom: PhantomData, } - impl<'de, T> Visitor<'de> for HashOfVisitor { + impl Visitor<'_> for HashOfVisitor { type Value = HashOf; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/packages/ic-metrics-assert/BUILD.bazel b/packages/ic-metrics-assert/BUILD.bazel new file mode 100644 index 00000000000..5ee574d5a81 --- /dev/null +++ b/packages/ic-metrics-assert/BUILD.bazel @@ -0,0 +1,41 @@ +load("@rules_rust//rust:defs.bzl", "rust_doc", "rust_doc_test", "rust_library") + +package(default_visibility = ["//visibility:public"]) + +[ + rust_library( + name = "ic-metrics-assert" + name_suffix, + srcs = glob(["src/**/*.rs"]), + crate_features = features, + 
crate_name = "ic_metrics_assert", + deps = [ + # Keep sorted. + "@crate_index//:candid", + "@crate_index//:regex", + "@crate_index//:serde", + "@crate_index//:serde_bytes", + ] + extra_deps, + ) + for (name_suffix, features, extra_deps) in [ + [ + "", + [], + [], + ], + [ + "_pocket_ic", + ["pocket_ic"], + ["//packages/pocket-ic"], + ], + ] +] + +rust_doc( + name = "doc", + crate = ":ic-metrics-assert", +) + +rust_doc_test( + name = "doc_test", + crate = ":ic-metrics-assert_pocket_ic", +) diff --git a/packages/ic-metrics-assert/CHANGELOG.md b/packages/ic-metrics-assert/CHANGELOG.md new file mode 100644 index 00000000000..11bddf32c5b --- /dev/null +++ b/packages/ic-metrics-assert/CHANGELOG.md @@ -0,0 +1,8 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] diff --git a/packages/ic-metrics-assert/Cargo.toml b/packages/ic-metrics-assert/Cargo.toml new file mode 100644 index 00000000000..bcf398c76d1 --- /dev/null +++ b/packages/ic-metrics-assert/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "ic-metrics-assert" +version = "0.1.0" +description = "Fluent assertions for metrics" +license = "Apache-2.0" +readme = "README.md" +include = ["src", "Cargo.toml", "CHANGELOG.md", "LICENSE", "README.md"] +repository = "https://github.com/dfinity/ic" +authors.workspace = true +edition.workspace = true +documentation.workspace = true + +[dependencies] +candid = { workspace = true } +pocket-ic = { path = "../../packages/pocket-ic", optional = true } +regex = "1.11.0" +serde = { workspace = true } +serde_bytes = { workspace = true } + +[features] +pocket_ic = ["dep:pocket-ic"] diff --git a/packages/ic-metrics-assert/LICENSE b/packages/ic-metrics-assert/LICENSE new file mode 120000 index 00000000000..c87d0654aa6 --- /dev/null +++ 
b/packages/ic-metrics-assert/LICENSE @@ -0,0 +1 @@ +../../licenses/Apache-2.0.txt \ No newline at end of file diff --git a/packages/ic-metrics-assert/README.md b/packages/ic-metrics-assert/README.md new file mode 100644 index 00000000000..c8aa2c45cc8 --- /dev/null +++ b/packages/ic-metrics-assert/README.md @@ -0,0 +1,3 @@ +# IC Metrics Assert + +This package defines test utilities to perform assertions on canister metrics collected by Prometheus. diff --git a/packages/ic-metrics-assert/src/lib.rs b/packages/ic-metrics-assert/src/lib.rs new file mode 100644 index 00000000000..173fb85fbfb --- /dev/null +++ b/packages/ic-metrics-assert/src/lib.rs @@ -0,0 +1,180 @@ +//! Fluent assertions for metrics. + +#![forbid(missing_docs)] + +use candid::{CandidType, Decode, Deserialize, Encode}; +use regex::Regex; +use std::fmt::Debug; + +/// Provides fluent test assertions for metrics. +/// +/// # Examples +/// +/// ```rust +/// use ic_metrics_assert::{MetricsAssert, PocketIcHttpQuery}; +/// use pocket_ic::{management_canister::CanisterId, PocketIc}; +/// +/// struct Setup { +/// env: PocketIc, +/// canister_id : CanisterId, +/// } +/// +/// impl Setup { +/// pub fn check_metrics(self) -> MetricsAssert { +/// MetricsAssert::from_http_query(self) +/// } +/// } +/// +/// impl PocketIcHttpQuery for Setup { +/// fn get_pocket_ic(&self) -> &PocketIc { +/// &self.env +/// } +/// +/// fn get_canister_id(&self) -> CanisterId { +/// self.canister_id +/// } +/// } +/// +/// fn assert_metrics () { +/// use pocket_ic::PocketIcBuilder; +/// use candid::Principal; +/// +/// let env = PocketIcBuilder::new().build(); +/// let canister_id = Principal::from_text("7hfb6-caaaa-aaaar-qadga-cai").unwrap(); +/// let setup = Setup {env, canister_id}; +/// +/// setup +/// .check_metrics() +/// .assert_contains_metric_matching("started action \\d+") +/// .assert_contains_metric_matching("completed action 1") +/// .assert_does_not_contain_metric_matching(".*trap.*"); +/// } +pub struct MetricsAssert { + 
actual: T, + metrics: Vec, +} + +impl MetricsAssert { + /// Initializes an instance of [MetricsAssert] by querying the metrics from the `/metrics` + /// endpoint of a canister via the [CanisterHttpQuery::http_query] method. + pub fn from_http_query(actual: T) -> Self + where + T: CanisterHttpQuery, + E: Debug, + { + let request = http::HttpRequest { + method: "GET".to_string(), + url: "/metrics".to_string(), + headers: Default::default(), + body: Default::default(), + }; + let response = Decode!( + &actual + .http_query(Encode!(&request).expect("failed to encode HTTP request")) + .expect("failed to retrieve metrics"), + http::HttpResponse + ) + .unwrap(); + assert_eq!(response.status_code, 200_u16); + let metrics = String::from_utf8_lossy(response.body.as_slice()) + .trim() + .split('\n') + .map(|line| line.to_string()) + .collect::>(); + Self { metrics, actual } + } + + /// Returns the internal instance being tested. + pub fn into(self) -> T { + self.actual + } + + /// Asserts that the metrics contain at least one entry matching the given Regex pattern. + pub fn assert_contains_metric_matching(self, pattern: &str) -> Self { + assert!( + !self.find_metrics_matching(pattern).is_empty(), + "Expected to find metric matching '{}', but none matched in:\n{:?}", + pattern, + self.metrics + ); + self + } + + /// Asserts that the metrics do not contain any entries matching the given Regex pattern. 
+ pub fn assert_does_not_contain_metric_matching(self, pattern: &str) -> Self { + let matches = self.find_metrics_matching(pattern); + assert!( + matches.is_empty(), + "Expected not to find any metric matching '{}', but found the following matches:\n{:?}", + pattern, + matches + ); + self + } + + fn find_metrics_matching(&self, pattern: &str) -> Vec { + let regex = Regex::new(pattern).unwrap_or_else(|_| panic!("Invalid regex: {}", pattern)); + self.metrics + .iter() + .filter(|line| regex.is_match(line)) + .cloned() + .collect() + } +} + +/// Trait providing the ability to perform an HTTP request to a canister. +pub trait CanisterHttpQuery { + /// Sends a serialized HTTP request to a canister and returns the serialized HTTP response. + fn http_query(&self, request: Vec) -> Result, E>; +} + +#[cfg(feature = "pocket_ic")] +pub use pocket_ic_query_call::PocketIcHttpQuery; + +#[cfg(feature = "pocket_ic")] +mod pocket_ic_query_call { + use super::*; + use candid::Principal; + use pocket_ic::{management_canister::CanisterId, PocketIc, RejectResponse}; + + /// Provides an implementation of the [CanisterHttpQuery] trait in the case where the canister + /// HTTP requests are made through an instance of [PocketIc]. + pub trait PocketIcHttpQuery { + /// Returns a reference to the instance of [PocketIc] through which the HTTP requests are made. + fn get_pocket_ic(&self) -> &PocketIc; + + /// Returns the ID of the canister to which HTTP requests will be made. 
+ fn get_canister_id(&self) -> CanisterId; + } + + impl CanisterHttpQuery for T { + fn http_query(&self, request: Vec) -> Result, RejectResponse> { + self.get_pocket_ic().query_call( + self.get_canister_id(), + Principal::anonymous(), + "http_request", + request, + ) + } + } +} + +mod http { + use super::*; + use serde_bytes::ByteBuf; + + #[derive(Clone, Debug, CandidType, Deserialize)] + pub struct HttpRequest { + pub method: String, + pub url: String, + pub headers: Vec<(String, String)>, + pub body: ByteBuf, + } + + #[derive(Clone, Debug, CandidType, Deserialize)] + pub struct HttpResponse { + pub status_code: u16, + pub headers: Vec<(String, String)>, + pub body: ByteBuf, + } +} diff --git a/packages/ic-sha3/BUILD.bazel b/packages/ic-sha3/BUILD.bazel index 65f411d555a..641d0acc48d 100644 --- a/packages/ic-sha3/BUILD.bazel +++ b/packages/ic-sha3/BUILD.bazel @@ -28,7 +28,7 @@ rust_test_suite( srcs = glob( ["tests/*.rs"], ), - data = [ + compile_data = [ "test_resources/SHAKE256ShortMsg_subset.rsp", "test_resources/SHAKE256VariableOut_subset.rsp", ], diff --git a/packages/icrc-cbor/CHANGELOG.md b/packages/icrc-cbor/CHANGELOG.md index 0d523739a83..35d42ec0e9a 100644 --- a/packages/icrc-cbor/CHANGELOG.md +++ b/packages/icrc-cbor/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## 0.1.0 + ### Added - Initial version of the library diff --git a/packages/icrc-ledger-types/Cargo.toml b/packages/icrc-ledger-types/Cargo.toml index 0ffee34cbb6..3164c145c7f 100644 --- a/packages/icrc-ledger-types/Cargo.toml +++ b/packages/icrc-ledger-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "icrc-ledger-types" -version = "0.1.7" +version = "0.1.8" description = "Types for interacting with DFINITY's implementation of the ICRC-1 fungible token standard." 
license = "Apache-2.0" readme = "README.md" @@ -16,7 +16,7 @@ candid = { workspace = true } crc32fast = "1.2.0" hex = { workspace = true } ic-stable-structures = { workspace = true } -icrc-cbor = { path = "../icrc-cbor" } +icrc-cbor = { path = "../icrc-cbor", version = "0.1.0" } itertools = { workspace = true } minicbor = { workspace = true } num-bigint = { workspace = true } diff --git a/packages/pocket-ic/CHANGELOG.md b/packages/pocket-ic/CHANGELOG.md index 08ab5d8349e..1818992860f 100644 --- a/packages/pocket-ic/CHANGELOG.md +++ b/packages/pocket-ic/CHANGELOG.md @@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - The function `PocketIcBuilder::with_bitcoind_addrs` to specify multiple addresses and ports at which `bitcoind` processes are listening. - The function `PocketIc::query_call_with_effective_principal` for making generic query calls (including management canister query calls). - The function `PocketIc::ingress_status` to fetch the status of an update call submitted through an ingress message. +- The function `PocketIc::ingress_status_as` to fetch the status of an update call submitted through an ingress message. + If the status of the update call is known, but the update call was submitted by a different caller, then an error is returned. - The function `PocketIc::await_call_no_ticks` to await the status of an update call (submitted through an ingress message) becoming known without triggering round execution (round execution must be triggered separarely, e.g., on a "live" instance or by separate PocketIC library calls). 
diff --git a/packages/pocket-ic/HOWTO.md b/packages/pocket-ic/HOWTO.md index 8c9a10a8ea0..e3e6b8dc6ff 100644 --- a/packages/pocket-ic/HOWTO.md +++ b/packages/pocket-ic/HOWTO.md @@ -585,9 +585,20 @@ To mine blocks with rewards credited to a given `bitcoin_address: String`, you c .unwrap(); let mut n = 101; // must be more than 100 (Coinbase maturity rule) - btc_rpc - .generate_to_address(n, &Address::from_str(&bitcoin_address).unwrap()) - .unwrap(); + // retry generating blocks until the bitcoind is up and running + let start = std::time::Instant::now(); + loop { + match btc_rpc.generate_to_address(n, &Address::from_str(&bitcoin_address).unwrap()) { + Ok(_) => break, + Err(bitcoincore_rpc::Error::JsonRpc(err)) => { + if start.elapsed() > std::time::Duration::from_secs(30) { + panic!("Timed out when waiting for bitcoind; last error: {}", err); + } + std::thread::sleep(std::time::Duration::from_millis(100)); + } + Err(err) => panic!("Unexpected error when talking to bitcoind: {}", err), + } + } ``` For an example of a test canister that can be deployed to an application subnet of the PocketIC instance, diff --git a/packages/pocket-ic/src/lib.rs b/packages/pocket-ic/src/lib.rs index cf74dbed171..947e057e3aa 100644 --- a/packages/pocket-ic/src/lib.rs +++ b/packages/pocket-ic/src/lib.rs @@ -695,19 +695,31 @@ impl PocketIc { /// Fetch the status of an update call submitted previously by `submit_call` or `submit_call_with_effective_principal`. /// Note that the status of the update call can only change if the PocketIC instance is in live mode - /// or a round has been executed due to a separate PocketIC library call. + /// or a round has been executed due to a separate PocketIC library call, e.g., `PocketIc::tick()`. 
pub fn ingress_status( &self, message_id: RawMessageId, - caller: Option, + ) -> Option, RejectResponse>> { + let runtime = self.runtime.clone(); + runtime.block_on(async { self.pocket_ic.ingress_status(message_id).await }) + } + + /// Fetch the status of an update call submitted previously by `submit_call` or `submit_call_with_effective_principal`. + /// Note that the status of the update call can only change if the PocketIC instance is in live mode + /// or a round has been executed due to a separate PocketIC library call, e.g., `PocketIc::tick()`. + /// If the status of the update call is known, but the update call was submitted by a different caller, then an error is returned. + pub fn ingress_status_as( + &self, + message_id: RawMessageId, + caller: Principal, ) -> IngressStatusResult { let runtime = self.runtime.clone(); - runtime.block_on(async { self.pocket_ic.ingress_status(message_id, caller).await }) + runtime.block_on(async { self.pocket_ic.ingress_status_as(message_id, caller).await }) } /// Await an update call submitted previously by `submit_call` or `submit_call_with_effective_principal`. - /// This function does not execute rounds and thus should only be called on a "live" PocketIC instance - /// or if rounds are executed due to separate PocketIC library calls. + /// Note that the status of the update call can only change if the PocketIC instance is in live mode + /// or a round has been executed due to a separate PocketIC library call, e.g., `PocketIc::tick()`. pub fn await_call_no_ticks(&self, message_id: RawMessageId) -> Result, RejectResponse> { let runtime = self.runtime.clone(); runtime.block_on(async { self.pocket_ic.await_call_no_ticks(message_id).await }) @@ -1673,6 +1685,9 @@ To download the binary, please visit https://github.com/dfinity/pocketic." 
cmd.stderr(std::process::Stdio::null()); } } + + // TODO: SDK-1936 + #[allow(clippy::zombie_processes)] cmd.spawn() .unwrap_or_else(|_| panic!("Failed to start PocketIC binary ({:?})", bin_path)); diff --git a/packages/pocket-ic/src/nonblocking.rs b/packages/pocket-ic/src/nonblocking.rs index 375b9034d9d..cd39af24cae 100644 --- a/packages/pocket-ic/src/nonblocking.rs +++ b/packages/pocket-ic/src/nonblocking.rs @@ -586,10 +586,38 @@ impl PocketIc { /// Fetch the status of an update call submitted previously by `submit_call` or `submit_call_with_effective_principal`. /// Note that the status of the update call can only change if the PocketIC instance is in live mode - /// or a round has been executed due to a separate PocketIC library call. + /// or a round has been executed due to a separate PocketIC library call, e.g., `PocketIc::tick()`. pub async fn ingress_status( &self, raw_message_id: RawMessageId, + ) -> Option<Result<Vec<u8>, RejectResponse>> { + let status = self.ingress_status_as_caller(raw_message_id, None).await; + match status { + IngressStatusResult::NotAvailable => None, + IngressStatusResult::Success(status) => Some(status), + IngressStatusResult::Forbidden(err) => panic!( + "Retrieving ingress status was forbidden: {}. This is a bug!", + err + ), + } + } + + /// Fetch the status of an update call submitted previously by `submit_call` or `submit_call_with_effective_principal`. + /// Note that the status of the update call can only change if the PocketIC instance is in live mode + /// or a round has been executed due to a separate PocketIC library call, e.g., `PocketIc::tick()`. + /// If the status of the update call is known, but the update call was submitted by a different caller, then an error is returned.
+ pub async fn ingress_status_as( + &self, + raw_message_id: RawMessageId, + caller: Principal, + ) -> IngressStatusResult { + self.ingress_status_as_caller(raw_message_id, Some(caller)) + .await + } + + async fn ingress_status_as_caller( + &self, + raw_message_id: RawMessageId, caller: Option<Principal>, ) -> IngressStatusResult { let endpoint = "read/ingress_status"; @@ -603,15 +631,15 @@ Ok(None) => IngressStatusResult::NotAvailable, Ok(Some(result)) => IngressStatusResult::Success(result.into()), Err((status, message)) => { - assert_eq!(status, StatusCode::FORBIDDEN, "HTTP error code {} for PocketIc::ingress_status is not StatusCode::FORBIDDEN. This is a bug!", status); + assert_eq!(status, StatusCode::FORBIDDEN, "HTTP error code {} for /read/ingress_status is not StatusCode::FORBIDDEN. This is a bug!", status); IngressStatusResult::Forbidden(message) } } } /// Await an update call submitted previously by `submit_call` or `submit_call_with_effective_principal`. - /// This function does not execute rounds and thus should only be called on a "live" PocketIC instance - /// or if rounds are executed due to separate PocketIC library calls. + /// Note that the status of the update call can only change if the PocketIC instance is in live mode + /// or a round has been executed due to a separate PocketIC library call.
pub async fn await_call_no_ticks( &self, message_id: RawMessageId, @@ -622,9 +650,7 @@ impl PocketIc { .with_multiplier(2.0) .build(); loop { - if let IngressStatusResult::Success(ingress_status) = - self.ingress_status(message_id.clone(), None).await - { + if let Some(ingress_status) = self.ingress_status(message_id.clone()).await { break ingress_status; } tokio::time::sleep(retry_policy.next_backoff().unwrap()).await; diff --git a/packages/pocket-ic/tests/tests.rs b/packages/pocket-ic/tests/tests.rs index a872790851b..e0c090c8516 100644 --- a/packages/pocket-ic/tests/tests.rs +++ b/packages/pocket-ic/tests/tests.rs @@ -1998,29 +1998,23 @@ fn ingress_status() { .submit_call(canister_id, caller, "whoami", encode_one(()).unwrap()) .unwrap(); - match pic.ingress_status(msg_id.clone(), None) { - IngressStatusResult::NotAvailable => (), - status => panic!("Unexpected ingress status: {:?}", status), - } + assert!(pic.ingress_status(msg_id.clone()).is_none()); // since the ingress status is not available, any caller can attempt to retrieve it - match pic.ingress_status(msg_id.clone(), Some(Principal::anonymous())) { + match pic.ingress_status_as(msg_id.clone(), Principal::anonymous()) { IngressStatusResult::NotAvailable => (), status => panic!("Unexpected ingress status: {:?}", status), } pic.tick(); - let reply = match pic.ingress_status(msg_id.clone(), None) { - IngressStatusResult::Success(result) => result.unwrap(), - status => panic!("Unexpected ingress status: {:?}", status), - }; + let reply = pic.ingress_status(msg_id.clone()).unwrap().unwrap(); let principal = Decode!(&reply, String).unwrap(); assert_eq!(principal, canister_id.to_string()); // now that the ingress status is available, the caller must match let expected_err = "The user tries to access Request ID not signed by the caller."; - match pic.ingress_status(msg_id.clone(), Some(Principal::anonymous())) { + match pic.ingress_status_as(msg_id.clone(), Principal::anonymous()) { 
IngressStatusResult::Forbidden(msg) => assert_eq!(msg, expected_err,), status => panic!("Unexpected ingress status: {:?}", status), } @@ -2175,10 +2169,7 @@ fn test_reject_response_type() { (err, None) }; if let Some(msg_id) = msg_id { - let ingress_status_err = match pic.ingress_status(msg_id, None) { - IngressStatusResult::Success(result) => result.unwrap_err(), - status => panic!("Unexpected ingress status: {:?}", status), - }; + let ingress_status_err = pic.ingress_status(msg_id).unwrap().unwrap_err(); assert_eq!(ingress_status_err, err); } if action == "reject" { diff --git a/publish/canisters/BUILD.bazel b/publish/canisters/BUILD.bazel index 1955eff350f..d2c64d9e258 100644 --- a/publish/canisters/BUILD.bazel +++ b/publish/canisters/BUILD.bazel @@ -78,8 +78,8 @@ CANISTERS_MAX_SIZE_COMPRESSED_E5_BYTES = { # The orchestrator needs to embed 3 wasms at compile time # (ICRC1 index, ICRC1 ledger, and ICRC1 archive) and size is # therefore strictly controlled. - # Size when constraint addded: 1_655_752 bytes - "ic-ledger-suite-orchestrator-canister.wasm.gz": "17", + # Size when constraint addded: 1_704_979 bytes + "ic-ledger-suite-orchestrator-canister.wasm.gz": "18", # -- BN team -- # Size when constraint addded: 540_349 bytes diff --git a/rs/artifact_pool/src/consensus_pool_cache.rs b/rs/artifact_pool/src/consensus_pool_cache.rs index 5b5ef9462b3..5de2a1d307d 100644 --- a/rs/artifact_pool/src/consensus_pool_cache.rs +++ b/rs/artifact_pool/src/consensus_pool_cache.rs @@ -120,7 +120,7 @@ impl<'a> CachedChainIterator<'a> { } } -impl<'a> Iterator for CachedChainIterator<'a> { +impl Iterator for CachedChainIterator<'_> { type Item = Block; fn next(&mut self) -> Option { diff --git a/rs/artifact_pool/src/idkg_pool.rs b/rs/artifact_pool/src/idkg_pool.rs index aaa3c7f4a2f..b28484165b2 100644 --- a/rs/artifact_pool/src/idkg_pool.rs +++ b/rs/artifact_pool/src/idkg_pool.rs @@ -12,17 +12,14 @@ use crate::{ IntoInner, }; use ic_config::artifact_pool::{ArtifactPoolConfig, 
PersistentPoolBackend}; +use ic_interfaces::idkg::{ + IDkgChangeAction, IDkgChangeSet, IDkgPool, IDkgPoolSection, IDkgPoolSectionOp, + IDkgPoolSectionOps, MutableIDkgPoolSection, +}; use ic_interfaces::p2p::consensus::{ ArtifactTransmit, ArtifactTransmits, ArtifactWithOpt, MutablePool, UnvalidatedArtifact, ValidatedPoolReader, }; -use ic_interfaces::{ - idkg::{ - IDkgChangeAction, IDkgChangeSet, IDkgPool, IDkgPoolSection, IDkgPoolSectionOp, - IDkgPoolSectionOps, MutableIDkgPoolSection, - }, - time_source::TimeSource, -}; use ic_logger::{info, warn, ReplicaLogger}; use ic_metrics::MetricsRegistry; use ic_types::consensus::{ @@ -380,11 +377,7 @@ impl IDkgPoolImpl { } // Populates the unvalidated pool with the initial dealings from the CUP. - pub fn add_initial_dealings( - &mut self, - catch_up_package: &CatchUpPackage, - time_source: &dyn TimeSource, - ) { + pub fn add_initial_dealings(&mut self, catch_up_package: &CatchUpPackage) { let block = catch_up_package.content.block.get_value(); let mut initial_dealings = Vec::new(); @@ -413,7 +406,7 @@ impl IDkgPoolImpl { self.insert(UnvalidatedArtifact { message: IDkgMessage::Dealing(signed_dealing.clone()), peer_id: signed_dealing.dealer_id(), - timestamp: time_source.get_relative_time(), + timestamp: block.context.time, }) } } diff --git a/rs/artifact_pool/src/lmdb_iterator.rs b/rs/artifact_pool/src/lmdb_iterator.rs index 59437b6c1b8..36924fc0fec 100644 --- a/rs/artifact_pool/src/lmdb_iterator.rs +++ b/rs/artifact_pool/src/lmdb_iterator.rs @@ -43,7 +43,7 @@ pub(crate) struct LMDBIterator<'a, F> { db_env: Arc, } -impl<'a, F> LMDBIterator<'a, F> { +impl LMDBIterator<'_, F> { /// Return a new iterator that will iterator through DB objects between /// min_key and max_key (inclusive) that are deserialized using the /// given deserialize function. 
@@ -71,7 +71,7 @@ impl<'a, F> LMDBIterator<'a, F> { } } -impl<'a, T, F: Fn(&RoTransaction<'_>, &[u8]) -> lmdb::Result<T>> Iterator for LMDBIterator<'a, F> { +impl<T, F: Fn(&RoTransaction<'_>, &[u8]) -> lmdb::Result<T>> Iterator for LMDBIterator<'_, F> { type Item = T; fn next(&mut self) -> Option<Self::Item> { @@ -106,7 +106,7 @@ pub(crate) struct LMDBIDkgIterator<'a, F> { _db_env: Arc, } -impl<'a, F> LMDBIDkgIterator<'a, F> { +impl<F> LMDBIDkgIterator<'_, F> { pub fn new( db_env: Arc, db: Database, @@ -136,7 +136,7 @@ impl<'a, F> LMDBIDkgIterator<'a, F> { } } -impl<'a, K, T, F: Fn(&[u8], &[u8]) -> Option<(K, T)>> Iterator for LMDBIDkgIterator<'a, F> { +impl<K, T, F: Fn(&[u8], &[u8]) -> Option<(K, T)>> Iterator for LMDBIDkgIterator<'_, F> { type Item = (K, T); fn next(&mut self) -> Option<Self::Item> { diff --git a/rs/artifact_pool/src/rocksdb_iterator.rs b/rs/artifact_pool/src/rocksdb_iterator.rs index b61483cb1d0..55936a2e0ec 100644 --- a/rs/artifact_pool/src/rocksdb_iterator.rs +++ b/rs/artifact_pool/src/rocksdb_iterator.rs @@ -68,7 +68,7 @@ enum Status { Stopped, } -impl<'a, F> StandaloneIterator<'a, F> { +impl<F> StandaloneIterator<'_, F> { /// Create an iterator for the given column family 'name' of the given 'db' /// starting from 'start_key' pub fn new( diff --git a/rs/artifact_pool/src/rocksdb_pool.rs b/rs/artifact_pool/src/rocksdb_pool.rs index ab35775a2f0..0c89925857b 100755 --- a/rs/artifact_pool/src/rocksdb_pool.rs +++ b/rs/artifact_pool/src/rocksdb_pool.rs @@ -568,7 +568,7 @@ fn deserialize_consensus_artifact( impl PoolSection for PersistentHeightIndexedPool { fn contains(&self, msg_id: &ConsensusMessageId) -> bool { - self.lookup_key(msg_id).map_or(false, |key| { + self.lookup_key(msg_id).is_some_and(|key| { let info = info_for_msg_id(msg_id); let cf_handle = check_not_none_uw!(self.db.cf_handle(info.name)); check_ok_uw!(self.db.get_pinned_cf(cf_handle, &key)).is_some() diff --git a/rs/backup/BUILD.bazel b/rs/backup/BUILD.bazel index a792b7b5e34..32709db022a 100644 --- a/rs/backup/BUILD.bazel +++ b/rs/backup/BUILD.bazel @@ -61,7 +61,7 @@
rust_binary( rust_test( name = "backup_test", + compile_data = ["test_data/fake_input_config.json.template"], crate = ":backup", - data = ["test_data/fake_input_config.json.template"], deps = DEPENDENCIES + DEV_DEPENDENCIES, ) diff --git a/rs/bitcoin/adapter/BUILD.bazel b/rs/bitcoin/adapter/BUILD.bazel index 2050fc86d39..bf093511e2d 100644 --- a/rs/bitcoin/adapter/BUILD.bazel +++ b/rs/bitcoin/adapter/BUILD.bazel @@ -89,11 +89,11 @@ rust_binary( rust_test( name = "adapter_test", - crate = ":adapter", - data = [ + compile_data = [ "test_data/first_2500_mainnet_headers.json", "test_data/first_2500_testnet_headers.json", ], + crate = ":adapter", tags = ["requires-network"], deps = DEV_DEPENDENCIES, ) diff --git a/rs/bitcoin/adapter/src/lib.rs b/rs/bitcoin/adapter/src/lib.rs index ffec373df3f..61329ee5136 100644 --- a/rs/bitcoin/adapter/src/lib.rs +++ b/rs/bitcoin/adapter/src/lib.rs @@ -1,4 +1,4 @@ -#![warn(missing_docs)] +#![cfg_attr(not(test), warn(missing_docs))] //! The Bitcoin adapter interacts with the Bitcoin P2P network to obtain blocks //! and publish transactions. Moreover, it interacts with the Bitcoin system diff --git a/rs/bitcoin/checker/BUILD.bazel b/rs/bitcoin/checker/BUILD.bazel index 68a9c9427af..3bc774f10df 100644 --- a/rs/bitcoin/checker/BUILD.bazel +++ b/rs/bitcoin/checker/BUILD.bazel @@ -97,6 +97,7 @@ rust_ic_test( # Keep sorted. 
":btc_checker_lib", "//:pocket-ic-server", + "//packages/ic-metrics-assert:ic-metrics-assert_pocket_ic", "//packages/pocket-ic", "//rs/rust_canisters/http_types", "//rs/test_utilities/load_wasm", diff --git a/rs/bitcoin/checker/Cargo.toml b/rs/bitcoin/checker/Cargo.toml index 6b39a2952e2..757e9ef16bc 100644 --- a/rs/bitcoin/checker/Cargo.toml +++ b/rs/bitcoin/checker/Cargo.toml @@ -25,18 +25,18 @@ ic-cdk = { workspace = true } ic-metrics-encoder = "1.1" ic-stable-structures = { workspace = true } serde = { workspace = true } -serde_json = {workspace = true } +serde_json = { workspace = true } time = { workspace = true } url = { workspace = true } [dev-dependencies] candid_parser = { workspace = true } ic-base-types = { path = "../../types/base_types" } +ic-metrics-assert = { path = "../../../packages/ic-metrics-assert", features = ["pocket_ic"] } ic-types = { path = "../../types/types" } ic-test-utilities-load-wasm = { path = "../../test_utilities/load_wasm" } ic-universal-canister = { path = "../../universal_canister/lib" } pocket-ic = { path = "../../../packages/pocket-ic" } proptest = { workspace = true } tokio = { workspace = true } -regex = { workspace = true } scraper = "0.17.1" diff --git a/rs/bitcoin/checker/tests/tests.rs b/rs/bitcoin/checker/tests/tests.rs index 018c78d2b4b..426816e3e82 100644 --- a/rs/bitcoin/checker/tests/tests.rs +++ b/rs/bitcoin/checker/tests/tests.rs @@ -8,10 +8,13 @@ use ic_btc_checker::{ INITIAL_MAX_RESPONSE_BYTES, }; use ic_btc_interface::Txid; +use ic_canisters_http_types::{HttpRequest, HttpResponse}; use ic_cdk::api::call::RejectionCode; +use ic_metrics_assert::{MetricsAssert, PocketIcHttpQuery}; use ic_test_utilities_load_wasm::load_wasm; use ic_types::Cycles; use ic_universal_canister::{call_args, wasm, UNIVERSAL_CANISTER_WASM}; +use pocket_ic::management_canister::CanisterId; use pocket_ic::{ common::rest::{ CanisterHttpHeader, CanisterHttpReject, CanisterHttpReply, CanisterHttpRequest, @@ -19,7 +22,6 @@ use pocket_ic::{ }, 
query_candid, PocketIc, PocketIcBuilder, RejectResponse, }; -use regex::Regex; use std::str::FromStr; const MAX_TICKS: usize = 10; @@ -369,7 +371,7 @@ fn test_check_transaction_passed() { let actual_cost = cycles_before - cycles_after; assert!(actual_cost > expected_cost); assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); - MetricsAssert::from_querying_metrics(&setup).assert_contains_metric_matching( + MetricsAssert::from_http_query(&setup).assert_contains_metric_matching( r#"btc_check_requests_total\{type=\"check_transaction\"\} 1 \d+"#, ); }; @@ -414,7 +416,7 @@ fn test_check_transaction_passed() { let actual_cost = cycles_before - cycles_after; assert!(actual_cost > expected_cost); assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); - MetricsAssert::from_querying_metrics(&setup).assert_contains_metric_matching( + MetricsAssert::from_http_query(&setup).assert_contains_metric_matching( r#"btc_check_requests_total\{type=\"check_transaction\"\} 1 \d+"#, ); @@ -458,7 +460,7 @@ fn test_check_transaction_passed() { actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN, "actual_cost: {actual_cost}, expected_cost: {expected_cost}" ); - MetricsAssert::from_querying_metrics(&setup).assert_contains_metric_matching( + MetricsAssert::from_http_query(&setup).assert_contains_metric_matching( r#"btc_check_requests_total\{type=\"check_transaction\"\} 1 \d+"#, ); @@ -746,7 +748,7 @@ fn test_check_transaction_error() { assert!(actual_cost > expected_cost); assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); - MetricsAssert::from_querying_metrics(&setup) + MetricsAssert::from_http_query(&setup) .assert_contains_metric_matching( r#"btc_check_requests_total\{type=\"check_transaction\"\} 5 \d+"#, ) @@ -790,7 +792,7 @@ fn should_query_logs_and_metrics() { fn make_http_query>(setup: &Setup, url: U) -> Vec { use candid::Decode; - let request = ic_canisters_http_types::HttpRequest { + let request = HttpRequest { method: 
"GET".to_string(), url: url.into(), headers: Default::default(), @@ -807,7 +809,7 @@ fn make_http_query>(setup: &Setup, url: U) -> Vec { Encode!(&request).expect("failed to encode HTTP request"), ) .expect("failed to query get_transactions on the ledger"), - ic_canisters_http_types::HttpResponse + HttpResponse ) .unwrap(); @@ -815,37 +817,12 @@ fn make_http_query>(setup: &Setup, url: U) -> Vec { response.body.into_vec() } -pub struct MetricsAssert { - metrics: Vec, -} - -impl MetricsAssert { - fn from_querying_metrics(setup: &Setup) -> Self { - let response = make_http_query(setup, "/metrics"); - let metrics = String::from_utf8_lossy(&response) - .trim() - .split('\n') - .map(|line| line.to_string()) - .collect::>(); - Self { metrics } - } - - fn assert_contains_metric_matching(self, pattern: &str) -> Self { - assert!( - !self.find_metrics_matching(pattern).is_empty(), - "Expected to find metric matching '{}', but none matched in:\n{:?}", - pattern, - self.metrics - ); - self +impl PocketIcHttpQuery for &Setup { + fn get_pocket_ic(&self) -> &PocketIc { + &self.env } - fn find_metrics_matching(&self, pattern: &str) -> Vec { - let regex = Regex::new(pattern).unwrap_or_else(|_| panic!("Invalid regex: {}", pattern)); - self.metrics - .iter() - .filter(|line| regex.is_match(line)) - .cloned() - .collect() + fn get_canister_id(&self) -> CanisterId { + self.btc_checker_canister } } diff --git a/rs/bitcoin/ckbtc/mainnet/ckbtc_archive_upgrade_2025_01_17.md b/rs/bitcoin/ckbtc/mainnet/ckbtc_archive_upgrade_2025_01_17.md new file mode 100644 index 00000000000..5443f70ffa2 --- /dev/null +++ b/rs/bitcoin/ckbtc/mainnet/ckbtc_archive_upgrade_2025_01_17.md @@ -0,0 +1,47 @@ +# Proposal to upgrade the ckBTC archive canister + +Repository: `https://github.com/dfinity/ic.git` + +Git hash: `c741e349451edf0c9792149ad439bb32a0161371` + +New compressed Wasm hash: `2b0970a84976bc2eb9591b68d44501566937994fa5594972f8aac9c8b058672f` + +Upgrade args hash: 
`0fee102bd16b053022b69f2c65fd5e2f41d150ce9c214ac8731cfaf496ebda4e` + +Target canister: `nbsys-saaaa-aaaar-qaaga-cai` + +Previous ckBTC archive proposal: https://dashboard.internetcomputer.org/proposal/134451 + +--- + +## Motivation + +Upgrade the ckBTC archive canister to the same version ([ledger-suite-icrc-2025-01-07](https://github.com/dfinity/ic/releases/tag/ledger-suite-icrc-2025-01-07)) as the ckBTC ledger canister to maintain a consistent versioning across the ckBTC ledger suite. + +## Upgrade args + +``` +git fetch +git checkout c741e349451edf0c9792149ad439bb32a0161371 +cd rs/ledger_suite/icrc1/archive +didc encode '()' | xxd -r -p | sha256sum +``` + +## Release Notes + +No changes since last version (`2190613d3b5bcd9b74c382b22d151580b8ac271a`). + +``` +git log --format='%C(auto) %h %s' 2190613d3b5bcd9b74c382b22d151580b8ac271a..c741e349451edf0c9792149ad439bb32a0161371 -- rs/ledger_suite/icrc1/archive + ``` + +## Wasm Verification + +Verify that the hash of the gzipped WASM matches the proposed hash. 
+ +``` +git fetch +git checkout c741e349451edf0c9792149ad439bb32a0161371 +"./ci/container/build-ic.sh" "--canisters" +sha256sum ./artifacts/canisters/ic-icrc1-archive.wasm.gz +``` diff --git a/rs/bitcoin/ckbtc/mainnet/ckbtc_index_upgrade_2025_01_17.md b/rs/bitcoin/ckbtc/mainnet/ckbtc_index_upgrade_2025_01_17.md new file mode 100644 index 00000000000..0c1786fc1bd --- /dev/null +++ b/rs/bitcoin/ckbtc/mainnet/ckbtc_index_upgrade_2025_01_17.md @@ -0,0 +1,49 @@ +# Proposal to upgrade the ckBTC index canister + +Repository: `https://github.com/dfinity/ic.git` + +Git hash: `c741e349451edf0c9792149ad439bb32a0161371` + +New compressed Wasm hash: `e155db9d06b6147ece4f9defe599844f132a7db21693265671aa6ac60912935f` + +Upgrade args hash: `0fee102bd16b053022b69f2c65fd5e2f41d150ce9c214ac8731cfaf496ebda4e` + +Target canister: `n5wcd-faaaa-aaaar-qaaea-cai` + +Previous ckBTC index proposal: https://dashboard.internetcomputer.org/proposal/134449 + +--- + +## Motivation + +Upgrade the ckBTC index canister to the same version ([ledger-suite-icrc-2025-01-07](https://github.com/dfinity/ic/releases/tag/ledger-suite-icrc-2025-01-07)) as the ckBTC ledger canister to maintain a consistent versioning across the ckBTC ledger suite. 
+ +## Upgrade args + +``` +git fetch +git checkout c741e349451edf0c9792149ad439bb32a0161371 +cd rs/ledger_suite/icrc1/index-ng +didc encode '()' | xxd -r -p | sha256sum +``` + +## Release Notes + +``` +git log --format='%C(auto) %h %s' 2190613d3b5bcd9b74c382b22d151580b8ac271a..c741e349451edf0c9792149ad439bb32a0161371 -- rs/ledger_suite/icrc1/index-ng +c741e34945 feat: ICRC-ledger: FI-1439: Implement V4 for ICRC ledger - migrate balances to stable structures (#2901) +575ca531a7 chore(ICRC_Index): FI-1468: Remove old ICRC index canister (#3286) +8d4fcddc6e test(ICRC_Index): FI-1617: Optimize retrieve_blocks_from_ledger_interval tests (#3236) +e369646b76 fix: Use default rust edition instead of specifying it in the BUILD rules (#3047) + ``` + +## Wasm Verification + +Verify that the hash of the gzipped WASM matches the proposed hash. + +``` +git fetch +git checkout c741e349451edf0c9792149ad439bb32a0161371 +"./ci/container/build-ic.sh" "--canisters" +sha256sum ./artifacts/canisters/ic-icrc1-index-ng.wasm.gz +``` diff --git a/rs/bitcoin/ckbtc/mainnet/ckbtc_ledger_upgrade_2025_01_17.md b/rs/bitcoin/ckbtc/mainnet/ckbtc_ledger_upgrade_2025_01_17.md new file mode 100644 index 00000000000..bcef187ba8b --- /dev/null +++ b/rs/bitcoin/ckbtc/mainnet/ckbtc_ledger_upgrade_2025_01_17.md @@ -0,0 +1,49 @@ +# Proposal to upgrade the ckBTC ledger canister + +Repository: `https://github.com/dfinity/ic.git` + +Git hash: `c741e349451edf0c9792149ad439bb32a0161371` + +New compressed Wasm hash: `3b03d1bb1145edbcd11101ab2788517bc0f427c3bd7b342b9e3e7f42e29d5822` + +Upgrade args hash: `0fee102bd16b053022b69f2c65fd5e2f41d150ce9c214ac8731cfaf496ebda4e` + +Target canister: `mxzaz-hqaaa-aaaar-qaada-cai` + +Previous ckBTC ledger proposal: https://dashboard.internetcomputer.org/proposal/134450 + +--- + +## Motivation + +Upgrade the ckBTC ledger canister to the latest version ([ledger-suite-icrc-2025-01-07](https://github.com/dfinity/ic/releases/tag/ledger-suite-icrc-2025-01-07)) to continue the 
migration towards stable memory. + +## Upgrade args + +``` +git fetch +git checkout c741e349451edf0c9792149ad439bb32a0161371 +cd rs/ledger_suite/icrc1/ledger +didc encode '()' | xxd -r -p | sha256sum +``` + +## Release Notes + +``` +git log --format='%C(auto) %h %s' 2190613d3b5bcd9b74c382b22d151580b8ac271a..c741e349451edf0c9792149ad439bb32a0161371 -- rs/ledger_suite/icrc1/ledger +c741e34945 feat: ICRC-ledger: FI-1439: Implement V4 for ICRC ledger - migrate balances to stable structures (#2901) +ddadaafd51 test(ICP_Ledger): FI-1616: Fix ICP ledger upgrade tests (#3213) +dfc3810851 fix(ICRC-Ledger): changed certificate version (#2848) +b006ae9934 feat(ICP-ledger): FI-1438: Implement V3 for ICP ledger - migrate allowances to stable structures (#2818) + ``` + +## Wasm Verification + +Verify that the hash of the gzipped WASM matches the proposed hash. + +``` +git fetch +git checkout c741e349451edf0c9792149ad439bb32a0161371 +"./ci/container/build-ic.sh" "--canisters" +sha256sum ./artifacts/canisters/ic-icrc1-ledger.wasm.gz +``` diff --git a/rs/bitcoin/ckbtc/minter/BUILD.bazel b/rs/bitcoin/ckbtc/minter/BUILD.bazel index f1a304a7c34..23ceb2ce1bc 100644 --- a/rs/bitcoin/ckbtc/minter/BUILD.bazel +++ b/rs/bitcoin/ckbtc/minter/BUILD.bazel @@ -170,6 +170,7 @@ rust_ic_test( deps = [ # Keep sorted. 
":ckbtc_minter_lib", + "//packages/ic-metrics-assert", "//packages/icrc-ledger-types:icrc_ledger_types", "//rs/bitcoin/checker:btc_checker_lib", "//rs/bitcoin/mock", diff --git a/rs/bitcoin/ckbtc/minter/Cargo.toml b/rs/bitcoin/ckbtc/minter/Cargo.toml index 22f238d721a..8f867f2f11c 100644 --- a/rs/bitcoin/ckbtc/minter/Cargo.toml +++ b/rs/bitcoin/ckbtc/minter/Cargo.toml @@ -56,13 +56,13 @@ ic-agent = { workspace = true } ic-bitcoin-canister-mock = { path = "../../mock" } ic-config = { path = "../../../config" } ic-icrc1-ledger = { path = "../../../ledger_suite/icrc1/ledger" } +ic-metrics-assert = { path = "../../../../packages/ic-metrics-assert" } ic-state-machine-tests = { path = "../../../state_machine_tests" } ic-test-utilities-load-wasm = { path = "../../../test_utilities/load_wasm" } ic-types = { path = "../../../types/types" } maplit = "1.0.2" mockall = { workspace = true } proptest = { workspace = true } -regex = "1.11.0" simple_asn1 = { workspace = true } tokio = { workspace = true } diff --git a/rs/bitcoin/ckbtc/minter/tests/tests.rs b/rs/bitcoin/ckbtc/minter/tests/tests.rs index 6f2190eb8ab..d6bb399554d 100644 --- a/rs/bitcoin/ckbtc/minter/tests/tests.rs +++ b/rs/bitcoin/ckbtc/minter/tests/tests.rs @@ -26,14 +26,14 @@ use ic_ckbtc_minter::{ Log, MinterInfo, CKBTC_LEDGER_MEMO_SIZE, MIN_RELAY_FEE_PER_VBYTE, MIN_RESUBMISSION_DELAY, }; use ic_icrc1_ledger::{InitArgsBuilder as LedgerInitArgsBuilder, LedgerArgument}; -use ic_state_machine_tests::{StateMachine, StateMachineBuilder, WasmResult}; +use ic_metrics_assert::{CanisterHttpQuery, MetricsAssert}; +use ic_state_machine_tests::{StateMachine, StateMachineBuilder, UserError, WasmResult}; use ic_test_utilities_load_wasm::load_wasm; use ic_types::Cycles; use icrc_ledger_types::icrc1::account::Account; use icrc_ledger_types::icrc1::transfer::{TransferArg, TransferError}; use icrc_ledger_types::icrc2::approve::{ApproveArgs, ApproveError}; use icrc_ledger_types::icrc3::transactions::{GetTransactionsRequest, 
GetTransactionsResponse}; -use regex::Regex; use std::collections::BTreeMap; use std::path::PathBuf; use std::str::FromStr; @@ -1232,8 +1232,16 @@ impl CkBtcSetup { .expect("minter self-check failed") } - pub fn check_minter_metrics(self) -> MetricsAssert { - MetricsAssert::from_querying_metrics(self.env, self.minter_id) + pub fn check_minter_metrics(self) -> MetricsAssert { + MetricsAssert::from_http_query(self) + } +} + +impl CanisterHttpQuery for CkBtcSetup { + fn http_query(&self, request: Vec) -> Result, UserError> { + self.env + .query(self.minter_id, "http_request", request) + .map(assert_reply) } } @@ -2110,69 +2118,3 @@ fn test_retrieve_btc_with_approval_fail() { vec![] ); } - -pub struct MetricsAssert { - metrics: Vec, -} - -impl MetricsAssert { - pub fn from_querying_metrics(state_machine: StateMachine, canister_id: CanisterId) -> Self { - use ic_canisters_http_types::{HttpRequest, HttpResponse}; - let request = HttpRequest { - method: "GET".to_string(), - url: "/metrics".to_string(), - headers: Default::default(), - body: Default::default(), - }; - let response = Decode!( - &assert_reply( - state_machine - .query( - canister_id, - "http_request", - Encode!(&request).expect("failed to encode HTTP request"), - ) - .expect("failed to get metrics") - ), - HttpResponse - ) - .unwrap(); - assert_eq!(response.status_code, 200_u16); - let metrics = String::from_utf8_lossy(response.body.as_slice()) - .trim() - .split('\n') - .map(|line| line.to_string()) - .collect::>(); - Self { metrics } - } - - pub fn assert_contains_metric_matching(self, pattern: &str) -> Self { - assert!( - !self.find_metrics_matching(pattern).is_empty(), - "Expected to find metric matching '{}', but none matched in:\n{:?}", - pattern, - self.metrics - ); - self - } - - pub fn assert_does_not_contain_metric_matching(self, pattern: &str) -> Self { - let matches = self.find_metrics_matching(pattern); - assert!( - matches.is_empty(), - "Expected not to find any metric matching '{}', but found 
the following matches:\n{:?}", - pattern, - matches - ); - self - } - - fn find_metrics_matching(&self, pattern: &str) -> Vec { - let regex = Regex::new(pattern).unwrap_or_else(|_| panic!("Invalid regex: {}", pattern)); - self.metrics - .iter() - .filter(|line| regex.is_match(line)) - .cloned() - .collect() - } -} diff --git a/rs/boundary_node/canary_proxy/src/support/auto_server.rs b/rs/boundary_node/canary_proxy/src/support/auto_server.rs index 0dd0d4079a7..cd5ce9926fe 100644 --- a/rs/boundary_node/canary_proxy/src/support/auto_server.rs +++ b/rs/boundary_node/canary_proxy/src/support/auto_server.rs @@ -123,7 +123,7 @@ enum Version { H1, H2, } -async fn read_version<'a, A>(mut reader: A) -> IoResult<(Version, Rewind)> +async fn read_version(mut reader: A) -> IoResult<(Version, Rewind)> where A: AsyncRead + Unpin, { @@ -171,7 +171,7 @@ where if this.buf.filled() == H2_PREFACE { *this.version = Version::H2; } - return Poll::Ready(Ok((*this.version, this.buf.filled().to_vec()))); + Poll::Ready(Ok((*this.version, this.buf.filled().to_vec()))) } } diff --git a/rs/boundary_node/ic_boundary/src/cache.rs b/rs/boundary_node/ic_boundary/src/cache.rs index 11f046cf4cc..5920874df4d 100644 --- a/rs/boundary_node/ic_boundary/src/cache.rs +++ b/rs/boundary_node/ic_boundary/src/cache.rs @@ -100,7 +100,7 @@ fn weigh_entry(k: &Arc, v: &CacheItem) -> u32 { + 58; // 2 x Principal for (k, v) in v.headers.iter() { - cost += k.as_str().as_bytes().len(); + cost += k.as_str().len(); cost += v.as_bytes().len(); } diff --git a/rs/boundary_node/ic_boundary/src/core.rs b/rs/boundary_node/ic_boundary/src/core.rs index 3f88273f4a9..8c06873aa21 100644 --- a/rs/boundary_node/ic_boundary/src/core.rs +++ b/rs/boundary_node/ic_boundary/src/core.rs @@ -299,6 +299,7 @@ pub async fn main(cli: Cli) -> Result<(), Error> { v.clone(), generic_limiter_opts, channel_snapshot_recv, + &metrics_registry, ))) } else if let Some(v) = cli.rate_limiting.rate_limit_generic_canister_id { 
Some(Arc::new(generic::GenericLimiter::new_from_canister( @@ -307,6 +308,7 @@ pub async fn main(cli: Cli) -> Result<(), Error> { generic_limiter_opts, cli.misc.crypto_config.is_some(), channel_snapshot_recv, + &metrics_registry, ))) } else { None diff --git a/rs/boundary_node/ic_boundary/src/rate_limiting/generic.rs b/rs/boundary_node/ic_boundary/src/rate_limiting/generic.rs index 81ee83f615d..8d6414aeffe 100644 --- a/rs/boundary_node/ic_boundary/src/rate_limiting/generic.rs +++ b/rs/boundary_node/ic_boundary/src/rate_limiting/generic.rs @@ -5,7 +5,7 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::Duration, + time::{Duration, Instant}, }; use anyhow::{Context as _, Error}; @@ -23,8 +23,13 @@ use ic_bn_lib::http::ConnInfo; use ic_canister_client::Agent; use ic_types::CanisterId; use ipnet::IpNet; +use prometheus::{ + register_int_counter_vec_with_registry, register_int_gauge_with_registry, IntCounterVec, + IntGauge, Registry, +}; use rate_limits_api::v1::{Action, IpPrefixes, RateLimitRule, RequestType as RequestTypeRule}; use ratelimit::Ratelimiter; +use strum::{Display, IntoStaticStr}; #[allow(clippy::disallowed_types)] use tokio::sync::{watch, Mutex}; use tracing::warn; @@ -57,7 +62,7 @@ fn convert_request_type(rt: RequestType) -> RequestTypeRule { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Display, IntoStaticStr)] enum Decision { Pass, Block, @@ -170,6 +175,65 @@ pub struct Options { pub autoscale: bool, } +struct Metrics { + scale: IntGauge, + last_successful_fetch: IntGauge, + active_rules: IntGauge, + fetches: IntCounterVec, + decisions: IntCounterVec, + shards_count: IntGauge, +} + +impl Metrics { + fn new(registry: &Registry) -> Self { + Self { + scale: register_int_gauge_with_registry!( + format!("generic_limiter_scale"), + format!("Current scale that's applied to the rules"), + registry, + ) + .unwrap(), + + last_successful_fetch: register_int_gauge_with_registry!( + 
format!("generic_limiter_last_successful_fetch"), + format!("How many seconds ago the last successful fetch happened"), + registry + ) + .unwrap(), + + active_rules: register_int_gauge_with_registry!( + format!("generic_limiter_rules"), + format!("Number of rules currently installed"), + registry + ) + .unwrap(), + + fetches: register_int_counter_vec_with_registry!( + format!("generic_limiter_fetches"), + format!("Count of rule fetches and their outcome"), + &["result"], + registry + ) + .unwrap(), + + decisions: register_int_counter_vec_with_registry!( + format!("generic_limiter_decisions"), + format!("Count of decisions made by the ratelimiter"), + &["decision"], + registry + ) + .unwrap(), + + shards_count: register_int_gauge_with_registry!( + format!("generic_limiter_shards_count"), + format!("Number of dynamic shards if the corresponding rules are used"), + registry, + ) + .unwrap(), + } + } +} + pub struct GenericLimiter { fetcher: Arc, buckets: ArcSwap>, @@ -177,7 +241,10 @@ pub struct GenericLimiter { scale: AtomicU32, #[allow(clippy::disallowed_types)] channel_snapshot: Mutex>>>, + #[allow(clippy::disallowed_types)] + last_refresh: Mutex, opts: Options, + metrics: Metrics, } impl GenericLimiter { @@ -185,9 +252,10 @@ impl GenericLimiter { path: PathBuf, opts: Options, channel_snapshot: watch::Receiver>>, + registry: &Registry, ) -> Self { let fetcher = Arc::new(FileFetcher(path)); - Self::new_with_fetcher(fetcher, opts, channel_snapshot) + Self::new_with_fetcher(fetcher, opts, channel_snapshot, registry) } pub fn new_from_canister( @@ -196,6 +264,7 @@ impl GenericLimiter { opts: Options, use_update_call: bool, channel_snapshot: watch::Receiver>>, + registry: &Registry, ) -> Self { let config_fetcher: Arc = if use_update_call { Arc::new(CanisterConfigFetcherUpdate(agent, canister_id)) @@ -204,22 +273,26 @@ impl GenericLimiter { }; let fetcher = Arc::new(CanisterFetcher(config_fetcher)); - Self::new_with_fetcher(fetcher, opts, channel_snapshot) + 
Self::new_with_fetcher(fetcher, opts, channel_snapshot, registry) } fn new_with_fetcher( fetcher: Arc, opts: Options, channel_snapshot: watch::Receiver>>, + registry: &Registry, ) -> Self { Self { fetcher, buckets: ArcSwap::new(Arc::new(vec![])), active_rules: ArcSwap::new(Arc::new(vec![])), opts, + #[allow(clippy::disallowed_types)] + last_refresh: Mutex::new(Instant::now()), scale: AtomicU32::new(1), #[allow(clippy::disallowed_types)] channel_snapshot: Mutex::new(channel_snapshot), + metrics: Metrics::new(registry), } } @@ -300,10 +373,13 @@ impl GenericLimiter { .await .context("unable to fetch rules")?; + self.metrics.active_rules.set(rules.len() as i64); + self.apply_rules(rules.clone(), self.scale.load(Ordering::SeqCst)); // Store the new copy of the rules as a golden copy for future recalculation self.active_rules.store(Arc::new(rules)); + *self.last_refresh.lock().await = Instant::now(); Ok(()) } @@ -324,6 +400,21 @@ impl GenericLimiter { // No rules / no match -> pass Decision::Pass } + + /// Count the number of shards in sharded limiters (if there are any) + fn shards_count(&self) -> u64 { + self.buckets + .load_full() + .iter() + .filter_map(|x| { + if let Some(Limiter::Sharded(v, _)) = &x.limiter { + Some(v.shards_count()) + } else { + None + } + }) + .sum() + } } #[async_trait] @@ -345,7 +436,7 @@ impl Run for Arc { // Store the count of API BNs as a scale and make sure it's >= 1 let scale = v.api_bns.len().max(1) as u32; self.scale.store(scale, Ordering::SeqCst); - + self.metrics.scale.set(scale as i64); warn!("GenericLimiter: got a new registry snapshot, recalculating with scale {scale}"); // Recalculate the rules based on the potentially new scale @@ -354,9 +445,15 @@ impl Run for Arc { } _ = interval.tick() => { - if let Err(e) = self.refresh().await { + let r = self.refresh().await; + self.metrics.fetches.with_label_values(&[if r.is_ok() { "success" } else {"failure"}]).inc(); + if let Err(e) = r { warn!("GenericLimiter: unable to refresh: 
{e:#}"); } + + // Update the metrics + self.metrics.last_successful_fetch.set(self.last_refresh.lock().await.elapsed().as_secs_f64() as i64); + self.metrics.shards_count.set(self.shards_count() as i64); } } } @@ -380,7 +477,16 @@ pub async fn middleware( ip: conn_info.remote_addr.ip(), }; - match state.evaluate(ctx) { + let decision = state.evaluate(ctx); + + let decision_str: &'static str = decision.into(); + state + .metrics + .decisions + .with_label_values(&[decision_str]) + .inc(); + + match decision { Decision::Pass => Ok(next.run(request).await), Decision::Block => Err(ErrorCause::Forbidden), Decision::Limit => Err(ErrorCause::RateLimited(RateLimitCause::Generic)), @@ -482,6 +588,7 @@ mod test { Arc::new(fetcher), opts.clone(), rx, + &Registry::new(), )); assert!(limiter.refresh().await.is_ok()); assert_eq!(limiter.active_rules.load().len(), 7); @@ -521,6 +628,7 @@ mod test { Arc::new(BrokenFetcher), opts, rx, + &Registry::new(), )); let mut runner = limiter.clone(); diff --git a/rs/boundary_node/ic_boundary/src/rate_limiting/sharded.rs b/rs/boundary_node/ic_boundary/src/rate_limiting/sharded.rs index afd9af3fad3..9943dd3370f 100644 --- a/rs/boundary_node/ic_boundary/src/rate_limiting/sharded.rs +++ b/rs/boundary_node/ic_boundary/src/rate_limiting/sharded.rs @@ -46,6 +46,11 @@ impl ShardedRatelimiter { shard.limiter.try_wait().is_ok() } + + pub fn shards_count(&self) -> u64 { + self.shards.run_pending_tasks(); + self.shards.entry_count() + } } #[cfg(test)] diff --git a/rs/boundary_node/rate_limits/api/src/schema_versions/v1.rs b/rs/boundary_node/rate_limits/api/src/schema_versions/v1.rs index 7067518a60f..42644593858 100644 --- a/rs/boundary_node/rate_limits/api/src/schema_versions/v1.rs +++ b/rs/boundary_node/rate_limits/api/src/schema_versions/v1.rs @@ -28,7 +28,7 @@ pub enum RequestType { /// Implement serde parser for Action struct ActionVisitor; -impl<'de> de::Visitor<'de> for ActionVisitor { +impl de::Visitor<'_> for ActionVisitor { type Value = 
Action; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { diff --git a/rs/boundary_node/rate_limits/proposals/install_10-01-2025_134775.md b/rs/boundary_node/rate_limits/proposals/install_10-01-2025_134775.md new file mode 100644 index 00000000000..28a5ace58a3 --- /dev/null +++ b/rs/boundary_node/rate_limits/proposals/install_10-01-2025_134775.md @@ -0,0 +1,72 @@ +# Install the Rate-Limit Canister from Commit ab29295 + +__Proposer__: DFINITY Foundation + +__Source code__: [ab29295b39258e753aafaaad72c740d938d61e35][new-commit] + +[new-commit]: https://github.com/dfinity/ic/tree/ab29295b39258e753aafaaad72c740d938d61e35 + +## Summary + +Following the adoption of the motion proposal addressing [incident handling](https://dashboard.internetcomputer.org/proposal/134031) within the framework of the new decentralized boundary node architecture, we propose the deployment of the new rate-limit canister. + +This canister will enable API boundary nodes to enforce rate-limiting rules issued by an authorized DFINITY principal, hence protecting the ICP during incidents. The canister is designed as an append-only storage model, ensuring transparency and auditability of rate-limit rules after incident disclosure. + +The authorized principal responsible for pushing new rate-limit configurations and disclosing them is specified through the upgrade arguments in this proposal. + +## Verifying the installation + +First, make sure your Git repo has the right information. + +``` +# Option A. Get a fresh copy of the code. +git clone git@github.com:dfinity/ic.git && cd ic +# Option B. If you already have a copy of the ICP repo. +git fetch +``` +
+Second, check out the right version of the code. + +``` +git checkout ab29295b39258e753aafaaad72c740d938d61e35 +``` + +### Argument Verification + +The [didc][latest_didc] tool is required.
+ +[latest_didc]: https://github.com/dfinity/candid/releases/latest + +Fingerprint the canister argument: + +``` +didc encode \ + -d rs/boundary_node/rate_limits/canister/interface.did \ + -t '(InitArg)' \ + '(record { + authorized_principal = opt principal "2igsz-4cjfz-unvfj-s4d3u-ftcdb-6ibug-em6tf-nzm2h-6igks-spdus-rqe"; + registry_polling_period_secs = 60; + })' | xxd -r -p | sha256sum +``` + +This should match `arg_hash` field of this proposal. + +### WASM Verification + +See ["Building the code"][prereqs] for prerequisites. + +[prereqs]: https://github.com/dfinity/ic/tree/ab29295b39258e753aafaaad72c740d938d61e35/README.adoc#building-the-code + +Build the release version of canisters: + +``` +./ci/container/build-ic.sh -c +``` + +Fingerprint the canister module: + +``` +sha256sum ./artifacts/canisters/rate-limit-canister.wasm.gz +``` + +This should match `wasm_module_hash` field of this proposal. diff --git a/rs/canister_sandbox/src/rpc.rs b/rs/canister_sandbox/src/rpc.rs index 8cdef287c94..4dc06be5d51 100644 --- a/rs/canister_sandbox/src/rpc.rs +++ b/rs/canister_sandbox/src/rpc.rs @@ -5,7 +5,6 @@ use std::task::{Context, Poll}; /// Pieces for a very simple bidirectional RPC using an underlying /// duplex stream channel. - /// Describe RPC error -- can be either related to transport (i.e. /// failure to transport or parse a message) or to server (i.e. server /// responded, but gave us a message indicating an error). @@ -276,7 +275,6 @@ impl MessageSink for ReplyManager { /// An RPC result that is immediately "ready" (i.e. pass a value to /// a caller such that it does not need to wait). - pub struct ReadyResult { value: Mutex>, } diff --git a/rs/canister_sandbox/src/transport.rs b/rs/canister_sandbox/src/transport.rs index 57975178cf6..9e6f9a4e9ba 100644 --- a/rs/canister_sandbox/src/transport.rs +++ b/rs/canister_sandbox/src/transport.rs @@ -471,7 +471,7 @@ pub fn socket_read_messages< // updating the socket timeout. 
loop { if let Some(bytes) = reader.receive_message(&mut buf, &mut fds, 0, None) { - break (bytes); + break bytes; } } } diff --git a/rs/canonical_state/src/encoding/tests/compatibility.rs b/rs/canonical_state/src/encoding/tests/compatibility.rs index dd46b757209..c451d085f61 100644 --- a/rs/canonical_state/src/encoding/tests/compatibility.rs +++ b/rs/canonical_state/src/encoding/tests/compatibility.rs @@ -333,7 +333,6 @@ fn canonical_encoding_stream_header_v19_plus() { /// 00 # unsigned(0) /// 03 # field_index(SubnetMetrics::update_transactions_total) /// 19 1068 # unsigned(4200) - /// ``` /// Used http://cbor.me/ for printing the human friendly output. #[test] diff --git a/rs/canonical_state/src/subtree_visitor.rs b/rs/canonical_state/src/subtree_visitor.rs index a9ebbd7a574..8c9d92c8312 100644 --- a/rs/canonical_state/src/subtree_visitor.rs +++ b/rs/canonical_state/src/subtree_visitor.rs @@ -91,7 +91,7 @@ impl<'a, V> SubtreeVisitor<'a, V> { } } -impl<'a, V> Visitor for SubtreeVisitor<'a, V> +impl Visitor for SubtreeVisitor<'_, V> where V: Visitor, { diff --git a/rs/canonical_state/tree_hash/src/lazy_tree.rs b/rs/canonical_state/tree_hash/src/lazy_tree.rs index 64f6c8d8a4f..fd76d0a3cdc 100644 --- a/rs/canonical_state/tree_hash/src/lazy_tree.rs +++ b/rs/canonical_state/tree_hash/src/lazy_tree.rs @@ -25,7 +25,7 @@ pub enum Lazy<'a, T> { Func(ArcFn<'a, T>), } -impl<'a, T: Clone> Lazy<'a, T> { +impl Lazy<'_, T> { pub fn force(&self) -> T { match self { Self::Value(v) => v.clone(), diff --git a/rs/config/src/config_sample.rs b/rs/config/src/config_sample.rs index 46069274b59..147248c05c6 100644 --- a/rs/config/src/config_sample.rs +++ b/rs/config/src/config_sample.rs @@ -60,7 +60,6 @@ /// # EXAMPLE: y: "bad" /// y: "good" /// ``` - pub const SAMPLE_CONFIG: &str = r#" { // ============================================ diff --git a/rs/config/src/embedders.rs b/rs/config/src/embedders.rs index 9c6ef4db9d7..837f62f5cbb 100644 --- a/rs/config/src/embedders.rs +++ 
b/rs/config/src/embedders.rs @@ -3,7 +3,10 @@ use std::time::Duration; use ic_base_types::NumBytes; use ic_registry_subnet_type::SubnetType; use ic_sys::PAGE_SIZE; -use ic_types::{NumInstructions, NumOsPages, MAX_STABLE_MEMORY_IN_BYTES, MAX_WASM_MEMORY_IN_BYTES}; +use ic_types::{ + NumInstructions, NumOsPages, MAX_STABLE_MEMORY_IN_BYTES, MAX_WASM64_MEMORY_IN_BYTES, + MAX_WASM_MEMORY_IN_BYTES, +}; use serde::{Deserialize, Serialize}; use crate::flag_status::FlagStatus; @@ -245,6 +248,9 @@ pub struct Config { /// The maximum size of the wasm heap memory. pub max_wasm_memory_size: NumBytes, + /// The maximum size of the wasm heap memory for Wasm64 canisters. + pub max_wasm64_memory_size: NumBytes, + /// The maximum size of the stable memory. pub max_stable_memory_size: NumBytes, } @@ -284,6 +290,7 @@ impl Config { dirty_page_copy_overhead: DIRTY_PAGE_COPY_OVERHEAD, wasm_max_size: WASM_MAX_SIZE, max_wasm_memory_size: NumBytes::new(MAX_WASM_MEMORY_IN_BYTES), + max_wasm64_memory_size: NumBytes::new(MAX_WASM64_MEMORY_IN_BYTES), max_stable_memory_size: NumBytes::new(MAX_STABLE_MEMORY_IN_BYTES), wasm64_dirty_page_overhead_multiplier: WASM64_DIRTY_PAGE_OVERHEAD_MULTIPLIER, } diff --git a/rs/config/src/execution_environment.rs b/rs/config/src/execution_environment.rs index d6e47d83aa6..84636d07e8f 100644 --- a/rs/config/src/execution_environment.rs +++ b/rs/config/src/execution_environment.rs @@ -2,7 +2,8 @@ use crate::embedders::Config as EmbeddersConfig; use crate::flag_status::FlagStatus; use ic_base_types::{CanisterId, NumSeconds}; use ic_types::{ - Cycles, NumBytes, NumInstructions, MAX_STABLE_MEMORY_IN_BYTES, MAX_WASM_MEMORY_IN_BYTES, + Cycles, NumBytes, NumInstructions, MAX_STABLE_MEMORY_IN_BYTES, MAX_WASM64_MEMORY_IN_BYTES, + MAX_WASM_MEMORY_IN_BYTES, }; use serde::{Deserialize, Serialize}; use std::{str::FromStr, time::Duration}; @@ -204,7 +205,12 @@ pub struct Config { pub subnet_memory_reservation: NumBytes, /// The maximum amount of memory that can be utilized by 
a single canister. - pub max_canister_memory_size: NumBytes, + /// running in Wasm32 mode. + pub max_canister_memory_size_wasm32: NumBytes, + + /// The maximum amount of memory that can be utilized by a single canister. + /// running in Wasm64 mode. + pub max_canister_memory_size_wasm64: NumBytes, /// The soft limit on the subnet-wide number of callbacks. Beyond this limit, /// canisters are only allowed to make downstream calls up to their individual @@ -342,9 +348,12 @@ impl Default for Config { subnet_wasm_custom_sections_memory_capacity: SUBNET_WASM_CUSTOM_SECTIONS_MEMORY_CAPACITY, subnet_memory_reservation: SUBNET_MEMORY_RESERVATION, - max_canister_memory_size: NumBytes::new( + max_canister_memory_size_wasm32: NumBytes::new( MAX_STABLE_MEMORY_IN_BYTES + MAX_WASM_MEMORY_IN_BYTES, ), + max_canister_memory_size_wasm64: NumBytes::new( + MAX_STABLE_MEMORY_IN_BYTES + MAX_WASM64_MEMORY_IN_BYTES, + ), subnet_callback_soft_limit: SUBNET_CALLBACK_SOFT_LIMIT, canister_guaranteed_callback_quota: CANISTER_GUARANTEED_CALLBACK_QUOTA, default_provisional_cycles_balance: Cycles::new(100_000_000_000_000), diff --git a/rs/consensus/src/consensus/malicious_consensus.rs b/rs/consensus/src/consensus/malicious_consensus.rs index 5a9e5be9300..71e80f00c64 100644 --- a/rs/consensus/src/consensus/malicious_consensus.rs +++ b/rs/consensus/src/consensus/malicious_consensus.rs @@ -234,7 +234,6 @@ fn maliciously_notarize_all(notary: &Notary, pool: &PoolReader<'_>) -> Vec, diff --git a/rs/consensus/src/idkg/pre_signer.rs b/rs/consensus/src/idkg/pre_signer.rs index 71e910638c4..bbf5bd39f08 100644 --- a/rs/consensus/src/idkg/pre_signer.rs +++ b/rs/consensus/src/idkg/pre_signer.rs @@ -1210,7 +1210,7 @@ impl<'a> IDkgTranscriptBuilderImpl<'a> { } } -impl<'a> IDkgTranscriptBuilder for IDkgTranscriptBuilderImpl<'a> { +impl IDkgTranscriptBuilder for IDkgTranscriptBuilderImpl<'_> { fn get_completed_transcript(&self, transcript_id: IDkgTranscriptId) -> Option { timed_call( "get_completed_transcript", 
@@ -1278,7 +1278,7 @@ impl<'a> Action<'a> { } /// Needed as IDKGTranscriptParams doesn't implement Debug -impl<'a> Debug for Action<'a> { +impl Debug for Action<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match &self { Self::Process(transcript_params) => { diff --git a/rs/consensus/src/idkg/signer.rs b/rs/consensus/src/idkg/signer.rs index dce0600d594..1b9aa8ff7d4 100644 --- a/rs/consensus/src/idkg/signer.rs +++ b/rs/consensus/src/idkg/signer.rs @@ -726,7 +726,7 @@ impl<'a> ThresholdSignatureBuilderImpl<'a> { } } -impl<'a> ThresholdSignatureBuilder for ThresholdSignatureBuilderImpl<'a> { +impl ThresholdSignatureBuilder for ThresholdSignatureBuilderImpl<'_> { fn get_completed_signature( &self, callback_id: CallbackId, @@ -832,7 +832,7 @@ impl<'a> Action<'a> { } } -impl<'a> Debug for Action<'a> { +impl Debug for Action<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match &self { Self::Process(sig_inputs) => { diff --git a/rs/consensus/src/lib.rs b/rs/consensus/src/lib.rs index 4f0a9ecdc66..471a29d69e8 100755 --- a/rs/consensus/src/lib.rs +++ b/rs/consensus/src/lib.rs @@ -1,4 +1,4 @@ -#![deny(missing_docs)] +#![cfg_attr(not(test), deny(missing_docs))] //! The consensus crate provides implementations of the consensus algorithm of //! the internet computer block chain, a component responsible for executing //! 
distributed key generation using said block chain to hold the state of the diff --git a/rs/consensus/utils/src/lib.rs b/rs/consensus/utils/src/lib.rs index 99a079b1b8b..32ff4260341 100644 --- a/rs/consensus/utils/src/lib.rs +++ b/rs/consensus/utils/src/lib.rs @@ -87,7 +87,7 @@ pub fn find_lowest_ranked_non_disqualified_proposals( .filter(|proposal| !disqualified.contains(&proposal.signature.signer)) { let best_rank = best_proposals.first().map(HasRank::rank); - if !best_rank.is_some_and(|rank| rank <= proposal.rank()) { + if best_rank.is_none_or(|rank| rank > proposal.rank()) { best_proposals = vec![proposal]; } else if Some(proposal.rank()) == best_rank { best_proposals.push(proposal); diff --git a/rs/consensus/utils/src/pool_reader.rs b/rs/consensus/utils/src/pool_reader.rs index ebdf2bf8e5d..2e015943a82 100644 --- a/rs/consensus/utils/src/pool_reader.rs +++ b/rs/consensus/utils/src/pool_reader.rs @@ -64,7 +64,7 @@ impl<'a> PoolReader<'a> { /// Get the range of ancestor blocks of `block` specified (inclusively) by /// `min` and `max`. This assumes the correctness of the state of the pool. pub fn get_range( - &'a self, + &self, block: Block, min: Height, max: Height, diff --git a/rs/cross-chain/scripts/generate_blocklist.py b/rs/cross-chain/scripts/generate_blocklist.py new file mode 100644 index 00000000000..1c68a1f0c68 --- /dev/null +++ b/rs/cross-chain/scripts/generate_blocklist.py @@ -0,0 +1,157 @@ +import argparse +import xml.etree.ElementTree as ET + +# The following steps need to be carried out to update the blocklist for BTC or ETH. +# +# 1) Download the latest version of the OFAC SDN list from their website: +# https://sanctionslist.ofac.treas.gov/Home/SdnList +# +# Specifically, download the file +# +# SDN_XML.ZIP +# +# and decompress it to retrieve the file 'SDN.XML'. 
+# +# 2) Run this script as follows: +# +# python generate_blocklist.py [currency (BTC or ETH)] [path to the SDN.XML file] +# +# The command will generate the file 'blocklist.rs' containing the retrieved addresses. +# +# 3) Override the current 'blocklist.rs' file with the newly generated file. + + +# The ID type prefix for digital currencies. +DIGITAL_CURRENCY_TYPE_PREFIX = "Digital Currency Address - " + +# The blocked addresses are stored in this Rust file by default. +DEFAULT_BLOCKLIST_FILENAME = "blocklist.rs" + +# This prefix is needed for each element in the XML tree. +PREFIX = "{https://sanctionslistservice.ofac.treas.gov/api/PublicationPreview/exports/XML}" + + +# Handlers for different blocklists. +class BitcoinBlocklistHandler: + def preamble(self): + return """#[cfg(test)] + mod tests; + + use bitcoin::Address; + + /// The script to generate this file, including information about the source data, can be found here: + /// /rs/cross-chain/scripts/generate_blocklist.py + + /// BTC is not accepted from nor sent to addresses on this list. + /// NOTE: Keep it sorted! + pub const BTC_ADDRESS_BLOCKLIST: &[&str] = &[\n""" + + def postamble(self): + return """pub fn is_blocked(address: &Address) -> bool { + BTC_ADDRESS_BLOCKLIST + .binary_search(&address.to_string().as_ref()) + .is_ok() +}""" + + def format_address(self, address): + return f'"{address}"' + + def currency_symbol(self): + return "XBT" + + def sort(self, addresses): + return sorted(addresses) + + +class EthereumBlocklistHandler: + def preamble(self): + return """#[cfg(test)] + mod tests; + + use ic_ethereum_types::Address; + + macro_rules! ethereum_address { + ($address:expr) => { + Address::new(hex_literal::hex!($address)) + }; + } + + /// The script to generate this file, including information about the source data, can be found here: + /// /rs/cross-chain/scripts/generate_blocklist.py + + /// ETH is not accepted from nor sent to addresses on this list. + /// NOTE: Keep it sorted! 
+ const ETH_ADDRESS_BLOCKLIST: &[Address] = &[\n""" + + def postamble(self): + return """pub fn is_blocked(address: &Address) -> bool { + ETH_ADDRESS_BLOCKLIST.binary_search(address).is_ok() +}""" + + def format_address(self, address): + return f'ethereum_address!("{address[2:]}")' + + def currency_symbol(self): + return "ETH" + + def sort(self, addresses): + return sorted(addresses, key=lambda x: int(x[2:], 16)) + + +def extract_addresses(handler, xml_file_path): + tree = ET.parse(xml_file_path) + root = tree.getroot() + + addresses = [] + + # Iterate over all ID elements. + for id_item in root.findall(PREFIX + "sdnEntry/" + PREFIX + "idList" + "/" + PREFIX + "id"): + # Put the ID components into a dictionary for simpler handling. + id_dict = {} + for sub_item in id_item: + if sub_item.text.strip(): + id_dict[sub_item.tag] = sub_item.text + + # Read the address, if any. + if id_dict[PREFIX + "idType"] == DIGITAL_CURRENCY_TYPE_PREFIX + handler.currency_symbol(): + address = id_dict[PREFIX + "idNumber"] + addresses.append(address) + + # Remove duplicates. + addresses = list(set(addresses)) + # Sort the addresses. 
+ addresses = handler.sort(addresses) + return addresses + + +def store_blocklist(blocklist_handler, addresses, filename): + blocklist_file = open(filename, "w") + blocklist_file.write(blocklist_handler.preamble()) + for address in addresses: + blocklist_file.write(" " + blocklist_handler.format_address(address) + ",\n") + print(address) + blocklist_file.write("];\n\n") + blocklist_file.write(blocklist_handler.postamble()) + blocklist_file.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--currency", "-c", type=str, required=True, choices=["BTC", "ETH"], help="select the currency") + parser.add_argument("--input", "-i", type=str, required=True, help="read the provided SDN.XML file") + parser.add_argument( + "--output", "-o", type=str, default=DEFAULT_BLOCKLIST_FILENAME, help="write the output to the provided path" + ) + + args = parser.parse_args() + + if args.currency == "BTC": + blocklist_handler = BitcoinBlocklistHandler() + else: + blocklist_handler = EthereumBlocklistHandler() + print("Extracting addresses from " + args.input + "...") + addresses = extract_addresses(blocklist_handler, args.input) + print("Done. Found " + str(len(addresses)) + " addresses.") + print("Storing the addresses in the file " + args.output + "...") + store_blocklist(blocklist_handler, addresses, args.output) + print("Done.") diff --git a/rs/crypto/internal/crypto_lib/bls12_381/type/src/lib.rs b/rs/crypto/internal/crypto_lib/bls12_381/type/src/lib.rs index 71d7f707f29..b25fe1ba06e 100644 --- a/rs/crypto/internal/crypto_lib/bls12_381/type/src/lib.rs +++ b/rs/crypto/internal/crypto_lib/bls12_381/type/src/lib.rs @@ -852,7 +852,7 @@ macro_rules! 
define_affine_and_projective_types { const WINDOW_MASK: u8 = (1 << Self::WINDOW_BITS) - 1; // The total number of windows in a scalar - const WINDOWS : usize = (Self::SUBGROUP_BITS + Self::WINDOW_BITS - 1) / Self::WINDOW_BITS; + const WINDOWS: usize = Self::SUBGROUP_BITS.div_ceil(Self::WINDOW_BITS); // We must select from 2^WINDOW_BITS elements in each table // group. However one element of the table group is always the @@ -2337,7 +2337,7 @@ struct WindowInfo {} impl WindowInfo { const SIZE: usize = WINDOW_SIZE; - const WINDOWS: usize = (Scalar::BYTES * 8 + Self::SIZE - 1) / Self::SIZE; + const WINDOWS: usize = (Scalar::BYTES * 8).div_ceil(Self::SIZE); const MASK: u8 = 0xFFu8 >> (8 - Self::SIZE); const ELEMENTS: usize = (1 << Self::SIZE) as usize; diff --git a/rs/crypto/internal/crypto_lib/hmac/src/lib.rs b/rs/crypto/internal/crypto_lib/hmac/src/lib.rs index 4b3d4baee18..22434146dee 100644 --- a/rs/crypto/internal/crypto_lib/hmac/src/lib.rs +++ b/rs/crypto/internal/crypto_lib/hmac/src/lib.rs @@ -181,7 +181,7 @@ pub fn hkdf( // Step 2. 
HKDF-Expand(PRK, info, L) -> OKM - let blocks = (output_len + H::OUTPUT_LENGTH - 1) / H::OUTPUT_LENGTH; + let blocks = output_len.div_ceil(H::OUTPUT_LENGTH); let mut prev_t: Option> = None; diff --git a/rs/crypto/internal/crypto_lib/seed/src/xmd.rs b/rs/crypto/internal/crypto_lib/seed/src/xmd.rs index be9e275b12f..d497a576a8e 100644 --- a/rs/crypto/internal/crypto_lib/seed/src/xmd.rs +++ b/rs/crypto/internal/crypto_lib/seed/src/xmd.rs @@ -78,7 +78,7 @@ pub fn xmd(msg: &[u8], dst: &[u8], len: usize) -> XmdResult< // len ≤ 255*H::OUTPUT_BYTES â­¢ ell ≤ 255 // thus values ≤ ell can be safely cast to u8 - let ell = (len + H::OUTPUT_BYTES - 1) / H::OUTPUT_BYTES; + let ell = len.div_ceil(H::OUTPUT_BYTES); let mut out = vec![0u8; len]; diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/chunking.rs b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/chunking.rs index 469d17827db..460aa92ce3a 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/chunking.rs +++ b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/chunking.rs @@ -23,7 +23,7 @@ pub const CHUNK_MAX: Chunk = CHUNK_MIN + (CHUNK_SIZE as Chunk) - 1; pub(crate) const MESSAGE_BYTES: usize = Scalar::BYTES; /// NUM_CHUNKS is simply the number of chunks needed to hold a message -pub const NUM_CHUNKS: usize = (MESSAGE_BYTES + CHUNK_BYTES - 1) / CHUNK_BYTES; +pub const NUM_CHUNKS: usize = MESSAGE_BYTES.div_ceil(CHUNK_BYTES); #[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] pub struct PlaintextChunks { diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/dlog_recovery.rs b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/dlog_recovery.rs index 91340142309..d337ced36f1 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/dlog_recovery.rs +++ 
b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/dlog_recovery.rs @@ -274,7 +274,7 @@ impl BabyStepGiantStep { BabyStepGiantStepTable::compute_table_size(range, max_mbytes, max_table_mul); let giant_steps = if range > 0 && table_size > 0 { - (range + table_size - 1) / table_size + range.div_ceil(table_size) } else { 0 }; diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/nizk_chunking.rs b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/nizk_chunking.rs index 16651c18bea..771fe54318a 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/nizk_chunking.rs +++ b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/src/ni_dkg/fs_ni_dkg/nizk_chunking.rs @@ -24,7 +24,7 @@ const SECURITY_LEVEL: usize = 256; pub const NUM_ZK_REPETITIONS: usize = 32; /// Defined as ceil(SECURITY_LEVEL/NUM_ZK_REPETITIONS) -pub const CHALLENGE_BITS: usize = (SECURITY_LEVEL + NUM_ZK_REPETITIONS - 1) / NUM_ZK_REPETITIONS; +pub const CHALLENGE_BITS: usize = SECURITY_LEVEL.div_ceil(NUM_ZK_REPETITIONS); // The number of bytes needed to represent a challenge (which must fit in a usize) pub const CHALLENGE_BYTES: usize = (CHALLENGE_BITS + 7) / 8; diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/tests/integration_tests.rs b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/tests/integration_tests.rs index f400c9f5abc..ef5f87832da 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/tests/integration_tests.rs +++ b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/tests/integration_tests.rs @@ -190,7 +190,7 @@ fn encrypted_chunks_should_validate(epoch: Epoch) { /// Context: Most of this code converts the data used for the fs /// encryption to the form needed by the zk crypto. Suggestion: /// Put the conversion code in the library. 
- + /// /// Combine a big endian array of group elements (first chunk is the /// most significant) into a single group element. fn g1_from_big_endian_chunks(terms: &[G1Affine]) -> G1Affine { diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/fe-derive/src/lib.rs b/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/fe-derive/src/lib.rs index 07e39b01501..89aa33d1360 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/fe-derive/src/lib.rs +++ b/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/fe-derive/src/lib.rs @@ -24,7 +24,7 @@ struct FieldElementConfig { impl FieldElementConfig { fn new(ident: syn::Ident, modulus: BigUint, mut params: BTreeMap) -> Self { let limb_size = 64; // bits of u64 - let limbs = (modulus.bits() + limb_size - 1) / limb_size; + let limbs = modulus.bits().div_ceil(limb_size); let m64 = BigUint::one() << limb_size; diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group.rs b/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group.rs index cf18fc7960a..1aa0ed4ecfa 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group.rs +++ b/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group.rs @@ -1317,7 +1317,7 @@ impl WindowInfo { /// Returns the number of windows if scalar_bits bits are used #[inline(always)] pub(crate) const fn number_of_windows_for_bits(scalar_bits: usize) -> usize { - (scalar_bits + WINDOW_SIZE - 1) / WINDOW_SIZE + scalar_bits.div_ceil(WINDOW_SIZE) } /// Extract a window from a serialized scalar value @@ -1336,7 +1336,7 @@ impl WindowInfo { const BITS_IN_BYTE: usize = 8; let scalar_bytes = scalar.len(); - let windows = (scalar_bytes * 8 + WINDOW_SIZE - 1) / WINDOW_SIZE; + let windows = (scalar_bytes * 8).div_ceil(WINDOW_SIZE); // to compute the correct bit offset for bit lengths that are not a power of 2, // 
we need to start from the inverted value or otherwise we will have multiple options diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group/algos.rs b/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group/algos.rs index 350647e9ee9..cb455812138 100644 --- a/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group/algos.rs +++ b/rs/crypto/internal/crypto_lib/threshold_sig/canister_threshold_sig/src/utils/group/algos.rs @@ -124,7 +124,7 @@ macro_rules! declare_mul2_table_impl { // The number of windows (of WINDOW_BITS size) required to examine every // bit of a scalar of this curve. - const WINDOWS: usize = (<$scalar>::BITS + Self::WINDOW_BITS - 1) / Self::WINDOW_BITS; + const WINDOWS: usize = <$scalar>::BITS.div_ceil(Self::WINDOW_BITS); pub fn for_standard_generators() -> Self { let g = <$projective>::generator(); diff --git a/rs/crypto/internal/crypto_lib/types/src/encrypt/forward_secure.rs b/rs/crypto/internal/crypto_lib/types/src/encrypt/forward_secure.rs index 88926195935..c43704911d9 100644 --- a/rs/crypto/internal/crypto_lib/types/src/encrypt/forward_secure.rs +++ b/rs/crypto/internal/crypto_lib/types/src/encrypt/forward_secure.rs @@ -171,7 +171,7 @@ pub mod groth20_bls12_381 { } pub const CHUNK_BYTES: usize = 2; - pub const NUM_CHUNKS: usize = (FrBytes::SIZE + CHUNK_BYTES - 1) / CHUNK_BYTES; + pub const NUM_CHUNKS: usize = FrBytes::SIZE.div_ceil(CHUNK_BYTES); // Note: the spec currently has: Vec<(r,s,z)>; this could be represented more // strongly as [(G1,G1,G2);NUM_CHUNKS], which is equivalent to the below. 
diff --git a/rs/crypto/internal/crypto_service_provider/src/vault/api.rs b/rs/crypto/internal/crypto_service_provider/src/vault/api.rs index 3b82edf4386..93727e5bc21 100644 --- a/rs/crypto/internal/crypto_service_provider/src/vault/api.rs +++ b/rs/crypto/internal/crypto_service_provider/src/vault/api.rs @@ -187,31 +187,31 @@ impl NodeKeysErrors { } pub fn keys_in_registry_missing_locally(&self) -> bool { - self.node_signing_key_error.as_ref().map_or(false, |err| { + self.node_signing_key_error.as_ref().is_some_and(|err| { err.external_public_key_error.is_none() && err.contains_local_public_or_secret_key_error() }) || self .committee_signing_key_error .as_ref() - .map_or(false, |err| { + .is_some_and(|err| { err.external_public_key_error.is_none() && err.contains_local_public_or_secret_key_error() }) - || self.tls_certificate_error.as_ref().map_or(false, |err| { + || self.tls_certificate_error.as_ref().is_some_and(|err| { err.external_public_key_error.is_none() && err.contains_local_public_or_secret_key_error() }) || self .dkg_dealing_encryption_key_error .as_ref() - .map_or(false, |err| { + .is_some_and(|err| { err.external_public_key_error.is_none() && err.contains_local_public_or_secret_key_error() }) || self .idkg_dealing_encryption_key_error .as_ref() - .map_or(false, |err| { + .is_some_and(|err| { err.external_public_key_error.is_none() && err.contains_local_public_or_secret_key_error() }) diff --git a/rs/crypto/internal/crypto_service_provider/src/vault/local_csp_vault/mod.rs b/rs/crypto/internal/crypto_service_provider/src/vault/local_csp_vault/mod.rs index 4f60844b5db..32a6cc4627d 100644 --- a/rs/crypto/internal/crypto_service_provider/src/vault/local_csp_vault/mod.rs +++ b/rs/crypto/internal/crypto_service_provider/src/vault/local_csp_vault/mod.rs @@ -73,7 +73,6 @@ use std::sync::Arc; /// only during node key generation and rotation). 
/// /// [1]: https://medium.com/digitalfrontiers/rust-dynamic-dispatching-deep-dive-236a5896e49b - pub struct LocalCspVault< R: Rng + CryptoRng, S: SecretKeyStore, diff --git a/rs/crypto/secrets_containers/src/secret_array.rs b/rs/crypto/secrets_containers/src/secret_array.rs index 1a2f84721d7..43f2494b3a7 100644 --- a/rs/crypto/secrets_containers/src/secret_array.rs +++ b/rs/crypto/secrets_containers/src/secret_array.rs @@ -56,7 +56,7 @@ impl serde::Serialize for SecretArray { struct SecretArrayVisitor; -impl<'de, const N: usize> serde::de::Visitor<'de> for SecretArrayVisitor { +impl serde::de::Visitor<'_> for SecretArrayVisitor { type Value = SecretArray; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/rs/crypto/secrets_containers/src/secret_bytes.rs b/rs/crypto/secrets_containers/src/secret_bytes.rs index 4baed52781f..954f5588e93 100644 --- a/rs/crypto/secrets_containers/src/secret_bytes.rs +++ b/rs/crypto/secrets_containers/src/secret_bytes.rs @@ -49,7 +49,7 @@ impl<'a> serde::Deserialize<'a> for SecretBytes { fn deserialize>(deserializer: D) -> Result { struct Visitor; - impl<'de> serde::de::Visitor<'de> for Visitor { + impl serde::de::Visitor<'_> for Visitor { type Value = SecretBytes; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/rs/crypto/test_utils/canister_threshold_sigs/src/lib.rs b/rs/crypto/test_utils/canister_threshold_sigs/src/lib.rs index 1a6fc5f8000..385db085960 100644 --- a/rs/crypto/test_utils/canister_threshold_sigs/src/lib.rs +++ b/rs/crypto/test_utils/canister_threshold_sigs/src/lib.rs @@ -821,7 +821,7 @@ pub mod node { pub fn filter_by_receivers<'a, T: AsRef + 'a>( &'a self, idkg_receivers: T, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { self.iter() .filter(move |node| idkg_receivers.as_ref().contains(node.id)) } @@ -829,7 +829,7 @@ pub mod node { pub fn filter_by_dealers<'a, T: AsRef + 'a>( &'a self, idkg_dealers: T, - ) -> impl 
Iterator + 'a { + ) -> impl Iterator + 'a { self.iter() .filter(move |node| idkg_dealers.as_ref().contains(node.id)) } @@ -838,7 +838,7 @@ pub mod node { &'a self, minimum_size: usize, rng: &'a mut R, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { assert!( minimum_size <= self.len(), "Requested a random subset with at least {} elements but there are only {} elements", @@ -853,7 +853,7 @@ pub mod node { &'a self, size: usize, rng: &'a mut R, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { assert!( size <= self.len(), "Requested a random subset with {} elements but there are only {} elements", @@ -867,7 +867,7 @@ pub mod node { &'a self, idkg_receivers: T, rng: &mut R, - ) -> &Node { + ) -> &'a Node { self.filter_by_receivers(idkg_receivers) .choose(rng) .expect("empty receivers") @@ -878,7 +878,7 @@ pub mod node { exclusion: &Node, idkg_receivers: T, rng: &mut R, - ) -> &Node { + ) -> &'a Node { self.filter_by_receivers(idkg_receivers) .filter(|node| *node != exclusion) .choose(rng) @@ -889,7 +889,7 @@ pub mod node { &'a self, params: &'a IDkgTranscriptParams, rng: &mut R, - ) -> &Node { + ) -> &'a Node { self.filter_by_dealers(params) .choose(rng) .expect("empty dealers") diff --git a/rs/crypto/test_utils/tls/src/test_client.rs b/rs/crypto/test_utils/tls/src/test_client.rs index 281ab26616a..755458f0a4f 100644 --- a/rs/crypto/test_utils/tls/src/test_client.rs +++ b/rs/crypto/test_utils/tls/src/test_client.rs @@ -119,7 +119,7 @@ impl Client { let msg_for_server_with_eol = format!("{}\n", msg_for_server); #[allow(clippy::disallowed_methods)] let num_bytes_written = wr.write(msg_for_server_with_eol.as_bytes()).await.unwrap(); - assert_eq!(num_bytes_written, msg_for_server_with_eol.as_bytes().len()); + assert_eq!(num_bytes_written, msg_for_server_with_eol.len()); const ACK: u8 = 0x06; let reply = rd.read_u8().await.unwrap(); diff --git a/rs/crypto/test_utils/tls/src/test_server.rs b/rs/crypto/test_utils/tls/src/test_server.rs index 
6e3ec7c98a3..1d730712b90 100644 --- a/rs/crypto/test_utils/tls/src/test_server.rs +++ b/rs/crypto/test_utils/tls/src/test_server.rs @@ -180,7 +180,7 @@ impl Server { let msg_with_eol = format!("{}\n", msg_for_client); #[allow(clippy::disallowed_methods)] let num_bytes_written = wr.write(msg_with_eol.as_bytes()).await.unwrap(); - assert_eq!(num_bytes_written, msg_with_eol.as_bytes().len()); + assert_eq!(num_bytes_written, msg_with_eol.len()); const ACK: u8 = 0x06; let reply = rd.read_u8().await.unwrap(); diff --git a/rs/crypto/tree_hash/src/lib.rs b/rs/crypto/tree_hash/src/lib.rs index e1753aeaf55..65ed25aee22 100644 --- a/rs/crypto/tree_hash/src/lib.rs +++ b/rs/crypto/tree_hash/src/lib.rs @@ -491,7 +491,7 @@ impl MixedHashTree { Collect(&'a MixedHashTree), } - impl<'a> StackItem<'a> { + impl StackItem<'_> { fn to_collect(&self) -> Self { match self { Self::Expand(t) => Self::Collect(t), diff --git a/rs/drun/src/main.rs b/rs/drun/src/main.rs index 7d2af5662f3..1c3b70b37f1 100644 --- a/rs/drun/src/main.rs +++ b/rs/drun/src/main.rs @@ -67,9 +67,9 @@ async fn drun_main() -> Result<(), String> { .embedders_config .feature_flags .best_effort_responses = FlagStatus::Enabled; - hypervisor_config.embedders_config.max_wasm_memory_size = MAIN_MEMORY_CAPACITY; - hypervisor_config.max_canister_memory_size = - hypervisor_config.embedders_config.max_wasm_memory_size + hypervisor_config.embedders_config.max_wasm64_memory_size = MAIN_MEMORY_CAPACITY; + hypervisor_config.max_canister_memory_size_wasm64 = + hypervisor_config.embedders_config.max_wasm64_memory_size + hypervisor_config.embedders_config.max_stable_memory_size; let cfg = Config::load_with_default(&source, default_config).unwrap_or_else(|err| { diff --git a/rs/embedders/benches/embedders_bench/src/lib.rs b/rs/embedders/benches/embedders_bench/src/lib.rs index 977e5b79b46..a162ff9e9ad 100644 --- a/rs/embedders/benches/embedders_bench/src/lib.rs +++ b/rs/embedders/benches/embedders_bench/src/lib.rs @@ -57,7 +57,7 @@ fn 
initialize_execution_test( if is_wasm64 { test = test.with_wasm64(); // Set memory size to 8 GiB for Wasm64. - test = test.with_max_wasm_memory_size(NumBytes::from(8 * 1024 * 1024 * 1024)); + test = test.with_max_wasm64_memory_size(NumBytes::from(8 * 1024 * 1024 * 1024)); } let mut test = test.build(); diff --git a/rs/embedders/fuzz/src/ic_wasm.rs b/rs/embedders/fuzz/src/ic_wasm.rs index 05ef46a01c9..0dfd1ad9c62 100644 --- a/rs/embedders/fuzz/src/ic_wasm.rs +++ b/rs/embedders/fuzz/src/ic_wasm.rs @@ -142,9 +142,8 @@ pub fn ic_wasm_config(embedder_config: EmbeddersConfig) -> Config { max_funcs: embedder_config.max_functions, max_instructions: WASM_FUNCTION_SIZE_LIMIT, - // TODO: Ignore data segments for now - min_data_segments: 0, - max_data_segments: 0, + min_data_segments: 2, + max_data_segments: 10, allow_start_export: true, export_everything: true, diff --git a/rs/embedders/src/wasm_utils.rs b/rs/embedders/src/wasm_utils.rs index 771a5d328f3..c1139c10629 100644 --- a/rs/embedders/src/wasm_utils.rs +++ b/rs/embedders/src/wasm_utils.rs @@ -14,7 +14,9 @@ use ic_types::{methods::WasmMethod, NumInstructions}; use ic_wasm_types::{BinaryEncodedWasm, WasmInstrumentationError}; use serde::{Deserialize, Serialize}; -use self::{instrumentation::instrument, validation::validate_wasm_binary}; +use self::{ + instrumentation::instrument, validation::has_wasm64_memory, validation::validate_wasm_binary, +}; use crate::wasmtime_embedder::StoreData; use crate::{serialized_module::SerializedModule, CompilationResult, WasmtimeEmbedder}; use wasmtime::InstancePre; @@ -204,6 +206,13 @@ fn validate_and_instrument( config: &EmbeddersConfig, ) -> HypervisorResult<(WasmValidationDetails, InstrumentationOutput)> { let (wasm_validation_details, module) = validate_wasm_binary(wasm, config)?; + // Instrumentation bytemap depends on the Wasm memory size, so for larger heaps we need + // to pass in the corresponding Wasm64 heap memory size. 
+ let max_wasm_memory_size = if has_wasm64_memory(&module) { + config.max_wasm64_memory_size + } else { + config.max_wasm_memory_size + }; let instrumentation_output = instrument( module, config.cost_to_compile_wasm_instruction, @@ -212,7 +221,7 @@ fn validate_and_instrument( config.metering_type, config.subnet_type, config.dirty_page_overhead, - config.max_wasm_memory_size, + max_wasm_memory_size, config.max_stable_memory_size, )?; Ok((wasm_validation_details, instrumentation_output)) @@ -243,9 +252,7 @@ fn compile_inner( let is_wasm64 = module .get_export(crate::wasmtime_embedder::WASM_HEAP_MEMORY_NAME) - .map_or(false, |export| { - export.memory().map_or(false, |mem| mem.is_64()) - }); + .is_some_and(|export| export.memory().is_some_and(|mem| mem.is_64())); let serialized_module = SerializedModule::new( &module, diff --git a/rs/embedders/src/wasm_utils/validation.rs b/rs/embedders/src/wasm_utils/validation.rs index 9b8cd727a71..2193a407235 100644 --- a/rs/embedders/src/wasm_utils/validation.rs +++ b/rs/embedders/src/wasm_utils/validation.rs @@ -1068,6 +1068,11 @@ fn validate_function_section( Ok(()) } +// Checks if the module has a Wasm64 memory. +pub fn has_wasm64_memory(module: &Module) -> bool { + module.memories.first().is_some_and(|m| m.memory64) +} + // Checks that the initial size of the wasm (heap) memory is not larger than // the allowed maximum size. This is only needed for Wasm64, because in Wasm32 this // is checked by Wasmtime. @@ -1552,7 +1557,14 @@ pub(super) fn validate_wasm_binary<'a>( validate_data_section(&module)?; validate_global_section(&module, config.max_globals)?; validate_function_section(&module, config.max_functions)?; - validate_initial_wasm_memory_size(&module, config.max_wasm_memory_size)?; + // The maximum Wasm memory size is different for Wasm32 and Wasm64 and + // each needs to be validated accordingly. 
+ let max_wasm_memory_size = if has_wasm64_memory(&module) { + config.max_wasm64_memory_size + } else { + config.max_wasm_memory_size + }; + validate_initial_wasm_memory_size(&module, max_wasm_memory_size)?; let (largest_function_instruction_count, max_complexity) = validate_code_section(&module)?; let wasm_metadata = validate_custom_section(&module, config)?; Ok(( diff --git a/rs/embedders/src/wasmtime_embedder/system_api_complexity.rs b/rs/embedders/src/wasmtime_embedder/system_api_complexity.rs index 4dc8a107589..304520fd6b8 100644 --- a/rs/embedders/src/wasmtime_embedder/system_api_complexity.rs +++ b/rs/embedders/src/wasmtime_embedder/system_api_complexity.rs @@ -1,7 +1,6 @@ /// /// System API Calls Complexity Module /// - /// The Fixed System API Overhead (in Instructions) /// /// The cost of the System API calls is proportional to the work the call performs. diff --git a/rs/embedders/tests/validation.rs b/rs/embedders/tests/validation.rs index f6a840dc21b..9ac68287ef3 100644 --- a/rs/embedders/tests/validation.rs +++ b/rs/embedders/tests/validation.rs @@ -1178,7 +1178,7 @@ fn test_wasm64_initial_wasm_memory_size_validation() { ..Default::default() }; let allowed_wasm_memory_size_in_pages = - embedders_config.max_wasm_memory_size.get() / WASM_PAGE_SIZE as u64; + embedders_config.max_wasm64_memory_size.get() / WASM_PAGE_SIZE as u64; let declared_wasm_memory_size_in_pages = allowed_wasm_memory_size_in_pages + 10; let wasm = wat2wasm(&format!( r#"(module diff --git a/rs/embedders/tests/wasmtime_embedder.rs b/rs/embedders/tests/wasmtime_embedder.rs index b5076020bb4..728e09210d2 100644 --- a/rs/embedders/tests/wasmtime_embedder.rs +++ b/rs/embedders/tests/wasmtime_embedder.rs @@ -3032,7 +3032,7 @@ fn large_wasm64_stable_read_write_test() { config.feature_flags.wasm64 = FlagStatus::Enabled; config.feature_flags.wasm_native_stable_memory = FlagStatus::Enabled; // Declare a large heap. 
- config.max_wasm_memory_size = NumBytes::from(10 * gb); + config.max_wasm64_memory_size = NumBytes::from(10 * gb); let mut instance = WasmtimeInstanceBuilder::new() .with_config(config) diff --git a/rs/ethereum/ledger-suite-orchestrator/test_utils/BUILD.bazel b/rs/ethereum/ledger-suite-orchestrator/test_utils/BUILD.bazel index 8c76947951a..bfc24172d8a 100644 --- a/rs/ethereum/ledger-suite-orchestrator/test_utils/BUILD.bazel +++ b/rs/ethereum/ledger-suite-orchestrator/test_utils/BUILD.bazel @@ -16,6 +16,7 @@ rust_library( version = "0.1.0", deps = [ # Keep sorted. + "//packages/ic-metrics-assert", "//packages/icrc-ledger-types:icrc_ledger_types", "//rs/ethereum/ledger-suite-orchestrator:ledger_suite_orchestrator", "//rs/ledger_suite/icrc1/ledger", diff --git a/rs/ethereum/ledger-suite-orchestrator/test_utils/Cargo.toml b/rs/ethereum/ledger-suite-orchestrator/test_utils/Cargo.toml index c8001587498..71d5c31a33f 100644 --- a/rs/ethereum/ledger-suite-orchestrator/test_utils/Cargo.toml +++ b/rs/ethereum/ledger-suite-orchestrator/test_utils/Cargo.toml @@ -15,6 +15,7 @@ ic-cdk = { workspace = true } ic-icrc1-ledger = { path = "../../../ledger_suite/icrc1/ledger" } ic-ledger-suite-orchestrator = { path = "../../ledger-suite-orchestrator" } ic-management-canister-types = { path = "../../../types/management_canister_types" } +ic-metrics-assert = { path = "../../../../packages/ic-metrics-assert" } ic-state-machine-tests = { path = "../../../state_machine_tests" } ic-test-utilities-load-wasm = { path = "../../../test_utilities/load_wasm" } ic-types = { path = "../../../types/types" } diff --git a/rs/ethereum/ledger-suite-orchestrator/test_utils/src/flow.rs b/rs/ethereum/ledger-suite-orchestrator/test_utils/src/flow.rs index 380f74e654e..35fd139dd72 100644 --- a/rs/ethereum/ledger-suite-orchestrator/test_utils/src/flow.rs +++ b/rs/ethereum/ledger-suite-orchestrator/test_utils/src/flow.rs @@ -1,4 +1,3 @@ -use crate::metrics::MetricsAssert; use 
crate::universal_canister::UniversalCanister; use crate::{ assert_reply, ledger_wasm, out_of_band_upgrade, stop_canister, LedgerAccount, @@ -13,7 +12,8 @@ use ic_management_canister_types::{ CanisterInfoResponse, CanisterInstallMode, CanisterStatusResultV2, InstallCodeArgs, Method, Payload, }; -use ic_state_machine_tests::StateMachine; +use ic_metrics_assert::{CanisterHttpQuery, MetricsAssert}; +use ic_state_machine_tests::{StateMachine, UserError}; use icrc_ledger_types::icrc1::transfer::{TransferArg, TransferError}; use icrc_ledger_types::icrc3::archive::ArchiveInfo; use icrc_ledger_types::icrc3::blocks::{GetBlocksRequest, GetBlocksResult}; @@ -89,8 +89,7 @@ impl ManagedCanistersAssert { } pub fn check_metrics(self) -> MetricsAssert { - let canister_id = self.setup.ledger_suite_orchestrator_id; - MetricsAssert::from_querying_metrics(self, canister_id) + MetricsAssert::from_http_query(self) } pub fn trigger_creation_of_archive(self) -> Self { @@ -426,6 +425,12 @@ impl ManagedCanistersAssert { } } +impl CanisterHttpQuery for ManagedCanistersAssert { + fn http_query(&self, request: Vec) -> Result, UserError> { + self.setup.http_query(request) + } +} + macro_rules! assert_ledger { ($name:expr, $ty:ty) => { paste::paste! 
{ diff --git a/rs/ethereum/ledger-suite-orchestrator/test_utils/src/lib.rs b/rs/ethereum/ledger-suite-orchestrator/test_utils/src/lib.rs index 2a0a120700e..20f52d86b97 100644 --- a/rs/ethereum/ledger-suite-orchestrator/test_utils/src/lib.rs +++ b/rs/ethereum/ledger-suite-orchestrator/test_utils/src/lib.rs @@ -1,5 +1,4 @@ use crate::flow::{AddErc20TokenFlow, ManagedCanistersAssert}; -use crate::metrics::MetricsAssert; use assert_matches::assert_matches; use candid::{Decode, Encode, Nat, Principal}; use ic_base_types::{CanisterId, PrincipalId}; @@ -15,6 +14,7 @@ use ic_management_canister_types::{ CanisterInstallMode, CanisterStatusResultV2, CanisterStatusType, InstallCodeArgs, Method, Payload, }; +use ic_metrics_assert::{CanisterHttpQuery, MetricsAssert}; use ic_state_machine_tests::{StateMachine, StateMachineBuilder, UserError, WasmResult}; use ic_test_utilities_load_wasm::load_wasm; use ic_types::Cycles; @@ -23,7 +23,6 @@ pub use icrc_ledger_types::icrc1::account::Account as LedgerAccount; use std::sync::Arc; pub mod flow; -pub mod metrics; pub mod universal_canister; const MAX_TICKS: usize = 10; @@ -305,9 +304,8 @@ impl LedgerSuiteOrchestrator { .unwrap() } - pub fn check_metrics(self) -> MetricsAssert { - let canister_id = self.ledger_suite_orchestrator_id; - MetricsAssert::from_querying_metrics(self, canister_id) + pub fn check_metrics(self) -> MetricsAssert { + MetricsAssert::from_http_query(self) } pub fn wait_for(&self, f: F) -> T @@ -349,6 +347,14 @@ impl LedgerSuiteOrchestrator { } } +impl CanisterHttpQuery for LedgerSuiteOrchestrator { + fn http_query(&self, request: Vec) -> Result, UserError> { + self.as_ref() + .query(self.ledger_suite_orchestrator_id, "http_request", request) + .map(assert_reply) + } +} + pub fn default_init_arg() -> InitArg { InitArg { more_controller_ids: vec![NNS_ROOT_PRINCIPAL], diff --git a/rs/ethereum/ledger-suite-orchestrator/test_utils/src/metrics.rs b/rs/ethereum/ledger-suite-orchestrator/test_utils/src/metrics.rs deleted file 
mode 100644 index 9fbc9c5ab02..00000000000 --- a/rs/ethereum/ledger-suite-orchestrator/test_utils/src/metrics.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::assert_reply; -use candid::{Decode, Encode}; -use ic_base_types::CanisterId; -use ic_state_machine_tests::StateMachine; - -pub struct MetricsAssert { - setup: T, - metrics: Vec, -} - -impl> MetricsAssert { - pub fn from_querying_metrics(setup: T, canister_id: CanisterId) -> Self { - use ic_canisters_http_types::{HttpRequest, HttpResponse}; - let request = HttpRequest { - method: "GET".to_string(), - url: "/metrics".to_string(), - headers: Default::default(), - body: Default::default(), - }; - let response = Decode!( - &assert_reply( - setup - .as_ref() - .query( - canister_id, - "http_request", - Encode!(&request).expect("failed to encode HTTP request"), - ) - .expect("failed to get metrics") - ), - HttpResponse - ) - .unwrap(); - assert_eq!(response.status_code, 200_u16); - let metrics = String::from_utf8_lossy(response.body.as_slice()) - .trim() - .split('\n') - .map(|line| line.to_string()) - .collect::>(); - Self { setup, metrics } - } - - pub fn assert_contains_metric(self, metric: &str) -> T { - assert!( - self.metrics.iter().any(|line| line.contains(metric)), - "Searched metric not found: {} in:\n{:?}", - metric, - self.metrics - ); - self.setup - } -} diff --git a/rs/ethereum/ledger-suite-orchestrator/tests/tests.rs b/rs/ethereum/ledger-suite-orchestrator/tests/tests.rs index 4e1998c595a..dbd77bdfbf4 100644 --- a/rs/ethereum/ledger-suite-orchestrator/tests/tests.rs +++ b/rs/ethereum/ledger-suite-orchestrator/tests/tests.rs @@ -138,7 +138,8 @@ fn should_discover_new_archive_and_top_up() { .expect_new_ledger_and_index_canisters() .assert_ledger_has_cycles(200_000_000_000_000_u128) .check_metrics() - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 0") + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 0") + .into() .trigger_creation_of_archive() 
.assert_ledger_has_cycles(100_000_000_000_000_u128) .assert_all_archives_have_cycles(100_000_000_000_000_u128); @@ -149,14 +150,15 @@ fn should_discover_new_archive_and_top_up() { let managed_canisters = managed_canisters .assert_all_archives_have_cycles(100_000_000_000_000_u128) .check_metrics() - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 1"); + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 1") + .into(); managed_canisters.setup.advance_time_for_periodic_tasks(); managed_canisters .assert_all_archives_have_cycles(110_000_000_000_000_u128) .check_metrics() - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 1"); + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 1"); } #[test] @@ -990,7 +992,8 @@ mod upgrade { .assert_ledger_has_wasm_hash(embedded_ledger_wasm_hash.clone()) .assert_index_has_wasm_hash(embedded_index_wasm_hash.clone()) .check_metrics() - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 0"); + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 0") + .into(); // Run task DiscoverArchives pre-emptively to ensure it's not run during upgrade // so that we can test the case where the orchestrator doesn't know about the archive @@ -998,11 +1001,13 @@ mod upgrade { let managed_canisters = managed_canisters .check_metrics() - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 0") + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 0") + .into() .trigger_creation_of_archive() .check_metrics() // the orchestrator is not yet aware of the archive - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 0"); + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 0") + .into(); let orchestrator = managed_canisters.setup.upgrade_ledger_suite_orchestrator( ledger_suite_orchestrator_wasm(), @@ -1026,7 +1031,8 @@ mod upgrade { }) 
.check_metrics() // the orchestrator is not yet aware of the archive - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 0") + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 0") + .into() .setup; orchestrator.env.tick(); @@ -1038,7 +1044,7 @@ mod upgrade { has_been_upgraded_to(t, &embedded_archive_wasm_hash) }) .check_metrics() - .assert_contains_metric("ledger_suite_orchestrator_managed_archives 1"); + .assert_contains_metric_matching("ledger_suite_orchestrator_managed_archives 1"); } #[test] diff --git a/rs/execution_environment/benches/lib/src/common.rs b/rs/execution_environment/benches/lib/src/common.rs index fb128014220..d5140c35530 100644 --- a/rs/execution_environment/benches/lib/src/common.rs +++ b/rs/execution_environment/benches/lib/src/common.rs @@ -281,7 +281,7 @@ where }; // Set up larger heap, of 8GB for the Wasm64 feature. - embedders_config.max_wasm_memory_size = NumBytes::from(8 * 1024 * 1024 * 1024); + embedders_config.max_wasm64_memory_size = NumBytes::from(8 * 1024 * 1024 * 1024); let config = Config { embedders_config, diff --git a/rs/execution_environment/benches/lib/src/wat_builder.rs b/rs/execution_environment/benches/lib/src/wat_builder.rs index fcbdb0f6b20..d75e95fd512 100644 --- a/rs/execution_environment/benches/lib/src/wat_builder.rs +++ b/rs/execution_environment/benches/lib/src/wat_builder.rs @@ -1,6 +1,4 @@ -/// -/// The new WAT builder. -// +// The new WAT builder. /// Default number of loop iterations. pub const DEFAULT_LOOP_ITERATIONS: usize = 1_000; @@ -23,7 +21,6 @@ use crate::common::Wasm64; //////////////////////////////////////////////////////////////////////// /// WAT Block Builder - /// Represent a block of WAT code with corresponding imports and local variables. 
#[derive(Default)] pub struct Block { @@ -161,7 +158,6 @@ impl Block { //////////////////////////////////////////////////////////////////////// /// WAT Function Builder - /// Represent a WAT function with corresponding imports. #[derive(Default)] pub struct Func { @@ -188,7 +184,6 @@ impl Func { //////////////////////////////////////////////////////////////////////// /// Helper functions - /// Return a new block prepended and appended with the specified lines. fn wrap_lines(prefix: &str, lines: Vec, suffix: &str) -> Vec { vec![prefix.into()] diff --git a/rs/execution_environment/fuzz/BUILD.bazel b/rs/execution_environment/fuzz/BUILD.bazel index 7eaa93ed51a..05aa659ddf6 100644 --- a/rs/execution_environment/fuzz/BUILD.bazel +++ b/rs/execution_environment/fuzz/BUILD.bazel @@ -22,7 +22,14 @@ rust_library( deps = [ "//rs/canister_sandbox:backend_lib", "@crate_index//:libfuzzer-sys", - ], + "@crate_index//:nix", + ] + select({ + "@rules_rust//rust/platform:linux": [ + "@crate_index//:procfs", + "@crate_index//:syscalls", + ], + "//conditions:default": [], + }), ) rust_fuzz_test_binary( diff --git a/rs/execution_environment/fuzz/fuzz_targets/execute_subnet_message_update_settings.rs b/rs/execution_environment/fuzz/fuzz_targets/execute_subnet_message_update_settings.rs index 7a52c65a0b9..0756f63e334 100644 --- a/rs/execution_environment/fuzz/fuzz_targets/execute_subnet_message_update_settings.rs +++ b/rs/execution_environment/fuzz/fuzz_targets/execute_subnet_message_update_settings.rs @@ -8,7 +8,10 @@ use libfuzzer_sys::{fuzz_target, Corpus}; // bazel run --config=sandbox_fuzzing //rs/execution_environment/fuzz:execute_subnet_message_update_settings fn main() { - fuzzer_sandbox::fuzzer_main(); + let features = fuzzer_sandbox::SandboxFeatures { + syscall_tracing: false, + }; + fuzzer_sandbox::fuzzer_main(features); } fuzz_target!(|args: UpdateSettingsArgs| -> Corpus { diff --git a/rs/execution_environment/fuzz/fuzz_targets/execute_system_api_call.rs 
b/rs/execution_environment/fuzz/fuzz_targets/execute_system_api_call.rs index 7b3e5f1414d..7c07d5e2e6f 100644 --- a/rs/execution_environment/fuzz/fuzz_targets/execute_system_api_call.rs +++ b/rs/execution_environment/fuzz/fuzz_targets/execute_system_api_call.rs @@ -23,7 +23,10 @@ const HELLO_WORLD_WAT: &str = r#" // bazel run --config=sandbox_fuzzing //rs/execution_environment/fuzz:execute_with_wasm_executor_system_api_call fn main() { - fuzzer_sandbox::fuzzer_main(); + let features = fuzzer_sandbox::SandboxFeatures { + syscall_tracing: true, + }; + fuzzer_sandbox::fuzzer_main(features); } fuzz_target!(|data: ICWasmModule| { diff --git a/rs/execution_environment/fuzz/src/lib.rs b/rs/execution_environment/fuzz/src/lib.rs index 98048e02f70..9c5cd1bcb3c 100644 --- a/rs/execution_environment/fuzz/src/lib.rs +++ b/rs/execution_environment/fuzz/src/lib.rs @@ -7,6 +7,17 @@ use libfuzzer_sys::test_input_wrap; use std::ffi::CString; use std::os::raw::c_char; +#[cfg(target_os = "linux")] +use { + nix::{ + sys::ptrace, sys::ptrace::Options, sys::wait::waitpid, sys::wait::WaitStatus, unistd::fork, + unistd::ForkResult, unistd::Pid, + }, + procfs::process::Process, + std::collections::BTreeSet, + syscalls::Sysno, +}; + #[allow(improper_ctypes)] extern "C" { fn LLVMFuzzerRunDriver( @@ -17,6 +28,11 @@ extern "C" { } +#[derive(Debug, Default)] +pub struct SandboxFeatures { + pub syscall_tracing: bool, +} + // In general, fuzzers don't include `main()` and the initialisation logic is deferred to libfuzzer. // However, to enable canister sandboxing, we override the initialisation by providing our own `main()` // which acts as a dispatcher for different sandboxed under certain arguments. @@ -29,9 +45,13 @@ extern "C" { // `rust_fuzzer_test_input`, which is generated via the macro `fuzz_target!`. 
// See https://github.com/rust-fuzz/libfuzzer/blob/c8275d1517933765b56a6de61a371bb1cc4268cb/src/lib.rs#L62 -pub fn fuzzer_main() { +pub fn fuzzer_main(features: SandboxFeatures) { if std::env::args().any(|arg| arg == RUN_AS_CANISTER_SANDBOX_FLAG) { #[cfg(not(fuzzing))] + if features.syscall_tracing { + syscall_monitor("canister_sandbox_main", canister_sandbox_main); + return; + } canister_sandbox_main(); } else if std::env::args().any(|arg| arg == RUN_AS_SANDBOX_LAUNCHER_FLAG) { #[cfg(not(fuzzing))] @@ -59,3 +79,142 @@ pub fn fuzzer_main() { } } } + +#[cfg(target_os = "linux")] +fn syscall_monitor(name: &str, sandbox: F) +where + F: Fn(), +{ + match unsafe { fork() } { + Ok(ForkResult::Child) => { + sandbox(); + } + Ok(ForkResult::Parent { child }) => { + std::thread::sleep(std::time::Duration::from_secs(1)); + let allowed_syscalls: BTreeSet = BTreeSet::from([ + Sysno::mmap, + Sysno::mprotect, + Sysno::munmap, + Sysno::madvise, + Sysno::sendmsg, + Sysno::sigaltstack, + Sysno::futex, + Sysno::close, + Sysno::restart_syscall, + ]); + let children = get_children(child.into()); + let threads: Vec<_> = children + .iter() + .map(|child| { + std::thread::spawn({ + let allowed_syscalls = allowed_syscalls.clone(); + let child = *child; + let name = name.to_string(); + move || { + trace(name, Pid::from_raw(child), allowed_syscalls); + } + }) + }) + .collect(); + + for handle in threads { + handle.join().unwrap(); + } + } + Err(err) => { + panic!("{} fork() failed: {}", name, err); + } + } +} + +#[cfg(not(target_os = "linux"))] +fn syscall_monitor(_name: &str, sandbox: F) +where + F: Fn(), +{ + sandbox(); +} + +#[cfg(target_os = "linux")] +fn trace(name: String, child: Pid, allowed_syscalls: BTreeSet) { + if let Err(err) = ptrace::attach(child) { + println!( + "ptrace: failed to attach process {}::{}: {}", + name, child, err + ); + return; + } + + let mut is_syscall_entry = true; + while let Ok(result) = waitpid(child, None) { + match result { + WaitStatus::Stopped(..) 
=> { + if let Err(err) = ptrace::setoptions(child, Options::all()) { + panic!( + "ptrace: failed to setoptions process {}::{}: {}", + name, child, err + ); + } + + if let Err(err) = ptrace::syscall(child, None) { + panic!( + "ptrace: failed to continue to next syscall {}::{}: {}", + name, child, err + ); + } + } + WaitStatus::PtraceSyscall(_) => { + if is_syscall_entry { + if let Ok(regs) = ptrace::getregs(child) { + let sysno = Sysno::from(regs.orig_rax as u32); + if !allowed_syscalls.contains(&sysno) { + panic!("Syscall not present: {:?} {}::{}", sysno, name, child,); + } + } + } + + is_syscall_entry = !is_syscall_entry; + if let Err(err) = ptrace::syscall(child, None) { + panic!( + "ptrace: failed to continue to next syscall {}::{}: {}", + name, child, err + ); + } + } + WaitStatus::Exited(..) => { + println!( + "ptrace: process exited {}::{} child pids: {:?}", + name, + child, + get_children(child.into()) + ); + } + WaitStatus::PtraceEvent(..) => { + if let Err(err) = ptrace::detach(child, None) { + panic!( + "ptrace: failed to detach process {}::{}: {}", + name, child, err + ); + } + return; + } + _ => (), + } + } +} + +#[cfg(target_os = "linux")] +fn get_children(parent_pid: i32) -> BTreeSet { + let mut pids = BTreeSet::new(); + + if let Ok(process) = Process::new(parent_pid) { + if let Ok(tasks) = process.tasks() { + for task in tasks.flatten() { + let child_pid = task.tid; + pids.insert(child_pid); + } + } + } + pids.remove(&parent_pid); + pids +} diff --git a/rs/execution_environment/src/canister_manager.rs b/rs/execution_environment/src/canister_manager.rs index 14877845010..41fc6fa1930 100644 --- a/rs/execution_environment/src/canister_manager.rs +++ b/rs/execution_environment/src/canister_manager.rs @@ -121,7 +121,8 @@ pub(crate) struct CanisterMgrConfig { pub(crate) own_subnet_id: SubnetId, pub(crate) own_subnet_type: SubnetType, pub(crate) max_controllers: usize, - pub(crate) max_canister_memory_size: NumBytes, + pub(crate) 
max_canister_memory_size_wasm32: NumBytes, + pub(crate) max_canister_memory_size_wasm64: NumBytes, pub(crate) rate_limiting_of_instructions: FlagStatus, rate_limiting_of_heap_delta: FlagStatus, heap_delta_rate_limit: NumBytes, @@ -141,7 +142,8 @@ impl CanisterMgrConfig { own_subnet_type: SubnetType, max_controllers: usize, compute_capacity: usize, - max_canister_memory_size: NumBytes, + max_canister_memory_size_wasm32: NumBytes, + max_canister_memory_size_wasm64: NumBytes, rate_limiting_of_instructions: FlagStatus, allocatable_capacity_in_percent: usize, rate_limiting_of_heap_delta: FlagStatus, @@ -160,7 +162,8 @@ impl CanisterMgrConfig { max_controllers, compute_capacity: (compute_capacity * allocatable_capacity_in_percent.min(100) / 100) as u64, - max_canister_memory_size, + max_canister_memory_size_wasm32, + max_canister_memory_size_wasm64, rate_limiting_of_instructions, rate_limiting_of_heap_delta, heap_delta_rate_limit, @@ -912,7 +915,7 @@ impl CanisterManager { return false; } }; - module.memories.first().map_or(false, |m| m.memory64) + module.memories.first().is_some_and(|m| m.memory64) } /// Installs code to a canister. 
@@ -2190,14 +2193,15 @@ impl CanisterManager { .certified_data .clone_from(snapshot.certified_data()); - let is_wasm64_execution = new_execution_state - .as_ref() - .map_or(false, |es| es.is_wasm64); + let is_wasm64_execution = new_execution_state.as_ref().is_some_and(|es| es.is_wasm64); let mut new_canister = CanisterState::new(system_state, new_execution_state, scheduler_state); let new_memory_usage = new_canister.memory_usage(); - let memory_allocation_given = canister.memory_limit(self.config.max_canister_memory_size); + + let memory_allocation_given = + canister.memory_limit(self.get_max_canister_memory_size(is_wasm64_execution)); + if new_memory_usage > memory_allocation_given { return ( Err(CanisterManagerError::NotEnoughMemoryAllocationGiven { @@ -2338,6 +2342,16 @@ impl CanisterManager { ); Ok(()) } + + /// Depending on the canister architecture (Wasm32 or Wasm64), returns the + /// maximum memory size that can be allocated by a canister. + pub(crate) fn get_max_canister_memory_size(&self, is_wasm64_execution: bool) -> NumBytes { + if is_wasm64_execution { + self.config.max_canister_memory_size_wasm64 + } else { + self.config.max_canister_memory_size_wasm32 + } + } } #[derive(Eq, PartialEq, Debug)] diff --git a/rs/execution_environment/src/canister_manager/tests.rs b/rs/execution_environment/src/canister_manager/tests.rs index f0dfc4057b9..b3588f494ca 100644 --- a/rs/execution_environment/src/canister_manager/tests.rs +++ b/rs/execution_environment/src/canister_manager/tests.rs @@ -306,6 +306,7 @@ fn canister_manager_config( // TODO(RUN-319): the capacity should be defined based on actual `scheduler_cores` 100, MAX_CANISTER_MEMORY_SIZE, + MAX_CANISTER_MEMORY_SIZE, rate_limiting_of_instructions, 100, FlagStatus::Enabled, diff --git a/rs/execution_environment/src/execution/replicated_query.rs b/rs/execution_environment/src/execution/replicated_query.rs index b3b38b83fe7..a63c115e8d4 100644 --- a/rs/execution_environment/src/execution/replicated_query.rs +++ 
b/rs/execution_environment/src/execution/replicated_query.rs @@ -48,7 +48,7 @@ pub fn execute_replicated_query( let is_wasm64_execution = canister .execution_state .as_ref() - .map_or(false, |es| es.is_wasm64); + .is_some_and(|es| es.is_wasm64); let prepaid_execution_cycles = match round.cycles_account_manager.prepay_execution_cycles( &mut canister.system_state, diff --git a/rs/execution_environment/src/execution/response.rs b/rs/execution_environment/src/execution/response.rs index 7d41f7ddac1..ca8963046e7 100644 --- a/rs/execution_environment/src/execution/response.rs +++ b/rs/execution_environment/src/execution/response.rs @@ -101,7 +101,6 @@ const RESERVED_CLEANUP_INSTRUCTIONS_IN_PERCENT: u64 = 5; /// â–¼ /// [end] ///``` - /// Contains fields of `ResponseHelper` that are necessary for resuming the /// response execution. #[derive(Debug)] @@ -575,7 +574,7 @@ impl ResponseHelper { .canister .execution_state .as_ref() - .map_or(false, |es| es.is_wasm64); + .is_some_and(|es| es.is_wasm64); round.cycles_account_manager.refund_unused_execution_cycles( &mut self.canister.system_state, diff --git a/rs/execution_environment/src/execution/update.rs b/rs/execution_environment/src/execution/update.rs index 6e8b5098d48..702d008229b 100644 --- a/rs/execution_environment/src/execution/update.rs +++ b/rs/execution_environment/src/execution/update.rs @@ -63,7 +63,7 @@ pub fn execute_update( let is_wasm64_execution = canister .execution_state .as_ref() - .map_or(false, |es| es.is_wasm64); + .is_some_and(|es| es.is_wasm64); let prepaid_execution_cycles = match round .cycles_account_manager @@ -268,7 +268,7 @@ fn finish_err( let is_wasm64_execution = canister .execution_state .as_ref() - .map_or(false, |es| es.is_wasm64); + .is_some_and(|es| es.is_wasm64); let instruction_limit = original.execution_parameters.instruction_limits.message(); round.cycles_account_manager.refund_unused_execution_cycles( @@ -537,7 +537,7 @@ impl UpdateHelper { .canister .execution_state .as_ref() - 
.map_or(false, |es| es.is_wasm64); + .is_some_and(|es| es.is_wasm64); round.cycles_account_manager.refund_unused_execution_cycles( &mut self.canister.system_state, diff --git a/rs/execution_environment/src/execution/upgrade.rs b/rs/execution_environment/src/execution/upgrade.rs index 924fed0cdf0..9bad4929b70 100644 --- a/rs/execution_environment/src/execution/upgrade.rs +++ b/rs/execution_environment/src/execution/upgrade.rs @@ -884,7 +884,7 @@ fn determine_main_memory_handling( let old_state_uses_orthogonal_persistence = || { old_state .as_ref() - .map_or(false, expects_enhanced_orthogonal_persistence) + .is_some_and(expects_enhanced_orthogonal_persistence) }; let new_state_uses_classical_persistence = || { new_state_candidate.is_ok() diff --git a/rs/execution_environment/src/execution_environment.rs b/rs/execution_environment/src/execution_environment.rs index 0733c850446..7d6a87baa6d 100644 --- a/rs/execution_environment/src/execution_environment.rs +++ b/rs/execution_environment/src/execution_environment.rs @@ -402,7 +402,8 @@ impl ExecutionEnvironment { own_subnet_type, config.max_controllers, compute_capacity, - config.max_canister_memory_size, + config.max_canister_memory_size_wasm32, + config.max_canister_memory_size_wasm64, config.rate_limiting_of_instructions, config.allocatable_compute_capacity_in_percent, config.rate_limiting_of_heap_delta, @@ -1805,8 +1806,12 @@ impl ExecutionEnvironment { /// Returns the maximum amount of memory that can be utilized by a single /// canister. - pub fn max_canister_memory_size(&self) -> NumBytes { - self.config.max_canister_memory_size + pub fn max_canister_memory_size(&self, is_wasm64: bool) -> NumBytes { + if is_wasm64 { + self.config.max_canister_memory_size_wasm64 + } else { + self.config.max_canister_memory_size_wasm32 + } } /// Returns the subnet memory capacity. 
@@ -1823,9 +1828,17 @@ impl ExecutionEnvironment { execution_mode: ExecutionMode, subnet_memory_saturation: ResourceSaturation, ) -> ExecutionParameters { + let is_wasm64_execution = match &canister.execution_state { + // The canister is not already installed, so we do not know what kind of canister it is. + // Therefore we can assume it is Wasm64 because Wasm64 can have a larger memory limit. + None => true, + Some(execution_state) => execution_state.is_wasm64, + }; + let max_memory_size = self.max_canister_memory_size(is_wasm64_execution); + ExecutionParameters { instruction_limits, - canister_memory_limit: canister.memory_limit(self.config.max_canister_memory_size), + canister_memory_limit: canister.memory_limit(max_memory_size), wasm_memory_limit: canister.wasm_memory_limit(), memory_allocation: canister.memory_allocation(), canister_guaranteed_callback_quota: self.config.canister_guaranteed_callback_quota diff --git a/rs/execution_environment/src/hypervisor.rs b/rs/execution_environment/src/hypervisor.rs index 4e472eff012..b67705512da 100644 --- a/rs/execution_environment/src/hypervisor.rs +++ b/rs/execution_environment/src/hypervisor.rs @@ -535,11 +535,11 @@ impl Hypervisor { if let Err(err) = &mut result.wasm_result { let can_view = match &system_state.log_visibility { LogVisibilityV2::Controllers => { - caller.map_or(false, |c| system_state.controllers.contains(&c)) + caller.is_some_and(|c| system_state.controllers.contains(&c)) } LogVisibilityV2::Public => true, LogVisibilityV2::AllowedViewers(allowed) => { - caller.map_or(false, |c| allowed.get().contains(&c)) + caller.is_some_and(|c| allowed.get().contains(&c)) } }; if !can_view { diff --git a/rs/execution_environment/src/metrics.rs b/rs/execution_environment/src/metrics.rs index b3417bd1ebb..9a59bd1ca20 100644 --- a/rs/execution_environment/src/metrics.rs +++ b/rs/execution_environment/src/metrics.rs @@ -552,7 +552,7 @@ struct MeasurementScopeCore<'a> { record_zeros: bool, } -impl<'a> Drop for 
MeasurementScopeCore<'a> { +impl Drop for MeasurementScopeCore<'_> { fn drop(&mut self) { if let Some(outer) = &self.outer { outer.add(self.instructions, self.slices, self.messages); diff --git a/rs/execution_environment/src/query_handler.rs b/rs/execution_environment/src/query_handler.rs index 88213e3ed4f..31970026329 100644 --- a/rs/execution_environment/src/query_handler.rs +++ b/rs/execution_environment/src/query_handler.rs @@ -258,7 +258,6 @@ impl InternalHttpQueryHandler { // instruction limit for the whole composite query tree imposes a much lower // implicit bound anyway. let subnet_available_callbacks = self.config.subnet_callback_soft_limit as i64; - let max_canister_memory_size = self.config.max_canister_memory_size; let mut context = query_context::QueryContext::new( &self.log, @@ -273,7 +272,8 @@ impl InternalHttpQueryHandler { subnet_available_memory, subnet_available_callbacks, self.config.canister_guaranteed_callback_quota as u64, - max_canister_memory_size, + self.config.max_canister_memory_size_wasm32, + self.config.max_canister_memory_size_wasm64, self.max_instructions_per_query, self.config.max_query_call_graph_depth, self.config.max_query_call_graph_instructions, diff --git a/rs/execution_environment/src/query_handler/query_context.rs b/rs/execution_environment/src/query_handler/query_context.rs index 547d5a6777a..70b909c7b71 100644 --- a/rs/execution_environment/src/query_handler/query_context.rs +++ b/rs/execution_environment/src/query_handler/query_context.rs @@ -95,7 +95,8 @@ pub(super) struct QueryContext<'a> { network_topology: Arc, // Certificate for certified queries + canister ID of the root query of this context data_certificate: (Vec, CanisterId), - max_canister_memory_size: NumBytes, + max_canister_memory_size_wasm32: NumBytes, + max_canister_memory_size_wasm64: NumBytes, max_instructions_per_query: NumInstructions, max_query_call_graph_depth: usize, instruction_overhead_per_query_call: RoundInstructions, @@ -130,7 +131,8 @@ 
impl<'a> QueryContext<'a> { subnet_available_memory: SubnetAvailableMemory, subnet_available_callbacks: i64, canister_guaranteed_callback_quota: u64, - max_canister_memory_size: NumBytes, + max_canister_memory_size_wasm32: NumBytes, + max_canister_memory_size_wasm64: NumBytes, max_instructions_per_query: NumInstructions, max_query_call_graph_depth: usize, max_query_call_graph_instructions: NumInstructions, @@ -158,7 +160,8 @@ impl<'a> QueryContext<'a> { state, network_topology, data_certificate: (data_certificate, canister_id), - max_canister_memory_size, + max_canister_memory_size_wasm32, + max_canister_memory_size_wasm64, max_instructions_per_query, max_query_call_graph_depth, instruction_overhead_per_query_call: as_round_instructions( @@ -1083,9 +1086,20 @@ impl<'a> QueryContext<'a> { canister: &CanisterState, instruction_limits: InstructionLimits, ) -> ExecutionParameters { + let is_wasm64_execution = canister + .execution_state + .as_ref() + .is_some_and(|es| es.is_wasm64); + + let max_canister_memory_size = if is_wasm64_execution { + self.max_canister_memory_size_wasm64 + } else { + self.max_canister_memory_size_wasm32 + }; + ExecutionParameters { instruction_limits, - canister_memory_limit: canister.memory_limit(self.max_canister_memory_size), + canister_memory_limit: canister.memory_limit(max_canister_memory_size), wasm_memory_limit: canister.wasm_memory_limit(), memory_allocation: canister.memory_allocation(), canister_guaranteed_callback_quota: self.canister_guaranteed_callback_quota, diff --git a/rs/execution_environment/src/scheduler.rs b/rs/execution_environment/src/scheduler.rs index bd7a475ef66..3b35cfda5e9 100644 --- a/rs/execution_environment/src/scheduler.rs +++ b/rs/execution_environment/src/scheduler.rs @@ -138,7 +138,6 @@ impl SchedulerRoundLimits { //////////////////////////////////////////////////////////////////////// /// Scheduler Implementation - pub(crate) struct SchedulerImpl { config: SchedulerConfig, own_subnet_id: SubnetId, @@ -222,7 
+221,7 @@ impl SchedulerImpl { state = new_state; ongoing_long_install_code |= state .canister_state(canister_id) - .map_or(false, |canister| canister.has_paused_install_code()); + .is_some_and(|canister| canister.has_paused_install_code()); let round_instructions_executed = as_num_instructions(instructions_before - round_limits.instructions); @@ -603,24 +602,27 @@ impl SchedulerImpl { // are not polluted by canisters that haven't had any messages for a long time. for canister_id in &round_filtered_canisters.active_canister_ids { let canister_state = state.canister_state(canister_id).unwrap(); - let canister_age = current_round.get() - - canister_state - .scheduler_state - .last_full_execution_round - .get(); - self.metrics.canister_age.observe(canister_age as f64); - // If `canister_age` > 1 / `compute_allocation` the canister ought to have been - // scheduled. - let allocation = Ratio::new( - canister_state - .scheduler_state - .compute_allocation - .as_percent(), - 100, - ); - if *allocation.numer() > 0 && Ratio::from_integer(canister_age) > allocation.recip() { - self.metrics.canister_compute_allocation_violation.inc(); - } + // Newly created canisters have `last_full_execution_round` set to zero, + // and hence skew the `canister_age` metric. + let last_full_execution_round = + canister_state.scheduler_state.last_full_execution_round; + if last_full_execution_round.get() != 0 { + let canister_age = current_round.get() - last_full_execution_round.get(); + self.metrics.canister_age.observe(canister_age as f64); + // If `canister_age` > 1 / `compute_allocation` the canister ought to have been + // scheduled. 
+ let allocation = Ratio::new( + canister_state + .scheduler_state + .compute_allocation + .as_percent(), + 100, + ); + if *allocation.numer() > 0 && Ratio::from_integer(canister_age) > allocation.recip() + { + self.metrics.canister_compute_allocation_violation.inc(); + } + }; } for (message_id, status) in ingress_execution_results { @@ -1070,7 +1072,15 @@ impl SchedulerImpl { ) -> bool { for canister_id in canister_ids { let canister = state.canister_states.get(canister_id).unwrap(); - if let Err(err) = canister.check_invariants(self.exec_env.max_canister_memory_size()) { + + let canister_is_wasm64 = canister + .execution_state + .as_ref() + .is_some_and(|es| es.is_wasm64); + + if let Err(err) = canister + .check_invariants(self.exec_env.max_canister_memory_size(canister_is_wasm64)) + { let msg = format!( "{}: At Round {} @ time {}, canister {} has invalid state after execution. Invariant check failed with err: {}", CANISTER_INVARIANT_BROKEN, @@ -1658,7 +1668,7 @@ impl Scheduler for SchedulerImpl { //////////////////////////////////////////////////////////////////////// /// Filtered Canisters - +/// /// This struct represents a collection of canister IDs. struct FilteredCanisters { /// Active canisters during the execution of the inner round. @@ -1823,7 +1833,7 @@ fn execute_canisters_on_thread( &mut round_limits, subnet_size, ); - if instructions_used.map_or(false, |instructions| instructions.get() > 0) { + if instructions_used.is_some_and(|instructions| instructions.get() > 0) { // We only want to count the canister as executed if it used instructions. 
executed_canister_ids.insert(new_canister.canister_id()); } @@ -2028,12 +2038,6 @@ fn observe_replicated_state_metrics( .canisters_with_old_open_call_contexts .with_label_values(&[OLD_CALL_CONTEXT_LABEL_ONE_DAY]) .set(canisters_with_old_open_call_contexts as i64); - let streams_guaranteed_response_bytes = state - .metadata - .streams() - .guaranteed_responses_size_bytes() - .values() - .sum(); metrics .current_heap_delta @@ -2105,7 +2109,6 @@ fn observe_replicated_state_metrics( metrics.observe_queues_response_bytes(queues_response_bytes); metrics.observe_queues_memory_reservations(queues_memory_reservations); metrics.observe_oversized_requests_extra_bytes(queues_oversized_requests_extra_bytes); - metrics.observe_streams_response_bytes(streams_guaranteed_response_bytes); metrics .ingress_history_length @@ -2324,7 +2327,7 @@ fn is_next_method_chosen( .system_state .task_queue .front() - .map_or(false, |task| task.is_hook()) + .is_some_and(|task| task.is_hook()) { return true; } diff --git a/rs/execution_environment/src/scheduler/scheduler_metrics.rs b/rs/execution_environment/src/scheduler/scheduler_metrics.rs index 81a2f2d07c3..91ef3ba6101 100644 --- a/rs/execution_environment/src/scheduler/scheduler_metrics.rs +++ b/rs/execution_environment/src/scheduler/scheduler_metrics.rs @@ -60,7 +60,6 @@ pub(super) struct SchedulerMetrics { pub(super) queues_response_bytes: IntGauge, pub(super) queues_memory_reservations: IntGauge, pub(super) queues_oversized_requests_extra_bytes: IntGauge, - pub(super) streams_response_bytes: IntGauge, pub(super) canister_messages_where_cycles_were_charged: IntCounter, pub(super) current_heap_delta: IntGauge, pub(super) round_skipped_due_to_current_heap_delta_above_limit: IntCounter, @@ -136,8 +135,8 @@ impl SchedulerMetrics { canister_age: metrics_registry.histogram( "scheduler_canister_age_rounds", "Number of rounds for which a canister was not scheduled.", - // 1, 2, 5, …, 100, 200, 500 - decimal_buckets(0, 2), + // 1, 2, 5, …, 1000, 
2000, 5000 + decimal_buckets(0, 3), ), canister_compute_allocation_violation: metrics_registry.int_counter( "scheduler_compute_allocation_violations", @@ -304,10 +303,6 @@ impl SchedulerMetrics { "execution_queues_oversized_requests_extra_bytes", "Total bytes above `MAX_RESPONSE_COUNT_BYTES` across oversized local-subnet requests.", ), - streams_response_bytes: metrics_registry.int_gauge( - "execution_streams_response_size_bytes", - "Total byte size of all responses in subnet streams.", - ), canister_messages_where_cycles_were_charged: metrics_registry.int_counter( "scheduler_canister_messages_where_cycles_were_charged", "Total number of canister messages which resulted in cycles being charged.", @@ -773,8 +768,4 @@ impl SchedulerMetrics { self.queues_oversized_requests_extra_bytes .set(size_bytes as i64); } - - pub(super) fn observe_streams_response_bytes(&self, size_bytes: usize) { - self.streams_response_bytes.set(size_bytes as i64); - } } diff --git a/rs/execution_environment/src/scheduler/tests.rs b/rs/execution_environment/src/scheduler/tests.rs index fd1d6e2c1f7..a70e1826dc5 100644 --- a/rs/execution_environment/src/scheduler/tests.rs +++ b/rs/execution_environment/src/scheduler/tests.rs @@ -2685,13 +2685,16 @@ fn can_record_metrics_single_scheduler_thread() { #[test] fn can_record_metrics_for_a_round() { + let num_canisters = 3; + let scheduler_cores = num_canisters as usize - 1; + let instructions = 5; let mut test = SchedulerTestBuilder::new() .with_scheduler_config(SchedulerConfig { - scheduler_cores: 2, - max_instructions_per_round: NumInstructions::from(25), - max_instructions_per_message: NumInstructions::from(5), - max_instructions_per_message_without_dts: NumInstructions::new(5), - max_instructions_per_slice: NumInstructions::from(5), + scheduler_cores, + max_instructions_per_round: NumInstructions::from(instructions * 2), + max_instructions_per_message: NumInstructions::from(instructions), + max_instructions_per_message_without_dts: 
NumInstructions::new(instructions), + max_instructions_per_slice: NumInstructions::from(instructions), instruction_overhead_per_execution: NumInstructions::from(0), instruction_overhead_per_canister: NumInstructions::from(0), instruction_overhead_per_canister_for_finalization: NumInstructions::from(0), @@ -2699,7 +2702,6 @@ fn can_record_metrics_for_a_round() { }) .build(); - let num_canisters = 3; // The first two canisters have an `Allocation` of 45% and the last 9%. We'll be // forced to execute the first two and then run out of instructions (based on // the limits) which will result in a violation of third canister's @@ -2715,13 +2717,10 @@ fn can_record_metrics_for_a_round() { None, ); for _ in 0..5 { - test.send_ingress(canister, ingress(5)); + test.send_ingress(canister, ingress(instructions)); } } - // For allocation violation to happen, the canister age should be more than `100/9 = 11 rounds` - test.advance_to_round(ExecutionRound::from(12)); - for canister in test.state_mut().canister_states.values_mut() { canister.scheduler_state.time_of_last_allocation_charge = UNIX_EPOCH + Duration::from_secs(1); @@ -2744,10 +2743,11 @@ fn can_record_metrics_for_a_round() { let metrics = &test.scheduler().metrics; assert_eq!( - metrics.executable_canisters_per_round.get_sample_sum() as i64, - 3 + metrics.executable_canisters_per_round.get_sample_sum() as u64, + num_canisters ); - assert_eq!(metrics.canister_age.get_sample_sum() as i64, 12); + // The canister age metric is not observed for newly created canisters. 
+ assert_eq!(metrics.canister_age.get_sample_sum() as i64, 0); assert_eq!(metrics.round_preparation_duration.get_sample_count(), 1); assert_eq!(metrics.round_preparation_ingress.get_sample_count(), 1); assert_eq!(metrics.round_scheduling_duration.get_sample_count(), 1); @@ -2762,10 +2762,11 @@ fn can_record_metrics_for_a_round() { ); assert_eq!(metrics.round_finalization_ingress.get_sample_count(), 1); assert_eq!(metrics.round_finalization_charge.get_sample_count(), 1); - assert_eq!(metrics.canister_compute_allocation_violation.get(), 1); + // Compute allocation violation is not observed for newly created canisters. + assert_eq!(metrics.canister_compute_allocation_violation.get(), 0); assert_eq!( metrics.canister_messages_where_cycles_were_charged.get(), - 10 + scheduler_cores as u64 * 2 ); assert_eq!( @@ -2773,12 +2774,26 @@ fn can_record_metrics_for_a_round() { .metadata .subnet_metrics .update_transactions_total, - 10 + scheduler_cores as u64 * 2 ); assert_eq!( test.state().metadata.subnet_metrics.num_canisters, num_canisters ); + + // Bump up the round number. + test.execute_round(ExecutionRoundType::OrdinaryRound); + + // For allocation violation to happen, the canister age should be more than `100/9 = 11 rounds` + // plus 2 rounds already executed. + test.advance_to_round(ExecutionRound::from(11 + 2)); + test.execute_round(ExecutionRoundType::OrdinaryRound); + + let metrics = &test.scheduler().metrics; + // The canister age metric should be observed now. + assert_eq!(metrics.canister_age.get_sample_sum() as i64, 12); + // Compute allocation violation should also be observed now. 
+ assert_eq!(metrics.canister_compute_allocation_violation.get(), 1); } /// Check that when a canister is scheduled and can't prepay for execution, the diff --git a/rs/execution_environment/tests/execution_test.rs b/rs/execution_environment/tests/execution_test.rs index f8cae43c080..4cb7679daa6 100644 --- a/rs/execution_environment/tests/execution_test.rs +++ b/rs/execution_environment/tests/execution_test.rs @@ -996,7 +996,7 @@ fn max_canister_memory_respected_even_when_no_memory_allocation_is_set() { let env = StateMachine::new_with_config(StateMachineConfig::new( subnet_config, HypervisorConfig { - max_canister_memory_size: NumBytes::from(10 * MIB), + max_canister_memory_size_wasm32: NumBytes::from(10 * MIB), ..Default::default() }, )); @@ -1341,6 +1341,92 @@ fn canister_with_memory_allocation_cannot_grow_wasm_memory_above_allocation_wasm assert_eq!(err.code(), ErrorCode::CanisterOutOfMemory); } +#[test] +fn max_canister_memory_size_is_different_between_wasm32_vs_wasm64() { + fn create_wat(memory_increase_in_pages: i64, is_wasm64: bool) -> String { + // Wat that grows the memory by parameter and can be formatted to Wasm32 and Wasm64. 
+ let mem_declaration = if is_wasm64 { + "(memory $memory i64 1 250)" + } else { + "(memory $memory 1 250)" + }; + let func_msg_decl = if is_wasm64 { + "(import \"ic0\" \"msg_reply_data_append\" (func $msg_reply_data_append (param i64 i64)))" + } else { + "(import \"ic0\" \"msg_reply_data_append\" (func $msg_reply_data_append (param i32 i32)))" + }; + let call_msg_reply_append = if is_wasm64 { + "(call $msg_reply_data_append (i64.const 0) (i64.const 1))" + } else { + "(call $msg_reply_data_append (i32.const 0) (i32.const 1))" + }; + let memory_grow_instruction = if is_wasm64 { + format!( + "(drop (memory.grow (i64.const {})))", + memory_increase_in_pages + ) + } else { + format!( + "(drop (memory.grow (i32.const {})))", + memory_increase_in_pages + ) + }; + format!( + r#" + (module + (import "ic0" "msg_reply" (func $msg_reply)) + {} + (func $update + {} + {} + (call $msg_reply) + ) + {} + (export "canister_update update" (func $update)) + )"#, + func_msg_decl, memory_grow_instruction, call_msg_reply_append, mem_declaration + ) + } + + let subnet_config = SubnetConfig::new(SubnetType::Application); + let env = StateMachine::new_with_config(StateMachineConfig::new( + subnet_config, + HypervisorConfig { + max_canister_memory_size_wasm32: NumBytes::from(10 * MIB), + max_canister_memory_size_wasm64: NumBytes::from(20 * MIB), + ..Default::default() + }, + )); + + // Create wat that grows a Wasm32 canister to the 15 MiB, which is over the Wasm32 limit. + // A Wasm page is 64 KiB. Therefore 15 MiB is 240 pages. + let num_pages = 240; + let wat_32 = create_wat(num_pages, false); + // Create wat that grows a Wasm64 canister to the 15 MiB, which is below the Wasm64 limit. 
+ let wat_64 = create_wat(num_pages, true); + + let wasm32_canister = create_canister_with_cycles( + &env, + wat::parse_str(&wat_32).unwrap(), + Some(CanisterSettingsArgsBuilder::new().build()), + INITIAL_CYCLES_BALANCE, + ); + let wasm64_canister = create_canister_with_cycles( + &env, + wat::parse_str(&wat_64).unwrap(), + Some(CanisterSettingsArgsBuilder::new().build()), + INITIAL_CYCLES_BALANCE, + ); + + // When running, the wasm32 canister should trap because it tries to grow the memory beyond its limit of 15 MiB. + // and the Wasm64 canister should succeed because it has a higher limit. + let res32 = env.execute_ingress(wasm32_canister, "update", vec![]); + let res64 = env.execute_ingress(wasm64_canister, "update", vec![]); + + assert_eq!(res32.unwrap_err().code(), ErrorCode::CanisterOutOfMemory); + assert_replied(res64); +} + #[test] fn canister_with_memory_allocation_cannot_grow_stable_memory_above_allocation() { let subnet_config = SubnetConfig::new(SubnetType::Application); diff --git a/rs/execution_environment/tests/hypervisor.rs b/rs/execution_environment/tests/hypervisor.rs index 7f9f649b104..7458daebcf0 100644 --- a/rs/execution_environment/tests/hypervisor.rs +++ b/rs/execution_environment/tests/hypervisor.rs @@ -7586,9 +7586,7 @@ fn declaring_too_many_tables_fails() { // produces a reply from `bytes`. 
fn use_wasm_memory_and_reply(bytes: u64) -> Vec { wasm() - .stable64_grow( - (bytes + WASM_PAGE_SIZE_IN_BYTES as u64 - 1) / WASM_PAGE_SIZE_IN_BYTES as u64, - ) + .stable64_grow(bytes.div_ceil(WASM_PAGE_SIZE_IN_BYTES as u64)) .stable64_read(0, bytes) .blob_length() .reply_int() diff --git a/rs/execution_environment/tests/storage_reservation.rs b/rs/execution_environment/tests/storage_reservation.rs index d60bbc77211..083d3662360 100644 --- a/rs/execution_environment/tests/storage_reservation.rs +++ b/rs/execution_environment/tests/storage_reservation.rs @@ -203,9 +203,12 @@ fn test_storage_reservation_triggered_in_canister_snapshot_without_enough_cycles // Match on a substring of the error message. Due to a difference in instructions consumed on // Mac vs Linux, we cannot match on the exact number of cycles but we only need to verify it's // a non-zero amount. - assert!(err - .description() - .contains("due to insufficient cycles. At least 339_603_")); + assert!( + err.description() + .contains("due to insufficient cycles. 
At least 339_559_"), + "Error message: {}", + err.description() + ); } } } diff --git a/rs/http_utils/src/file_downloader.rs b/rs/http_utils/src/file_downloader.rs index 2470abd93db..2db6d21bc32 100644 --- a/rs/http_utils/src/file_downloader.rs +++ b/rs/http_utils/src/file_downloader.rs @@ -513,7 +513,7 @@ mod tests { let mut tar = Builder::new(writer); let mut header = tar::Header::new_gnu(); header.set_path("test.txt")?; - header.set_size("Hello, world!".as_bytes().len() as u64); + header.set_size("Hello, world!".len() as u64); header.set_cksum(); tar.append(&header, "Hello, world!".as_bytes())?; tar.finish()?; diff --git a/rs/ic_os/config/src/generate_testnet_config.rs b/rs/ic_os/config/src/generate_testnet_config.rs index 2b8f38c1e48..8ff0f9cc7b1 100644 --- a/rs/ic_os/config/src/generate_testnet_config.rs +++ b/rs/ic_os/config/src/generate_testnet_config.rs @@ -181,7 +181,7 @@ fn create_guestos_config(config: GenerateTestnetConfigArgs) -> Result, _>>()?, - None => vec![Url::parse("https://wiki.internetcomputer.org")?], + None => vec![Url::parse("https://cloudflare.com/cdn-cgi/trace")?], }; let use_node_operator_private_key = use_node_operator_private_key.unwrap_or(false); diff --git a/rs/ic_os/os_tools/guestos_tool/src/prometheus_metric.rs b/rs/ic_os/os_tools/guestos_tool/src/prometheus_metric.rs index 2a16d1cdbdb..7d391a64305 100644 --- a/rs/ic_os/os_tools/guestos_tool/src/prometheus_metric.rs +++ b/rs/ic_os/os_tools/guestos_tool/src/prometheus_metric.rs @@ -8,7 +8,6 @@ use std::vec::Vec; /// Types and utils for writing prometheus metrics to textfile collector /// Unused for now: /// const DEFAULT_TEXTFILE_COLLECTOR_DIR: &str = "/run/node_exporter/collector_textfile/"; - #[allow(dead_code)] pub enum MetricType { Counter, diff --git a/rs/ingress_manager/src/ingress_selector.rs b/rs/ingress_manager/src/ingress_selector.rs index ad0f07f69e5..553bf46bad1 100644 --- a/rs/ingress_manager/src/ingress_selector.rs +++ b/rs/ingress_manager/src/ingress_selector.rs @@ 
-627,7 +627,7 @@ impl<'a, T: IngressSetQuery> IngressSetChain<'a, T> { } } -impl<'a, T: IngressSetQuery> IngressSetQuery for IngressSetChain<'a, T> { +impl IngressSetQuery for IngressSetChain<'_, T> { fn contains(&self, msg_id: &IngressMessageId) -> bool { if self.first.contains(msg_id) { true diff --git a/rs/interfaces/src/consensus_pool.rs b/rs/interfaces/src/consensus_pool.rs index 88f39d258d8..627a0a1a6f3 100644 --- a/rs/interfaces/src/consensus_pool.rs +++ b/rs/interfaces/src/consensus_pool.rs @@ -490,7 +490,7 @@ impl<'a> ChainIterator<'a> { } } -impl<'a> Iterator for ChainIterator<'a> { +impl Iterator for ChainIterator<'_> { type Item = Block; fn next(&mut self) -> Option { diff --git a/rs/interfaces/src/crypto/sign/canister_threshold_sig.rs b/rs/interfaces/src/crypto/sign/canister_threshold_sig.rs index 15ef934f308..75bc013893d 100644 --- a/rs/interfaces/src/crypto/sign/canister_threshold_sig.rs +++ b/rs/interfaces/src/crypto/sign/canister_threshold_sig.rs @@ -531,7 +531,6 @@ pub trait IDkgProtocol { /// The threshold signing protocol is non-interactive, which means that the nodes participating /// to the protocol only need to compute a signature share and publish it. Shares can then be /// publicly verified by anybody and combined into a single ECDSA signature. - pub trait ThresholdEcdsaSigner { /// Create a threshold ECDSA signature share. 
/// diff --git a/rs/ledger_suite/icp/BUILD.bazel b/rs/ledger_suite/icp/BUILD.bazel index 999d83e0d54..20ab1447f14 100644 --- a/rs/ledger_suite/icp/BUILD.bazel +++ b/rs/ledger_suite/icp/BUILD.bazel @@ -97,9 +97,9 @@ rust_ic_test_suite( "tests/tests.rs", "tests/upgrade_downgrade.rs", ], + compile_data = ["ledger_archive.did"], data = [ "ledger.did", - "ledger_archive.did", ":ledger_proxy.wasm", ":test_notified_canister", "//rs/ledger_suite/icp/archive:ledger-archive-node-canister-wasm", diff --git a/rs/ledger_suite/icrc1/ledger/BUILD.bazel b/rs/ledger_suite/icrc1/ledger/BUILD.bazel index afaec0a3b60..b526dcdad90 100644 --- a/rs/ledger_suite/icrc1/ledger/BUILD.bazel +++ b/rs/ledger_suite/icrc1/ledger/BUILD.bazel @@ -279,9 +279,11 @@ rust_test( "//rs/ledger_suite/icrc1/ledger:ledger_canister_icrc3_compatible_data_certificate", "//rs/universal_canister/impl:universal_canister.wasm.gz", "@mainnet_ckbtc_ic-icrc1-ledger-v1//file", + "@mainnet_ckbtc_ic-icrc1-ledger-v2-noledgerversion//file", "@mainnet_ckbtc_ic-icrc1-ledger-v2//file", "@mainnet_ckbtc_ic-icrc1-ledger//file", "@mainnet_cketh_ic-icrc1-ledger-u256-v1//file", + "@mainnet_cketh_ic-icrc1-ledger-u256-v2-noledgerversion//file", "@mainnet_cketh_ic-icrc1-ledger-u256-v2//file", "@mainnet_cketh_ic-icrc1-ledger-u256//file", "@mainnet_ic-icrc1-ledger//file", @@ -291,9 +293,11 @@ rust_test( "CKBTC_IC_ICRC1_LEDGER_DEPLOYED_VERSION_WASM_PATH": "$(rootpath @mainnet_ckbtc_ic-icrc1-ledger//file)", "CKBTC_IC_ICRC1_LEDGER_V1_VERSION_WASM_PATH": "$(rootpath @mainnet_ckbtc_ic-icrc1-ledger-v1//file)", "CKBTC_IC_ICRC1_LEDGER_V2_VERSION_WASM_PATH": "$(rootpath @mainnet_ckbtc_ic-icrc1-ledger-v2//file)", + "CKBTC_IC_ICRC1_LEDGER_V2_NOLEDGERLEVRION_VERSION_WASM_PATH": "$(rootpath @mainnet_ckbtc_ic-icrc1-ledger-v2-noledgerversion//file)", "CKETH_IC_ICRC1_LEDGER_DEPLOYED_VERSION_WASM_PATH": "$(rootpath @mainnet_cketh_ic-icrc1-ledger-u256//file)", "CKETH_IC_ICRC1_LEDGER_V1_VERSION_WASM_PATH": "$(rootpath 
@mainnet_cketh_ic-icrc1-ledger-u256-v1//file)", "CKETH_IC_ICRC1_LEDGER_V2_VERSION_WASM_PATH": "$(rootpath @mainnet_cketh_ic-icrc1-ledger-u256-v2//file)", + "CKETH_IC_ICRC1_LEDGER_V2_NOLEDGERLEVRION_VERSION_WASM_PATH": "$(rootpath @mainnet_cketh_ic-icrc1-ledger-u256-v2-noledgerversion//file)", "IC_ICRC1_ARCHIVE_WASM_PATH": "$(rootpath //rs/ledger_suite/icrc1/archive:archive_canister" + name_suffix + ".wasm.gz)", "IC_ICRC1_LEDGER_DEPLOYED_VERSION_WASM_PATH": "$(rootpath @mainnet_ic-icrc1-ledger//file)", "IC_ICRC1_LEDGER_ICRC3_COMPATIBLE_DATA_CERTIFICATE_WASM_PATH": "$(rootpath //rs/ledger_suite/icrc1/ledger:ledger_canister_icrc3_compatible_data_certificate)", diff --git a/rs/ledger_suite/icrc1/ledger/src/lib.rs b/rs/ledger_suite/icrc1/ledger/src/lib.rs index ee7d2fc00bf..08b10581cc3 100644 --- a/rs/ledger_suite/icrc1/ledger/src/lib.rs +++ b/rs/ledger_suite/icrc1/ledger/src/lib.rs @@ -582,14 +582,10 @@ pub struct Ledger { #[serde(default)] accounts_overflow_trim_quantity: usize, - #[serde(default = "default_ledger_version")] + #[serde(default)] pub ledger_version: u64, } -fn default_ledger_version() -> u64 { - LEDGER_VERSION -} - #[derive(Clone, Eq, PartialEq, Debug, CandidType, Deserialize, Serialize)] pub struct FeatureFlags { pub icrc2: bool, @@ -947,22 +943,22 @@ impl Ledger { let mut last_block_index_encoded = Vec::with_capacity(MAX_U64_ENCODING_BYTES); leb128::write::unsigned(&mut last_block_index_encoded, last_block_index) .expect("Failed to write LEB128"); - return fork( + fork( label( last_block_hash_label, leaf(last_block_hash.as_slice().to_vec()), ), label(last_block_index_label, leaf(last_block_index_encoded)), - ); + ) } #[cfg(not(feature = "icrc3-compatible-data-certificate"))] { let tip_hash_label = Label::from("tip_hash"); let last_block_index_encoded = last_block_index.to_be_bytes().to_vec(); - return fork( + fork( label(last_block_index_label, leaf(last_block_index_encoded)), label(tip_hash_label, leaf(last_block_hash.as_slice().to_vec())), - ); + ) 
} } None => empty(), diff --git a/rs/ledger_suite/icrc1/ledger/tests/tests.rs b/rs/ledger_suite/icrc1/ledger/tests/tests.rs index e3b0fee8020..ed6c69af51d 100644 --- a/rs/ledger_suite/icrc1/ledger/tests/tests.rs +++ b/rs/ledger_suite/icrc1/ledger/tests/tests.rs @@ -85,6 +85,14 @@ fn ledger_mainnet_v2_wasm() -> Vec { mainnet_wasm } +fn ledger_mainnet_v2_noledgerversion_wasm() -> Vec { + #[cfg(not(feature = "u256-tokens"))] + let mainnet_wasm = ledger_mainnet_v2_noledgerversion_u64_wasm(); + #[cfg(feature = "u256-tokens")] + let mainnet_wasm = ledger_mainnet_v2_noledgerversion_u256_wasm(); + mainnet_wasm +} + fn ledger_mainnet_v1_wasm() -> Vec { #[cfg(not(feature = "u256-tokens"))] let mainnet_wasm = ledger_mainnet_v1_u64_wasm(); @@ -103,6 +111,14 @@ fn ledger_mainnet_v2_u64_wasm() -> Vec { std::fs::read(std::env::var("CKBTC_IC_ICRC1_LEDGER_V2_VERSION_WASM_PATH").unwrap()).unwrap() } +#[cfg(not(feature = "u256-tokens"))] +fn ledger_mainnet_v2_noledgerversion_u64_wasm() -> Vec { + std::fs::read( + std::env::var("CKBTC_IC_ICRC1_LEDGER_V2_NOLEDGERLEVRION_VERSION_WASM_PATH").unwrap(), + ) + .unwrap() +} + #[cfg(not(feature = "u256-tokens"))] fn ledger_mainnet_v1_u64_wasm() -> Vec { std::fs::read(std::env::var("CKBTC_IC_ICRC1_LEDGER_V1_VERSION_WASM_PATH").unwrap()).unwrap() @@ -118,6 +134,14 @@ fn ledger_mainnet_v2_u256_wasm() -> Vec { std::fs::read(std::env::var("CKETH_IC_ICRC1_LEDGER_V2_VERSION_WASM_PATH").unwrap()).unwrap() } +#[cfg(feature = "u256-tokens")] +fn ledger_mainnet_v2_noledgerversion_u256_wasm() -> Vec { + std::fs::read( + std::env::var("CKETH_IC_ICRC1_LEDGER_V2_NOLEDGERLEVRION_VERSION_WASM_PATH").unwrap(), + ) + .unwrap() +} + #[cfg(feature = "u256-tokens")] fn ledger_mainnet_v1_u256_wasm() -> Vec { std::fs::read(std::env::var("CKETH_IC_ICRC1_LEDGER_V1_VERSION_WASM_PATH").unwrap()).unwrap() @@ -512,6 +536,15 @@ fn icrc1_test_multi_step_migration_from_v2() { ); } +#[test] +fn icrc1_test_multi_step_migration_from_v2_noledgerversion() { + 
ic_ledger_suite_state_machine_tests::icrc1_test_multi_step_migration( + ledger_mainnet_v2_noledgerversion_wasm(), + ledger_wasm_lowupgradeinstructionlimits(), + encode_init_args, + ); +} + #[test] fn icrc1_test_downgrade_from_incompatible_version() { ic_ledger_suite_state_machine_tests::test_downgrade_from_incompatible_version( @@ -561,6 +594,15 @@ fn icrc1_test_incomplete_migration_from_v2() { ); } +#[test] +fn icrc1_test_incomplete_migration_from_v2_noledgerversion() { + ic_ledger_suite_state_machine_tests::test_incomplete_migration( + ledger_mainnet_v2_noledgerversion_wasm(), + ledger_wasm_lowupgradeinstructionlimits(), + encode_init_args, + ); +} + #[test] fn icrc1_test_incomplete_migration_to_current_from_mainnet() { ic_ledger_suite_state_machine_tests::test_incomplete_migration_to_current( @@ -579,6 +621,15 @@ fn icrc1_test_incomplete_migration_to_current_from_v2() { ); } +#[test] +fn icrc1_test_incomplete_migration_to_current_from_v2_noledgerversion() { + ic_ledger_suite_state_machine_tests::test_incomplete_migration_to_current( + ledger_mainnet_v2_noledgerversion_wasm(), + ledger_wasm_lowupgradeinstructionlimits(), + encode_init_args, + ); +} + #[test] fn icrc1_test_migration_resumes_from_frozen_from_mainnet() { ic_ledger_suite_state_machine_tests::test_migration_resumes_from_frozen( diff --git a/rs/memory_tracker/src/tests.rs b/rs/memory_tracker/src/tests.rs index b7b50d65609..25fb64fd9e3 100644 --- a/rs/memory_tracker/src/tests.rs +++ b/rs/memory_tracker/src/tests.rs @@ -795,7 +795,14 @@ mod random_ops { handler.sa_flags = libc::SA_SIGINFO | libc::SA_NODEFER | libc::SA_ONSTACK; handler.sa_sigaction = sigsegv_handler as usize; libc::sigemptyset(&mut handler.sa_mask); - if libc::sigaction(libc::SIGSEGV, &handler, PREV_SIGSEGV.as_mut_ptr()) != 0 { + if libc::sigaction( + libc::SIGSEGV, + &handler, + // TODO: EXC-1841 + #[allow(static_mut_refs)] + PREV_SIGSEGV.as_mut_ptr(), + ) != 0 + { panic!( "unable to install signal handler: {}", 
io::Error::last_os_error(), @@ -809,8 +816,13 @@ mod random_ops { TRACKER.with(|cell| { let previous = cell.replace(None); unsafe { - if libc::sigaction(libc::SIGSEGV, PREV_SIGSEGV.as_ptr(), std::ptr::null_mut()) - != 0 + if libc::sigaction( + libc::SIGSEGV, + // TODO: EXC-1841 + #[allow(static_mut_refs)] + PREV_SIGSEGV.as_ptr(), + std::ptr::null_mut(), + ) != 0 { panic!( "unable to unregister signal handler: {}", @@ -846,6 +858,8 @@ mod random_ops { unsafe { if !handled { + // TODO: EXC-1841 + #[allow(static_mut_refs)] let previous = *PREV_SIGSEGV.as_ptr(); if previous.sa_flags & libc::SA_SIGINFO != 0 { mem::transmute::< diff --git a/rs/messaging/src/message_routing/tests.rs b/rs/messaging/src/message_routing/tests.rs index 22e6d08066d..505c066dbb9 100644 --- a/rs/messaging/src/message_routing/tests.rs +++ b/rs/messaging/src/message_routing/tests.rs @@ -275,7 +275,7 @@ struct SubnetRecord<'a> { max_number_of_canisters: u64, } -impl<'a> From> for SubnetRecordProto { +impl From> for SubnetRecordProto { fn from(record: SubnetRecord) -> SubnetRecordProto { SubnetRecordBuilder::new() .with_membership(record.membership) diff --git a/rs/messaging/src/routing/demux.rs b/rs/messaging/src/routing/demux.rs index 5b412ae13af..d270d54972d 100644 --- a/rs/messaging/src/routing/demux.rs +++ b/rs/messaging/src/routing/demux.rs @@ -46,7 +46,7 @@ impl<'a> DemuxImpl<'a> { } } -impl<'a> Demux for DemuxImpl<'a> { +impl Demux for DemuxImpl<'_> { fn process_payload( &self, state: ReplicatedState, diff --git a/rs/messaging/src/routing/stream_builder.rs b/rs/messaging/src/routing/stream_builder.rs index 5655dad5e0e..3aff1a6d27a 100644 --- a/rs/messaging/src/routing/stream_builder.rs +++ b/rs/messaging/src/routing/stream_builder.rs @@ -22,7 +22,7 @@ use ic_types::{ #[cfg(test)] use mockall::automock; use prometheus::{Histogram, IntCounter, IntCounterVec, IntGaugeVec}; -use std::collections::BTreeMap; +use std::collections::{btree_map, BTreeMap}; use std::sync::{Arc, Mutex}; #[cfg(test)] @@ 
-290,15 +290,15 @@ impl StreamBuilderImpl { /// Tests whether a stream is over the message count limit, byte limit or (if /// directed at a system subnet) over `2 * SYSTEM_SUBNET_STREAM_MSG_LIMIT`. fn is_at_limit( - stream: Option<&Stream>, + stream: &btree_map::Entry, max_stream_messages: usize, target_stream_size_bytes: usize, is_local_message: bool, destination_subnet_type: SubnetType, ) -> bool { let stream = match stream { - Some(stream) => stream, - None => return false, + btree_map::Entry::Occupied(occupied_entry) => occupied_entry.get(), + btree_map::Entry::Vacant(_) => return false, }; let stream_messages_len = stream.messages().len(); @@ -359,8 +359,9 @@ impl StreamBuilderImpl { match routing_table.route(msg.receiver().get()) { // Destination subnet found. Some(dst_subnet_id) => { + let dst_stream_entry = streams.entry(dst_subnet_id); if is_at_limit( - streams.get(&dst_subnet_id), + &dst_stream_entry, max_stream_messages, target_stream_size_bytes, self.subnet_id == dst_subnet_id, @@ -433,14 +434,14 @@ impl StreamBuilderImpl { } } - streams.push(dst_subnet_id, msg); + dst_stream_entry.or_default().push(msg); } _ => { // Route the message into the stream. self.observe_message_status(&msg, LABEL_VALUE_STATUS_SUCCESS); self.observe_payload_size(&msg); - streams.push(dst_subnet_id, msg); + streams.entry(dst_subnet_id).or_default().push(msg); } }; } diff --git a/rs/messaging/src/routing/stream_builder/tests.rs b/rs/messaging/src/routing/stream_builder/tests.rs index 95a7ff3e675..2bee0c0f48c 100644 --- a/rs/messaging/src/routing/stream_builder/tests.rs +++ b/rs/messaging/src/routing/stream_builder/tests.rs @@ -347,7 +347,7 @@ fn build_streams_impl_at_limit_leaves_state_untouched() { // the implementation of stream builder will always allow one message if // the stream does not exist yet. 
let mut streams = provided_state.take_streams(); - streams.get_mut_or_insert(REMOTE_SUBNET); + streams.entry(REMOTE_SUBNET).or_default(); provided_state.put_streams(streams); // Set up the provided_canister_states. diff --git a/rs/messaging/src/routing/stream_handler.rs b/rs/messaging/src/routing/stream_handler.rs index 0ed4af4556d..4ae1a2e7a1e 100644 --- a/rs/messaging/src/routing/stream_handler.rs +++ b/rs/messaging/src/routing/stream_handler.rs @@ -16,7 +16,7 @@ use ic_metrics::{ MetricsRegistry, }; use ic_replicated_state::{ - metadata_state::{StreamHandle, Streams}, + metadata_state::{Stream, StreamMap}, replicated_state::{ ReplicatedStateMessageRouting, LABEL_VALUE_QUEUE_FULL, MR_SYNTHETIC_REJECT_MESSAGE_MAX_LEN, }, @@ -352,20 +352,20 @@ impl StreamHandlerImpl { let mut streams = state.take_streams(); // We know for sure that the loopback stream exists, so it is safe to unwrap. - let mut loopback_stream = streams.get_mut(&self.subnet_id).unwrap(); + let loopback_stream = streams.get_mut(&self.subnet_id).unwrap(); // 2. Garbage collect all initial messages and retain any rejected messages. let signals_end = loopback_stream.signals_end(); let reject_signals = loopback_stream.reject_signals().clone(); let rejected_messages = self.garbage_collect_messages( - &mut loopback_stream, + loopback_stream, self.subnet_id, signals_end, &reject_signals, ); // 3. Garbage collect signals for all initial messages. - self.discard_signals_before(&mut loopback_stream, signals_end); + self.discard_signals_before(loopback_stream, signals_end); // 4. Respond to rejected requests and reroute rejected responses. 
self.handle_rejected_messages( @@ -402,14 +402,14 @@ impl StreamHandlerImpl { let mut streams = state.take_streams(); for (remote_subnet, stream_slice) in stream_slices { match streams.get_mut(remote_subnet) { - Some(mut stream) => { + Some(stream) => { let rejected_messages = self.garbage_collect_messages( - &mut stream, + stream, *remote_subnet, stream_slice.header().signals_end(), stream_slice.header().reject_signals(), ); - self.garbage_collect_signals(&mut stream, *remote_subnet, stream_slice); + self.garbage_collect_signals(stream, *remote_subnet, stream_slice); if stream.reverse_stream_flags() != stream_slice.header().flags() { stream.set_reverse_stream_flags(*stream_slice.header().flags()); @@ -462,7 +462,7 @@ impl StreamHandlerImpl { /// `signals_end` are invalid (not strictly increasing). fn garbage_collect_messages( &self, - stream: &mut StreamHandle, + stream: &mut Stream, remote_subnet: SubnetId, signals_end: StreamIndex, reject_signals: &VecDeque, @@ -503,7 +503,7 @@ impl StreamHandlerImpl { /// if `stream_slice.messages.begin != stream.signals_end`. fn garbage_collect_signals( &self, - stream: &mut StreamHandle, + stream: &mut Stream, remote_subnet: SubnetId, stream_slice: &StreamSlice, ) { @@ -530,7 +530,7 @@ impl StreamHandlerImpl { } /// Wrapper around `Stream::discard_signals_before()` plus telemetry. 
- fn discard_signals_before(&self, stream: &mut StreamHandle, header_begin: StreamIndex) { + fn discard_signals_before(&self, stream: &mut Stream, header_begin: StreamIndex) { let signal_count_before = stream.reject_signals().len(); stream.discard_signals_before(header_begin); self.observe_gced_reject_signals(signal_count_before - stream.reject_signals().len()); @@ -551,13 +551,13 @@ impl StreamHandlerImpl { rejected_messages: Vec<(RejectReason, RequestOrResponse)>, remote_subnet_id: SubnetId, state: &mut ReplicatedState, - streams: &mut Streams, + streams: &mut StreamMap, available_guaranteed_response_memory: &mut i64, ) { fn reroute_response( response: RequestOrResponse, state: &ReplicatedState, - streams: &mut Streams, + streams: &mut StreamMap, log: &ReplicaLogger, ) { let new_destination = state @@ -573,7 +573,7 @@ impl StreamHandlerImpl { new_destination, response, ); - streams.get_mut_or_insert(new_destination).push(response); + streams.entry(new_destination).or_default().push(response); } for (reason, msg) in rejected_messages { @@ -674,7 +674,7 @@ impl StreamHandlerImpl { for (remote_subnet_id, mut stream_slice) in stream_slices { // Output stream, for resulting signals and (in the initial iteration) reject // `Responses`. 
- let mut stream = streams.get_mut_or_insert(remote_subnet_id); + let stream = streams.entry(remote_subnet_id).or_default(); while let Some((stream_index, msg)) = stream_slice.pop_message() { assert_eq!( @@ -688,7 +688,7 @@ impl StreamHandlerImpl { msg, remote_subnet_id, &mut state, - &mut stream, + stream, available_guaranteed_response_memory, ); } @@ -726,7 +726,7 @@ impl StreamHandlerImpl { msg: RequestOrResponse, remote_subnet_id: SubnetId, state: &mut ReplicatedState, - stream: &mut StreamHandle, + stream: &mut Stream, available_guaranteed_response_memory: &mut i64, ) { let msg_type = match msg { @@ -749,8 +749,7 @@ impl StreamHandlerImpl { if state.metadata.certification_version < CertificationVersion::V19 => { // Unable to induct a request, generate reject response and push it into `stream`. - *available_guaranteed_response_memory -= - stream.push(generate_reject_response_for(reason, &request)) as i64; + stream.push(generate_reject_response_for(reason, &request)); stream.push_accept_signal(); } Some((reason, RequestOrResponse::Request(_))) => { diff --git a/rs/messaging/src/routing/stream_handler/tests.rs b/rs/messaging/src/routing/stream_handler/tests.rs index 2d5eab55676..3b16f46c917 100644 --- a/rs/messaging/src/routing/stream_handler/tests.rs +++ b/rs/messaging/src/routing/stream_handler/tests.rs @@ -230,7 +230,6 @@ fn legacy_induct_loopback_stream_reject_response() { RejectReason::CanisterNotFound, request_in_stream(state.get_stream(&LOCAL_SUBNET), 21), ); - let reject_response_count_bytes = reject_response.count_bytes(); let mut expected_state = state.clone(); // Expecting a loopback stream with begin advanced and a reject response. @@ -252,16 +251,13 @@ fn legacy_induct_loopback_stream_reject_response() { assert_eq!(expected_state, inducted_state); - // One reject response generated. 
assert_eq!( - initial_available_guaranteed_response_memory - reject_response_count_bytes as i64, + initial_available_guaranteed_response_memory, available_guaranteed_response_memory ); - // Not equal, because the computed available memory does not account for the - // reject response (since it's from a nonexistent canister). - assert!( - stream_handler.available_guaranteed_response_memory(&inducted_state) - >= available_guaranteed_response_memory + assert_eq!( + stream_handler.available_guaranteed_response_memory(&inducted_state), + available_guaranteed_response_memory ); metrics.assert_inducted_xnet_messages_eq(&[( @@ -311,7 +307,6 @@ fn induct_loopback_stream_reroute_response() { // the request @23 is expected to trigger a reject response which is inducted // successfully. let inducted_response = message_in_stream(state.get_stream(&LOCAL_SUBNET), 22); - let inducted_response_count_bytes = inducted_response.count_bytes(); push_inputs( &mut expected_state, [ @@ -349,10 +344,8 @@ fn induct_loopback_stream_reroute_response() { .induct_loopback_stream(state, &mut available_guaranteed_response_memory); assert_eq!(expected_state, inducted_state); - // `available_guaranteed_response_memory` does not keep track of gc'ing - // the response @22 in the loopback stream after inducting it. 
assert_eq!( - available_guaranteed_response_memory + inducted_response_count_bytes as i64, + available_guaranteed_response_memory, stream_handler.available_guaranteed_response_memory(&inducted_state), ); @@ -416,7 +409,6 @@ fn legacy_induct_loopback_stream_reroute_response() { RejectReason::CanisterMigrating, request_in_stream(state.get_stream(&LOCAL_SUBNET), 23), ); - let reject_response_count_bytes = reject_response.count_bytes(); let loopback_stream = stream_from_config(StreamConfig { begin: 25, messages: vec![reject_response], @@ -448,22 +440,18 @@ fn legacy_induct_loopback_stream_reroute_response() { assert_eq!(expected_state, inducted_state); - // One request inducted, one response inducted and one reject response produced. + // One request inducted and one response inducted. assert_eq!( initial_available_guaranteed_response_memory // Inducting a request triggers a new reservation in the output queue. - MAX_RESPONSE_COUNT_BYTES as i64 // Inducting a response uses memory for the response, but also frees a reservation. - - (inducted_response_count_bytes as i64 - MAX_RESPONSE_COUNT_BYTES as i64) - // The reject response is in the loopback stream, i.e. no reservation is freed. - - reject_response_count_bytes as i64, + - (inducted_response_count_bytes as i64 - MAX_RESPONSE_COUNT_BYTES as i64), available_guaranteed_response_memory, ); - // Not equal, because the computed available memory does not account for the - // reject response (since it's from a canister no longer hosted by the subnet). 
- assert!( - stream_handler.available_guaranteed_response_memory(&inducted_state) - >= available_guaranteed_response_memory + assert_eq!( + stream_handler.available_guaranteed_response_memory(&inducted_state), + available_guaranteed_response_memory ); metrics.assert_inducted_xnet_messages_eq(&[ @@ -502,7 +490,6 @@ fn induct_loopback_stream_success() { &mut expected_state, messages_in_stream(loopback_stream, 21..=22), ); - let response_count_bytes = response_in_stream(loopback_stream, 22).count_bytes(); // The loopback stream should be empty with `begin` and `signals_end` advanced. let loopback_stream = stream_from_config(StreamConfig { @@ -518,11 +505,9 @@ fn induct_loopback_stream_success() { .induct_loopback_stream(state, &mut available_guaranteed_response_memory); assert_eq!(expected_state, inducted_state); - // `available_guaranteed_response_memory` is a lower bound as it doesn't include garbage - // collecting responses from streams, therefore it is off by `response_count_bytes`. assert_eq!( stream_handler.available_guaranteed_response_memory(&inducted_state), - available_guaranteed_response_memory + response_count_bytes as i64, + available_guaranteed_response_memory, ); metrics.assert_inducted_xnet_messages_eq(&[ @@ -592,7 +577,9 @@ fn legacy_induct_loopback_stream_with_zero_subnet_wasm_custom_sections_limit() { fn system_subnet_induct_loopback_stream_ignores_canister_memory_limit() { // A stream handler with a canister memory limit that only allows up to 3 reservations. induct_loopback_stream_ignores_memory_limit_impl(HypervisorConfig { - max_canister_memory_size: NumBytes::new(MAX_RESPONSE_COUNT_BYTES as u64 * 7 / 2), + max_canister_memory_size_wasm32: NumBytes::new(MAX_RESPONSE_COUNT_BYTES as u64 * 7 / 2), + // For consistency reasons in case this test is run against Wasm64 canisters. 
+ max_canister_memory_size_wasm64: NumBytes::new(MAX_RESPONSE_COUNT_BYTES as u64 * 7 / 2), ..Default::default() }); } @@ -858,7 +845,7 @@ fn garbage_collect_messages_success() { let slice = slices.get(&REMOTE_SUBNET).unwrap(); let rejected_messages = stream_handler.garbage_collect_messages( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slice.header().signals_end(), slice.header().reject_signals(), @@ -927,7 +914,7 @@ fn garbage_collect_messages_with_reject_signals_success() { let slice = slices.get(&REMOTE_SUBNET).unwrap(); let rejected_messages = stream_handler.garbage_collect_messages( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slice.header().signals_end(), slice.header().reject_signals(), @@ -984,7 +971,7 @@ fn garbage_collect_signals_success() { }); stream_handler.garbage_collect_signals( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slices.get(&REMOTE_SUBNET).unwrap(), ); @@ -1029,7 +1016,7 @@ fn garbage_collect_signals_in_wrong_order() { let mut streams = state.take_streams(); stream_handler.garbage_collect_signals( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slices.get(&REMOTE_SUBNET).unwrap(), ); @@ -1071,7 +1058,7 @@ fn garbage_collect_signals_with_invalid_slice_messages() { let mut streams = state.take_streams(); stream_handler.garbage_collect_signals( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slices.get(&REMOTE_SUBNET).unwrap(), ); @@ -1109,7 +1096,7 @@ fn garbage_collect_signals_with_invalid_empty_slice() { let mut streams = state.take_streams(); stream_handler.garbage_collect_signals( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slices.get(&REMOTE_SUBNET).unwrap(), ); @@ 
-1151,7 +1138,7 @@ fn assert_garbage_collect_messages_last_signal_before_first_message() { let slice = slices.get(&REMOTE_SUBNET).unwrap(); stream_handler.garbage_collect_messages( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slice.header().signals_end(), slice.header().reject_signals(), @@ -1193,7 +1180,7 @@ fn assert_garbage_collect_messages_last_signal_after_last_message() { let slice = slices.get(&REMOTE_SUBNET).unwrap(); stream_handler.garbage_collect_messages( - &mut streams.get_mut(&REMOTE_SUBNET).unwrap(), + streams.get_mut(&REMOTE_SUBNET).unwrap(), REMOTE_SUBNET, slice.header().signals_end(), slice.header().reject_signals(), @@ -1286,7 +1273,6 @@ fn garbage_collect_local_state_success() { let mut expected_state = state.clone(); // The expected stream has the first two messages gc'ed and the stream flags set. let outgoing_stream = state.get_stream(&REMOTE_SUBNET); - let response_count_bytes = response_in_stream(outgoing_stream, 32).count_bytes(); let expected_stream = stream_from_config(StreamConfig { begin: 33, messages: vec![message_in_stream(outgoing_stream, 33).clone()], @@ -1304,11 +1290,9 @@ fn garbage_collect_local_state_success() { stream_handler.garbage_collect_local_state(state, &mut (i64::MAX / 2), &slices); assert_eq!(pruned_state, expected_state); - // `available_guaranteed_response_memory` is a lower bound as it doesn't include garbage - // collecting responses from streams, therefore it is off by `response_count_bytes`. 
assert_eq!( stream_handler.available_guaranteed_response_memory(&pruned_state), - initial_available_guaranteed_response_memory + response_count_bytes as i64, + initial_available_guaranteed_response_memory, ); assert_eq!( @@ -1422,7 +1406,7 @@ fn garbage_collect_local_state_with_illegal_reject_signal_for_response_success() ); } -/// Tests tha tan incoming reject signal for a request from `LOCAL_CANISTER` in the stream to +/// Tests that an incoming reject signal for a request from `LOCAL_CANISTER` in the stream to /// `REMOTE_SUBNET` triggers locally generating and successfully inducting a corresponding /// reject response. #[test] @@ -2349,11 +2333,9 @@ fn induct_stream_slices_partial_success() { - response_count_bytes as i64, available_guaranteed_response_memory ); - // Not equal, because the computed available memory does not account for the - // reject response (since it's from a nonexistent canister). - assert!( - stream_handler.available_guaranteed_response_memory(&inducted_state) - >= available_guaranteed_response_memory + assert_eq!( + stream_handler.available_guaranteed_response_memory(&inducted_state), + available_guaranteed_response_memory ); metrics.assert_inducted_xnet_messages_eq(&[ @@ -2440,7 +2422,6 @@ fn legacy_induct_stream_slices_partial_success() { RejectReason::CanisterNotFound, request_in_slice(slices.get(&REMOTE_SUBNET), 46), ); - let reject_response_count_bytes = reject_response.count_bytes(); // The expected stream has... let expected_stream = stream_from_config(StreamConfig { @@ -2471,19 +2452,16 @@ fn legacy_induct_stream_slices_partial_success() { ); assert_eq!(expected_state, inducted_state); - // 2 requests and one response inducted (consuming 2 - 1 reservations); one reject response enqueued. + // 2 requests and one response inducted (consuming 2 - 1 reservations). 
assert_eq!( initial_available_guaranteed_response_memory - MAX_RESPONSE_COUNT_BYTES as i64 - - response_count_bytes as i64 - - reject_response_count_bytes as i64, + - response_count_bytes as i64, available_guaranteed_response_memory ); - // Not equal, because the computed available memory does not account for the - // reject response (since it's from a nonexistent canister). - assert!( - stream_handler.available_guaranteed_response_memory(&inducted_state) - >= available_guaranteed_response_memory + assert_eq!( + stream_handler.available_guaranteed_response_memory(&inducted_state), + available_guaranteed_response_memory ); metrics.assert_inducted_xnet_messages_eq(&[ @@ -2742,7 +2720,6 @@ fn legacy_induct_stream_slices_with_messages_to_migrating_canister() { RejectReason::CanisterMigrating, request_in_slice(slices.get(&REMOTE_SUBNET), 43), ); - let reject_response_count_bytes = reject_response.count_bytes(); let mut expected_state = state.clone(); // Expecting a stream with... @@ -2781,16 +2758,13 @@ fn legacy_induct_stream_slices_with_messages_to_migrating_canister() { assert_eq!(expected_state, inducted_state); - // One reject response enqueued. assert_eq!( - initial_available_guaranteed_response_memory - reject_response_count_bytes as i64, + initial_available_guaranteed_response_memory, available_guaranteed_response_memory ); - // Not equal, because the computed available memory does not account for the - // reject response (since it's from a canister not yet hosted by the subnet). 
- assert!( - stream_handler.available_guaranteed_response_memory(&inducted_state) - >= available_guaranteed_response_memory + assert_eq!( + stream_handler.available_guaranteed_response_memory(&inducted_state), + available_guaranteed_response_memory ); metrics.assert_inducted_xnet_messages_eq(&[ (LABEL_VALUE_TYPE_REQUEST, LABEL_VALUE_CANISTER_MIGRATED, 1), @@ -2926,7 +2900,6 @@ fn legacy_induct_stream_slices_with_messages_to_migrated_canister() { RejectReason::CanisterMigrating, request_in_slice(slices.get(&REMOTE_SUBNET), 43), ); - let reject_response_count_bytes = reject_response.count_bytes(); let mut expected_state = state.clone(); // Expecting a stream with... @@ -2965,16 +2938,13 @@ fn legacy_induct_stream_slices_with_messages_to_migrated_canister() { assert_eq!(expected_state, inducted_state); - // One reject response enqueued. assert_eq!( - initial_available_guaranteed_response_memory - reject_response_count_bytes as i64, + initial_available_guaranteed_response_memory, available_guaranteed_response_memory ); - // Not equal, because the computed available memory does not account for the - // reject response (since it's from a canister no longer hosted by the subnet). - assert!( - stream_handler.available_guaranteed_response_memory(&inducted_state) - >= available_guaranteed_response_memory + assert_eq!( + stream_handler.available_guaranteed_response_memory(&inducted_state), + available_guaranteed_response_memory ); metrics.assert_inducted_xnet_messages_eq(&[ (LABEL_VALUE_TYPE_REQUEST, LABEL_VALUE_CANISTER_MIGRATED, 1), @@ -4249,19 +4219,6 @@ fn request_in_slice( } } -/// Returns a reference to a response in a stream at `stream_index`. -/// -/// Panics if no such response exists. 
-fn response_in_stream( - opt_stream: Option<&Stream>, - stream_index: u64, -) -> &ic_types::messages::Response { - match opt_stream.and_then(|stream| stream.messages().get(stream_index.into())) { - Some(RequestOrResponse::Response(response)) => response, - _ => unreachable!(), - } -} - /// Returns a reference to a response in the stream slice at `stream_index`. /// /// Panics if no such response exists. diff --git a/rs/messaging/tests/queue_tests.rs b/rs/messaging/tests/queue_tests.rs index a800f92b97f..2107d111210 100644 --- a/rs/messaging/tests/queue_tests.rs +++ b/rs/messaging/tests/queue_tests.rs @@ -204,7 +204,7 @@ impl SubnetPairProxy { do_until_or_panic(MAX_TICKS, |_| { let exit_condition = self .local_output_queue_snapshot() - .map_or(false, |q| q.len() >= min_num_messages); + .is_some_and(|q| q.len() >= min_num_messages); if !exit_condition { self.local_env.tick(); } diff --git a/rs/monitoring/metrics/src/histogram_vec_timer.rs b/rs/monitoring/metrics/src/histogram_vec_timer.rs index b54d6a5affa..23eb54ec4e9 100644 --- a/rs/monitoring/metrics/src/histogram_vec_timer.rs +++ b/rs/monitoring/metrics/src/histogram_vec_timer.rs @@ -72,7 +72,7 @@ impl<'a, const LABEL_COUNT: usize> HistogramVecTimer<'a, LABEL_COUNT> { } } -impl<'a, const LABEL_COUNT: usize> Drop for HistogramVecTimer<'a, LABEL_COUNT> { +impl Drop for HistogramVecTimer<'_, LABEL_COUNT> { fn drop(&mut self) { self.hist .with_label_values(self.label_values()) diff --git a/rs/nervous_system/agent/src/pocketic_impl.rs b/rs/nervous_system/agent/src/pocketic_impl.rs index a9df27e53f8..b1da05081a1 100644 --- a/rs/nervous_system/agent/src/pocketic_impl.rs +++ b/rs/nervous_system/agent/src/pocketic_impl.rs @@ -5,6 +5,20 @@ use thiserror::Error; use crate::CallCanisters; +/// A wrapper around PocketIc that specifies a sender for the requests. +/// The name is an analogy for `ic_agent::Agent`, since each `ic_agent::Agent` specifies a sender. 
+pub struct PocketIcAgent<'a> { + pub pocket_ic: &'a PocketIc, + pub sender: Principal, +} + +impl<'a> PocketIcAgent<'a> { + pub fn new(pocket_ic: &'a PocketIc, sender: impl Into) -> Self { + let sender = sender.into(); + Self { pocket_ic, sender } + } +} + #[derive(Error, Debug)] pub enum PocketIcCallError { #[error("pocket_ic error: {0}")] @@ -16,8 +30,9 @@ pub enum PocketIcCallError { } impl crate::sealed::Sealed for PocketIc {} +impl crate::sealed::Sealed for PocketIcAgent<'_> {} -impl CallCanisters for PocketIc { +impl CallCanisters for PocketIcAgent<'_> { type Error = PocketIcCallError; async fn call( &self, @@ -27,24 +42,29 @@ impl CallCanisters for PocketIc { let canister_id = canister_id.into(); let request_bytes = request.payload().map_err(PocketIcCallError::CandidEncode)?; let response = if request.update() { - self.update_call( - canister_id, - Principal::anonymous(), - request.method(), - request_bytes, - ) - .await + self.pocket_ic + .update_call(canister_id, self.sender, request.method(), request_bytes) + .await } else { - self.query_call( - canister_id, - Principal::anonymous(), - request.method(), - request_bytes, - ) - .await + self.pocket_ic + .query_call(canister_id, self.sender, request.method(), request_bytes) + .await } .map_err(PocketIcCallError::PocketIc)?; candid::decode_one(response.as_slice()).map_err(PocketIcCallError::CandidDecode) } } + +impl CallCanisters for PocketIc { + type Error = PocketIcCallError; + async fn call( + &self, + canister_id: impl Into + Send, + request: R, + ) -> Result { + PocketIcAgent::new(self, Principal::anonymous()) + .call(canister_id, request) + .await + } +} diff --git a/rs/nervous_system/agent/src/sns/governance.rs b/rs/nervous_system/agent/src/sns/governance.rs index fff7c8de5ad..78fb5258886 100644 --- a/rs/nervous_system/agent/src/sns/governance.rs +++ b/rs/nervous_system/agent/src/sns/governance.rs @@ -1,16 +1,28 @@ use crate::{null_request::NullRequest, CallCanisters}; use ic_base_types::PrincipalId; 
use ic_sns_governance::pb::v1::{ - GetMetadataRequest, GetMetadataResponse, GetMode, GetModeResponse, GetRunningSnsVersionRequest, - GetRunningSnsVersionResponse, NervousSystemParameters, + manage_neuron, manage_neuron_response, GetMetadataRequest, GetMetadataResponse, GetMode, + GetModeResponse, GetRunningSnsVersionRequest, GetRunningSnsVersionResponse, GovernanceError, + ManageNeuron, ManageNeuronResponse, NervousSystemParameters, NeuronId, Proposal, ProposalId, }; use serde::{Deserialize, Serialize}; +use std::error::Error; #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct GovernanceCanister { pub canister_id: PrincipalId, } +#[derive(Debug, thiserror::Error)] +pub enum SubmitProposalError { + #[error("Failed to call SNS Governance")] + CallGovernanceError(#[source] C), + #[error("SNS Governance returned an error")] + GovernanceError(#[source] GovernanceError), + #[error("SNS Governance did not confirm that the proposal was made: {0:?}")] + ProposalNotMade(ManageNeuronResponse), +} + impl GovernanceCanister { pub async fn metadata( &self, @@ -39,6 +51,51 @@ impl GovernanceCanister { let request = NullRequest::new("get_nervous_system_parameters", false); agent.call(self.canister_id, request).await } + + pub async fn manage_neuron( + &self, + agent: &C, + neuron_id: NeuronId, + command: manage_neuron::Command, + ) -> Result { + let subaccount = neuron_id + .subaccount() + .expect("Valid SNS neuron IDs should be ICRC1 sub-accounts.") + .to_vec(); + let request = ManageNeuron { + subaccount, + command: Some(command), + }; + agent.call(self.canister_id, request).await + } + + pub async fn submit_proposal( + &self, + agent: &C, + neuron_id: NeuronId, + proposal: Proposal, + ) -> Result> { + let response = self + .manage_neuron( + agent, + neuron_id, + manage_neuron::Command::MakeProposal(proposal), + ) + .await + .map_err(SubmitProposalError::CallGovernanceError)?; + + match response.command { + Some(manage_neuron_response::Command::MakeProposal( + 
manage_neuron_response::MakeProposalResponse { + proposal_id: Some(proposal_id), + }, + )) => Ok(proposal_id), + Some(manage_neuron_response::Command::Error(e)) => { + Err(SubmitProposalError::GovernanceError(e)) + } + _ => Err(SubmitProposalError::ProposalNotMade(response)), + } + } } impl GovernanceCanister { diff --git a/rs/nervous_system/candid_utils/BUILD.bazel b/rs/nervous_system/candid_utils/BUILD.bazel new file mode 100644 index 00000000000..18e9d7cdd31 --- /dev/null +++ b/rs/nervous_system/candid_utils/BUILD.bazel @@ -0,0 +1,46 @@ +load("@rules_rust//rust:defs.bzl", "rust_library", "rust_test") + +package(default_visibility = ["//visibility:public"]) + +# See rs/nervous_system/feature_test.md +DEPENDENCIES = [ + # Keep sorted. + "@crate_index//:candid", + "@crate_index//:candid_parser", +] + +MACRO_DEPENDENCIES = [] + +DEV_DEPENDENCIES = [] + +MACRO_DEV_DEPENDENCIES = [] + +ALIASES = {} + +rust_library( + name = "candid_utils", + srcs = glob( + ["src/**/*.rs"], + exclude = [ + "**/*tests.rs", + ], + ), + aliases = ALIASES, + crate_name = "candid_utils", + proc_macro_deps = MACRO_DEPENDENCIES, + version = "1.0.0", + deps = DEPENDENCIES, +) + +rust_test( + name = "candid_utils_test", + srcs = glob( + ["src/**"], + ), + aliases = ALIASES, + crate_root = "src/lib.rs", + data = [], + env = {}, + proc_macro_deps = MACRO_DEPENDENCIES + MACRO_DEV_DEPENDENCIES, + deps = DEPENDENCIES + DEV_DEPENDENCIES, +) diff --git a/rs/nervous_system/candid_utils/Cargo.toml b/rs/nervous_system/candid_utils/Cargo.toml new file mode 100644 index 00000000000..6ea81ec4a4b --- /dev/null +++ b/rs/nervous_system/candid_utils/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "candid-utils" +version.workspace = true +authors.workspace = true +edition.workspace = true +description.workspace = true +documentation.workspace = true + +[lib] +path = "src/lib.rs" + +[dependencies] +candid = { workspace = true } +candid_parser = { workspace = true } diff --git 
a/rs/nervous_system/candid_utils/src/lib.rs b/rs/nervous_system/candid_utils/src/lib.rs new file mode 100644 index 00000000000..8695201df0e --- /dev/null +++ b/rs/nervous_system/candid_utils/src/lib.rs @@ -0,0 +1 @@ +pub mod validation; diff --git a/rs/nervous_system/candid_utils/src/validation.rs b/rs/nervous_system/candid_utils/src/validation.rs new file mode 100644 index 00000000000..0a2e6ee2ac7 --- /dev/null +++ b/rs/nervous_system/candid_utils/src/validation.rs @@ -0,0 +1,135 @@ +use candid::types::{ + subtype::{subtype_with_config, OptReport}, + Type, +}; +use candid_parser::{ + parse_idl_args, + utils::{instantiate_candid, CandidSource}, +}; + +fn fmt_type_vec(types: &[Type]) -> String { + let tab = " ".repeat(4); + let types_str = if types.is_empty() { + "// ".to_string() + } else { + types + .iter() + .map(|typ| typ.to_string()) + .collect::>() + .join(&format!(",\n{tab}")) + }; + format!("```candid\n(\n{tab}{types_str}\n)\n```\n") +} + +#[derive(Debug)] +pub enum CandidServiceArgValidationError { + BadService(String), + ArgsParseError(String), + WrongArgumentCount(String), + SubtypingErrors(String), + ArgsSerializationError(String), +} + +impl PartialEq for CandidServiceArgValidationError { + fn eq(&self, other: &Self) -> bool { + matches!( + (self, other), + (Self::BadService(_), Self::BadService(_)) + | (Self::ArgsParseError(_), Self::ArgsParseError(_)) + | (Self::WrongArgumentCount(_), Self::WrongArgumentCount(_)) + | (Self::SubtypingErrors(_), Self::SubtypingErrors(_)) + | ( + Self::ArgsSerializationError(_), + Self::ArgsSerializationError(_) + ) + ) + } +} + +impl CandidServiceArgValidationError { + fn deconstruct(&self) -> (String, String) { + match self { + Self::BadService(err) => ("BadService".to_string(), err.clone()), + Self::ArgsParseError(err) => ("ArgsParseError".to_string(), err.clone()), + Self::WrongArgumentCount(err) => ("WrongArgumentCount".to_string(), err.clone()), + Self::SubtypingErrors(err) => ("SubtypingErrors".to_string(), 
err.clone()), + Self::ArgsSerializationError(err) => { + ("ArgsSerializationError".to_string(), err.clone()) + } + } + } + + pub fn message(&self) -> String { + let (_, msg) = self.deconstruct(); + msg + } + + pub fn kind(&self) -> String { + let (kind, _) = self.deconstruct(); + kind + } +} + +/// Checks whether `upgrade_args` is a valid argument sequence for `candid_service`. +/// +/// Returns the byte encoding of `upgrade_args` in the successful case. +pub fn validate_upgrade_args( + candid_service: String, + upgrade_args: String, +) -> Result, CandidServiceArgValidationError> { + let (expected_args_types, (env, _)) = + instantiate_candid(CandidSource::Text(&candid_service)) + .map_err(|err| CandidServiceArgValidationError::BadService(format!("{err:?}")))?; + + let upgrade_args = parse_idl_args(&upgrade_args) + .map_err(|err| CandidServiceArgValidationError::ArgsParseError(format!("{err:?}")))?; + + let args_types = upgrade_args.get_types(); + + if args_types.len() != expected_args_types.len() { + return Err(CandidServiceArgValidationError::WrongArgumentCount( + format!( + "Number of specified upgrade arguments ({}) does not match expected number \ + of arguments for the target canister ({}).", + args_types.len(), + expected_args_types.len(), + ), + )); + } + + let mut gamma = std::collections::HashSet::new(); + + let subtyping_subresults = args_types + .iter() + .zip(expected_args_types.iter()) + .map(|(observed_type, expected_type)| { + subtype_with_config( + OptReport::Error, + &mut gamma, + &env, + observed_type, + expected_type, + ) + .map_err(|err| format!("{err:?}")) + }) + .collect::>(); + + if subtyping_subresults != vec![Ok(()); subtyping_subresults.len()] { + return Err(CandidServiceArgValidationError::SubtypingErrors(format!( + "Specified upgrade arguments have types:\n{}\ + that are not subtypes of the Candid service arguments' types:\n{}\n\ + {subtyping_subresults:#?}", + fmt_type_vec(&args_types), + fmt_type_vec(&expected_args_types), + ))); + } + 
+ let upgrade_args = upgrade_args.to_bytes().map_err(|err| { + CandidServiceArgValidationError::ArgsSerializationError(format!("{err:?}")) + })?; + + Ok(upgrade_args) +} + +#[cfg(test)] +mod tests; diff --git a/rs/nervous_system/candid_utils/src/validation/tests.rs b/rs/nervous_system/candid_utils/src/validation/tests.rs new file mode 100644 index 00000000000..c9630ce3df5 --- /dev/null +++ b/rs/nervous_system/candid_utils/src/validation/tests.rs @@ -0,0 +1,148 @@ +use super::*; + +#[test] +fn test_candid_service_arg_validation() { + let complex_service = r#" + type List = opt record { head: int; tail: List }; + type byte = nat8; + service : (x : record { foo : opt record {} }, y : nat32) -> { + f : (byte, int, nat, int8) -> (List); + g : (List) -> (int) query; + } + "#; + let dummy_error_text = "dummy_error_text".to_string(); + + for (label, candid_service, upgrade_arg, expected_result) in [ + ( + "Service without args", + r#" + service : { + g : () -> (int) query; + } + "#, + "()", + Ok(()), + ), + ( + "Invalid service (unbound type List)", + r#" + service : () -> { + f : (byte, int, nat, int8) -> (List); + g : (List) -> (int) query; + } + "#, + "()", + Err(CandidServiceArgValidationError::BadService( + dummy_error_text.clone(), + )), + ), + ( + "Arg is an empty string", + r#" + type List = opt record { head: int; tail: List }; + type byte = nat8; + service : (x : record { foo : opt record {} }, y : nat32) -> { + g : () -> (int) query; + } + "#, + "", + Err(CandidServiceArgValidationError::ArgsParseError( + dummy_error_text.clone(), + )), + ), + ( + "Complex service with two arguments (happy)", + complex_service, + "(record {}, (11 : nat32))", + Ok(()), + ), + ( + "Complex service with two arguments (missing 1st arg)", + complex_service, + "((11 : nat32))", + Err(CandidServiceArgValidationError::WrongArgumentCount( + dummy_error_text.clone(), + )), + ), + ( + "Complex service with two arguments (missing 2nd arg)", + complex_service, + "(record {})", + 
Err(CandidServiceArgValidationError::WrongArgumentCount( + dummy_error_text.clone(), + )), + ), + ( + "Complex service with two arguments (missing both args)", + complex_service, + "()", + Err(CandidServiceArgValidationError::WrongArgumentCount( + dummy_error_text.clone(), + )), + ), + ( + "Trivial service with two arguments (wrong arg order)", + r#" + service : (x : record { foo : opt record {} }, y : nat32) -> { + g : () -> (int) query; + } + "#, + "((11 : nat32), record {})", + Err(CandidServiceArgValidationError::SubtypingErrors( + dummy_error_text.clone(), + )), + ), + ( + "Trivial service with one record argument (subtyping holds)", + r#" + service : (x : record { foo : opt nat; bar : opt nat }) -> { + g : () -> (int) query; + } + "#, + "(record { foobar = opt (1984 : nat); foo = opt (42 : nat) })", + Ok(()), + ), + ( + "Trivial service with one record argument (missing required field)", + r#" + service : (record { foo : nat; }) -> { + g : () -> (int) query; + } + "#, + "(record { bar = (1984 : nat) })", + Err(CandidServiceArgValidationError::SubtypingErrors( + dummy_error_text, + )), + ), + ] { + let observed_result = + validate_upgrade_args(candid_service.to_string(), upgrade_arg.to_string()); + + match (observed_result, expected_result) { + (Ok(_), Ok(())) => (), + (Err(observed_err), Err(expected_err)) => { + if observed_err != expected_err { + panic!( + "Test `{label}` failed unexpectedly. 
Expected {}, observed {}:\n{}", + expected_err.kind(), + observed_err.kind(), + observed_err.message(), + ); + } + } + (Err(observed_err), Ok(())) => { + println!("{}", observed_err.message()); + panic!( + "Test `{label}` FAILED with {}, although it is expected to succeed.", + observed_err.kind() + ); + } + (Ok(_), Err(expected_err)) => { + panic!( + "Test `{label}` SUCCEEDED, although it is expected to fail with {}.", + expected_err.kind() + ); + } + } + } +} diff --git a/rs/nervous_system/common/src/memory_manager_upgrade_storage.rs b/rs/nervous_system/common/src/memory_manager_upgrade_storage.rs index 2b2af6c8f47..4d4a92df5dd 100644 --- a/rs/nervous_system/common/src/memory_manager_upgrade_storage.rs +++ b/rs/nervous_system/common/src/memory_manager_upgrade_storage.rs @@ -190,7 +190,7 @@ impl<'a, M: Memory> SizeAwareWriter<'a, M> { } // Unsafe implementation required by BufMut -unsafe impl<'a, M: Memory> BufMut for SizeAwareWriter<'a, M> { +unsafe impl BufMut for SizeAwareWriter<'_, M> { fn remaining_mut(&self) -> usize { // This function needs to return the number of bytes that can be written, not just to the // internal buffer, but to the underlying memory. 
@@ -304,7 +304,7 @@ impl<'a, M: Memory> SizeAwareReader<'a, M> { } } -impl<'a, M: Memory> Buf for SizeAwareReader<'a, M> { +impl Buf for SizeAwareReader<'_, M> { fn remaining(&self) -> usize { // Our implementation only reads from stable memory up until the size indicated by size bytes let total_written_memory = self.memory_used(); diff --git a/rs/nervous_system/integration_tests/src/lib.rs b/rs/nervous_system/integration_tests/src/lib.rs index 2172f1953ae..c652b850805 100644 --- a/rs/nervous_system/integration_tests/src/lib.rs +++ b/rs/nervous_system/integration_tests/src/lib.rs @@ -35,10 +35,14 @@ impl SectionTimer { impl Drop for SectionTimer { fn drop(&mut self) { - eprintln!( - "Executed `{}` in {:?}", - self.name, - self.start_time.elapsed() - ); + if std::thread::panicking() { + eprintln!("Panicked during `{}`", self.name); + } else { + eprintln!( + "Executed `{}` in {:?}", + self.name, + self.start_time.elapsed() + ); + } } } diff --git a/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs b/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs index ed173b1eecc..41b16553571 100644 --- a/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs +++ b/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs @@ -4,7 +4,7 @@ use futures::stream; use futures::StreamExt; use ic_base_types::{CanisterId, PrincipalId, SubnetId}; use ic_ledger_core::Tokens; -use ic_nervous_system_agent::pocketic_impl::PocketIcCallError; +use ic_nervous_system_agent::pocketic_impl::{PocketIcAgent, PocketIcCallError}; use ic_nervous_system_agent::sns::Sns; use ic_nervous_system_agent::CallCanisters; use ic_nervous_system_common::{E8, ONE_DAY_SECONDS}; @@ -1365,7 +1365,7 @@ pub mod sns { use super::*; use assert_matches::assert_matches; use ic_crypto_sha2::Sha256; - use ic_nervous_system_agent::sns::governance::GovernanceCanister; + use ic_nervous_system_agent::sns::governance::{GovernanceCanister, SubmitProposalError}; use 
ic_sns_governance::governance::UPGRADE_STEPS_INTERVAL_REFRESH_BACKOFF_SECONDS; use ic_sns_governance::pb::v1::get_neuron_response; use pocket_ic::ErrorCode; @@ -1425,26 +1425,16 @@ pub mod sns { neuron_id: sns_pb::NeuronId, proposal: sns_pb::Proposal, ) -> Result { - let response = manage_neuron( - pocket_ic, - canister_id, - sender, - neuron_id, - sns_pb::manage_neuron::Command::MakeProposal(proposal), - ) - .await; - use sns_pb::manage_neuron_response::Command; - let response = match response.command { - Some(Command::MakeProposal(response)) => Ok(response), - Some(Command::Error(err)) => Err(err), - _ => panic!("Proposal failed unexpectedly: {:#?}", response), - }?; - let proposal_id = response.proposal_id.unwrap_or_else(|| { - panic!( - "First SNS proposal response did not contain a proposal_id: {:#?}", - response - ) - }); + let agent = PocketIcAgent::new(pocket_ic, sender); + let governance = GovernanceCanister::new(canister_id); + let proposal_id = governance + .submit_proposal(&agent, neuron_id, proposal) + .await + .map_err(|err| match err { + SubmitProposalError::GovernanceError(e) => e, + e => panic!("Unexpected error: {e}"), + })?; + wait_for_proposal_execution(pocket_ic, canister_id, proposal_id).await } diff --git a/rs/nervous_system/integration_tests/tests/sns_release_qualification.rs b/rs/nervous_system/integration_tests/tests/sns_release_qualification.rs index c6900c233af..5a771b2cd16 100644 --- a/rs/nervous_system/integration_tests/tests/sns_release_qualification.rs +++ b/rs/nervous_system/integration_tests/tests/sns_release_qualification.rs @@ -31,7 +31,7 @@ use sns_upgrade_test_utils::test_sns_upgrade; /// since there is sometimes a dependency between them, so we test them in both orders. /// /// Note: FI canisters are considered fully tested elsewhere, and have stable APIs. 
- +/// /// Deployment tests #[tokio::test] diff --git a/rs/nns/governance/canbench/canbench_results.yml b/rs/nns/governance/canbench/canbench_results.yml index 98282e4d781..34b92de91ce 100644 --- a/rs/nns/governance/canbench/canbench_results.yml +++ b/rs/nns/governance/canbench/canbench_results.yml @@ -1,109 +1,109 @@ benches: add_neuron_active_maximum: total: - instructions: 42559874 + instructions: 42470580 heap_increase: 1 stable_memory_increase: 0 scopes: {} add_neuron_active_typical: total: - instructions: 2160611 + instructions: 2156095 heap_increase: 0 stable_memory_increase: 0 scopes: {} add_neuron_inactive_maximum: total: - instructions: 112126649 + instructions: 111969854 heap_increase: 1 stable_memory_increase: 0 scopes: {} add_neuron_inactive_typical: total: - instructions: 8465215 + instructions: 8450397 heap_increase: 0 stable_memory_increase: 0 scopes: {} cascading_vote_all_heap: total: - instructions: 35565672 + instructions: 34764213 heap_increase: 0 stable_memory_increase: 128 scopes: {} cascading_vote_heap_neurons_stable_index: total: - instructions: 61721390 + instructions: 60897882 heap_increase: 0 stable_memory_increase: 128 scopes: {} cascading_vote_stable_everything: total: - instructions: 188861073 + instructions: 188426120 heap_increase: 0 stable_memory_increase: 128 scopes: {} cascading_vote_stable_neurons_with_heap_index: total: - instructions: 162571959 + instructions: 162159055 heap_increase: 0 stable_memory_increase: 128 scopes: {} centralized_following_all_stable: total: - instructions: 78285442 + instructions: 78085489 heap_increase: 0 stable_memory_increase: 128 scopes: {} compute_ballots_for_new_proposal_with_stable_neurons: total: - instructions: 2197226 + instructions: 2152483 heap_increase: 0 stable_memory_increase: 0 scopes: {} draw_maturity_from_neurons_fund_heap: total: - instructions: 7549569 + instructions: 7455504 heap_increase: 0 stable_memory_increase: 0 scopes: {} draw_maturity_from_neurons_fund_stable: total: - 
instructions: 12303773 + instructions: 12275909 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_active_neurons_fund_neurons_heap: total: - instructions: 436248 + instructions: 424238 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_active_neurons_fund_neurons_stable: total: - instructions: 2755320 + instructions: 2742494 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_neurons_heap: total: - instructions: 4700995 + instructions: 4717287 heap_increase: 9 stable_memory_increase: 0 scopes: {} list_neurons_ready_to_unstake_maturity_heap: total: - instructions: 158257 + instructions: 158253 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_neurons_ready_to_unstake_maturity_stable: total: - instructions: 41326673 + instructions: 41328019 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_neurons_stable: total: - instructions: 113166221 + instructions: 113419930 heap_increase: 5 stable_memory_increase: 0 scopes: {} @@ -115,49 +115,49 @@ benches: scopes: {} list_ready_to_spawn_neuron_ids_stable: total: - instructions: 41298569 + instructions: 41299922 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_data_validation_heap: total: - instructions: 407871691 + instructions: 406681837 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_data_validation_stable: total: - instructions: 363603468 + instructions: 362505461 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_metrics_calculation_heap: total: - instructions: 1457674 + instructions: 1471252 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_metrics_calculation_stable: total: - instructions: 2982336 + instructions: 2994858 heap_increase: 0 stable_memory_increase: 0 scopes: {} range_neurons_performance: total: - instructions: 56494134 + instructions: 56426715 heap_increase: 0 stable_memory_increase: 0 scopes: {} single_vote_all_stable: total: - instructions: 2802680 + instructions: 2801395 heap_increase: 0 stable_memory_increase: 128 
scopes: {} update_recent_ballots_stable_memory: total: - instructions: 273777 + instructions: 273152 heap_increase: 0 stable_memory_increase: 0 scopes: {} diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs index a4add42fe8a..52c74392d00 100644 --- a/rs/nns/governance/src/governance.rs +++ b/rs/nns/governance/src/governance.rs @@ -1057,7 +1057,7 @@ impl Proposal { fn allowed_when_resources_are_low(&self) -> bool { self.action .as_ref() - .map_or(false, |a| a.allowed_when_resources_are_low()) + .is_some_and(|a| a.allowed_when_resources_are_low()) } fn omit_large_fields(self) -> Self { @@ -1177,7 +1177,7 @@ impl ProposalData { pub fn is_manage_neuron(&self) -> bool { self.proposal .as_ref() - .map_or(false, Proposal::is_manage_neuron) + .is_some_and(Proposal::is_manage_neuron) } pub fn reward_status( @@ -5308,6 +5308,8 @@ impl Governance { } fn validate_proposal(&self, proposal: &Proposal) -> Result { + // TODO: Jira ticket NNS1-3555 + #[allow(non_local_definitions)] impl From for GovernanceError { fn from(message: String) -> Self { Self::new_with_message(ErrorType::InvalidProposal, message) diff --git a/rs/nns/governance/src/known_neuron_index.rs b/rs/nns/governance/src/known_neuron_index.rs index bc22e08b3d1..15c52982e92 100644 --- a/rs/nns/governance/src/known_neuron_index.rs +++ b/rs/nns/governance/src/known_neuron_index.rs @@ -8,7 +8,6 @@ use ic_stable_structures::{Memory, StableBTreeMap, Storable}; /// Note that the index only cares about the uniqueness of the names, not the ids - /// the caller should make sure the name-id is removed from the index when a neuron /// is removed or its name is changed. 
- pub struct KnownNeuronIndex { known_neuron_name_to_id: StableBTreeMap, } diff --git a/rs/nns/integration_tests/test_canisters/governance_mem_test_canister.rs b/rs/nns/integration_tests/test_canisters/governance_mem_test_canister.rs index e5563e66753..f2029ba9131 100644 --- a/rs/nns/integration_tests/test_canisters/governance_mem_test_canister.rs +++ b/rs/nns/integration_tests/test_canisters/governance_mem_test_canister.rs @@ -1,3 +1,6 @@ +// TODO: Jira ticket NNS1-3556 +#![allow(static_mut_refs)] + //! This is a special-purpose canister to create a large Governance proto and //! serialize it to stable memory in a format that is compatible with the real //! governance canister. diff --git a/rs/nns/test_utils/src/itest_helpers.rs b/rs/nns/test_utils/src/itest_helpers.rs index 7d84cf9cb1c..e4e08e82c4c 100644 --- a/rs/nns/test_utils/src/itest_helpers.rs +++ b/rs/nns/test_utils/src/itest_helpers.rs @@ -505,10 +505,7 @@ pub async fn set_up_genesis_token_canister( } /// Compiles the ledger canister, builds it's initial payload and installs it -pub async fn install_ledger_canister<'runtime, 'a>( - canister: &mut Canister<'runtime>, - args: LedgerCanisterInitPayload, -) { +pub async fn install_ledger_canister(canister: &mut Canister<'_>, args: LedgerCanisterInitPayload) { install_rust_canister( canister, "ledger-canister", diff --git a/rs/orchestrator/BUILD.bazel b/rs/orchestrator/BUILD.bazel index 4d992a490ca..02a2db1cc45 100644 --- a/rs/orchestrator/BUILD.bazel +++ b/rs/orchestrator/BUILD.bazel @@ -87,12 +87,12 @@ rust_binary( rust_test( name = "orchestrator_test", - crate = ":lib", - data = [ + compile_data = [ "testdata/nftables_assigned_replica.conf.golden", "testdata/nftables_boundary_node.conf.golden", "//ic-os/components:ic/generate-ic-config/ic.json5.template", ], + crate = ":lib", deps = [ # Keep sorted. 
"//rs/crypto/temp_crypto", diff --git a/rs/orchestrator/src/registration.rs b/rs/orchestrator/src/registration.rs index 75230b930fe..db86313f532 100644 --- a/rs/orchestrator/src/registration.rs +++ b/rs/orchestrator/src/registration.rs @@ -550,7 +550,7 @@ pub(crate) fn is_time_to_rotate_in_subnet( let now = SystemTime::now(); timestamps .iter() - .all(|ts| now.duration_since(*ts).map_or(false, |d| d >= gamma)) + .all(|ts| now.duration_since(*ts).is_ok_and(|d| d >= gamma)) } pub(crate) fn http_config_to_endpoint( diff --git a/rs/p2p/consensus_manager/src/lib.rs b/rs/p2p/consensus_manager/src/lib.rs index 5f0ded558bc..08308131f6b 100644 --- a/rs/p2p/consensus_manager/src/lib.rs +++ b/rs/p2p/consensus_manager/src/lib.rs @@ -28,6 +28,22 @@ mod sender; type StartConsensusManagerFn = Box, watch::Receiver) -> Vec>; +pub struct AbortableBroadcastChannelManager(Vec); + +impl AbortableBroadcastChannelManager { + pub fn start( + self, + transport: Arc, + topology_watcher: watch::Receiver, + ) -> Vec { + let mut ret = vec![]; + for client in self.0 { + ret.append(&mut client(transport.clone(), topology_watcher.clone())); + } + ret + } +} + /// Same order of magnitude as the number of active artifacts. 
const MAX_OUTBOUND_CHANNEL_SIZE: usize = 100_000; @@ -113,20 +129,11 @@ impl AbortableBroadcastChannelBuilder { (outbound_tx, inbound_rx) } - pub fn router(&mut self) -> Router { - self.router.take().unwrap_or_default() - } - - pub fn run( - self, - transport: Arc, - topology_watcher: watch::Receiver, - ) -> Vec { - let mut ret = vec![]; - for client in self.clients { - ret.append(&mut client(transport.clone(), topology_watcher.clone())); - } - ret + pub fn build(self) -> (Router, AbortableBroadcastChannelManager) { + ( + self.router.unwrap(), + AbortableBroadcastChannelManager(self.clients), + ) } } diff --git a/rs/p2p/consensus_manager/src/receiver.rs b/rs/p2p/consensus_manager/src/receiver.rs index 2d1a7e31ae1..ec9c4476548 100644 --- a/rs/p2p/consensus_manager/src/receiver.rs +++ b/rs/p2p/consensus_manager/src/receiver.rs @@ -553,12 +553,8 @@ where }); for peers_sender in self.active_assembles.values() { - peers_sender.send_if_modified(|set| { - nodes_leaving_topology - .iter() - .map(|n| set.remove(*n)) - .any(|r| r) - }); + peers_sender + .send_if_modified(|set| nodes_leaving_topology.iter().any(|n| set.remove(*n))); } debug_assert!( self.slot_table.len() <= self.topology_watcher.borrow().iter().count(), diff --git a/rs/p2p/consensus_manager/tests/test.rs b/rs/p2p/consensus_manager/tests/test.rs index 056d9d43ec3..36e4415cfc4 100644 --- a/rs/p2p/consensus_manager/tests/test.rs +++ b/rs/p2p/consensus_manager/tests/test.rs @@ -345,17 +345,18 @@ fn load_test( for i in 0..num_peers { let node = node_test_id(i); let processor = TestConsensus::new(log.clone(), node, 256 * (i as usize + 1), i % 2 == 0); - let (jh, mut cm) = + let (jh, cm) = start_consensus_manager(no_op_logger(), rt.handle().clone(), processor.clone()); jhs.push(jh); - nodes.push((node, cm.router())); - cms.push((node, cm)); + let (r, m) = cm.build(); + nodes.push((node, r)); + cms.push((node, m)); node_advert_map.insert(node, processor); } let (nodes, topology_watcher) = 
fully_connected_localhost_subnet(rt.handle(), log, id, nodes); for ((node1, transport), (node2, cm)) in nodes.into_iter().zip(cms.into_iter()) { assert!(node1 == node2); - cm.run(transport, topology_watcher.clone()); + cm.start(transport, topology_watcher.clone()); } rt.block_on(async move { diff --git a/rs/p2p/state_sync_manager/src/lib.rs b/rs/p2p/state_sync_manager/src/lib.rs index cca66a54dfe..94dd80319d5 100644 --- a/rs/p2p/state_sync_manager/src/lib.rs +++ b/rs/p2p/state_sync_manager/src/lib.rs @@ -47,25 +47,23 @@ const ADVERT_BROADCAST_INTERVAL: Duration = Duration::from_secs(5); const ADVERT_BROADCAST_TIMEOUT: Duration = ADVERT_BROADCAST_INTERVAL.saturating_sub(Duration::from_secs(2)); -pub fn build_axum_router( - state_sync: Arc>, - log: ReplicaLogger, +pub fn build_state_sync_manager( + log: &ReplicaLogger, metrics_registry: &MetricsRegistry, -) -> ( - Router, - tokio::sync::mpsc::Receiver<(StateSyncArtifactId, NodeId)>, -) { + rt_handle: &tokio::runtime::Handle, + state_sync: Arc>, +) -> (Router, StateSyncManager) { let metrics = StateSyncManagerHandlerMetrics::new(metrics_registry); let shared_chunk_state = Arc::new(StateSyncChunkHandler::new( log.clone(), - state_sync, + state_sync.clone(), metrics.clone(), )); - let (tx, rx) = tokio::sync::mpsc::channel(20); - let advert_handler_state = Arc::new(StateSyncAdvertHandler::new(log, tx)); + let (advert_sender, advert_receiver) = tokio::sync::mpsc::channel(20); + let advert_handler_state = Arc::new(StateSyncAdvertHandler::new(log.clone(), advert_sender)); - let app = Router::new() + let router = Router::new() .route(STATE_SYNC_CHUNK_PATH, any(state_sync_chunk_handler)) .with_state(shared_chunk_state) .route( @@ -74,45 +72,37 @@ pub fn build_axum_router( ) .with_state(advert_handler_state); - (app, rx) -} - -pub fn start_state_sync_manager( - log: &ReplicaLogger, - metrics: &MetricsRegistry, - rt: &Handle, - transport: Arc, - state_sync: Arc>, - advert_receiver: 
tokio::sync::mpsc::Receiver<(StateSyncArtifactId, NodeId)>, -) -> Shutdown { - let state_sync_manager_metrics = StateSyncManagerMetrics::new(metrics); + let state_sync_manager_metrics = StateSyncManagerMetrics::new(metrics_registry); let manager = StateSyncManager { log: log.clone(), - rt: rt.clone(), + rt: rt_handle.clone(), metrics: state_sync_manager_metrics, - transport, state_sync, advert_receiver, ongoing_state_sync: None, }; - Shutdown::spawn_on_with_cancellation( - |cancellation: CancellationToken| manager.run(cancellation), - rt, - ) + (router, manager) } -struct StateSyncManager { +pub struct StateSyncManager { log: ReplicaLogger, rt: Handle, metrics: StateSyncManagerMetrics, - transport: Arc, state_sync: Arc>, advert_receiver: tokio::sync::mpsc::Receiver<(StateSyncArtifactId, NodeId)>, ongoing_state_sync: Option, } impl StateSyncManager { - async fn run(mut self, cancellation: CancellationToken) { + pub fn start(self, transport: Arc) -> Shutdown { + let rt_handle = self.rt.clone(); + Shutdown::spawn_on_with_cancellation( + |cancellation: CancellationToken| self.run(cancellation, transport), + &rt_handle, + ) + } + + async fn run(mut self, cancellation: CancellationToken, transport: Arc) { let mut interval = tokio::time::interval(ADVERT_BROADCAST_INTERVAL); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); let mut advertise_task = JoinSet::new(); @@ -127,7 +117,7 @@ impl StateSyncManager { Self::send_state_adverts( self.rt.clone(), self.state_sync.clone(), - self.transport.clone(), + transport.clone(), self.metrics.clone(), cancellation.clone(), ), @@ -135,7 +125,7 @@ impl StateSyncManager { ); }, Some((advert, peer_id)) = self.advert_receiver.recv() =>{ - self.handle_advert(advert, peer_id).await; + self.handle_advert(advert, peer_id, transport.clone()).await; } Some(_) = advertise_task.join_next() => {} } @@ -146,7 +136,12 @@ impl StateSyncManager { } } - async fn handle_advert(&mut self, artifact_id: StateSyncArtifactId, peer_id: NodeId) { + 
async fn handle_advert( + &mut self, + artifact_id: StateSyncArtifactId, + peer_id: NodeId, + transport: Arc, + ) { self.metrics.adverts_received_total.inc(); // Remove ongoing state sync if finished or try to add peer if ongoing. if let Some(ongoing) = &mut self.ongoing_state_sync { @@ -184,7 +179,7 @@ impl StateSyncManager { self.metrics.ongoing_state_sync_metrics.clone(), Arc::new(Mutex::new(chunkable)), artifact_id.clone(), - self.transport.clone(), + transport, ); // Add peer that initiated this state sync to ongoing state sync. ongoing @@ -330,14 +325,17 @@ mod tests { }; let (handler_tx, handler_rx) = tokio::sync::mpsc::channel(100); - start_state_sync_manager( - &log, - &MetricsRegistry::default(), - rt.handle(), - Arc::new(t) as Arc<_>, - Arc::new(s) as Arc<_>, - handler_rx, - ); + let metrics = StateSyncManagerMetrics::new(&MetricsRegistry::default()); + + let manager = StateSyncManager { + log: log.clone(), + advert_receiver: handler_rx, + ongoing_state_sync: None, + metrics, + state_sync: Arc::new(s) as Arc<_>, + rt: rt.handle().clone(), + }; + let _ = manager.start(Arc::new(t) as Arc<_>); rt.block_on(async move { handler_tx.send((id, NODE_1)).await.unwrap(); handler_tx.send((old_id, NODE_2)).await.unwrap(); diff --git a/rs/p2p/state_sync_manager/tests/common.rs b/rs/p2p/state_sync_manager/tests/common.rs index 3f6fe8f9f00..4d211ef8241 100644 --- a/rs/p2p/state_sync_manager/tests/common.rs +++ b/rs/p2p/state_sync_manager/tests/common.rs @@ -418,10 +418,11 @@ pub fn create_node( disconnected: Arc::new(AtomicBool::new(false)), }); - let (router, rx) = ic_state_sync_manager::build_axum_router( - state_sync.clone(), - log.clone(), + let (router, manager) = ic_state_sync_manager::build_state_sync_manager( + &log, &MetricsRegistry::default(), + rt, + state_sync.clone(), ); let transport = transport_router.add_peer( NodeId::from(PrincipalId::new_node_test_id(node_num)), @@ -429,14 +430,6 @@ pub fn create_node( link.0, link.1, ); - let shutdown = 
ic_state_sync_manager::start_state_sync_manager( - &log, - &MetricsRegistry::default(), - rt, - Arc::new(transport), - state_sync.clone(), - rx, - ); - + let shutdown = manager.start(Arc::new(transport)); (state_sync, shutdown) } diff --git a/rs/p2p/test_utils/src/turmoil.rs b/rs/p2p/test_utils/src/turmoil.rs index 14eb6a251f6..9d24eebd126 100644 --- a/rs/p2p/test_utils/src/turmoil.rs +++ b/rs/p2p/test_utils/src/turmoil.rs @@ -358,19 +358,21 @@ pub fn add_transport_to_sim( let this_ip = turmoil::lookup(peer.to_string()); let custom_udp = CustomUdp::new(this_ip, udp_listener); - let state_sync_rx = if let Some(ref state_sync) = state_sync_client_clone { - let (state_sync_router, state_sync_rx) = ic_state_sync_manager::build_axum_router( - state_sync.clone(), - log.clone(), - &MetricsRegistry::default(), - ); + let state_sync_manager = if let Some(ref state_sync) = state_sync_client_clone { + let (state_sync_router, state_sync_manager) = + ic_state_sync_manager::build_state_sync_manager( + &log, + &MetricsRegistry::default(), + &tokio::runtime::Handle::current(), + state_sync.clone(), + ); router = Some(router.unwrap_or_default().merge(state_sync_router)); - Some(state_sync_rx) + Some(state_sync_manager) } else { None }; - let _artifact_processor_jh = if let Some(consensus) = consensus_manager_clone { + let con = if let Some(consensus) = consensus_manager_clone { let bouncer_factory = Arc::new(consensus.clone().read().unwrap().clone()); let downloader = FetchArtifact::new( log.clone(), @@ -388,9 +390,11 @@ pub fn add_transport_to_sim( consensus.clone(), consensus.clone().read().unwrap().clone(), ); - router = Some(router.unwrap_or_default().merge(consensus_builder.router())); - Some(artifact_processor_jh) + let (consensus_router, manager) = consensus_builder.build(); + router = Some(router.unwrap_or_default().merge(consensus_router)); + + Some((artifact_processor_jh, manager)) } else { None }; @@ -407,17 +411,12 @@ pub fn add_transport_to_sim( 
router.unwrap_or_default(), )); - consensus_builder.run(transport.clone(), topology_watcher_clone.clone()); + if let Some((_, con_manager)) = con { + con_manager.start(transport.clone(), topology_watcher_clone.clone()); + } - if let Some(state_sync_rx) = state_sync_rx { - ic_state_sync_manager::start_state_sync_manager( - &log, - &MetricsRegistry::default(), - &tokio::runtime::Handle::current(), - transport.clone(), - state_sync_client_clone.unwrap().clone(), - state_sync_rx, - ); + if let Some(state_sync_manager) = state_sync_manager { + state_sync_manager.start(transport.clone()); } post_setup_future_clone(peer, transport).await; diff --git a/rs/phantom_newtype/src/displayer.rs b/rs/phantom_newtype/src/displayer.rs index 1c8ed820e4b..aca27709e9e 100644 --- a/rs/phantom_newtype/src/displayer.rs +++ b/rs/phantom_newtype/src/displayer.rs @@ -34,7 +34,7 @@ where } } -impl<'a, T, Displayer> fmt::Display for DisplayProxy<'a, T, Displayer> +impl fmt::Display for DisplayProxy<'_, T, Displayer> where Displayer: DisplayerOf, { diff --git a/rs/pocket_ic_server/CHANGELOG.md b/rs/pocket_ic_server/CHANGELOG.md index 44815080924..fa0390e3828 100644 --- a/rs/pocket_ic_server/CHANGELOG.md +++ b/rs/pocket_ic_server/CHANGELOG.md @@ -13,8 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New endpoint `/instances//read/ingress_status` to fetch the status of an update call submitted through an ingress message. - If an optional caller is provided and a corresponding read state request for the status of the same update call - signed by that specified caller was rejected because the update call was submitted by a different caller, then an error is returned. + If an optional caller is provided, the status of the update call is known, but the update call was submitted by a different caller, then an error is returned. 
### Fixed - Canisters created via `provisional_create_canister_with_cycles` with the management canister ID as the effective canister ID diff --git a/rs/pocket_ic_server/tests/bitcoin_integration_tests.rs b/rs/pocket_ic_server/tests/bitcoin_integration_tests.rs index 59a97ab7dba..3a36651f1d6 100644 --- a/rs/pocket_ic_server/tests/bitcoin_integration_tests.rs +++ b/rs/pocket_ic_server/tests/bitcoin_integration_tests.rs @@ -94,7 +94,7 @@ rpcauth=ic-btc-integration:cdf2741387f3a12438f69092f0fdad8e$62081498c98bee09a0dc let data_dir_path = tmp_dir.path().join("data"); create_dir(data_dir_path.clone()).unwrap(); - Command::new(bitcoind_path) + let mut bitcoin_d_process = Command::new(bitcoind_path) .arg(format!("-conf={}", conf_path.display())) .arg(format!("-datadir={}", data_dir_path.display())) .spawn() @@ -140,9 +140,20 @@ rpcauth=ic-btc-integration:cdf2741387f3a12438f69092f0fdad8e$62081498c98bee09a0dc // `n` must be more than 100 (Coinbase maturity rule) so that the reward for the first block can be sent out let mut n = 101; - btc_rpc - .generate_to_address(n, &Address::from_str(&bitcoin_address).unwrap()) - .unwrap(); + // retry generating blocks until the bitcoind is up and running + let start = std::time::Instant::now(); + loop { + match btc_rpc.generate_to_address(n, &Address::from_str(&bitcoin_address).unwrap()) { + Ok(_) => break, + Err(bitcoincore_rpc::Error::JsonRpc(err)) => { + if start.elapsed() > std::time::Duration::from_secs(30) { + panic!("Timed out when waiting for bitcoind; last error: {}", err); + } + std::thread::sleep(std::time::Duration::from_millis(100)); + } + Err(err) => panic!("Unexpected error when talking to bitcoind: {}", err), + } + } let reward = 50 * 100_000_000; // 50 BTC @@ -180,4 +191,8 @@ rpcauth=ic-btc-integration:cdf2741387f3a12438f69092f0fdad8e$62081498c98bee09a0dc n += 1; } } + + // Kill the task to avoid zombie process. 
+ bitcoin_d_process.kill().unwrap(); + bitcoin_d_process.wait().unwrap(); } diff --git a/rs/pocket_ic_server/tests/spec_test.rs b/rs/pocket_ic_server/tests/spec_test.rs index 037013ecb23..567c79cf324 100644 --- a/rs/pocket_ic_server/tests/spec_test.rs +++ b/rs/pocket_ic_server/tests/spec_test.rs @@ -90,7 +90,8 @@ fn setup_and_run_ic_ref_test( cmd.arg("--key-file").arg(key_path); } - cmd.stdout(Stdio::inherit()) + let mut process = cmd + .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .spawn() .expect("httpbin binary crashed"); @@ -196,6 +197,9 @@ fn setup_and_run_ic_ref_test( included_tests, 32, ); + + process.kill().unwrap(); + process.wait().unwrap(); } #[test] diff --git a/rs/prep/src/subnet_configuration.rs b/rs/prep/src/subnet_configuration.rs index c7ef379feab..6e518b7c4c9 100644 --- a/rs/prep/src/subnet_configuration.rs +++ b/rs/prep/src/subnet_configuration.rs @@ -169,7 +169,6 @@ pub fn duration_to_millis(unit_delay: Duration) -> u64 { /// The configuration for app subnets is used for new app subnets with at most /// 13 nodes. App subnets with more than 13 nodes will be deployed with the NNS /// subnet configs. 
- pub fn get_default_config_params(subnet_type: SubnetType, nodes_num: usize) -> SubnetConfigParams { let use_app_config = subnet_type == SubnetType::Application && nodes_num <= ic_limits::SMALL_APP_SUBNET_MAX_SIZE; diff --git a/rs/protobuf/src/proxy.rs b/rs/protobuf/src/proxy.rs index 51bf54ba2d2..ef4942c068a 100644 --- a/rs/protobuf/src/proxy.rs +++ b/rs/protobuf/src/proxy.rs @@ -132,7 +132,7 @@ impl ProxyDecodeError { T: Error + Eq + 'static, { self.source() - .map_or(false, |err| err.downcast_ref() == Some(&other_err)) + .is_some_and(|err| err.downcast_ref() == Some(&other_err)) } } diff --git a/rs/recovery/BUILD.bazel b/rs/recovery/BUILD.bazel index 1364ceed55c..e135f4024a3 100644 --- a/rs/recovery/BUILD.bazel +++ b/rs/recovery/BUILD.bazel @@ -60,8 +60,8 @@ rust_library( name = "recovery", srcs = glob(["src/**/*.rs"]), aliases = ALIASES, + compile_data = ["ic_public_key.pem"], crate_name = "ic_recovery", - data = ["ic_public_key.pem"], proc_macro_deps = MACRO_DEPENDENCIES, version = "0.1.0", visibility = [ @@ -88,7 +88,7 @@ rust_binary( rust_test( name = "recovery_test", + compile_data = ["ic_public_key.pem"], crate = ":recovery", - data = ["ic_public_key.pem"], deps = DEPENDENCIES + DEV_DEPENDENCIES, ) diff --git a/rs/recovery/src/admin_helper.rs b/rs/recovery/src/admin_helper.rs index 0e1c4aa718c..7035d114ac9 100644 --- a/rs/recovery/src/admin_helper.rs +++ b/rs/recovery/src/admin_helper.rs @@ -122,9 +122,7 @@ impl AdminHelper { let mut ic_admin = self.get_ic_admin_cmd_base(); ic_admin - // TODO: Switch to the new command name: - // .add_positional_argument("propose-to-revise-elected-guestos-versions") - .add_positional_argument("propose-to-update-elected-replica-versions") + .add_positional_argument("propose-to-revise-elected-guestos-versions") .add_argument("replica-version-to-elect", quote(upgrade_version)) .add_argument("release-package-urls", quote(upgrade_url)) .add_argument("release-package-sha256-hex", quote(sha256)) @@ -149,9 +147,7 @@ impl 
AdminHelper { let mut ic_admin = self.get_ic_admin_cmd_base(); ic_admin - // TODO: Switch to the new command name: - // .add_positional_argument("propose-to-deploy-guestos-to-all-subnet-nodes") - .add_positional_argument("propose-to-update-subnet-replica-version") + .add_positional_argument("propose-to-deploy-guestos-to-all-subnet-nodes") .add_positional_argument(subnet_id) .add_positional_argument(upgrade_version) .add_argument( @@ -416,7 +412,7 @@ mod tests { result, "/fake/ic/admin/dir/ic-admin \ --nns-url \"https://fake_nns_url.com:8080/\" \ - propose-to-update-elected-replica-versions \ + propose-to-revise-elected-guestos-versions \ --replica-version-to-elect \"fake_replica_version\" \ --release-package-urls \"https://fake_upgrade_url.com/\" \ --release-package-sha256-hex \"fake_sha_256\" \ @@ -529,7 +525,7 @@ mod tests { assert_eq!(result, "/fake/ic/admin/dir/ic-admin \ --nns-url \"https://fake_nns_url.com:8080/\" \ - propose-to-update-subnet-replica-version \ + propose-to-deploy-guestos-to-all-subnet-nodes \ gpvux-2ejnk-3hgmh-cegwf-iekfc-b7rzs-hrvep-5euo2-3ywz3-k3hcb-cqe \ fake_replica_version \ --summary \"Upgrade replica version of subnet gpvux-2ejnk-3hgmh-cegwf-iekfc-b7rzs-hrvep-5euo2-3ywz3-k3hcb-cqe.\" \ diff --git a/rs/recovery/subnet_splitting/BUILD.bazel b/rs/recovery/subnet_splitting/BUILD.bazel index 7250b6d7f43..60a21945820 100644 --- a/rs/recovery/subnet_splitting/BUILD.bazel +++ b/rs/recovery/subnet_splitting/BUILD.bazel @@ -64,7 +64,7 @@ rust_binary( rust_test( name = "subnet_splitting_tool_test", + compile_data = ["test_data/fake_expected_manifests.data"], crate = "subnet_splitting", - data = ["test_data/fake_expected_manifests.data"], deps = DEPENDENCIES + DEV_DEPENDENCIES, ) diff --git a/rs/registry/admin/src/main.rs b/rs/registry/admin/src/main.rs index c32e582296d..b4b508219c4 100644 --- a/rs/registry/admin/src/main.rs +++ b/rs/registry/admin/src/main.rs @@ -207,14 +207,15 @@ struct Opts { subcmd: SubCommand, /// Use an HSM to sign calls. 
- #[clap(long, global = true)] + #[clap(long, global = true, requires_all = &["hsm_slot", "hsm_key_id", "hsm_pin"])] use_hsm: bool, /// The slot related to the HSM key that shall be used. #[clap( long = "slot", - help = "Only required if use-hsm is set. Ignored otherwise.", - global = true + help = "Required if use-hsm is set.", + global = true, + requires = "use_hsm" )] hsm_slot: Option, @@ -222,17 +223,21 @@ struct Opts { #[clap( long = "key-id", help = "Only required if use-hsm is set. Ignored otherwise.", - global = true + global = true, + requires = "use_hsm", + visible_alias = "key-id" )] - key_id: Option, + hsm_key_id: Option, /// The PIN used to unlock the HSM. #[clap( long = "pin", help = "Only required if use-hsm is set. Ignored otherwise.", - global = true + global = true, + requires = "use_hsm", + visible_alias = "pin" )] - pin: Option, + hsm_pin: Option, /// Verify NNS responses against NNS public key. #[clap( @@ -258,205 +263,267 @@ struct Opts { silence_notices: bool, } +//////////////////////////////////////////////////////////////////////////////// +// NOTE: Please keep the sub-commands in alphabetical order. +//////////////////////////////////////////////////////////////////////////////// /// List of sub-commands accepted by `ic-admin`. #[allow(clippy::large_enum_variant)] #[derive(clap::Subcommand)] enum SubCommand { - /// Get the last version of a node's public key from the registry. - GetPublicKey(GetPublicKeyCmd), - /// Get the last version of a node's TLS certificate key from the registry. - GetTlsCertificate(GetTlsCertificateCmd), - /// Submits a proposal to change node membership in a subnet. - /// Consider using instead the DRE tool to submit this type of proposals. 
- /// https://github.com/dfinity/dre - ProposeToChangeSubnetMembership(ProposeToChangeSubnetMembershipCmd), + /// Convert the integer node ID into Principal Id + ConvertNumericNodeIdToPrincipalId(ConvertNumericNodeIdtoPrincipalIdCmd), + + /// Sub-command to fetch an API Boundary Node record from the registry. + /// Retrieve an API Boundary Node record + GetApiBoundaryNode(GetApiBoundaryNodeCmd), + + /// Retrieve all API Boundary Node Ids + GetApiBoundaryNodes, + + /// Get the latest canister migrations. + GetCanisterMigrations, + + /// Get the Master public key ids and their signing subnets + GetChainKeySigningSubnets, + + /// Get a DataCenterRecord + GetDataCenter(GetDataCenterCmd), + + /// Get the ECDSA key ids and their signing subnets + GetEcdsaSigningSubnets, + + /// Get the current list of elected GuestOS/Replica versions. + #[clap(visible_alias = "get-blessed-replica-versions")] + GetElectedGuestosVersions, + + /// Get the current list of elected HostOS versions + GetElectedHostosVersions, + + /// Get the current firewall config + GetFirewallConfig, + + /// Get the existing firewall rules for a given scope + GetFirewallRules(GetFirewallRulesCmd), + + /// Get the existing firewall rules that apply to a given node + GetFirewallRulesForNode(GetFirewallRulesForNodeCmd), + + /// Compute the SHA-256 hash of a given list of firewall rules + GetFirewallRulesetHash(GetFirewallRulesetHashCmd), + + /// Get the monthly Node Provider rewards + GetMonthlyNodeProviderRewards, + /// Get the last version of a node from the registry. GetNode(GetNodeCmd), + /// Get the nodes added since a given version (exclusive). GetNodeListSince(GetNodeListSinceCmd), - /// Get the topology of the system as described in the registry, in JSON - /// format. - GetTopology, - /// Get the last version of a subnet from the registry. - GetSubnet(GetSubnetCmd), - /// Get the last version of the subnet list from the registry. 
- GetSubnetList, + + /// Get a node operator's record + GetNodeOperator(GetNodeOperatorCmd), + + /// Get the list of all node operators + GetNodeOperatorList, + + /// Get the node rewards table + GetNodeRewardsTable, + + // Get the pending proposals to upgrade the governance canister. + GetPendingRootProposalsToUpgradeGovernanceCanister, + + /// Get whitelist of principals that can access the provisional_* APIs in + /// the management canister. + GetProvisionalWhitelist, + + /// Get the last version of a node's public key from the registry. + GetPublicKey(GetPublicKeyCmd), + + // Get latest registry version number + GetRegistryVersion, + /// Get info about a Replica version GetReplicaVersion(GetReplicaVersionCmd), - /// Deprecated. Please use `ProposeToDeployGuestosToAllSubnetNodes` instead. - ProposeToUpdateSubnetReplicaVersion(ProposeToDeployGuestosToAllSubnetNodesCmd), - /// Propose to deploy a priorly elected GuestOS version to all subnet nodes. - ProposeToDeployGuestosToAllSubnetNodes(ProposeToDeployGuestosToAllSubnetNodesCmd), - /// Get the list of blessed Replica versions. - GetBlessedReplicaVersions, + /// Get the latest routing table. GetRoutingTable, - /// Deprecated. Please use `ProposeToReviseElectedGuestosVersions` instead. - ProposeToUpdateElectedReplicaVersions(ProposeToReviseElectedGuestssVersionsCmd), - /// Submits a proposal to change the set of currently elected GuestOS versions, by electing - /// a new version and/or unelecting multiple priorly elected versions. - ProposeToReviseElectedGuestosVersions(ProposeToReviseElectedGuestssVersionsCmd), - /// Submits a proposal to create a new subnet. - ProposeToCreateSubnet(ProposeToCreateSubnetCmd), - /// Submits a proposal to create a new service nervous system (usually referred to as SNS). 
- ProposeToCreateServiceNervousSystem(ProposeToCreateServiceNervousSystemCmd), - /// Submits a proposal to update a subnet's recovery CUP - ProposeToUpdateRecoveryCup(ProposeToUpdateRecoveryCupCmd), - /// Submits a proposal to update an existing subnet's configuration. - ProposeToUpdateSubnet(ProposeToUpdateSubnetCmd), + + /// Get the last version of a subnet from the registry. + GetSubnet(GetSubnetCmd), + + /// Get the last version of the subnet list from the registry. + GetSubnetList, + + /// Get the public of the subnet. + GetSubnetPublicKey(SubnetPublicKeyCmd), + + /// Get the last version of a node's TLS certificate key from the registry. + GetTlsCertificate(GetTlsCertificateCmd), + + /// Get the topology of the system as described in the registry, in JSON + /// format. + GetTopology, + + /// Get the SSH key access lists for unassigned nodes + GetUnassignedNodes, + + /// Propose to add an API Boundary Node + ProposeToAddApiBoundaryNodes(ProposeToAddApiBoundaryNodesCmd), + + /// Propose to add firewall rules + ProposeToAddFirewallRules(ProposeToAddFirewallRulesCmd), + + /// Submits a proposal to add a new canister on NNS. + ProposeToAddNnsCanister(ProposeToAddNnsCanisterCmd), + + /// Propose to add a new node operator to the registry. + ProposeToAddNodeOperator(ProposeToAddNodeOperatorCmd), + + /// Submit a proposal to add data centers and/or remove data centers from the Registry + ProposeToAddOrRemoveDataCenters(ProposeToAddOrRemoveDataCentersCmd), + + /// Propose to add or remove a node provider from the governance canister + ProposeToAddOrRemoveNodeProvider(ProposeToAddOrRemoveNodeProviderCmd), + + /// Submits a proposal to add an SNS wasm (e.g. Governance, Ledger, etc) to the SNS-WASM NNS + /// canister. + ProposeToAddWasmToSnsWasm(ProposeToAddWasmToSnsWasmCmd), + /// Submits a proposal to change an existing canister on NNS. 
ProposeToChangeNnsCanister(ProposeToChangeNnsCanisterCmd), - /// Submits a proposal to uninstall and install root to a particular version - ProposeToHardResetNnsRootToVersion(ProposeToHardResetNnsRootToVersionCmd), - /// Submits a proposal to uninstall code of a canister. - ProposeToUninstallCode(ProposeToUninstallCodeCmd), - /// Submits a proposal to set authorized subnetworks that the cycles minting - /// canister can use. - ProposeToSetAuthorizedSubnetworks(ProposeToSetAuthorizedSubnetworksCmd), - /// Submits a proposal to update the subnet types that are available in the - /// cycles minting canister. - ProposeToUpdateSubnetType(ProposeToUpdateSubnetTypeCmd), + + /// Submits a proposal to change node membership in a subnet. + /// Consider using instead the DRE tool to submit this type of proposals. + /// https://github.com/dfinity/dre + ProposeToChangeSubnetMembership(ProposeToChangeSubnetMembershipCmd), + /// Submits a proposal to add or remove subnets from a subnet type in the /// cycles minting canister. ProposeToChangeSubnetTypeAssignment(ProposeToChangeSubnetTypeAssignmentCmd), - /// Submits a proposal to add a new canister on NNS. - ProposeToAddNnsCanister(ProposeToAddNnsCanisterCmd), - /// Convert the integer node ID into Principal Id - ConvertNumericNodeIdToPrincipalId(ConvertNumericNodeIdtoPrincipalIdCmd), - /// Get whitelist of principals that can access the provisional_* APIs in - /// the management canister. - GetProvisionalWhitelist, - /// Get the public of the subnet. - GetSubnetPublicKey(SubnetPublicKeyCmd), - /// Propose to add a new node operator to the registry. 
- ProposeToAddNodeOperator(ProposeToAddNodeOperatorCmd), - /// Get a node operator's record - GetNodeOperator(GetNodeOperatorCmd), - /// Get the list of all node operators - GetNodeOperatorList, - /// Update local registry store by pulling from remote URL - UpdateRegistryLocalStore(UpdateRegistryLocalStoreCmd), + /// Update the whitelist of principals that can access the provisional_* /// APIs in the management canister. ProposeToClearProvisionalWhitelist(ProposeToClearProvisionalWhitelistCmd), - /// Update the Node Operator's specified parameters - ProposeToUpdateNodeOperatorConfig(ProposeToUpdateNodeOperatorConfigCmd), - /// Get the current firewall config - GetFirewallConfig, - /// Propose to set the firewall config - ProposeToSetFirewallConfig(ProposeToSetFirewallConfigCmd), - /// Propose to add firewall rules - ProposeToAddFirewallRules(ProposeToAddFirewallRulesCmd), - /// Propose to remove firewall rules - ProposeToRemoveFirewallRules(ProposeToRemoveFirewallRulesCmd), - /// Propose to update firewall rules - ProposeToUpdateFirewallRules(ProposeToUpdateFirewallRulesCmd), - /// Get the existing firewall rules for a given scope - GetFirewallRules(GetFirewallRulesCmd), - /// Get the existing firewall rules that apply to a given node - GetFirewallRulesForNode(GetFirewallRulesForNodeCmd), - /// Compute the SHA-256 hash of a given list of firewall rules - GetFirewallRulesetHash(GetFirewallRulesetHashCmd), - /// Propose to remove a node from the registry via proposal. - ProposeToRemoveNodes(ProposeToRemoveNodesCmd), - /// Propose to add or remove a node provider from the governance canister - ProposeToAddOrRemoveNodeProvider(ProposeToAddOrRemoveNodeProviderCmd), - // Get latest registry version number - GetRegistryVersion, - // Submit a root proposal to the root canister to upgrade the governance canister. 
- SubmitRootProposalToUpgradeGovernanceCanister(SubmitRootProposalToUpgradeGovernanceCanisterCmd), - // Get the pending proposals to upgrade the governance canister. - GetPendingRootProposalsToUpgradeGovernanceCanister, - // Vote on a pending root proposal to upgrade the governance canister. - VoteOnRootProposalToUpgradeGovernanceCanister(VoteOnRootProposalToUpgradeGovernanceCanisterCmd), - /// Get a DataCenterRecord - GetDataCenter(GetDataCenterCmd), - /// Submit a proposal to add data centers and/or remove data centers from - /// the Registry - ProposeToAddOrRemoveDataCenters(ProposeToAddOrRemoveDataCentersCmd), - /// Get the node rewards table - GetNodeRewardsTable, - /// Submit a proposal to update the node rewards table - ProposeToUpdateNodeRewardsTable(ProposeToUpdateNodeRewardsTableCmd), - /// Submit a proposal to update the unassigned nodes. This subcommand is obsolete; please use - /// `ProposeToDeployGuestosToAllUnassignedNodes` or `ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes` instead. - ProposeToUpdateUnassignedNodesConfig(ProposeToUpdateUnassignedNodesConfigCmd), + + /// Propose to remove entries from `canister_migrations`. Step 3 of canister migration. + ProposeToCompleteCanisterMigration(ProposeToCompleteCanisterMigrationCmd), + + /// Submits a proposal to create a new service nervous system (usually referred to as SNS). + ProposeToCreateServiceNervousSystem(ProposeToCreateServiceNervousSystemCmd), + + /// Submits a proposal to create a new subnet. + ProposeToCreateSubnet(ProposeToCreateSubnetCmd), + + /// Propose to deploy a priorly elected GuestOS version to all subnet nodes. + ProposeToDeployGuestosToAllSubnetNodes(ProposeToDeployGuestosToAllSubnetNodesCmd), + /// Propose to deploy the GuestOS version to all unassigned nodes. ProposeToDeployGuestosToAllUnassignedNodes(ProposeToDeployGuestosToAllUnassignedNodesCmd), - /// Propose to update the SSH keys that have read-only access to all unassigned nodes. 
- ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes( - ProposeToUpdateSshReadonlyAccessForAllUnassignedNodesCmd, - ), - /// Get the SSH key access lists for unassigned nodes - GetUnassignedNodes, - /// Get the monthly Node Provider rewards - GetMonthlyNodeProviderRewards, - /// Propose Xdr/Icp conversion rate. - ProposeXdrIcpConversionRate(ProposeXdrIcpConversionRateCmd), - /// Propose to start a canister managed by the governance. - ProposeToStartCanister(StartCanisterCmd), - /// Propose to stop a canister managed by the governance. - ProposeToStopCanister(StopCanisterCmd), + + /// Propose to upgrade the GuestOS version of a set of API Boundary Nodes. + ProposeToDeployGuestosToSomeApiBoundaryNodes(ProposeToDeployGuestosToSomeApiBoundaryNodesCmd), + + /// Propose to deploy a HostOS version to some nodes. + ProposeToDeployHostosToSomeNodes(ProposeToDeployHostosToSomeNodesCmd), + + /// Submits a proposal to uninstall and install root to a particular version + ProposeToHardResetNnsRootToVersion(ProposeToHardResetNnsRootToVersionCmd), + + // Submits a proposal to add custom upgrade path entries + ProposeToInsertSnsWasmUpgradePathEntries(ProposeToInsertSnsWasmUpgradePathEntriesCmd), + + /// Propose additions or updates to `canister_migrations`. Step 1 of canister migration. + ProposeToPrepareCanisterMigration(ProposeToPrepareCanisterMigrationCmd), + + /// Propose to remove a set of API Boundary Nodes + ProposeToRemoveApiBoundaryNodes(ProposeToRemoveApiBoundaryNodesCmd), + + /// Propose to remove firewall rules + ProposeToRemoveFirewallRules(ProposeToRemoveFirewallRulesCmd), + /// Propose to remove a list of node operators from the Registry ProposeToRemoveNodeOperators(ProposeToRemoveNodeOperatorsCmd), + + /// Propose to remove a node from the registry via proposal. + ProposeToRemoveNodes(ProposeToRemoveNodesCmd), + + /// Submits a proposal to express the interest in renting a subnet. 
+ ProposeToRentSubnet(ProposeToRentSubnetCmd), + /// Propose to modify the routing table. Step 2 of canister migration. ProposeToRerouteCanisterRanges(ProposeToRerouteCanisterRangesCmd), - /// Propose additions or updates to `canister_migrations`. Step 1 of canister migration. - ProposeToPrepareCanisterMigration(ProposeToPrepareCanisterMigrationCmd), - /// Propose to remove entries from `canister_migrations`. Step 3 of canister migration. - ProposeToCompleteCanisterMigration(ProposeToCompleteCanisterMigrationCmd), - /// Get the latest canister migrations. - GetCanisterMigrations, - /// Submits a proposal to add an SNS wasm (e.g. Governance, Ledger, etc) to the SNS-WASM NNS - /// canister. - ProposeToAddWasmToSnsWasm(ProposeToAddWasmToSnsWasmCmd), - // Submits a proposal to add custom upgrade path entries - ProposeToInsertSnsWasmUpgradePathEntries(ProposeToInsertSnsWasmUpgradePathEntriesCmd), - /// Get the ECDSA key ids and their signing subnets - GetEcdsaSigningSubnets, - /// Get the Master public key ids and their signing subnets - GetChainKeySigningSubnets, - /// Propose to update the list of SNS Subnet IDs that SNS-WASM deploys SNS instances to - ProposeToUpdateSnsSubnetIdsInSnsWasm(ProposeToUpdateSnsSubnetIdsInSnsWasmCmd), - /// Propose to update the list of Principals that are allowed to deploy SNS instances - ProposeToUpdateSnsDeployWhitelist(ProposeToUpdateSnsDeployWhitelistCmd), - /// Propose to start a decentralization swap. This subcommand is obsolete; please use - /// `ProposeToCreateServiceNervousSystem` instead. - ProposeToOpenSnsTokenSwap(ProposeToOpenSnsTokenSwap), - /// Propose to set the Bitcoin configuration - ProposeToSetBitcoinConfig(ProposeToSetBitcoinConfig), - /// Submits a proposal to change the set of currently elected HostOS versions, by electing - /// a new version and/or unelecting multiple versions. This subcommand is obsolete; please use - /// `ProposeToReviseElectedHostosVersions` instead. 
- ProposeToUpdateElectedHostosVersions(ProposeToUpdateElectedHostosVersionsCmd), + + /// Submits a proposal to change the set of currently elected GuestOS versions, by electing + /// a new version and/or unelecting multiple priorly elected versions. + ProposeToReviseElectedGuestosVersions(ProposeToReviseElectedGuestsOsVersionsCmd), + /// Submits a proposal to change the set of currently elected HostOS versions, by electing /// a new version and/or unelecting multiple versions. ProposeToReviseElectedHostosVersions(ProposeToReviseElectedHostosVersionsCmd), - /// Set or remove a HostOS version on Nodes. This subcommand is obsolete; please use - /// `ProposeToDeployHostosToSomeNodes` instead. - ProposeToUpdateNodesHostosVersion(ProposeToUpdateNodesHostosVersionCmd), - /// Propose to deploy a HostOS version to some nodes. - ProposeToDeployHostosToSomeNodes(ProposeToDeployHostosToSomeNodesCmd), - /// Get current list of elected HostOS versions - GetElectedHostosVersions, - /// Propose to add an API Boundary Node - ProposeToAddApiBoundaryNodes(ProposeToAddApiBoundaryNodesCmd), - /// Propose to remove a set of API Boundary Nodes - ProposeToRemoveApiBoundaryNodes(ProposeToRemoveApiBoundaryNodesCmd), - /// Propose to update the version of a set of API Boundary Nodes. This subcommand is obsolete; please use - /// `ProposeToDeployGuestosToSomeApiBoundaryNodes` instead. - ProposeToUpdateApiBoundaryNodesVersion(ProposeToUpdateApiBoundaryNodesVersionCmd), - /// Propose to upgrade the GuestOS version of a set of API Boundary Nodes. - ProposeToDeployGuestosToSomeApiBoundaryNodes(ProposeToDeployGuestosToSomeApiBoundaryNodesCmd), - /// Sub-command to fetch an API Boundary Node record from the registry. - /// Retrieve an API Boundary Node record - GetApiBoundaryNode(GetApiBoundaryNodeCmd), - /// Retrieve all API Boundary Node Ids - GetApiBoundaryNodes, - /// Submits a proposal to express the interest in renting a subnet. 
- ProposeToRentSubnet(ProposeToRentSubnetCmd), + + /// Submits a proposal to set authorized subnetworks that the cycles minting + /// canister can use. + ProposeToSetAuthorizedSubnetworks(ProposeToSetAuthorizedSubnetworksCmd), + + /// Propose to set the Bitcoin configuration + ProposeToSetBitcoinConfig(ProposeToSetBitcoinConfig), + + /// Propose to set the firewall config + ProposeToSetFirewallConfig(ProposeToSetFirewallConfigCmd), + + /// Propose to start a canister managed by the governance. + ProposeToStartCanister(StartCanisterCmd), + + /// Propose to stop a canister managed by the governance. + ProposeToStopCanister(StopCanisterCmd), + + /// Submits a proposal to uninstall code of a canister. + ProposeToUninstallCode(ProposeToUninstallCodeCmd), + /// Propose to update the settings of a canister. ProposeToUpdateCanisterSettings(ProposeToUpdateCanisterSettingsCmd), + + /// Propose to update firewall rules + ProposeToUpdateFirewallRules(ProposeToUpdateFirewallRulesCmd), + + /// Update the Node Operator's specified parameters + ProposeToUpdateNodeOperatorConfig(ProposeToUpdateNodeOperatorConfigCmd), + + /// Submit a proposal to update the node rewards table + ProposeToUpdateNodeRewardsTable(ProposeToUpdateNodeRewardsTableCmd), + + /// Submits a proposal to update a subnet's recovery CUP + ProposeToUpdateRecoveryCup(ProposeToUpdateRecoveryCupCmd), + + /// Propose to update the list of Principals that are allowed to deploy SNS instances + ProposeToUpdateSnsDeployWhitelist(ProposeToUpdateSnsDeployWhitelistCmd), + + /// Propose to update the list of SNS Subnet IDs that SNS-WASM deploys SNS instances to + ProposeToUpdateSnsSubnetIdsInSnsWasm(ProposeToUpdateSnsSubnetIdsInSnsWasmCmd), + + /// Propose to update the SSH keys that have read-only access to all unassigned nodes. + ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes( + ProposeToUpdateSshReadonlyAccessForAllUnassignedNodesCmd, + ), + + /// Submits a proposal to update an existing subnet's configuration. 
+ ProposeToUpdateSubnet(ProposeToUpdateSubnetCmd), + + /// Submits a proposal to update the subnet types that are available in the + /// cycles minting canister. + ProposeToUpdateSubnetType(ProposeToUpdateSubnetTypeCmd), + + /// Propose Xdr/Icp conversion rate. + ProposeToUpdateXdrIcpConversionRate(ProposeXdrIcpConversionRateCmd), + + // Submit a root proposal to the root canister to upgrade the governance canister. + SubmitRootProposalToUpgradeGovernanceCanister(SubmitRootProposalToUpgradeGovernanceCanisterCmd), + + /// Update local registry store by pulling from remote URL + UpdateRegistryLocalStore(UpdateRegistryLocalStoreCmd), + + // Vote on a pending root proposal to upgrade the governance canister. + VoteOnRootProposalToUpgradeGovernanceCanister(VoteOnRootProposalToUpgradeGovernanceCanisterCmd), } /// Indicates whether a value should be added or removed. @@ -672,7 +739,7 @@ impl ProposalPayload } } -/// Obsolete; please use `ProposeToDeployGuestosToAllUnassignedNodes` or +/// Deprecated; please use `ProposeToDeployGuestosToAllUnassignedNodes` or /// `ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes` instead. #[derive_common_proposal_fields] #[derive(Clone, Parser, ProposalMetadata)] @@ -866,7 +933,7 @@ impl ProposalAction for StopCanisterCmd { /// Sub-command to submit a proposal to update elected replica versions. #[derive_common_proposal_fields] #[derive(Parser, ProposalMetadata)] -struct ProposeToReviseElectedGuestssVersionsCmd { +struct ProposeToReviseElectedGuestsOsVersionsCmd { #[clap(long)] /// The replica version ID to elect. 
pub replica_version_to_elect: Option, @@ -886,7 +953,7 @@ struct ProposeToReviseElectedGuestssVersionsCmd { pub replica_versions_to_unelect: Vec, } -impl ProposalTitle for ProposeToReviseElectedGuestssVersionsCmd { +impl ProposalTitle for ProposeToReviseElectedGuestsOsVersionsCmd { fn title(&self) -> String { match &self.proposal_title { Some(title) => title.clone(), @@ -900,7 +967,7 @@ impl ProposalTitle for ProposeToReviseElectedGuestssVersionsCmd { #[async_trait] impl ProposalPayload - for ProposeToReviseElectedGuestssVersionsCmd + for ProposeToReviseElectedGuestsOsVersionsCmd { async fn payload(&self, _: &Agent) -> ReviseElectedGuestosVersionsPayload { let payload = ReviseElectedGuestosVersionsPayload { @@ -1630,7 +1697,7 @@ struct ProposeToUpdateSnsDeployWhitelistCmd { pub removed_principals: Vec, } -/// Obsolete; please use `CreateServiceNervousSystem` instead. +/// Deprecated; please use `CreateServiceNervousSystem` instead. #[derive_common_proposal_fields] #[derive(Clone, Parser, ProposalMetadata)] struct ProposeToOpenSnsTokenSwap {} @@ -3273,11 +3340,6 @@ impl ProposalPayload } } -/// Obsolete; please use `ProposeToDeployHostosToSomeNodes` instead. -#[derive_common_proposal_fields] -#[derive(Parser, ProposalMetadata)] -struct ProposeToUpdateNodesHostosVersionCmd {} - /// Sub-command to deploy a HostOS version to a set of nodes. #[derive_common_proposal_fields] #[derive(Parser, ProposalMetadata)] @@ -3422,11 +3484,6 @@ impl ProposalPayload for ProposeToRemoveApiBounda } } -/// Obsolete; please use `ProposeToDeployGuestosToSomeApiBoundaryNodes` instead. 
-#[derive_common_proposal_fields] -#[derive(Clone, Parser, ProposalMetadata)] -struct ProposeToUpdateApiBoundaryNodesVersionCmd {} - #[derive_common_proposal_fields] #[derive(Parser, ProposalMetadata)] struct ProposeToDeployGuestosToSomeApiBoundaryNodesCmd { @@ -3610,77 +3667,54 @@ async fn main() { // // TODO(NNS1-486): Remove ic-admin command whitelist for sender match opts.subcmd { - SubCommand::ProposeToDeployGuestosToAllSubnetNodes(_) => (), - SubCommand::ProposeToUpdateSubnetReplicaVersion(_) => (), - SubCommand::ProposeToCreateSubnet(_) => (), - SubCommand::ProposeToRemoveNodes(_) => (), - SubCommand::ProposeToChangeSubnetMembership(_) => (), - SubCommand::ProposeToChangeNnsCanister(_) => (), - SubCommand::ProposeToHardResetNnsRootToVersion(_) => (), - SubCommand::ProposeToUninstallCode(_) => (), - SubCommand::ProposeToAddNnsCanister(_) => (), - SubCommand::ProposeToReviseElectedGuestosVersions(_) => (), - SubCommand::ProposeToUpdateElectedReplicaVersions(_) => (), - SubCommand::ProposeToUpdateSubnet(_) => (), - SubCommand::ProposeToClearProvisionalWhitelist(_) => (), - SubCommand::ProposeToUpdateRecoveryCup(_) => (), - SubCommand::ProposeToUpdateNodeOperatorConfig(_) => (), - SubCommand::ProposeToSetFirewallConfig(_) => (), + SubCommand::ProposeToAddApiBoundaryNodes(_) => (), SubCommand::ProposeToAddFirewallRules(_) => (), - SubCommand::ProposeToRemoveFirewallRules(_) => (), - SubCommand::ProposeToUpdateFirewallRules(_) => (), - SubCommand::ProposeToSetAuthorizedSubnetworks(_) => (), - SubCommand::ProposeToUpdateSubnetType(_) => (), - SubCommand::ProposeToChangeSubnetTypeAssignment(_) => (), - SubCommand::ProposeToAddOrRemoveNodeProvider(_) => (), - SubCommand::SubmitRootProposalToUpgradeGovernanceCanister(_) => (), - SubCommand::VoteOnRootProposalToUpgradeGovernanceCanister(_) => (), - SubCommand::ProposeToAddOrRemoveDataCenters(_) => (), - SubCommand::ProposeToUpdateNodeRewardsTable(_) => (), - SubCommand::ProposeToUpdateUnassignedNodesConfig(_) => 
panic!( - "Subcommand ProposeToUpdateUnassignedNodesConfig is obsolete; please use \ - ProposeToDeployGuestosToAllUnassignedNodesCmd or \ - ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes instead" - ), - SubCommand::ProposeToDeployGuestosToAllUnassignedNodes(_) => (), - SubCommand::ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes(_) => (), + SubCommand::ProposeToAddNnsCanister(_) => (), SubCommand::ProposeToAddNodeOperator(_) => (), - SubCommand::ProposeToRemoveNodeOperators(_) => (), + SubCommand::ProposeToAddOrRemoveDataCenters(_) => (), + SubCommand::ProposeToAddOrRemoveNodeProvider(_) => (), SubCommand::ProposeToAddWasmToSnsWasm(_) => (), - SubCommand::ProposeToPrepareCanisterMigration(_) => (), + SubCommand::ProposeToChangeNnsCanister(_) => (), + SubCommand::ProposeToChangeSubnetMembership(_) => (), + SubCommand::ProposeToChangeSubnetTypeAssignment(_) => (), + SubCommand::ProposeToClearProvisionalWhitelist(_) => (), SubCommand::ProposeToCompleteCanisterMigration(_) => (), - SubCommand::ProposeToStopCanister(_) => (), - SubCommand::ProposeToStartCanister(_) => (), - SubCommand::ProposeToRerouteCanisterRanges(_) => (), - SubCommand::ProposeXdrIcpConversionRate(_) => (), - SubCommand::ProposeToUpdateSnsSubnetIdsInSnsWasm(_) => (), - SubCommand::ProposeToUpdateSnsDeployWhitelist(_) => (), - SubCommand::ProposeToInsertSnsWasmUpgradePathEntries(_) => (), - SubCommand::ProposeToUpdateElectedHostosVersions(_) => panic!( - "Subcommand ProposeToUpdateElectedHostosVersions is obsolete; please use \ - ProposeToReviseElectedHostosVersions instead" - ), - SubCommand::ProposeToReviseElectedHostosVersions(_) => (), - SubCommand::ProposeToUpdateNodesHostosVersion(_) => panic!( - "Subcommand ProposeToUpdateNodesHostosVersion is obsolete; please use \ - ProposeToDeployHostosToSomeNodes instead" - ), - SubCommand::ProposeToDeployHostosToSomeNodes(_) => (), SubCommand::ProposeToCreateServiceNervousSystem(_) => (), - SubCommand::ProposeToSetBitcoinConfig(_) => (), - 
SubCommand::ProposeToAddApiBoundaryNodes(_) => (), - SubCommand::ProposeToRemoveApiBoundaryNodes(_) => (), - SubCommand::ProposeToUpdateApiBoundaryNodesVersion(_) => panic!( - "Subcommand ProposeToUpdateApiBoundaryNodesVersion is obsolete; please use \ - ProposeToDeployGuestosToSomeApiBoundaryNodes instead" - ), + SubCommand::ProposeToCreateSubnet(_) => (), + SubCommand::ProposeToDeployGuestosToAllSubnetNodes(_) => (), + SubCommand::ProposeToDeployGuestosToAllUnassignedNodes(_) => (), SubCommand::ProposeToDeployGuestosToSomeApiBoundaryNodes(_) => (), - SubCommand::ProposeToOpenSnsTokenSwap(_) => panic!( - "Subcommand OpenSnsTokenSwap is obsolete; please use \ - ProposeToCreateServiceNervousSystem instead" - ), + SubCommand::ProposeToDeployHostosToSomeNodes(_) => (), + SubCommand::ProposeToHardResetNnsRootToVersion(_) => (), + SubCommand::ProposeToInsertSnsWasmUpgradePathEntries(_) => (), + SubCommand::ProposeToPrepareCanisterMigration(_) => (), + SubCommand::ProposeToRemoveApiBoundaryNodes(_) => (), + SubCommand::ProposeToRemoveFirewallRules(_) => (), + SubCommand::ProposeToRemoveNodeOperators(_) => (), + SubCommand::ProposeToRemoveNodes(_) => (), SubCommand::ProposeToRentSubnet(_) => (), + SubCommand::ProposeToRerouteCanisterRanges(_) => (), + SubCommand::ProposeToReviseElectedGuestosVersions(_) => (), + SubCommand::ProposeToReviseElectedHostosVersions(_) => (), + SubCommand::ProposeToSetAuthorizedSubnetworks(_) => (), + SubCommand::ProposeToSetBitcoinConfig(_) => (), + SubCommand::ProposeToSetFirewallConfig(_) => (), + SubCommand::ProposeToStartCanister(_) => (), + SubCommand::ProposeToStopCanister(_) => (), + SubCommand::ProposeToUninstallCode(_) => (), SubCommand::ProposeToUpdateCanisterSettings(_) => (), + SubCommand::ProposeToUpdateFirewallRules(_) => (), + SubCommand::ProposeToUpdateNodeOperatorConfig(_) => (), + SubCommand::ProposeToUpdateNodeRewardsTable(_) => (), + SubCommand::ProposeToUpdateRecoveryCup(_) => (), + 
SubCommand::ProposeToUpdateSnsDeployWhitelist(_) => (), + SubCommand::ProposeToUpdateSnsSubnetIdsInSnsWasm(_) => (), + SubCommand::ProposeToUpdateSshReadonlyAccessForAllUnassignedNodes(_) => (), + SubCommand::ProposeToUpdateSubnet(_) => (), + SubCommand::ProposeToUpdateSubnetType(_) => (), + SubCommand::ProposeToUpdateXdrIcpConversionRate(_) => (), + SubCommand::SubmitRootProposalToUpgradeGovernanceCanister(_) => (), + SubCommand::VoteOnRootProposalToUpgradeGovernanceCanister(_) => (), _ => panic!( "Specifying a secret key or HSM is only supported for \ methods that interact with NNS handlers." @@ -3697,10 +3731,10 @@ async fn main() { &opts.hsm_slot.expect( "HSM slot must also be provided for --use-hsm; use --hsm-slot or see --help.", ), - &opts.key_id.expect( + &opts.hsm_key_id.expect( "HSM key ID must also be provided for --use-hsm; use --key-id or see --help.", ), - &opts.pin.expect( + &opts.hsm_pin.expect( "HSM pin must also be provided for --use-hsm; use --pin or see --help.", ), ) @@ -3903,8 +3937,7 @@ async fn main() { exit(1); } } - SubCommand::ProposeToUpdateSubnetReplicaVersion(cmd) - | SubCommand::ProposeToDeployGuestosToAllSubnetNodes(cmd) => { + SubCommand::ProposeToDeployGuestosToAllSubnetNodes(cmd) => { let (proposer, sender) = cmd.proposer_and_sender(sender); propose_external_proposal_from_command( cmd, @@ -3919,7 +3952,7 @@ async fn main() { ) .await; } - SubCommand::GetBlessedReplicaVersions => { + SubCommand::GetElectedGuestosVersions => { print_and_get_last_value::( make_blessed_replica_versions_key().as_bytes().to_vec(), ®istry_canister, @@ -3975,8 +4008,7 @@ async fn main() { println!("KeyId {:?}: {:?}", key_id, subnets); } } - SubCommand::ProposeToUpdateElectedReplicaVersions(cmd) - | SubCommand::ProposeToReviseElectedGuestosVersions(cmd) => { + SubCommand::ProposeToReviseElectedGuestosVersions(cmd) => { let (proposer, sender) = cmd.proposer_and_sender(sender); propose_external_proposal_from_command( cmd, @@ -4151,7 +4183,7 @@ async fn 
main() { ) .await; } - SubCommand::ProposeXdrIcpConversionRate(cmd) => { + SubCommand::ProposeToUpdateXdrIcpConversionRate(cmd) => { let (proposer, sender) = cmd.proposer_and_sender(sender); propose_external_proposal_from_command( cmd, @@ -4925,9 +4957,6 @@ async fn main() { ); propose_action_from_command(cmd, canister_client, proposer).await; } - // Since we're matching on the `SubCommand` type the second time, this match doesn't have - // to be exhaustive, e.g., we've already verified that the subcommand is not obsolete. - _ => unreachable!(), } } @@ -5520,8 +5549,7 @@ async fn get_node_list_since( *node_map.entry(node_id).or_default() = record; } None => { - #[allow(deprecated)] - node_map.remove(&node_id); + node_map.shift_remove(&node_id); } }; } else if is_node_operator_record_key(&versioned_record.key) { @@ -5533,8 +5561,7 @@ async fn get_node_list_since( *node_operator_map.entry(node_operator_id).or_default() = record; } None => { - #[allow(deprecated)] - node_operator_map.remove(&node_operator_id); + node_operator_map.shift_remove(&node_operator_id); } }; } @@ -5962,7 +5989,7 @@ struct GovernanceCanisterClient(NnsCanisterClient); struct RootCanisterClient(NnsCanisterClient); fn is_mainnet(url: &Url) -> bool { - url.domain().map_or(false, |domain| { + url.domain().is_some_and(|domain| { IC_DOMAINS .iter() .any(|&ic_domain| domain.contains(ic_domain)) diff --git a/rs/registry/canister/src/common/test_helpers.rs b/rs/registry/canister/src/common/test_helpers.rs index 03c9c7345ed..c3248d8e3ab 100644 --- a/rs/registry/canister/src/common/test_helpers.rs +++ b/rs/registry/canister/src/common/test_helpers.rs @@ -10,14 +10,17 @@ use ic_nns_test_utils::registry::{ use ic_protobuf::registry::crypto::v1::PublicKey; use ic_protobuf::registry::node::v1::IPv4InterfaceConfig; use ic_protobuf::registry::node::v1::NodeRecord; +use ic_protobuf::registry::node_operator::v1::NodeOperatorRecord; use ic_protobuf::registry::subnet::v1::SubnetListRecord; use 
ic_protobuf::registry::subnet::v1::SubnetRecord; +use ic_registry_keys::make_node_operator_record_key; use ic_registry_keys::make_subnet_list_record_key; use ic_registry_keys::make_subnet_record_key; use ic_registry_transport::pb::v1::{ registry_mutation::Type, RegistryAtomicMutateRequest, RegistryMutation, }; -use ic_registry_transport::upsert; +use ic_registry_transport::{insert, upsert}; +use ic_test_utilities_types::ids::subnet_test_id; use ic_types::ReplicaVersion; use prost::Message; use std::collections::BTreeMap; @@ -148,3 +151,58 @@ pub fn prepare_registry_with_nodes_and_node_operator_id( }; (mutate_request, node_ids_and_dkg_pks) } + +pub fn registry_create_subnet_with_nodes( + registry: &mut Registry, + node_ids_and_dkg_pks: &BTreeMap, + node_offsets: &[usize], +) -> ic_types::SubnetId { + let node_ids: Vec = node_ids_and_dkg_pks.keys().cloned().collect(); + + // Create a subnet with the specified nodes + let subnet_id = subnet_test_id(1000); + let mut subnet_list_record = registry.get_subnet_list_record(); + let subnet_record: SubnetRecord = + get_invariant_compliant_subnet_record(node_offsets.iter().map(|&i| node_ids[i]).collect()); + let subnet_nodes = node_offsets + .iter() + .map(|&i| (node_ids[i], node_ids_and_dkg_pks[&node_ids[i]].clone())) + .collect(); + registry.maybe_apply_mutation_internal(add_fake_subnet( + subnet_id, + &mut subnet_list_record, + subnet_record, + &subnet_nodes, + )); + + subnet_id +} + +pub fn registry_add_node_operator_for_node( + registry: &mut Registry, + node_id: NodeId, + node_allowance: u64, +) -> PrincipalId { + let node_operator_id = + PrincipalId::try_from(registry.get_node_or_panic(node_id).node_operator_id).unwrap(); + let node_operator_record_key = make_node_operator_record_key(node_operator_id); + + if registry + .get( + node_operator_record_key.as_bytes(), + registry.latest_version(), + ) + .is_none() + { + let node_operator_record = NodeOperatorRecord { + node_allowance, + ..Default::default() + }; + + 
registry.maybe_apply_mutation_internal(vec![insert( + node_operator_record_key, + node_operator_record.encode_to_vec(), + )]); + }; + node_operator_id +} diff --git a/rs/registry/canister/src/mutations/node_management/do_add_node.rs b/rs/registry/canister/src/mutations/node_management/do_add_node.rs index 7f57ed9d217..b7bfeb0d5ce 100644 --- a/rs/registry/canister/src/mutations/node_management/do_add_node.rs +++ b/rs/registry/canister/src/mutations/node_management/do_add_node.rs @@ -48,7 +48,13 @@ impl Registry { let mut node_operator_record = get_node_operator_record(self, caller_id) .map_err(|err| format!("{}do_add_node: Aborting node addition: {}", LOG_PREFIX, err))?; - // 1. Clear out any nodes that already exist at this IP. + // 1. Validate keys and get the node id + let (node_id, valid_pks) = valid_keys_from_payload(&payload) + .map_err(|err| format!("{}do_add_node: {}", LOG_PREFIX, err))?; + + println!("{}do_add_node: The node id is {:?}", LOG_PREFIX, node_id); + + // 2. Clear out any nodes that already exist at this IP. // This will only succeed if: // - the same NO was in control of the original nodes. // - the nodes are no longer in subnets. @@ -57,26 +63,47 @@ impl Registry { // release dashboard.) let http_endpoint = connection_endpoint_from_string(&payload.http_endpoint); let nodes_with_same_ip = scan_for_nodes_by_ip(self, &http_endpoint.ip_addr); + let mut mutations = Vec::new(); + let num_removed_nodes = nodes_with_same_ip.len() as u64; if !nodes_with_same_ip.is_empty() { - for node_id in nodes_with_same_ip { - self.do_remove_node(RemoveNodeDirectlyPayload { node_id }, caller_id); + if nodes_with_same_ip.len() == 1 { + mutations = self.make_remove_or_replace_node_mutations( + RemoveNodeDirectlyPayload { + node_id: nodes_with_same_ip[0], + }, + caller_id, + Some(node_id), + ); + } else { + // In the unlikely situation that multiple nodes share the same IP address as the new node, + // this will remove the existing nodes. 
+ // While the situation is unexpected, the behavior is backwards compatible. + // This may happen only if there is a bug in the registry code and the registry invariant isn't enforced, + // due to which the node id was not properly removed. + for previous_node_id in nodes_with_same_ip { + mutations.extend(self.make_remove_or_replace_node_mutations( + RemoveNodeDirectlyPayload { + node_id: previous_node_id, + }, + caller_id, + // If there are multiple nodes with the same IP, then each of them could in principle be in a (different) subnet. + // In that case replacing all different node ids with the same new node isn't an option. + // To cover for this corner case, we don't replace the node id but just remove the node and potentially fail. + None, + )); + } } - - // Update the NO record, as the available allowance may have changed. - node_operator_record = get_node_operator_record(self, caller_id).map_err(|err| { - format!("{}do_add_node: Aborting node addition: {}", LOG_PREFIX, err) - })? } - // 2. Check if adding one more node will get us over the cap for the Node Operator - if node_operator_record.node_allowance == 0 { + // 3. Check if adding one more node will get us over the cap for the Node Operator + if node_operator_record.node_allowance + num_removed_nodes == 0 { return Err(format!( "{}do_add_node: Node allowance for this Node Operator is exhausted", LOG_PREFIX )); } - // 3. Get valid type if type is in request + // 4. Get valid type if type is in request let node_reward_type = payload .node_reward_type .as_ref() @@ -91,10 +118,6 @@ impl Registry { .transpose()? .map(|node_reward_type| node_reward_type as i32); - // 4. Validate keys and get the node id - let (node_id, valid_pks) = valid_keys_from_payload(&payload) - .map_err(|err| format!("{}do_add_node: {}", LOG_PREFIX, err))?; - // 5. 
Validate the domain is valid let domain: Option = payload .domain @@ -127,8 +150,6 @@ impl Registry { } } - println!("{}do_add_node: The node id is {:?}", LOG_PREFIX, node_id); - // 7. Create the Node Record let node_record = NodeRecord { xnet: Some(connection_endpoint_from_string(&payload.xnet_endpoint)), @@ -142,17 +163,22 @@ impl Registry { }; // 8. Insert node, public keys, and crypto keys - let mut mutations = make_add_node_registry_mutations(node_id, node_record, valid_pks); + mutations.extend(make_add_node_registry_mutations( + node_id, + node_record, + valid_pks, + )); // 9. Update the Node Operator record - node_operator_record.node_allowance -= 1; + node_operator_record.node_allowance = + node_operator_record.node_allowance + num_removed_nodes - 1; let update_node_operator_record = make_update_node_operator_mutation(caller_id, &node_operator_record); mutations.push(update_node_operator_record); - // 10. Check invariants before applying mutations + // 10. Check invariants and then apply mutations self.maybe_apply_mutation_internal(mutations); println!("{}do_add_node finished: {:?}", LOG_PREFIX, payload); @@ -287,21 +313,23 @@ fn now() -> Result { #[cfg(test)] mod tests { - use std::str::FromStr; - - use crate::{ - common::test_helpers::invariant_compliant_registry, mutations::common::test::TEST_NODE_ID, - }; - use super::*; + use crate::common::test_helpers::{ + invariant_compliant_registry, prepare_registry_with_nodes, + registry_add_node_operator_for_node, registry_create_subnet_with_nodes, + }; + use crate::mutations::common::test::TEST_NODE_ID; + use ic_base_types::{NodeId, PrincipalId}; use ic_config::crypto::CryptoConfig; use ic_crypto_node_key_generation::generate_node_keys_once; use ic_protobuf::registry::node_operator::v1::NodeOperatorRecord; use ic_registry_canister_api::IPv4Config; use ic_registry_keys::{make_node_operator_record_key, make_node_record_key}; use ic_registry_transport::insert; + use itertools::Itertools; use 
lazy_static::lazy_static; use prost::Message; + use std::str::FromStr; /// Prepares the payload to add a new node, for tests. pub fn prepare_add_node_payload(mutation_id: u8) -> (AddNodePayload, ValidNodePublicKeys) { @@ -326,8 +354,8 @@ mod tests { ni_dkg_dealing_encryption_pk, transport_tls_cert, idkg_dealing_encryption_pk: Some(idkg_dealing_encryption_pk), - xnet_endpoint: format!("128.0.{mutation_id}.1:1234"), - http_endpoint: format!("128.0.{mutation_id}.1:4321"), + xnet_endpoint: format!("128.0.{mutation_id}.100:1234"), + http_endpoint: format!("128.0.{mutation_id}.100:4321"), chip_id: None, public_ipv4_config: None, domain: Some("api-example.com".to_string()), @@ -737,4 +765,143 @@ mod tests { .unwrap_err(); assert!(e.contains("do_add_node: There is already another node with the same IPv4 address")); } + + #[test] + fn should_add_node_and_replace_existing_node_in_subnet() { + // This test verifies that adding a new node replaces an existing node in a subnet + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 6); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids: Vec = node_ids_and_dkg_pks.keys().cloned().collect(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, node_ids[0], 0); + + // Create a subnet with the first 4 nodes + let subnet_id = + registry_create_subnet_with_nodes(&mut registry, &node_ids_and_dkg_pks, &[0, 1, 2, 3]); + let subnet_record = registry.get_subnet_or_panic(subnet_id); + let subnet_membership = subnet_record + .membership + .iter() + .map(|bytes| NodeId::from(PrincipalId::try_from(bytes).unwrap())) + .collect::>(); + let expected_remove_node_id = node_ids[1]; // same offset as the subnet membership vector + let expected_remove_node = registry.get_node(subnet_membership[1]).unwrap(); + + println!( + "Original subnet membership (node ids): {:?}", + subnet_membership 
+ ); + + // Add a new node with the same IP address and port as an existing node, which should replace the existing node + let (mut payload, _valid_pks) = prepare_add_node_payload(2); + let http = expected_remove_node.http.unwrap(); + payload + .http_endpoint + .clone_from(&format!("[{}]:{}", http.ip_addr, http.port)); + let new_node_id = registry + .do_add_node_(payload.clone(), node_operator_id) + .expect("failed to add a node"); + + // Verify the subnet record is updated with the new node + let subnet_record = registry.get_subnet_or_panic(subnet_id); + let mut expected_membership = subnet_membership.clone(); + expected_membership[1] = new_node_id; + expected_membership.sort(); + let actual_membership: Vec = subnet_record + .membership + .iter() + .map(|bytes| NodeId::from(PrincipalId::try_from(bytes).unwrap())) + .sorted() + .collect(); + assert_eq!(actual_membership, expected_membership); + + // Verify the old node is removed from the registry + assert!(registry.get_node(expected_remove_node_id).is_none()); + + // Verify the new node is present in the registry + assert!(registry.get_node(new_node_id).is_some()); + + // Verify node operator allowance is unchanged + let updated_operator = get_node_operator_record(®istry, node_operator_id).unwrap(); + assert_eq!(updated_operator.node_allowance, 0); + } + + #[test] + fn should_add_node_with_no_subnet_conflict() { + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 4); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids: Vec = node_ids_and_dkg_pks.keys().cloned().collect(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, node_ids[0], 1); + + // Prepare payload to add a new node + let (payload, _valid_pks) = prepare_add_node_payload(2); + + // Add the new node + let new_node_id = registry + .do_add_node_(payload.clone(), node_operator_id) + 
.expect("failed to add a node"); + + // Verify the new node is present in the registry + assert!(registry.get_node(new_node_id).is_some()); + + // Verify node operator allowance is decremented + let updated_operator = get_node_operator_record(®istry, node_operator_id).unwrap(); + assert_eq!(updated_operator.node_allowance, 0); + + // Verify all nodes are in the registry + for node_id in node_ids { + assert!(registry.get_node(node_id).is_some()); + } + } + + #[test] + #[should_panic(expected = "Node allowance for this Node Operator is exhausted")] + fn should_panic_if_node_allowance_is_exhausted() { + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 1); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids: Vec = node_ids_and_dkg_pks.keys().cloned().collect(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, node_ids[0], 0); + + // Prepare payload to add a new node + let (payload, _valid_pks) = prepare_add_node_payload(2); + + // Attempt to add the new node, which should panic due to exhausted allowance + registry + .do_add_node_(payload.clone(), node_operator_id) + .unwrap(); + } + + #[test] + fn should_add_node_and_update_allowance() { + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 1); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids: Vec = node_ids_and_dkg_pks.keys().cloned().collect(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, node_ids[0], 1); + + // Prepare payload to add a new node + let (payload, _valid_pks) = prepare_add_node_payload(2); + + // Add the new node + let new_node_id = registry + .do_add_node_(payload.clone(), node_operator_id) + .expect("failed to add a node"); + + // Verify the new node is 
present in the registry + assert!(registry.get_node(new_node_id).is_some()); + + // Verify node operator allowance is decremented + let updated_operator = get_node_operator_record(®istry, node_operator_id).unwrap(); + assert_eq!(updated_operator.node_allowance, 0); + } } diff --git a/rs/registry/canister/src/mutations/node_management/do_remove_node_directly.rs b/rs/registry/canister/src/mutations/node_management/do_remove_node_directly.rs index 1ead6617e4e..2fc11389747 100644 --- a/rs/registry/canister/src/mutations/node_management/do_remove_node_directly.rs +++ b/rs/registry/canister/src/mutations/node_management/do_remove_node_directly.rs @@ -9,6 +9,9 @@ use candid::{CandidType, Deserialize}; use dfn_core::println; use ic_base_types::{NodeId, PrincipalId}; use ic_registry_keys::{make_api_boundary_node_record_key, make_subnet_record_key}; +use ic_registry_transport::pb::v1::RegistryMutation; +use ic_registry_transport::upsert; +use prost::Message; impl Registry { /// Removes an existing node from the registry. 
@@ -20,10 +23,42 @@ impl Registry { "{}do_remove_node_directly started: {:?} caller: {:?}", LOG_PREFIX, payload, caller_id ); - self.do_remove_node(payload, caller_id); + self.do_remove_node(payload.clone(), caller_id); + + println!( + "{}do_remove_node_directly finished: {:?}", + LOG_PREFIX, payload + ); + } + + pub fn do_replace_node_with_another( + &mut self, + payload: RemoveNodeDirectlyPayload, + caller_id: PrincipalId, + new_node_id: NodeId, + ) { + let mutations = + self.make_remove_or_replace_node_mutations(payload, caller_id, Some(new_node_id)); + + // Check invariants and apply mutations + self.maybe_apply_mutation_internal(mutations); } pub fn do_remove_node(&mut self, payload: RemoveNodeDirectlyPayload, caller_id: PrincipalId) { + let mutations = self.make_remove_or_replace_node_mutations(payload, caller_id, None); + // Check invariants and apply mutations + self.maybe_apply_mutation_internal(mutations); + } + + // Prepare mutations for removing or replacing a node in the registry. + // If new_node_id is Some, the old node is in-place replaced with the new node, even if the old node is in a subnet. + // If new_node_id is None, the old node is only removed from the registry and is not allowed to be in a subnet. + pub fn make_remove_or_replace_node_mutations( + &mut self, + payload: RemoveNodeDirectlyPayload, + caller_id: PrincipalId, + new_node_id: Option, + ) -> Vec { // 1. Find the node operator id for this record // and abort if the node record is not found let node_operator_id = get_node_operator_id_for_node(self, payload.node_id) @@ -80,23 +115,13 @@ impl Registry { ) }); assert_eq!( - node_provider_caller, node_provider_of_the_node, - "The node provider {:?} of the caller {}, does not match the node provider {:?} of the node {}.", - node_provider_caller, caller_id, node_provider_of_the_node, payload.node_id - ); - } - - // 3. 
Ensure node is not in a subnet - let subnet_list_record = get_subnet_list_record(self); - let is_node_in_subnet = find_subnet_for_node(self, payload.node_id, &subnet_list_record); - if let Some(subnet_id) = is_node_in_subnet { - panic!("{}do_remove_node_directly: Cannot remove a node that is a member of a subnet. This node is a member of Subnet: {}", - LOG_PREFIX, - make_subnet_record_key(subnet_id) + node_provider_caller, node_provider_of_the_node, + "The node provider {:?} of the caller {}, does not match the node provider {:?} of the node {}.", + node_provider_caller, caller_id, node_provider_of_the_node, payload.node_id ); } - // 4. Ensure the node is not an API Boundary Node. + // 3. Ensure the node is not an API Boundary Node. // In order to succeed, a corresponding ApiBoundaryNodeRecord should be removed first via proposal. let api_bn_id = self.get_api_boundary_node_record(payload.node_id); if api_bn_id.is_some() { @@ -107,6 +132,43 @@ impl Registry { ); } + // 4. Check if node is in a subnet, and if so, replace it in the subnet by updating the membership in the subnet record. + let subnet_list_record = get_subnet_list_record(self); + let is_node_in_subnet = find_subnet_for_node(self, payload.node_id, &subnet_list_record); + let mut mutations = vec![]; + if let Some(subnet_id) = is_node_in_subnet { + if new_node_id.is_some() { + // The node is in a subnet and is being replaced with a new node. + // Update the subnet record with the new node membership. 
+ let mut subnet_record = self.get_subnet_or_panic(subnet_id); + + let mut subnet_membership: Vec = subnet_record + .membership + .iter() + .map(|bytes| NodeId::from(PrincipalId::try_from(bytes).unwrap())) + .collect(); + + subnet_membership.retain(|&id| id != payload.node_id); + subnet_membership.push(new_node_id.unwrap()); + + // Update the subnet record with the new membership (and double check that the new node is not in a subnet) + self.replace_subnet_record_membership( + subnet_id, + &mut subnet_record, + subnet_membership, + ); + mutations = vec![upsert( + make_subnet_record_key(subnet_id), + subnet_record.encode_to_vec(), + )]; + } else { + panic!("{}do_remove_node_directly: Cannot remove a node that is a member of a subnet. This node is a member of Subnet: {}", + LOG_PREFIX, + make_subnet_record_key(subnet_id) + ); + } + } + // 5. Retrieve the NO record and increment its node allowance by 1 let mut new_node_operator_record = get_node_operator_record(self, caller_id) .map_err(|err| { @@ -119,23 +181,17 @@ impl Registry { new_node_operator_record.node_allowance += 1; // 6. Finally, generate the following mutations: - // * Delete the node + // * Delete the node record // * Delete entries for node encryption keys // * Increment NO's allowance by 1 - let mut mutations = make_remove_node_registry_mutations(self, payload.node_id); + mutations.extend(make_remove_node_registry_mutations(self, payload.node_id)); // mutation to update node operator value mutations.push(make_update_node_operator_mutation( node_operator_id, &new_node_operator_record, )); - // 7. 
Apply mutations after checking invariants - self.maybe_apply_mutation_internal(mutations); - - println!( - "{}do_remove_node_directly finished: {:?}", - LOG_PREFIX, payload - ); + mutations } } @@ -147,26 +203,24 @@ pub struct RemoveNodeDirectlyPayload { #[cfg(test)] mod tests { - use std::str::FromStr; - - use ic_base_types::PrincipalId; - use ic_protobuf::registry::{ - api_boundary_node::v1::ApiBoundaryNodeRecord, node_operator::v1::NodeOperatorRecord, - }; - use ic_registry_keys::make_node_operator_record_key; - use ic_registry_transport::insert; - use ic_types::ReplicaVersion; - use prost::Message; - + use super::*; use crate::{ common::test_helpers::{ invariant_compliant_registry, prepare_registry_with_nodes, - prepare_registry_with_nodes_and_node_operator_id, + prepare_registry_with_nodes_and_node_operator_id, registry_add_node_operator_for_node, + registry_create_subnet_with_nodes, }, mutations::common::test::TEST_NODE_ID, }; - - use super::*; + use ic_base_types::{NodeId, PrincipalId}; + use ic_protobuf::registry::{ + api_boundary_node::v1::ApiBoundaryNodeRecord, node_operator::v1::NodeOperatorRecord, + }; + use ic_registry_keys::{make_node_operator_record_key, make_node_record_key}; + use ic_registry_transport::insert; + use ic_types::ReplicaVersion; + use prost::Message; + use std::str::FromStr; #[test] #[should_panic(expected = "Node Id 2vxsx-fae not found in the registry")] @@ -387,4 +441,120 @@ mod tests { // Should fail because the DC of operator1 and operator2 does not match registry.do_remove_node(payload, operator2_id); } + #[test] + fn should_replace_node_in_subnet() { + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 2); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids = node_ids_and_dkg_pks.keys().cloned().collect::>(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, 
node_ids[0], 0); + + // Create a subnet with the first node + let subnet_id = + registry_create_subnet_with_nodes(&mut registry, &node_ids_and_dkg_pks, &[0]); + + // Replace the node_ids[0] with node_ids[1], while node_ids[0] is in a subnet + let payload = RemoveNodeDirectlyPayload { + node_id: node_ids[0], + }; + + registry.do_replace_node_with_another(payload, node_operator_id, node_ids[1]); + + // Verify the subnet record is updated with the new node + let expected_membership: Vec = vec![node_ids[1]]; + let actual_membership: Vec = registry + .get_subnet_or_panic(subnet_id) + .membership + .iter() + .map(|bytes| NodeId::from(PrincipalId::try_from(bytes).unwrap())) + .collect(); + assert_eq!(actual_membership, expected_membership); + + // Verify the old node is removed from the registry + assert!(registry + .get( + make_node_record_key(node_ids[0]).as_bytes(), + registry.latest_version() + ) + .is_none()); + + // Verify the new node is present in the registry + assert!(registry.get_node(node_ids[1]).is_some()); + + // Verify node operator allowance increased by 1 + let updated_operator = get_node_operator_record(®istry, node_operator_id).unwrap(); + assert_eq!(updated_operator.node_allowance, 1); + } + + #[test] + #[should_panic(expected = "Cannot remove a node that is a member of a subnet")] + fn should_panic_if_removing_node_in_subnet_without_replacement() { + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 1); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids: Vec = node_ids_and_dkg_pks.keys().cloned().collect(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, node_ids[0], 0); + + // Create a subnet with the first node + let _subnet_id = + registry_create_subnet_with_nodes(&mut registry, &node_ids_and_dkg_pks, &[0]); + + // Attempt to remove the node without replacement + let payload = 
RemoveNodeDirectlyPayload { + node_id: node_ids[0], + }; + + registry.do_remove_node(payload, node_operator_id); + } + + #[test] + fn should_replace_node_in_subnet_and_update_allowance() { + let mut registry = invariant_compliant_registry(0); + + // Add nodes to the registry + let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 2); + registry.maybe_apply_mutation_internal(mutate_request.mutations); + let node_ids = node_ids_and_dkg_pks.keys().cloned().collect::>(); + let node_operator_id = registry_add_node_operator_for_node(&mut registry, node_ids[0], 0); + + // Create a subnet with the first node + let subnet_id = + registry_create_subnet_with_nodes(&mut registry, &node_ids_and_dkg_pks, &[0]); + + // Replace the first node with the second node in the subnet + let payload = RemoveNodeDirectlyPayload { + node_id: node_ids[0], + }; + + registry.do_replace_node_with_another(payload, node_operator_id, node_ids[1]); + + // Verify the subnet record is updated with the new node + let expected_membership: Vec = vec![node_ids[1]]; + let actual_membership: Vec = registry + .get_subnet_or_panic(subnet_id) + .membership + .iter() + .map(|bytes| NodeId::from(PrincipalId::try_from(bytes).unwrap())) + .collect(); + assert_eq!(actual_membership, expected_membership); + + // Verify the old node is removed from the registry + assert!(registry + .get( + make_node_record_key(node_ids[0]).as_bytes(), + registry.latest_version() + ) + .is_none()); + + // Verify the new node is present in the registry + assert!(registry.get_node(node_ids[1]).is_some()); + + // Verify node operator allowance increased by 1 + let updated_operator = get_node_operator_record(®istry, node_operator_id).unwrap(); + assert_eq!(updated_operator.node_allowance, 1); + } } diff --git a/rs/registry/nns_data_provider/src/certification.rs b/rs/registry/nns_data_provider/src/certification.rs index 79df4bd3b0e..efd31d9325f 100644 --- a/rs/registry/nns_data_provider/src/certification.rs +++ 
b/rs/registry/nns_data_provider/src/certification.rs @@ -231,7 +231,7 @@ where struct ProtobufVisitor(PhantomData); - impl<'de, T: prost::Message + Default> serde::de::Visitor<'de> for ProtobufVisitor { + impl serde::de::Visitor<'_> for ProtobufVisitor { type Value = Protobuf; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/rs/registry/unreleased_changelog.md b/rs/registry/unreleased_changelog.md index 94126a0ff42..c854a37cad3 100644 --- a/rs/registry/unreleased_changelog.md +++ b/rs/registry/unreleased_changelog.md @@ -11,6 +11,19 @@ on the process that this file is part of, see ## Changed +### Support for node redeployment and replacement even if the node is in a subnet + +During node redeployments, we added support for replacing an existing node with the same +IP address even if the existing node is currently in a subnet. +The new node id will be added to the subnet and the old node id will be removed +from the subnet without any intervention being required from the users or the community. + +Previously, an additional NNS proposal for removing and replacing the old node in +the subnet was required to enable redeployments for such nodes. +Such behavior is conservative and not strictly necessary since the subnet +decentralization is not affected when the new node has all properties +identical to the old node, which is the case if the IPv6 address is unchanged. 
+ ## Deprecated ## Removed diff --git a/rs/replica/setup_ic_network/src/lib.rs b/rs/replica/setup_ic_network/src/lib.rs index ca2333d1edd..af9ec4f83ea 100644 --- a/rs/replica/setup_ic_network/src/lib.rs +++ b/rs/replica/setup_ic_network/src/lib.rs @@ -28,10 +28,10 @@ use ic_https_outcalls_consensus::{ use ic_ingress_manager::{bouncer::IngressBouncer, IngressManager, RandomStateKind}; use ic_interfaces::{ batch_payload::BatchPayloadBuilder, + consensus_pool::{ConsensusBlockCache, ConsensusPoolCache}, execution_environment::IngressHistoryReader, messaging::{MessageRouting, XNetPayloadBuilder}, - p2p::artifact_manager::JoinGuard, - p2p::state_sync::StateSyncClient, + p2p::{artifact_manager::JoinGuard, state_sync::StateSyncClient}, self_validating_payload::SelfValidatingPayloadBuilder, time_source::{SysTimeSource, TimeSource}, }; @@ -85,6 +85,105 @@ struct ArtifactPools { canister_http_pool: Arc>, } +impl ArtifactPools { + fn new( + log: &ReplicaLogger, + metrics_registry: &MetricsRegistry, + node_id: NodeId, + config: ArtifactPoolConfig, + catch_up_package: &CatchUpPackage, + ) -> Self { + let ingress_pool = Arc::new(RwLock::new(IngressPoolImpl::new( + node_id, + config.clone(), + metrics_registry.clone(), + log.clone(), + ))); + + let mut idkg_pool = IDkgPoolImpl::new( + config.clone(), + log.clone(), + metrics_registry.clone(), + Box::new(idkg::IDkgStatsImpl::new(metrics_registry.clone())), + ); + idkg_pool.add_initial_dealings(catch_up_package); + let idkg_pool = Arc::new(RwLock::new(idkg_pool)); + + let certification_pool = Arc::new(RwLock::new(CertificationPoolImpl::new( + node_id, + config, + log.clone(), + metrics_registry.clone(), + ))); + let dkg_pool = Arc::new(RwLock::new(DkgPoolImpl::new( + metrics_registry.clone(), + log.clone(), + ))); + let canister_http_pool = Arc::new(RwLock::new(CanisterHttpPoolImpl::new( + metrics_registry.clone(), + log.clone(), + ))); + Self { + ingress_pool, + certification_pool, + dkg_pool, + idkg_pool, + canister_http_pool, + } 
+ } +} + +struct Bouncers { + ingress: Arc, + consensus: Arc, + certifier: Arc, + dkg: Arc, + idkg: Arc, + https_outcalls: Arc, +} + +impl Bouncers { + fn new( + log: &ReplicaLogger, + metrics_registry: &MetricsRegistry, + subnet_id: SubnetId, + time_source: Arc, + message_router: Arc, + consensus_pool_cache: Arc, + consensus_block_cache: Arc, + state_reader: Arc>, + ) -> Self { + let ingress = Arc::new(IngressBouncer::new(time_source.clone())); + let consensus = Arc::new(ConsensusBouncer::new(metrics_registry, message_router)); + let dkg = Arc::new(DkgBouncer::new(metrics_registry)); + let certifier = Arc::new(CertifierBouncer::new( + metrics_registry, + consensus_pool_cache.clone(), + )); + let idkg = Arc::new(idkg::IDkgBouncer::new( + metrics_registry, + subnet_id, + consensus_block_cache, + state_reader.clone(), + )); + + let https_outcalls = Arc::new(CanisterHttpGossipImpl::new( + consensus_pool_cache.clone(), + state_reader.clone(), + log.clone(), + )); + + Self { + ingress, + consensus, + dkg, + idkg, + certifier, + https_outcalls, + } + } +} + pub type CanisterHttpAdapterClient = Box + Send>; @@ -131,14 +230,14 @@ pub fn setup_consensus_and_p2p( ) { let consensus_pool_cache = consensus_pool.read().unwrap().get_cache(); - let (ingress_pool, ingress_sender, join_handles, mut p2p_consensus) = start_consensus( + let (ingress_pool, ingress_sender, join_handles, p2p_consensus) = start_consensus( log, metrics_registry, rt_handle, node_id, subnet_id, artifact_pool_config, - catch_up_package, + &catch_up_package, Arc::clone(&consensus_crypto) as Arc<_>, Arc::clone(&certifier_crypto) as Arc<_>, Arc::clone(&ingress_sig_crypto) as Arc<_>, @@ -158,18 +257,21 @@ pub fn setup_consensus_and_p2p( max_certified_height_tx, ); - // StateSync receive side => handler definition - let (state_sync_router, state_sync_manager_rx) = ic_state_sync_manager::build_axum_router( - state_sync_client.clone(), - log.clone(), - metrics_registry, - ); + // StateSync receive side + handler 
definition + let (state_sync_manager_router, state_sync_manager_runner) = + ic_state_sync_manager::build_state_sync_manager( + log, + metrics_registry, + rt_handle, + state_sync_client.clone(), + ); - // Consensus receive side => handler definition + // Consensus receive side + handler definition + let (consensus_manager_router, consensus_manager_runner) = p2p_consensus.build(); // Merge all receive side handlers => router - let p2p_router = state_sync_router - .merge(p2p_consensus.router()) + let p2p_router = state_sync_manager_router + .merge(consensus_manager_router) .layer(TraceLayer::new_for_http()); // Quic transport let (_, topology_watcher) = ic_peer_manager::start_peer_manager( @@ -200,17 +302,8 @@ pub fn setup_consensus_and_p2p( )); // Start the main event loops for StateSync and Consensus - - let _state_sync_manager = ic_state_sync_manager::start_state_sync_manager( - log, - metrics_registry, - rt_handle, - quic_transport.clone(), - state_sync_client, - state_sync_manager_rx, - ); - - let _cancellation_token = p2p_consensus.run(quic_transport, topology_watcher); + let _state_sync_manager = state_sync_manager_runner.start(quic_transport.clone()); + let _cancellation_token = consensus_manager_runner.start(quic_transport, topology_watcher); (ingress_pool, ingress_sender, join_handles) } @@ -225,7 +318,7 @@ fn start_consensus( node_id: NodeId, subnet_id: SubnetId, artifact_pool_config: ArtifactPoolConfig, - catch_up_package: CatchUpPackage, + catch_up_package: &CatchUpPackage, // ConsensusCrypto is an extension of the Crypto trait and we can // not downcast traits. 
consensus_crypto: Arc, @@ -251,7 +344,27 @@ fn start_consensus( Vec>, AbortableBroadcastChannelBuilder, ) { + let artifact_pools = ArtifactPools::new( + log, + metrics_registry, + node_id, + artifact_pool_config, + catch_up_package, + ); let time_source = Arc::new(SysTimeSource::new()); + let consensus_pool_cache = consensus_pool.read().unwrap().get_cache(); + let consensus_block_cache = consensus_pool.read().unwrap().get_block_cache(); + let bouncers = Bouncers::new( + log, + metrics_registry, + subnet_id, + time_source.clone(), + message_router.clone(), + consensus_pool_cache.clone(), + consensus_block_cache, + state_reader.clone(), + ); + let mut new_p2p_consensus: ic_consensus_manager::AbortableBroadcastChannelBuilder = ic_consensus_manager::AbortableBroadcastChannelBuilder::new( log.clone(), @@ -259,18 +372,6 @@ fn start_consensus( metrics_registry.clone(), ); - let artifact_pools = init_artifact_pools( - node_id, - artifact_pool_config, - metrics_registry, - log, - catch_up_package, - time_source.as_ref(), - ); - - let mut join_handles = vec![]; - - let consensus_pool_cache = consensus_pool.read().unwrap().get_cache(); let consensus_time = consensus_pool.read().unwrap().get_consensus_time(); let replica_config = ReplicaConfig { node_id, subnet_id }; let ingress_manager = Arc::new(IngressManager::new( @@ -308,6 +409,8 @@ fn start_consensus( &PoolReader::new(&*consensus_pool.read().unwrap()), ))); + let mut join_handles = vec![]; + { let consensus_impl = ConsensusImpl::new( replica_config.clone(), @@ -333,14 +436,13 @@ fn start_consensus( let consensus_pool = Arc::clone(&consensus_pool); - let bouncer = Arc::new(ConsensusBouncer::new(metrics_registry, message_router)); let (outbound_tx, inbound_rx) = if HASHES_IN_BLOCKS_FEATURE_ENABLED { let assembler = ic_artifact_downloader::FetchStrippedConsensusArtifact::new( log.clone(), rt_handle.clone(), consensus_pool.clone(), artifact_pools.ingress_pool.clone(), - bouncer, + bouncers.consensus, 
metrics_registry.clone(), node_id, ); @@ -350,7 +452,7 @@ fn start_consensus( log.clone(), rt_handle.clone(), consensus_pool.clone(), - bouncer, + bouncers.consensus, metrics_registry.clone(), ); new_p2p_consensus.abortable_broadcast_channel(assembler, SLOT_TABLE_NO_LIMIT) @@ -372,12 +474,11 @@ fn start_consensus( let user_ingress_tx = { #[allow(clippy::disallowed_methods)] let (user_ingress_tx, user_ingress_rx) = unbounded_channel(); - let bouncer = Arc::new(IngressBouncer::new(time_source.clone())); let assembler = ic_artifact_downloader::FetchArtifact::new( log.clone(), rt_handle.clone(), artifact_pools.ingress_pool.clone(), - bouncer, + bouncers.ingress, metrics_registry.clone(), ); @@ -408,12 +509,11 @@ fn start_consensus( log.clone(), max_certified_height_tx, ); - let bouncer = CertifierBouncer::new(metrics_registry, Arc::clone(&consensus_pool_cache)); let assembler = ic_artifact_downloader::FetchArtifact::new( log.clone(), rt_handle.clone(), artifact_pools.certification_pool.clone(), - Arc::new(bouncer), + bouncers.certifier, metrics_registry.clone(), ); @@ -432,12 +532,11 @@ fn start_consensus( }; { - let bouncer = Arc::new(DkgBouncer::new(metrics_registry)); let assembler = ic_artifact_downloader::FetchArtifact::new( log.clone(), rt_handle.clone(), artifact_pools.dkg_pool.clone(), - bouncer, + bouncers.dkg, metrics_registry.clone(), ); @@ -475,17 +574,11 @@ fn start_consensus( finalized.payload.as_ref().is_summary(), finalized.payload.as_ref().as_idkg().is_some(), ); - let bouncer = Arc::new(idkg::IDkgBouncer::new( - metrics_registry, - subnet_id, - consensus_pool.read().unwrap().get_block_cache(), - Arc::clone(&state_reader), - )); let assembler = ic_artifact_downloader::FetchArtifact::new( log.clone(), rt_handle.clone(), artifact_pools.idkg_pool.clone(), - bouncer, + bouncers.idkg, metrics_registry.clone(), ); @@ -512,16 +605,11 @@ fn start_consensus( }; { - let bouncer = Arc::new(CanisterHttpGossipImpl::new( - Arc::clone(&consensus_pool_cache), - 
Arc::clone(&state_reader), - log.clone(), - )); let assembler = ic_artifact_downloader::FetchArtifact::new( log.clone(), rt_handle.clone(), artifact_pools.canister_http_pool.clone(), - bouncer, + bouncers.https_outcalls, metrics_registry.clone(), ); @@ -555,50 +643,3 @@ fn start_consensus( new_p2p_consensus, ) } - -fn init_artifact_pools( - node_id: NodeId, - config: ArtifactPoolConfig, - metrics_registry: &MetricsRegistry, - log: &ReplicaLogger, - catch_up_package: CatchUpPackage, - time_source: &dyn TimeSource, -) -> ArtifactPools { - let ingress_pool = Arc::new(RwLock::new(IngressPoolImpl::new( - node_id, - config.clone(), - metrics_registry.clone(), - log.clone(), - ))); - - let mut idkg_pool = IDkgPoolImpl::new( - config.clone(), - log.clone(), - metrics_registry.clone(), - Box::new(idkg::IDkgStatsImpl::new(metrics_registry.clone())), - ); - idkg_pool.add_initial_dealings(&catch_up_package, time_source); - let idkg_pool = Arc::new(RwLock::new(idkg_pool)); - - let certification_pool = Arc::new(RwLock::new(CertificationPoolImpl::new( - node_id, - config, - log.clone(), - metrics_registry.clone(), - ))); - let dkg_pool = Arc::new(RwLock::new(DkgPoolImpl::new( - metrics_registry.clone(), - log.clone(), - ))); - let canister_http_pool = Arc::new(RwLock::new(CanisterHttpPoolImpl::new( - metrics_registry.clone(), - log.clone(), - ))); - ArtifactPools { - ingress_pool, - certification_pool, - dkg_pool, - idkg_pool, - canister_http_pool, - } -} diff --git a/rs/replica_tests/src/lib.rs b/rs/replica_tests/src/lib.rs index 49a3f2e3919..c21a123df43 100644 --- a/rs/replica_tests/src/lib.rs +++ b/rs/replica_tests/src/lib.rs @@ -753,7 +753,7 @@ pub struct UniversalCanisterWithStateMachine<'a> { canister_id: CanisterId, } -impl<'a> UniversalCanisterWithStateMachine<'a> { +impl UniversalCanisterWithStateMachine<'_> { pub fn canister_id(&self) -> CanisterId { self.canister_id } diff --git a/rs/replicated_state/src/canister_state.rs b/rs/replicated_state/src/canister_state.rs 
index 2c094148a25..44c2cc9591b 100644 --- a/rs/replicated_state/src/canister_state.rs +++ b/rs/replicated_state/src/canister_state.rs @@ -433,13 +433,6 @@ impl CanisterState { + NumBytes::from(self.system_state.certified_data.len() as u64) } - /// Sets the (transient) size in bytes of guaranteed responses from this - /// canister routed into streams and not yet garbage collected. - pub(super) fn set_stream_guaranteed_responses_size_bytes(&mut self, size_bytes: usize) { - self.system_state - .set_stream_guaranteed_responses_size_bytes(size_bytes); - } - /// Returns the current memory allocation of the canister. pub fn memory_allocation(&self) -> MemoryAllocation { self.system_state.memory_allocation diff --git a/rs/replicated_state/src/canister_state/queues.rs b/rs/replicated_state/src/canister_state/queues.rs index 17ea0b26eac..8aa1624fb42 100644 --- a/rs/replicated_state/src/canister_state/queues.rs +++ b/rs/replicated_state/src/canister_state/queues.rs @@ -983,7 +983,7 @@ impl CanisterQueues { if self .canister_queues .get(sender) - .map_or(false, |(input_queue, _)| input_queue.len() != 0) + .is_some_and(|(input_queue, _)| input_queue.len() != 0) { self.input_schedule.reschedule(*sender, input_queue_type); break; @@ -1343,13 +1343,6 @@ impl CanisterQueues { .oversized_guaranteed_requests_extra_bytes } - /// Sets the (transient) size in bytes of guaranteed responses routed from - /// output queues into streams and not yet garbage collected. - pub(super) fn set_stream_guaranteed_responses_size_bytes(&mut self, size_bytes: usize) { - self.queue_stats - .transient_stream_guaranteed_responses_size_bytes = size_bytes; - } - /// Garbage collects all input and output queue pairs that are both empty. 
/// /// Because there is no useful information in an empty queue, there is no @@ -1649,8 +1642,6 @@ impl CanisterQueues { let calculated_stats = Self::calculate_queue_stats( &self.canister_queues, self.queue_stats.guaranteed_response_memory_reservations, - self.queue_stats - .transient_stream_guaranteed_responses_size_bytes, ); if self.queue_stats != calculated_stats { return Err(format!( @@ -1677,13 +1668,13 @@ impl CanisterQueues { /// Computes stats for the given canister queues. Used when deserializing and in /// `debug_assert!()` checks. Takes the number of memory reservations from the /// caller, as the queues have no need to track memory reservations, so it - /// cannot be computed. Same with the size of guaranteed responses in streams. + /// cannot be computed. Size of guaranteed responses in streams is ignored as it is + /// limited. /// /// Time complexity: `O(canister_queues.len())`. fn calculate_queue_stats( canister_queues: &BTreeMap, Arc)>, guaranteed_response_memory_reservations: usize, - transient_stream_guaranteed_responses_size_bytes: usize, ) -> QueueStats { let (input_queues_reserved_slots, output_queues_reserved_slots) = canister_queues .values() @@ -1695,7 +1686,6 @@ impl CanisterQueues { guaranteed_response_memory_reservations, input_queues_reserved_slots, output_queues_reserved_slots, - transient_stream_guaranteed_responses_size_bytes, } } } @@ -1863,7 +1853,6 @@ impl TryFrom<(pb_queues::CanisterQueues, &dyn CheckpointLoadingMetrics)> for Can let queue_stats = Self::calculate_queue_stats( &canister_queues, item.guaranteed_response_memory_reservations as usize, - 0, ); let input_schedule = InputSchedule::try_from(( @@ -1902,9 +1891,8 @@ impl TryFrom<(pb_queues::CanisterQueues, &dyn CheckpointLoadingMetrics)> for Can } /// Tracks slot and guaranteed response memory reservations across input and -/// output queues; and holds a (transient) byte size of responses already routed -/// into streams (tracked separately, at the replicated state level, 
as messages -/// are routed to and GC-ed from streams). +/// output queues. Transient byte size of responses already routed into streams +/// is ignored as the streams size is limited. /// /// Stats for the enqueued messages themselves (counts and sizes by kind, /// context and class) are tracked separately in `message_pool::MessageStats`. @@ -1931,22 +1919,12 @@ struct QueueStats { /// Count of slots reserved in output queues. Note that this is different from /// memory reservations for guaranteed responses. output_queues_reserved_slots: usize, - - /// Transient: size in bytes of guaranteed responses routed from `output_queues` - /// into streams and not yet garbage collected. - /// - /// This is updated by `ReplicatedState::put_streams()`, called by MR after - /// every streams mutation (induction, routing, GC). And is (re)populated during - /// checkpoint loading by `ReplicatedState::new_from_checkpoint()`. - transient_stream_guaranteed_responses_size_bytes: usize, } impl QueueStats { - /// Returns the memory usage of reservations for guaranteed responses plus - /// guaranteed responses in streans. + /// Returns the memory usage of reservations for guaranteed responses. 
pub fn guaranteed_response_memory_usage(&self) -> usize { self.guaranteed_response_memory_reservations * MAX_RESPONSE_COUNT_BYTES - + self.transient_stream_guaranteed_responses_size_bytes } /// Updates the stats to reflect the enqueueing of the given message in the given diff --git a/rs/replicated_state/src/canister_state/queues/tests.rs b/rs/replicated_state/src/canister_state/queues/tests.rs index 01dd7dcae96..ca8f4f5857e 100644 --- a/rs/replicated_state/src/canister_state/queues/tests.rs +++ b/rs/replicated_state/src/canister_state/queues/tests.rs @@ -2213,7 +2213,6 @@ fn test_stats_best_effort() { guaranteed_response_memory_reservations: 0, input_queues_reserved_slots: 1, output_queues_reserved_slots: 1, - transient_stream_guaranteed_responses_size_bytes: 0, }; assert_eq!(expected_queue_stats, queues.queue_stats); // Two best-effort response requests, two best-effort responses. @@ -2275,7 +2274,6 @@ fn test_stats_best_effort() { guaranteed_response_memory_reservations: 0, input_queues_reserved_slots: 0, output_queues_reserved_slots: 1, - transient_stream_guaranteed_responses_size_bytes: 0, }; assert_eq!(expected_queue_stats, queues.queue_stats); // Only one best-effort reject response (the dropped response is no longer in @@ -2369,7 +2367,6 @@ fn test_stats_guaranteed_response() { guaranteed_response_memory_reservations: 2, input_queues_reserved_slots: 1, output_queues_reserved_slots: 1, - transient_stream_guaranteed_responses_size_bytes: 0, }; assert_eq!(expected_queue_stats, queues.queue_stats); // Two guaranteed response requests, two guaranteed responses. @@ -2439,7 +2436,6 @@ fn test_stats_guaranteed_response() { guaranteed_response_memory_reservations: 1, input_queues_reserved_slots: 0, output_queues_reserved_slots: 1, - transient_stream_guaranteed_responses_size_bytes: 0, }; assert_eq!(expected_queue_stats, queues.queue_stats); // And we have all-zero message stats. 
@@ -2499,7 +2495,6 @@ fn test_stats_oversized_requests() { guaranteed_response_memory_reservations: 2, input_queues_reserved_slots: 2, output_queues_reserved_slots: 2, - transient_stream_guaranteed_responses_size_bytes: 0, }; assert_eq!(expected_queue_stats, queues.queue_stats); // Two best-effort requests, two oversized guaranteed requests, 4 requests in all. @@ -2564,7 +2559,6 @@ fn test_stats_oversized_requests() { guaranteed_response_memory_reservations: 1, input_queues_reserved_slots: 0, output_queues_reserved_slots: 2, - transient_stream_guaranteed_responses_size_bytes: 0, }; assert_eq!(expected_queue_stats, queues.queue_stats); @@ -2657,9 +2651,6 @@ fn test_garbage_collect_restores_defaults() { let mut queues = CanisterQueues::default(); assert_eq!(CanisterQueues::default(), queues); - // Set the transient response size to a non-zero value. - queues.set_stream_guaranteed_responses_size_bytes(123); - // Push and pop an ingress message. queues.push_ingress(IngressBuilder::default().receiver(this).build()); assert!(queues.pop_input().is_some()); diff --git a/rs/replicated_state/src/canister_state/system_state.rs b/rs/replicated_state/src/canister_state/system_state.rs index ac6753b7e52..e28a39457b9 100644 --- a/rs/replicated_state/src/canister_state/system_state.rs +++ b/rs/replicated_state/src/canister_state/system_state.rs @@ -1592,13 +1592,6 @@ impl SystemState { self.canister_history.get_memory_usage() } - /// Sets the (transient) size in bytes of guaranteed responses from this - /// canister routed into streams and not yet garbage collected. - pub(super) fn set_stream_guaranteed_responses_size_bytes(&mut self, size_bytes: usize) { - self.queues - .set_stream_guaranteed_responses_size_bytes(size_bytes); - } - /// Method used only by the dashboard. 
pub fn collect_controllers_as_string(&self) -> String { self.controllers diff --git a/rs/replicated_state/src/metadata_state.rs b/rs/replicated_state/src/metadata_state.rs index 5d55f2380ed..b77aa920c7f 100644 --- a/rs/replicated_state/src/metadata_state.rs +++ b/rs/replicated_state/src/metadata_state.rs @@ -35,7 +35,6 @@ use ic_types::{ ingress::{IngressState, IngressStatus}, messages::{ is_subnet_id, CanisterCall, MessageId, Payload, RejectContext, RequestOrResponse, Response, - NO_DEADLINE, }, node_id_into_protobuf, node_id_try_from_option, nominal_cycles::NominalCycles, @@ -72,7 +71,7 @@ pub struct SystemMetadata { pub ingress_history: IngressHistoryState, /// XNet stream state indexed by the _destination_ subnet id. - pub(super) streams: Arc, + pub(super) streams: Arc, /// The canister ID ranges from which this subnet generates canister IDs. canister_allocation_ranges: CanisterIdRanges, @@ -730,10 +729,7 @@ impl TryFrom<(pb_metadata::SystemMetadata, &dyn CheckpointLoadingMetrics)> for S // Ingress history is persisted separately. We rely on `load_checkpoint()` to // properly set this value. ingress_history: Default::default(), - streams: Arc::new(Streams { - guaranteed_responses_size_bytes: Streams::calculate_stats(&streams), - streams, - }), + streams: Arc::new(streams), network_topology: try_from_option_field( item.network_topology, "SystemMetadata::network_topology", @@ -800,7 +796,7 @@ impl SystemMetadata { } /// Returns a reference to the streams. - pub fn streams(&self) -> &Streams { + pub fn streams(&self) -> &StreamMap { &self.streams } @@ -1514,256 +1510,6 @@ impl From for StreamSlice { } } -/// Wrapper around a private `StreamMap` plus stats. -#[derive(Clone, Eq, PartialEq, Debug, Default)] -pub struct Streams { - /// Map of streams by destination `SubnetId`. - streams: StreamMap, - - /// Map of response sizes in bytes by respondent `CanisterId`. 
- guaranteed_responses_size_bytes: BTreeMap, -} - -impl Streams { - pub fn new() -> Self { - Default::default() - } - - /// Returns a reference to the wrapped `StreamMap`. - pub fn streams(&self) -> &StreamMap { - &self.streams - } - - /// Returns a reference to the stream for the given destination subnet. - pub fn get(&self, destination: &SubnetId) -> Option<&Stream> { - self.streams.get(destination) - } - - /// Returns an iterator over all `(&SubnetId, &Stream)` pairs. - pub fn iter(&self) -> impl Iterator { - self.streams.iter() - } - - /// Returns an iterator over all `&SubnetId` keys. - pub fn keys(&self) -> impl Iterator { - self.streams.keys() - } - - /// Pushes the given message onto the stream for the given destination - /// subnet. - pub fn push(&mut self, destination: SubnetId, msg: RequestOrResponse) { - if let RequestOrResponse::Response(response) = &msg { - if response.deadline == NO_DEADLINE { - *self - .guaranteed_responses_size_bytes - .entry(response.respondent) - .or_default() += msg.count_bytes(); - } - } - - self.streams.entry(destination).or_default().push(msg); - - #[cfg(debug_assertions)] - self.debug_validate_stats(); - } - - /// Returns a mutable reference to the stream for the given destination - /// subnet. - pub fn get_mut(&mut self, destination: &SubnetId) -> Option { - // Can't (easily) validate stats when `StreamHandle` gets dropped, but we should - // at least do it before. - #[cfg(debug_assertions)] - self.debug_validate_stats(); - - match self.streams.get_mut(destination) { - Some(stream) => Some(StreamHandle::new( - stream, - &mut self.guaranteed_responses_size_bytes, - )), - None => None, - } - } - - /// Returns a mutable reference to the stream for the given destination - /// subnet, inserting it if it doesn't already exist. - pub fn get_mut_or_insert(&mut self, destination: SubnetId) -> StreamHandle { - // Can't (easily) validate stats when `StreamHandle` gets dropped, but we should - // at least do it before. 
- #[cfg(debug_assertions)] - self.debug_validate_stats(); - - StreamHandle::new( - self.streams.entry(destination).or_default(), - &mut self.guaranteed_responses_size_bytes, - ) - } - - /// Returns the guaranteed response sizes by responder canister stat. - pub fn guaranteed_responses_size_bytes(&self) -> &BTreeMap { - &self.guaranteed_responses_size_bytes - } - - /// Prunes zero-valued guaranteed response sizes entries. - /// - /// This is triggered explicitly by `ReplicatedState` after it has updated the - /// canisters' copies of these values (including the zeroes). - pub fn prune_zero_guaranteed_responses_size_bytes(&mut self) { - self.guaranteed_responses_size_bytes - .retain(|_, &mut value| value != 0); - } - - /// Computes the `guaranteed_responses_size_bytes` map from scratch. Used when - /// deserializing and in asserts. - /// - /// Time complexity: O(num_messages). - pub fn calculate_stats(streams: &StreamMap) -> BTreeMap { - let mut guaranteed_responses_size_bytes: BTreeMap = BTreeMap::new(); - for (_, stream) in streams.iter() { - for (_, msg) in stream.messages().iter() { - if let RequestOrResponse::Response(response) = msg { - if response.deadline == NO_DEADLINE { - *guaranteed_responses_size_bytes - .entry(response.respondent) - .or_default() += msg.count_bytes(); - } - } - } - } - guaranteed_responses_size_bytes - } - - /// Checks that the running accounting of the sizes of responses in streams is - /// accurate. - #[cfg(debug_assertions)] - fn debug_validate_stats(&self) { - let mut nonzero_guaranteed_responses_size_bytes = - self.guaranteed_responses_size_bytes.clone(); - nonzero_guaranteed_responses_size_bytes.retain(|_, &mut value| value != 0); - debug_assert_eq!( - Streams::calculate_stats(&self.streams), - nonzero_guaranteed_responses_size_bytes - ); - } -} - -/// A mutable reference to a stream owned by a `Streams` struct; bundled with -/// the `Streams`' stats, to be updated on stream mutations. 
-pub struct StreamHandle<'a> { - stream: &'a mut Stream, - - guaranteed_responses_size_bytes: &'a mut BTreeMap, -} - -impl<'a> StreamHandle<'a> { - pub fn new( - stream: &'a mut Stream, - guaranteed_responses_size_bytes: &'a mut BTreeMap, - ) -> Self { - Self { - stream, - guaranteed_responses_size_bytes, - } - } - - /// Returns a reference to the message queue. - pub fn messages(&self) -> &StreamIndexedQueue { - self.stream.messages() - } - - /// Returns the stream's begin index. - pub fn messages_begin(&self) -> StreamIndex { - self.stream.messages_begin() - } - - /// Returns the stream's end index. - pub fn messages_end(&self) -> StreamIndex { - self.stream.messages_end() - } - - /// Returns a reference to the reject signals. - pub fn reject_signals(&self) -> &VecDeque { - self.stream.reject_signals() - } - - /// Returns the index just beyond the last sent signal. - pub fn signals_end(&self) -> StreamIndex { - self.stream.signals_end - } - - /// Appends the given message to the tail of the stream. - /// - /// Returns the byte size of the pushed message. - pub fn push(&mut self, message: RequestOrResponse) -> usize { - let size_bytes = message.count_bytes(); - if let RequestOrResponse::Response(response) = &message { - if response.deadline == NO_DEADLINE { - *self - .guaranteed_responses_size_bytes - .entry(response.respondent) - .or_default() += size_bytes; - } - } - self.stream.push(message); - size_bytes - } - - /// Pushes an accept signal. Since these are not explicitly encoded, this - /// just increments `signals_end`. - pub fn push_accept_signal(&mut self) { - self.stream.push_accept_signal(); - } - - /// Appends a reject signal (the current `signals_end`) to the tail of the - /// reject signals; and then increments `signals_end`. 
- pub fn push_reject_signal(&mut self, reason: RejectReason) { - self.stream.push_reject_signal(reason); - } - - /// Garbage collects messages before `new_begin`, collecting and returning all - /// messages for which a reject signal was received. - pub fn discard_messages_before( - &mut self, - new_begin: StreamIndex, - reject_signals: &VecDeque, - ) -> Vec<(RejectReason, RequestOrResponse)> { - // Update stats for each discarded message. - for (index, msg) in self.stream.messages().iter() { - if index >= new_begin { - break; - } - if let RequestOrResponse::Response(response) = &msg { - if response.deadline == NO_DEADLINE { - let canister_guaranteed_responses_size_bytes = self - .guaranteed_responses_size_bytes - .get_mut(&response.respondent) - .expect( - "No `guaranteed_responses_size_bytes` entry for discarded response", - ); - *canister_guaranteed_responses_size_bytes -= msg.count_bytes(); - } - } - } - - self.stream - .discard_messages_before(new_begin, reject_signals) - } - - /// Garbage collects signals before `new_signals_begin`. - pub fn discard_signals_before(&mut self, new_signals_begin: StreamIndex) { - self.stream.discard_signals_before(new_signals_begin); - } - - /// Returns a reference to the reverse stream flags. - pub fn reverse_stream_flags(&self) -> &StreamFlags { - &self.stream.reverse_stream_flags - } - - /// Sets the reverse stream flags. - pub fn set_reverse_stream_flags(&mut self, flags: StreamFlags) { - self.stream.set_reverse_stream_flags(flags); - } -} - #[derive(Clone, Eq, PartialEq, Debug)] /// State associated with the history of statuses of ingress messages as they /// traversed through the system. @@ -2356,32 +2102,6 @@ impl pub(crate) mod testing { use super::*; - /// Testing only: Exposes `Streams` internals for use in other modules' - /// tests. - pub trait StreamsTesting { - /// Testing only: Modifies `SystemMetadata::streams` by applying the - /// provided function. 
- fn modify_streams(&mut self, f: F); - } - - impl StreamsTesting for Streams { - fn modify_streams(&mut self, f: F) { - f(&mut self.streams); - - // Update `guaranteed_responses_size_bytes`, retaining all previous keys with a - // default byte size of zero (so that the respective canister's - // `transient_stream_guaranteed_responses_size_bytes` is correctly reset to - // zero). - self.guaranteed_responses_size_bytes - .values_mut() - .for_each(|size| *size = 0); - for (canister_id, size_bytes) in Streams::calculate_stats(&self.streams) { - self.guaranteed_responses_size_bytes - .insert(canister_id, size_bytes); - } - } - } - /// Early warning system / stumbling block forcing the authors of changes adding /// or removing replicated state fields to think about and/or ask the Message /// Routing team to think about any repercussions to the subnet splitting logic. diff --git a/rs/replicated_state/src/metadata_state/tests.rs b/rs/replicated_state/src/metadata_state/tests.rs index 365d6c3cc58..c1a7addbd3f 100644 --- a/rs/replicated_state/src/metadata_state/tests.rs +++ b/rs/replicated_state/src/metadata_state/tests.rs @@ -12,7 +12,7 @@ use ic_test_utilities_types::{ canister_test_id, message_test_id, node_test_id, subnet_test_id, user_test_id, SUBNET_0, SUBNET_1, SUBNET_2, }, - messages::{RequestBuilder, ResponseBuilder}, + messages::RequestBuilder, xnet::{StreamHeaderBuilder, StreamSliceBuilder}, }; use ic_types::{ @@ -130,246 +130,6 @@ fn entries_sorted_lexicographically() { assert_eq!(actual, expected); } -#[test] -fn streams_stats() { - // Two local canisters, `local_a` and `local_b`. - let local_a = canister_test_id(1); - let local_b = canister_test_id(2); - // Two remote canisters, `remote_1` on `SUBNET_1` and `remote_2` on `SUBNET_2`. 
- let remote_1 = canister_test_id(3); - let remote_2 = canister_test_id(4); - - fn request(sender: CanisterId, receiver: CanisterId) -> RequestOrResponse { - RequestBuilder::default() - .sender(sender) - .receiver(receiver) - .build() - .into() - } - fn response( - respondent: CanisterId, - originator: CanisterId, - payload: &str, - ) -> (RequestOrResponse, usize) { - let rep: RequestOrResponse = ResponseBuilder::default() - .respondent(respondent) - .originator(originator) - .response_payload(Payload::Data(payload.as_bytes().to_vec())) - .build() - .into(); - let req_bytes = rep.count_bytes(); - (rep, req_bytes) - } - - // A bunch of requests and responses from local canisters to remote ones. - let req_a1 = request(local_a, remote_1); - let (rep_a1, rep_a1_size) = response(local_a, remote_1, "a"); - let (rep_b1, rep_b1_size) = response(local_b, remote_1, "bb"); - let (rep_b2, rep_b2_size) = response(local_b, remote_2, "ccc"); - - let mut streams = Streams::new(); - // Empty response size map. - let mut expected_responses_size = Default::default(); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - streams.push(SUBNET_1, req_a1); - // Pushed a request, response size stats are unchanged. - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Push response via `Streams::push()`. - streams.push(SUBNET_1, rep_a1); - // `rep_a1` is now accounted for against `local_a`. - expected_responses_size.insert(local_a, rep_a1_size); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Push response via `StreamHandle::push()`. - streams.get_mut(&SUBNET_1).unwrap().push(rep_b1); - // `rep_b1` is accounted for against `local_b`. - expected_responses_size.insert(local_b, rep_b1_size); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Push response via `StreamHandle::push()` after `get_mut_or_insert()`. 
- streams.get_mut_or_insert(SUBNET_2).push(rep_b2); - // `rep_b2` is accounted for against `local_b`. - *expected_responses_size.get_mut(&local_b).unwrap() += rep_b2_size; - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Discard `req_a1` and `rep_a1` from the stream for `SUBNET_1`. - streams - .get_mut(&SUBNET_1) - .unwrap() - .discard_messages_before(2.into(), &Default::default()); - // No more responses from `local_a` in `streams`. - *expected_responses_size.get_mut(&local_a).unwrap() = 0; - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - streams.prune_zero_guaranteed_responses_size_bytes(); - // Zero valued entry for `local_a` pruned. - expected_responses_size.remove(&local_a); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Discard `rep_b2` from the stream for `SUBNET_2`. - streams - .get_mut(&SUBNET_2) - .unwrap() - .discard_messages_before(1.into(), &Default::default()); - // `rep_b2` is gone. 
- *expected_responses_size.get_mut(&local_b).unwrap() -= rep_b2_size; - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); -} - -#[test] -fn streams_stats_best_effort_messages() { - let local = canister_test_id(1); - let remote = canister_test_id(2); - - let request = |sender: CanisterId, receiver: CanisterId| -> RequestOrResponse { - RequestBuilder::default() - .sender(sender) - .receiver(receiver) - .deadline(CoarseTime::from_secs_since_unix_epoch(1)) - .build() - .into() - }; - let response = - |respondent: CanisterId, originator: CanisterId, payload: &str| -> RequestOrResponse { - ResponseBuilder::default() - .respondent(respondent) - .originator(originator) - .response_payload(Payload::Data(payload.as_bytes().to_vec())) - .deadline(CoarseTime::from_secs_since_unix_epoch(1)) - .build() - .into() - }; - - // A bunch of best-effort requests and responses from the local canister to the remote one. - let req = request(local, remote); - let rep_1 = response(local, remote, "a"); - let rep_2 = response(local, remote, "bb"); - let rep_3 = response(local, remote, "ccc"); - - let mut streams = Streams::new(); - - // Expecting no guaranteed responses throughout. - let expected_responses_size = BTreeMap::default(); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - streams.push(SUBNET_1, req); - // Pushed a request, response size stats are unchanged. - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Push response via `Streams::push()`. - streams.push(SUBNET_1, rep_1); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Push response via `StreamHandle::push()`. - streams.get_mut(&SUBNET_1).unwrap().push(rep_2); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Push response via `StreamHandle::push()` after `get_mut_or_insert()`. 
- streams.get_mut_or_insert(SUBNET_2).push(rep_3); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Discard everything from the stream for `SUBNET_1`. - streams - .get_mut(&SUBNET_1) - .unwrap() - .discard_messages_before(3.into(), &Default::default()); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - streams.prune_zero_guaranteed_responses_size_bytes(); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); - - // Discard `rep_b2` from the stream for `SUBNET_2`. - streams - .get_mut(&SUBNET_2) - .unwrap() - .discard_messages_before(1.into(), &Default::default()); - assert_eq!( - streams.guaranteed_responses_size_bytes(), - &expected_responses_size - ); -} - -#[test] -fn streams_stats_after_deserialization() { - let mut system_metadata = SystemMetadata::new(SUBNET_0, SubnetType::Application); - let streams = Arc::make_mut(&mut system_metadata.streams); - - streams.push( - SUBNET_1, - ResponseBuilder::default() - .respondent(canister_test_id(1)) - .originator(canister_test_id(2)) - .build() - .into(), - ); - - let system_metadata_proto: ic_protobuf::state::system_metadata::v1::SystemMetadata = - (&system_metadata).into(); - let deserialized_system_metadata = ( - system_metadata_proto, - &DummyMetrics as &dyn CheckpointLoadingMetrics, - ) - .try_into() - .unwrap(); - - // Ensure that the deserialized `SystemMetadata` is equal to the original. - assert_eq!(system_metadata, deserialized_system_metadata); - // Double-check that the stats match. - assert_eq!( - system_metadata.streams.guaranteed_responses_size_bytes(), - deserialized_system_metadata - .streams - .guaranteed_responses_size_bytes() - ); -} - #[test] fn init_allocation_ranges_if_empty() { let own_subnet_id = SUBNET_0; @@ -684,10 +444,8 @@ fn system_metadata_split() { // Only ingress messages for `CANISTER_2` should be retained on `SUBNET_B`. 
let is_canister_on_subnet_b = |canister_id: CanisterId| canister_id == CANISTER_2; - let streams = Streams { - streams: btreemap! { SUBNET_C => Stream::new(StreamIndexedQueue::with_begin(13.into()), 14.into()) }, - guaranteed_responses_size_bytes: btreemap! { CANISTER_1 => 169 }, - }; + let streams = + btreemap! { SUBNET_C => Stream::new(StreamIndexedQueue::with_begin(13.into()), 14.into()) }; // Use uncommon `SubnetType::VerifiedApplication` to make it more likely to // detect a regression in the subnet type assigned to subnet B. @@ -1694,30 +1452,6 @@ fn stream_pushing_signals_increments_signals_end() { assert_eq!(StreamIndex::new(32), stream.signals_end()); } -#[test] -fn stream_handle_pushing_signals_increments_signals_end() { - let mut stream = generate_stream( - MessageConfig { - begin: 30, - count: 0, - }, - SignalConfig { end: 30 }, - ); - assert!(stream.reject_signals().is_empty()); - - let mut guaranteed_responses_size_bytes = BTreeMap::default(); - let mut handle = StreamHandle::new(&mut stream, &mut guaranteed_responses_size_bytes); - - handle.push_accept_signal(); - assert_eq!(StreamIndex::new(31), handle.signals_end()); - handle.push_reject_signal(RejectReason::CanisterNotFound); - assert_eq!( - &VecDeque::from([RejectSignal::new(RejectReason::CanisterNotFound, 31.into()),]), - handle.reject_signals() - ); - assert_eq!(StreamIndex::new(32), handle.signals_end()); -} - #[test] fn stream_roundtrip_encoding() { let mut messages = StreamIndexedQueue::with_begin(30.into()); diff --git a/rs/replicated_state/src/page_map.rs b/rs/replicated_state/src/page_map.rs index ee6a474152b..580cfad7848 100644 --- a/rs/replicated_state/src/page_map.rs +++ b/rs/replicated_state/src/page_map.rs @@ -133,7 +133,7 @@ struct WriteBuffer<'a> { start_index: PageIndex, } -impl<'a> WriteBuffer<'a> { +impl WriteBuffer<'_> { fn apply_to_file(&mut self, file: &mut File, path: &Path) -> Result<(), PersistenceError> { use std::io::{Seek, SeekFrom}; @@ -356,7 +356,7 @@ pub enum 
MemoryMapOrData<'a> { Data(&'a [u8]), } -impl<'a> MemoryInstructions<'a> { +impl MemoryInstructions<'_> { // Filters and cuts any instructions that do not fall into `new_range`. pub fn restrict_to_range(&mut self, new_range: &Range) { self.range = PageIndex::new(std::cmp::max(self.range.start.get(), new_range.start.get())) diff --git a/rs/replicated_state/src/page_map/page_allocator/page_bytes.rs b/rs/replicated_state/src/page_map/page_allocator/page_bytes.rs index 360278efc5c..3c5720c6542 100644 --- a/rs/replicated_state/src/page_map/page_allocator/page_bytes.rs +++ b/rs/replicated_state/src/page_map/page_allocator/page_bytes.rs @@ -19,7 +19,7 @@ where struct PageBytesVisitor; -impl<'de> Visitor<'de> for PageBytesVisitor { +impl Visitor<'_> for PageBytesVisitor { type Value = PageBytes; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { diff --git a/rs/replicated_state/src/replicated_state.rs b/rs/replicated_state/src/replicated_state.rs index 606361756bf..6d53d41b5de 100644 --- a/rs/replicated_state/src/replicated_state.rs +++ b/rs/replicated_state/src/replicated_state.rs @@ -1,6 +1,9 @@ use super::{ canister_state::CanisterState, - metadata_state::{IngressHistoryState, Stream, Streams, SystemMetadata}, + metadata_state::{ + subnet_call_context_manager::{IDkgDealingsContext, SignWithThresholdContext}, + IngressHistoryState, Stream, StreamMap, SystemMetadata, + }, }; use crate::{ canister_snapshots::CanisterSnapshots, @@ -8,10 +11,6 @@ use crate::{ queues::{CanisterInput, CanisterQueuesLoopDetector}, system_state::{push_input, CanisterOutputQueuesIterator}, }, - metadata_state::{ - subnet_call_context_manager::{IDkgDealingsContext, SignWithThresholdContext}, - StreamMap, - }, CanisterQueues, }; use ic_base_types::PrincipalId; @@ -438,16 +437,14 @@ impl ReplicatedState { epoch_query_stats: RawQueryStats, canister_snapshots: CanisterSnapshots, ) -> Self { - let mut res = Self { + Self { canister_states, metadata, subnet_queues, 
consensus_queue: Vec::new(), epoch_query_stats, canister_snapshots, - }; - res.update_stream_guaranteed_responses_size_bytes(); - res + } } pub fn canister_state(&self, canister_id: &CanisterId) -> Option<&CanisterState> { @@ -910,16 +907,6 @@ impl ReplicatedState { &self.epoch_query_stats } - /// Updates the byte size of guaranteed responses in streams for each canister. - fn update_stream_guaranteed_responses_size_bytes(&mut self) { - for (canister_id, size_bytes) in self.metadata.streams.guaranteed_responses_size_bytes() { - if let Some(canister_state) = self.canister_states.get_mut(canister_id) { - canister_state.set_stream_guaranteed_responses_size_bytes(*size_bytes); - } - } - Arc::make_mut(&mut self.metadata.streams).prune_zero_guaranteed_responses_size_bytes() - } - /// Returns the number of canisters in this `ReplicatedState`. pub fn num_canisters(&self) -> usize { self.canister_states.len() @@ -1252,8 +1239,6 @@ impl ReplicatedState { |canister_id| canister_states.contains_key(&canister_id), subnet_queues, ); - - self.update_stream_guaranteed_responses_size_bytes(); } } @@ -1282,33 +1267,31 @@ pub trait ReplicatedStateMessageRouting { fn streams(&self) -> &StreamMap; /// Removes the streams from this `ReplicatedState`. - fn take_streams(&mut self) -> Streams; + fn take_streams(&mut self) -> StreamMap; /// Atomically replaces the streams. - fn put_streams(&mut self, streams: Streams); + fn put_streams(&mut self, streams: StreamMap); } impl ReplicatedStateMessageRouting for ReplicatedState { fn streams(&self) -> &StreamMap { - self.metadata.streams.streams() + &self.metadata.streams } - fn take_streams(&mut self) -> Streams { + fn take_streams(&mut self) -> StreamMap { std::mem::take(Arc::make_mut(&mut self.metadata.streams)) } - fn put_streams(&mut self, streams: Streams) { - // Should never replace a non-empty Streams via `put_streams()`. 
- assert!(self.metadata.streams.streams().is_empty()); + fn put_streams(&mut self, streams: StreamMap) { + // Should never replace a non-empty StreamMap via `put_streams()`. + assert!(self.metadata.streams.is_empty()); *Arc::make_mut(&mut self.metadata.streams) = streams; - self.update_stream_guaranteed_responses_size_bytes(); } } pub mod testing { use super::*; - use crate::metadata_state::testing::StreamsTesting; /// Exposes `ReplicatedState` internals for use in other crates' unit tests. pub trait ReplicatedStateTesting { @@ -1353,7 +1336,7 @@ pub mod testing { fn modify_streams(&mut self, f: F) { let mut streams = self.take_streams(); - streams.modify_streams(f); + f(&mut streams); self.put_streams(streams); } diff --git a/rs/replicated_state/tests/replicated_state.rs b/rs/replicated_state/tests/replicated_state.rs index c0b392f7d7e..3df3c525500 100644 --- a/rs/replicated_state/tests/replicated_state.rs +++ b/rs/replicated_state/tests/replicated_state.rs @@ -156,7 +156,7 @@ impl ReplicatedStateFixture { fn push_to_streams(&mut self, msgs: Vec) { let mut streams = self.state.take_streams(); for msg in msgs.into_iter() { - streams.push(SUBNET_ID, msg); + streams.entry(SUBNET_ID).or_default().push(msg); } self.state.put_streams(streams); } @@ -341,30 +341,6 @@ fn memory_taken_by_subnet_queues() { assert_wasm_custom_sections_memory_taken(0, &fixture); } -#[test] -fn memory_taken_by_stream_responses() { - let mut fixture = ReplicatedStateFixture::new(); - - // Zero memory used initially. - assert_execution_memory_taken(0, &fixture); - assert_message_memory_taken(0, &fixture); - assert_canister_history_memory_taken(0, &fixture); - assert_wasm_custom_sections_memory_taken(0, &fixture); - - // Push a request and a response into a stream. - let response = response_to(OTHER_CANISTER_ID); - fixture.push_to_streams(vec![ - request_to(OTHER_CANISTER_ID).into(), - response.clone().into(), - ]); - - // Memory only used by response, not request. 
- assert_execution_memory_taken(0, &fixture); - assert_message_memory_taken(response.count_bytes(), &fixture); - assert_canister_history_memory_taken(0, &fixture); - assert_wasm_custom_sections_memory_taken(0, &fixture); -} - #[test] fn memory_taken_by_wasm_custom_sections() { let mut custom_sections: BTreeMap = BTreeMap::new(); diff --git a/rs/rosetta-api/icp/src/convert.rs b/rs/rosetta-api/icp/src/convert.rs index d15d8496331..f24984635b5 100644 --- a/rs/rosetta-api/icp/src/convert.rs +++ b/rs/rosetta-api/icp/src/convert.rs @@ -36,7 +36,6 @@ use std::convert::{TryFrom, TryInto}; /// This module converts from ledger_canister data structures to Rosetta data /// structures - pub fn to_rosetta_core_transaction( transaction_index: BlockIndex, transaction: Transaction, diff --git a/rs/rosetta-api/icp/src/ledger_client/handle_neuron_info.rs b/rs/rosetta-api/icp/src/ledger_client/handle_neuron_info.rs index 33afba58fad..7032f59820c 100644 --- a/rs/rosetta-api/icp/src/ledger_client/handle_neuron_info.rs +++ b/rs/rosetta-api/icp/src/ledger_client/handle_neuron_info.rs @@ -15,7 +15,7 @@ pub fn handle_neuron_info( // Check the response from governance call. 
let response: Result = candid::decode_one(bytes.as_ref()) .map_err(|err| format!("Could not decode NEURON_INFO response: {}", err))?; - return match response { + match response { Err(e) => Ok(Err(ApiError::InvalidRequest( false, format!("Could not retrieve neuron information: {}", e.error_message).into(), @@ -32,9 +32,9 @@ pub fn handle_neuron_info( hotkeys: neuron.hot_keys, staked_maturity_e8s: neuron.staked_maturity_e8s_equivalent, }); - return Ok(Ok(Some(output))); + Ok(Ok(Some(output))) } - }; + } } fn neuron_state(neuron: &Neuron) -> models::NeuronState { diff --git a/rs/rosetta-api/icp/src/rosetta_server.rs b/rs/rosetta-api/icp/src/rosetta_server.rs index 1fb6ca756e2..1b5a93dd3cd 100644 --- a/rs/rosetta-api/icp/src/rosetta_server.rs +++ b/rs/rosetta-api/icp/src/rosetta_server.rs @@ -315,7 +315,7 @@ async fn rosetta_metrics() -> HttpResponse { let encoder = prometheus::TextEncoder::new(); encoder.encode(&metrics, &mut buffer).unwrap(); HttpResponse::Ok() - .content_type("text/html; charset=utf-8") + .content_type("text/plain; version=0.0.4; charset=utf-8") .body(String::from_utf8(buffer).unwrap()) } diff --git a/rs/rosetta-api/icp/tests/integration_tests/BUILD.bazel b/rs/rosetta-api/icp/tests/integration_tests/BUILD.bazel index 31998102aab..de7d458b565 100644 --- a/rs/rosetta-api/icp/tests/integration_tests/BUILD.bazel +++ b/rs/rosetta-api/icp/tests/integration_tests/BUILD.bazel @@ -43,7 +43,6 @@ rust_test_suite( "//rs/canister_sandbox:sandbox_launcher", "//rs/ledger_suite/icp/ledger:ledger-canister-wasm-notify-method", "//rs/pocket_ic_server:pocket-ic-server", - "//rs/replica", "//rs/rosetta-api/icp:ic-rosetta-api-rosetta-blocks", "//rs/rosetta-api/icp/test_utils/sender_canister:ic-sender-canister", "@mainnet_icp_ledger_canister//file", @@ -52,8 +51,8 @@ rust_test_suite( "CANISTER_LAUNCHER": "$(rootpath //rs/canister_sandbox)", "LEDGER_CANISTER_NOTIFY_METHOD_WASM_PATH": "$(rootpath //rs/ledger_suite/icp/ledger:ledger-canister-wasm-notify-method)", 
"POCKET_IC_BIN": "$(rootpath //rs/pocket_ic_server:pocket-ic-server)", - "REPLICA_BIN": "$(rootpath //rs/replica)", "ROSETTA_PATH": "$(rootpath //rs/rosetta-api/icp:ic-rosetta-api-rosetta-blocks)", + "RUST_TEST_THREADS": "4", "SANDBOX_LAUNCHER": "$(rootpath //rs/canister_sandbox:sandbox_launcher)", "IC_SENDER_CANISTER_WASM_PATH": "$(rootpath //rs/rosetta-api/icp/test_utils/sender_canister:ic-sender-canister)", "ICP_LEDGER_DEPLOYED_VERSION_WASM_PATH": "$(rootpath @mainnet_icp_ledger_canister//file)", @@ -63,8 +62,7 @@ rust_test_suite( flaky = True, proc_macro_deps = [ ], - # The test runs replica binary that constantly uses more than 100% cpu core + rosetta server. - tags = ["cpu:3"], + tags = ["cpu:4"], deps = DEPENDENCIES + [ ":rosetta-integration-tests-lib", "//rs/rosetta-api/icp/test_utils/sender_canister:ic_sender_canister_lib", diff --git a/rs/rosetta-api/icp/tests/integration_tests/tests/tests.rs b/rs/rosetta-api/icp/tests/integration_tests/tests/tests.rs index 537b0f2c413..9da29d50400 100644 --- a/rs/rosetta-api/icp/tests/integration_tests/tests/tests.rs +++ b/rs/rosetta-api/icp/tests/integration_tests/tests/tests.rs @@ -411,7 +411,7 @@ fn matches_blockchain_is_empty_error(error: &rosetta_core::miscellaneous::Error) .as_ref() .unwrap() .get("error_message") - .map_or(false, |e| { + .is_some_and( |e| { e == "Blockchain is empty" || e == "Block not found: 0" || e == "RosettaBlocks was activated and there are no RosettaBlocks in the database yet. The synch is ongoing, please wait until the first RosettaBlock is written to the database." 
}) } diff --git a/rs/rosetta-api/icrc1/src/construction_api/types.rs b/rs/rosetta-api/icrc1/src/construction_api/types.rs index d7d9dc9c12e..9bc343418db 100644 --- a/rs/rosetta-api/icrc1/src/construction_api/types.rs +++ b/rs/rosetta-api/icrc1/src/construction_api/types.rs @@ -40,19 +40,19 @@ pub struct SignedTransaction<'a> { pub envelopes: Vec>, } -impl<'a> std::fmt::Display for SignedTransaction<'a> { +impl std::fmt::Display for SignedTransaction<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", hex::encode(serde_cbor::ser::to_vec(self).unwrap())) } } -impl<'a> FromStr for SignedTransaction<'a> { +impl FromStr for SignedTransaction<'_> { type Err = anyhow::Error; fn from_str(s: &str) -> Result { serde_cbor::from_slice(hex::decode(s)?.as_slice()).map_err(|err| anyhow!("{:?}", err)) } } -impl<'a> SignedTransaction<'a> { +impl SignedTransaction<'_> { pub fn get_lowest_ingress_expiry(&self) -> Option { self.envelopes .iter() diff --git a/rs/rosetta-api/scripts/download_latest_icrc1_ledger.sh b/rs/rosetta-api/scripts/download_latest_icrc1_ledger.sh index 4aa452f7be4..78a04ae29f0 100755 --- a/rs/rosetta-api/scripts/download_latest_icrc1_ledger.sh +++ b/rs/rosetta-api/scripts/download_latest_icrc1_ledger.sh @@ -1,30 +1,25 @@ #!/usr/bin/env bash - set -uo pipefail -#set -x - -COMMITS=$(curl -sLf -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "https://api.github.com/repos/dfinity/ic/commits?per_page=100" \ - | jq '.[].sha' | tr -d \") -if [ "$?" -ne "0" ]; then - echo >&2 "Unable to fetch the commits from dfinity/ic. Please try again" - exit 1 -fi +### Configuration +RELEASE_TAG_PREFIX=ledger-suite-icrc -for COMMIT in $COMMITS; do +### Download a specific release +## Download the ICRC ledger WASM and did files for a specific release. The files are downloaded +## from the github release page for the given release. 
+download_release() { + RELEASE=$1 STATUS_CODE=$(curl -s -o /dev/null -w "%{http_code}" -L --head \ - "https://download.dfinity.systems/ic/$COMMIT/canisters/ic-icrc1-ledger.wasm.gz") + "https://github.com/dfinity/ic/releases/download/${RELEASE}/ic-icrc1-ledger.wasm.gz") if (($STATUS_CODE >= 200)) && (($STATUS_CODE < 300)); then - echo "Found artifacts for commit $COMMIT. Downloading icrc1_ledger.did and icrc1_ledger.wasm.gz" - curl -sLf "https://raw.githubusercontent.com/dfinity/ic/$COMMIT/rs/ledger_suite/icrc1/ledger/ledger.did" \ + echo "Found artifacts for release $RELEASE. Downloading icrc1_ledger.did and icrc1_ledger.wasm.gz" + curl -sLf "https://github.com/dfinity/ic/releases/download/${RELEASE}/ledger.did" \ -o icrc1_ledger.did if [ "$?" -ne "0" ]; then echo >&2 "Unable to download the icrc1 ledger did file. Please try again" exit 2 fi - curl -sLf "https://download.dfinity.systems/ic/$COMMIT/canisters/ic-icrc1-ledger.wasm.gz" \ + curl -sLf "https://github.com/dfinity/ic/releases/download/${RELEASE}/ic-icrc1-ledger.wasm.gz" \ -o icrc1_ledger.wasm.gz if [ "$?" -ne "0" ]; then echo >&2 "Unable to download the icrc1 ledger wasm file. Please try again" @@ -32,7 +27,48 @@ for COMMIT in $COMMITS; do fi exit 0 fi -done +} + +### Find and download the latest ICRC ledger WASM and did file +## List the releases from the repository, looking for the most recent release where the corresponding +## tag starts with the expected prefix. Retrieves releases one page at a time, stopping if no release +## was found in some predefined maximum number of pages. Once a release is found, download the ledger +## WASM and did files. 
+find_and_download_release() { + PAGE=1 + ITEMS_PER_PAGE=100 + MAX_PAGES=10 + while true; do + ITEM=0 + # Unauthenticated requests are rate limited (per IP address) to 60 requests/hr + # https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#primary-rate-limit-for-unauthenticated-users + REL_JSON=$(curl -L \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/dfinity/ic/releases\?per_page\=${ITEMS_PER_PAGE}\&page\=${PAGE}) + if [ "$?" -ne "0" ]; then + echo >&2 "Unable to fetch the releases from dfinity/ic." + exit 1 + fi + while [ ${ITEM} -lt ${ITEMS_PER_PAGE} ]; do + RELEASE=$(echo ${REL_JSON} | jq ".[${ITEM}].tag_name" | tr -d '"') + if [ "$?" -ne "0" ]; then + echo >&2 "Error parsing release from response." + exit 1 + fi + if [[ ${RELEASE} == ${RELEASE_TAG_PREFIX}* ]]; then + download_release "${RELEASE}" + break + else + ITEM=$((ITEM + 1)) + fi + done + PAGE=$((PAGE + 1)) + if [ ${PAGE} -gt ${MAX_PAGES} ]; then + echo "No ${RELEASE_TAG_PREFIX} release found in the first ${MAX_PAGES} with ${ITEMS_PER_PAGE} items per page, aborting." 
+ exit 1 + fi + done +} -echo "No commits with artifacts found" -exit 4 +find_and_download_release diff --git a/rs/rust_canisters/canister_test/src/canister.rs b/rs/rust_canisters/canister_test/src/canister.rs index 035fa531ee2..89e395bf1d6 100644 --- a/rs/rust_canisters/canister_test/src/canister.rs +++ b/rs/rust_canisters/canister_test/src/canister.rs @@ -532,7 +532,7 @@ pub struct Canister<'a> { wasm: Option, } -impl<'a> Canister<'a> { +impl Canister<'_> { pub fn is_runtime_local(&self) -> bool { match self.runtime { Runtime::Remote(_) => false, @@ -542,7 +542,7 @@ impl<'a> Canister<'a> { } } -impl<'a> fmt::Debug for Canister<'a> { +impl fmt::Debug for Canister<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "client-side view of canister {}", self.canister_id) } @@ -891,7 +891,7 @@ pub struct Install<'a> { pub num_cycles: Option, } -impl<'a> Query<'a> { +impl Query<'_> { pub async fn bytes(&self, payload: Vec) -> Result, String> { let canister = self.canister; match canister.runtime { @@ -972,7 +972,7 @@ impl<'a> Query<'a> { } } -impl<'a> Update<'a> { +impl Update<'_> { pub async fn bytes(&self, payload: Vec) -> Result, String> { let canister = self.canister; match canister.runtime { diff --git a/rs/rust_canisters/dfn_candid/src/lib.rs b/rs/rust_canisters/dfn_candid/src/lib.rs index 7c31d2e8b82..c292858a37b 100644 --- a/rs/rust_canisters/dfn_candid/src/lib.rs +++ b/rs/rust_canisters/dfn_candid/src/lib.rs @@ -116,7 +116,7 @@ impl FromWire /// this is a private mirror of the type in dfn_core::api which generates the /// serialization/deserialization for it without putting a dependency on candid /// in dfn_core - +/// /// This is a bit of a weird type witness. Candid is multi arity in both inputs /// and outputs the outputs don't fit in well with rust. 
To make writing candid /// nicer we assume that every function is going to try and return one value, if diff --git a/rs/rust_canisters/dfn_core/src/stable.rs b/rs/rust_canisters/dfn_core/src/stable.rs index 68dbfe1b541..d4cfd4df04b 100644 --- a/rs/rust_canisters/dfn_core/src/stable.rs +++ b/rs/rust_canisters/dfn_core/src/stable.rs @@ -11,7 +11,6 @@ const PAGE_SIZE: f64 = 64.0 * 1024.0; /// +--------+-----------------+--------> /// | length | content | junk /// +--------+-----------------+--------> - const LENGTH_BYTES: u32 = 4; pub fn stable64_size() -> u64 { diff --git a/rs/rust_canisters/memory_test/src/main.rs b/rs/rust_canisters/memory_test/src/main.rs index 8c09b785fbf..6014e0207cc 100644 --- a/rs/rust_canisters/memory_test/src/main.rs +++ b/rs/rust_canisters/memory_test/src/main.rs @@ -423,7 +423,7 @@ fn copy() { let step = operation.step.unwrap_or(ELEMENT_SIZE) / ELEMENT_SIZE; let value = operation.value.unwrap_or_else(|| rand(0, u8::MAX)); // Address can't exceed 4GiB WASM memory - let len = (operation.size as usize + ELEMENT_SIZE - 1) / ELEMENT_SIZE; + let len = (operation.size as usize).div_ceil(ELEMENT_SIZE); assert!(2 * len <= MEMORY_LEN); MEMORY.with(|memory| { let mut memory_ref = memory.borrow_mut(); diff --git a/rs/rust_canisters/pmap/canister/main.rs b/rs/rust_canisters/pmap/canister/main.rs index 9bf061382c9..fbfbe9a8784 100644 --- a/rs/rust_canisters/pmap/canister/main.rs +++ b/rs/rust_canisters/pmap/canister/main.rs @@ -1,3 +1,6 @@ +// TODO: EXC-1841 +#![allow(static_mut_refs)] + use dfn_core::api::print; use dfn_macro::{query, update}; use std::ptr::{addr_of, addr_of_mut}; diff --git a/rs/rust_canisters/random_traffic_test/src/main.rs b/rs/rust_canisters/random_traffic_test/src/main.rs index ce928a0553e..dad560394f6 100644 --- a/rs/rust_canisters/random_traffic_test/src/main.rs +++ b/rs/rust_canisters/random_traffic_test/src/main.rs @@ -265,7 +265,7 @@ async fn handle_call(msg: Message) -> Vec { fn should_make_downstream_call() -> bool { 
RNG.with_borrow_mut(|rng| { WeightedIndex::new([CALL_WEIGHT.get(), REPLY_WEIGHT.get()]) - .map_or(false, |dist| dist.sample(rng) == 0) + .is_ok_and(|dist| dist.sample(rng) == 0) }) } diff --git a/rs/sns/cli/src/lib.rs b/rs/sns/cli/src/lib.rs index fd5f82d58c2..76e6e001616 100644 --- a/rs/sns/cli/src/lib.rs +++ b/rs/sns/cli/src/lib.rs @@ -509,6 +509,8 @@ impl NnsGovernanceCanister { proposer: &NeuronIdOrSubaccount, proposal: &Proposal, ) -> Result { + // TODO: Jira ticket NNS1-3555 + #[allow(non_local_definitions)] impl Request for ManageNeuron { type Response = ManageNeuronResponse; const METHOD_NAME: &'static str = "manage_neuron"; @@ -599,13 +601,13 @@ enum RunCommandError<'a> { }, } -impl<'a> Display for RunCommandError<'a> { +impl Display for RunCommandError<'_> { fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!(formatter, "{}", self.new_report()) } } -impl<'a> RunCommandError<'a> { +impl RunCommandError<'_> { fn new_report(&self) -> String { match self { RunCommandError::UnableToRunCommand { command, error } => { diff --git a/rs/sns/governance/canister/canister.rs b/rs/sns/governance/canister/canister.rs index f29c9e353b0..6c883c86b6e 100644 --- a/rs/sns/governance/canister/canister.rs +++ b/rs/sns/governance/canister/canister.rs @@ -1,3 +1,6 @@ +// TODO: Jira ticket NNS1-3556 +#![allow(static_mut_refs)] + use async_trait::async_trait; use ic_base_types::{CanisterId, PrincipalId}; use ic_canister_log::log; diff --git a/rs/sns/governance/src/proposal.rs b/rs/sns/governance/src/proposal.rs index eb99082247a..a620fe0d804 100644 --- a/rs/sns/governance/src/proposal.rs +++ b/rs/sns/governance/src/proposal.rs @@ -100,7 +100,7 @@ impl Proposal { pub(crate) fn allowed_when_resources_are_low(&self) -> bool { self.action .as_ref() - .map_or(false, |a| a.allowed_when_resources_are_low()) + .is_some_and(|a| a.allowed_when_resources_are_low()) } /// Returns a clone of self, except that "large blob fields" are replaced diff --git 
a/rs/sns/governance/src/types.rs b/rs/sns/governance/src/types.rs index 6ae7ff68d63..1f11db29d60 100644 --- a/rs/sns/governance/src/types.rs +++ b/rs/sns/governance/src/types.rs @@ -969,6 +969,8 @@ impl fmt::Display for GovernanceError { } } +impl std::error::Error for crate::pb::v1::GovernanceError {} + impl From for GovernanceError { fn from(nervous_system_error: NervousSystemError) -> Self { GovernanceError { diff --git a/rs/sns/integration_tests/src/proposals.rs b/rs/sns/integration_tests/src/proposals.rs index daf4ac9a9c0..3ac6b042fdf 100644 --- a/rs/sns/integration_tests/src/proposals.rs +++ b/rs/sns/integration_tests/src/proposals.rs @@ -36,6 +36,8 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +const EXPECTED_MAX_BALLOT_AGE: f64 = 60.0; + const MOTION_PROPOSAL_ACTION_TYPE: u64 = 1; const VOTING_REWARDS_PARAMETERS: VotingRewardsParameters = VotingRewardsParameters { @@ -346,7 +348,7 @@ fn test_voting_with_three_neurons_with_the_same_stake() { ballot ); assert!( - age_seconds < 30.0, + age_seconds < EXPECTED_MAX_BALLOT_AGE, "age_seconds = {}. ballot = {:?}", age_seconds, ballot diff --git a/rs/sns/integration_tests/src/upgrade_canister.rs b/rs/sns/integration_tests/src/upgrade_canister.rs index 2eb886dc4a6..de01de27d3a 100644 --- a/rs/sns/integration_tests/src/upgrade_canister.rs +++ b/rs/sns/integration_tests/src/upgrade_canister.rs @@ -45,6 +45,8 @@ lazy_static! 
{ pub static ref EMPTY_WASM: Vec = vec![0, 0x61, 0x73, 0x6D, 1, 0, 0, 0]; } +const EXPECTED_SNS_DAPP_CANISTER_UPGRADE_TIME_SECONDS: u64 = 60; + // Note: Tests for UpgradeSnsToNextVersion action is in rs/nns/sns-wasm/tests/upgrade_sns_instance.rs fn setup_sns( @@ -445,16 +447,16 @@ fn test_upgrade_canister_proposal_execution_fail() { action ), }; - fn age_s(t: u64) -> f64 { + fn age_s(t: u64) -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() - .as_secs_f64() - - (t as f64) + .as_secs() + .saturating_sub(t) } let decision_age_s = age_s(proposal.decided_timestamp_seconds); assert!( - decision_age_s < 30.0, + decision_age_s < EXPECTED_SNS_DAPP_CANISTER_UPGRADE_TIME_SECONDS, "decision_age_s: {}, proposal: {:?}", decision_age_s, proposal @@ -466,7 +468,7 @@ fn test_upgrade_canister_proposal_execution_fail() { ); let failure_age_s = age_s(proposal.failed_timestamp_seconds); assert!( - failure_age_s < 30.0, + failure_age_s < EXPECTED_SNS_DAPP_CANISTER_UPGRADE_TIME_SECONDS, "failure_age_s: {}, proposal: {:?}", failure_age_s, proposal diff --git a/rs/sns/integration_tests/test_canisters/sns_governance_mem_test_canister.rs b/rs/sns/integration_tests/test_canisters/sns_governance_mem_test_canister.rs index bce4a43db4b..26f7306e4a9 100644 --- a/rs/sns/integration_tests/test_canisters/sns_governance_mem_test_canister.rs +++ b/rs/sns/integration_tests/test_canisters/sns_governance_mem_test_canister.rs @@ -1,3 +1,6 @@ +// TODO: Jira ticket NNS1-3556 +#![allow(static_mut_refs)] + //! This is a special-purpose canister to create a large Governance proto and //! serialize it to stable memory in a format that is compatible with the real //! governance canister. 
diff --git a/rs/sns/swap/canister/canister.rs b/rs/sns/swap/canister/canister.rs index 1e9b1484caf..4c047678fc6 100644 --- a/rs/sns/swap/canister/canister.rs +++ b/rs/sns/swap/canister/canister.rs @@ -1,3 +1,6 @@ +// TODO: Jira ticket NNS1-3556 +#![allow(static_mut_refs)] + use ic_base_types::{CanisterId, PrincipalId}; use ic_canister_log::log; use ic_canisters_http_types::{HttpRequest, HttpResponse, HttpResponseBuilder}; diff --git a/rs/sns/swap/src/swap.rs b/rs/sns/swap/src/swap.rs index e4f58e45e0c..324424faa2e 100644 --- a/rs/sns/swap/src/swap.rs +++ b/rs/sns/swap/src/swap.rs @@ -2535,7 +2535,7 @@ impl Swap { if request .subaccount .as_ref() - .map_or(false, |subaccount| subaccount.len() != 32) + .is_some_and(|subaccount| subaccount.len() != 32) { return NewSaleTicketResponse::err_invalid_subaccount(); } @@ -3768,7 +3768,7 @@ impl<'a> SwapDigest<'a> { } } -impl<'a> fmt::Debug for SwapDigest<'a> { +impl fmt::Debug for SwapDigest<'_> { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { let Swap { lifecycle, diff --git a/rs/sns/test_utils/src/icrc1.rs b/rs/sns/test_utils/src/icrc1.rs index 5076c3d8f5c..e50faeb55f4 100644 --- a/rs/sns/test_utils/src/icrc1.rs +++ b/rs/sns/test_utils/src/icrc1.rs @@ -9,15 +9,15 @@ use icrc_ledger_types::icrc1::{ }; use num_traits::ToPrimitive; -pub async fn balance_of<'a>(canister: &Canister<'a>, account: Account) -> Result { +pub async fn balance_of(canister: &Canister<'_>, account: Account) -> Result { canister .query_("icrc1_balance_of", candid_one, account) .await .map(|n: Nat| n.0.to_u64().unwrap()) } -pub async fn transfer<'a>( - canister: &Canister<'a>, +pub async fn transfer( + canister: &Canister<'_>, sender: &Sender, args: TransferArg, ) -> Result { diff --git a/rs/sns/test_utils/src/itest_helpers.rs b/rs/sns/test_utils/src/itest_helpers.rs index db114b9d9be..f21aee316ac 100644 --- a/rs/sns/test_utils/src/itest_helpers.rs +++ b/rs/sns/test_utils/src/itest_helpers.rs @@ -1395,10 +1395,7 @@ pub async fn 
set_up_governance_canister( } /// Compiles the ledger canister, builds it's initial payload and installs it -pub async fn install_ledger_canister<'runtime, 'a>( - canister: &mut Canister<'runtime>, - args: LedgerArgument, -) { +pub async fn install_ledger_canister(canister: &mut Canister<'_>, args: LedgerArgument) { install_rust_canister_with_memory_allocation( canister, "ic-icrc1-ledger", @@ -1417,10 +1414,7 @@ pub async fn set_up_ledger_canister(runtime: &'_ Runtime, args: LedgerInitArgs) } /// Compiles the ledger index canister, builds it's initial payload and installs it -pub async fn install_index_ng_canister<'runtime, 'a>( - canister: &mut Canister<'runtime>, - args: Option, -) { +pub async fn install_index_ng_canister(canister: &mut Canister<'_>, args: Option) { install_rust_canister_with_memory_allocation( canister, "ic-icrc1-index-ng", diff --git a/rs/starter/src/main.rs b/rs/starter/src/main.rs index c3a30475a92..970b03ae284 100644 --- a/rs/starter/src/main.rs +++ b/rs/starter/src/main.rs @@ -205,7 +205,7 @@ fn main() -> Result<()> { .arg("--config-file") .args([config_path.to_str().unwrap()]); info!(log, "Executing {:?}", cmd); - cmd.exec(); + let _ = cmd.exec(); Ok(()) } diff --git a/rs/state_layout/src/state_layout.rs b/rs/state_layout/src/state_layout.rs index 1261b0bb98f..a84b8814350 100644 --- a/rs/state_layout/src/state_layout.rs +++ b/rs/state_layout/src/state_layout.rs @@ -112,14 +112,14 @@ impl AccessPolicy for WriteOnly { impl WritePolicy for WriteOnly {} -impl<'a, T> AccessPolicy for RwPolicy<'a, T> { +impl AccessPolicy for RwPolicy<'_, T> { fn check_dir(p: &Path) -> Result<(), LayoutError> { WriteOnly::check_dir(p) } } -impl<'a, T> ReadPolicy for RwPolicy<'a, T> {} -impl<'a, T> WritePolicy for RwPolicy<'a, T> {} +impl ReadPolicy for RwPolicy<'_, T> {} +impl WritePolicy for RwPolicy<'_, T> {} pub type CompleteCheckpointLayout = CheckpointLayout; diff --git a/rs/state_machine_tests/src/lib.rs b/rs/state_machine_tests/src/lib.rs index 
2001e69a856..89545047162 100644 --- a/rs/state_machine_tests/src/lib.rs +++ b/rs/state_machine_tests/src/lib.rs @@ -3591,7 +3591,7 @@ impl StateMachine { let canister_state = replicated_state .canister_state_mut(&canister_id) .unwrap_or_else(|| panic!("Canister {} does not exist", canister_id)); - let size = (data.len() + WASM_PAGE_SIZE_IN_BYTES - 1) / WASM_PAGE_SIZE_IN_BYTES; + let size = data.len().div_ceil(WASM_PAGE_SIZE_IN_BYTES); let memory = Memory::new(PageMap::from(data), NumWasmPages::new(size)); canister_state .execution_state diff --git a/rs/state_machine_tests/tests/tests.rs b/rs/state_machine_tests/tests/tests.rs index fadf407ef62..6501f493fda 100644 --- a/rs/state_machine_tests/tests/tests.rs +++ b/rs/state_machine_tests/tests/tests.rs @@ -40,6 +40,10 @@ fn test() { time.duration_since(SystemTime::UNIX_EPOCH).unwrap(), Duration::from_nanos(1_620_329_630_000_000_000) ); + + // Kill the task to avoid zombie process. + child.kill().unwrap(); + child.wait().unwrap(); } fn call_state_machine( diff --git a/rs/state_manager/src/manifest.rs b/rs/state_manager/src/manifest.rs index 4530c25e47b..bc53ef5786f 100644 --- a/rs/state_manager/src/manifest.rs +++ b/rs/state_manager/src/manifest.rs @@ -286,7 +286,7 @@ fn write_chunk_hash(hasher: &mut Sha256, chunk_info: &ChunkInfo, version: StateS /// Returns the number of chunks of size `max_chunk_size` required to cover a /// file of size `size_bytes`. fn count_chunks(size_bytes: u64, max_chunk_size: u32) -> usize { - (size_bytes as usize + max_chunk_size as usize - 1) / max_chunk_size as usize + (size_bytes as usize).div_ceil(max_chunk_size as usize) } /// Checks if the manifest was computed using specified max_chunk_size. 
diff --git a/rs/state_manager/src/manifest/tests/computation.rs b/rs/state_manager/src/manifest/tests/computation.rs index 53b7856a836..ab706b8c9fe 100644 --- a/rs/state_manager/src/manifest/tests/computation.rs +++ b/rs/state_manager/src/manifest/tests/computation.rs @@ -401,8 +401,7 @@ fn test_validate_sub_manifest() { let meta_manifest = build_meta_manifest(&manifest); let encoded_manifest = encode_manifest(&manifest); - let num = - (encoded_manifest.len() + DEFAULT_CHUNK_SIZE as usize - 1) / DEFAULT_CHUNK_SIZE as usize; + let num = encoded_manifest.len().div_ceil(DEFAULT_CHUNK_SIZE as usize); assert!( num > 1, "This test does not cover the case where the encoded manifest is divided into multiple pieces." diff --git a/rs/state_manager/src/tip.rs b/rs/state_manager/src/tip.rs index 0ae8a522a1b..96c6d114eb2 100644 --- a/rs/state_manager/src/tip.rs +++ b/rs/state_manager/src/tip.rs @@ -847,7 +847,7 @@ fn merge_to_base( merge_candidate.is_some() }); - return rewritten.iter().any(|b| *b); + rewritten.iter().any(|b| *b) } fn serialize_to_tip( diff --git a/rs/state_tool/src/commands/copy.rs b/rs/state_tool/src/commands/copy.rs index 9a8e2c25ec7..5adfda1592e 100644 --- a/rs/state_tool/src/commands/copy.rs +++ b/rs/state_tool/src/commands/copy.rs @@ -79,8 +79,10 @@ fn do_copy_with_state_layouts( } } - let src_metadata = load_metadata_proto(&src_layout.states_metadata()); - let mut dst_metadata = load_metadata_proto(&dst_layout.states_metadata()); + let src_metadata = load_metadata_proto(&src_layout.states_metadata()) + .map_err(|e| format!("Failed to read metadata: {}", e))?; + let mut dst_metadata = load_metadata_proto(&dst_layout.states_metadata()) + .map_err(|e| format!("Failed to read metadata: {}", e))?; for (src_height, dst_height) in heights { dst_layout @@ -94,7 +96,7 @@ fn do_copy_with_state_layouts( .join(StateLayout::checkpoint_name(dst_height)), None, ) - .map_err(|e| format!("Failed to import checkpoint: {}", e))?; + .map_err(|e| format!("Failed to copy 
checkpoint. Not all states might have been copied and some metadata might be missing: {}", e))?; if let Some(src_metadata_entry) = src_metadata.by_height.get(&src_height.get()) { dst_metadata @@ -103,22 +105,32 @@ fn do_copy_with_state_layouts( } } - let mut w = std::fs::File::create(dst_layout.states_metadata()).unwrap(); - let mut buf = vec![]; - dst_metadata.encode(&mut buf).unwrap(); - w.write_all(&buf[..]).unwrap(); + write_metadata_proto(&dst_layout.states_metadata(), &dst_metadata).map_err(|e| { + format!( + "Failed to write metadata. Metadata might be missing or corrupted in destination: {}", + e + ) + })?; Ok(()) } -fn load_metadata_proto(path: &Path) -> pb::StatesMetadata { +fn write_metadata_proto(path: &Path, metadata: &pb::StatesMetadata) -> Result<(), std::io::Error> { + let mut w = std::fs::File::create(path)?; + let mut buf = Vec::new(); + metadata.encode(&mut buf)?; + w.write_all(&buf[..])?; + Ok(()) +} + +fn load_metadata_proto(path: &Path) -> Result { if path.exists() { - let mut file = std::fs::File::open(path).unwrap(); + let mut file = std::fs::File::open(path)?; let mut buf = Vec::new(); - file.read_to_end(&mut buf).unwrap(); - pb::StatesMetadata::decode(&buf[..]).unwrap_or_default() + file.read_to_end(&mut buf)?; + Ok(pb::StatesMetadata::decode(&buf[..]).unwrap_or_default()) } else { - pb::StatesMetadata::default() + Ok(pb::StatesMetadata::default()) } } @@ -144,6 +156,7 @@ mod tests { assert!(dst_layout.checkpoint_heights().unwrap().is_empty()); assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() .by_height .is_empty()); @@ -160,15 +173,16 @@ mod tests { ); assert_eq!( load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() .by_height .len(), 1 ); - assert!( - load_metadata_proto(&dst_layout.states_metadata()).by_height[&1] - .manifest - .is_some() - ); + assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() + .by_height[&1] + .manifest + .is_some()); env.checkpointed_tick(); 
env.checkpointed_tick(); @@ -207,6 +221,7 @@ mod tests { assert!(dst_layout.checkpoint_heights().unwrap().is_empty()); assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() .by_height .is_empty()); @@ -226,20 +241,21 @@ mod tests { ); assert_eq!( load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() .by_height .len(), 2 ); - assert!( - load_metadata_proto(&dst_layout.states_metadata()).by_height[&1] - .manifest - .is_some() - ); - assert!( - load_metadata_proto(&dst_layout.states_metadata()).by_height[&3] - .manifest - .is_some() - ); + assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() + .by_height[&1] + .manifest + .is_some()); + assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() + .by_height[&3] + .manifest + .is_some()); } #[test] @@ -258,6 +274,7 @@ mod tests { assert!(dst_layout.checkpoint_heights().unwrap().is_empty()); assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() .by_height .is_empty()); @@ -274,14 +291,15 @@ mod tests { ); assert_eq!( load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() .by_height .len(), 1 ); - assert!( - load_metadata_proto(&dst_layout.states_metadata()).by_height[&4] - .manifest - .is_some() - ); + assert!(load_metadata_proto(&dst_layout.states_metadata()) + .unwrap() + .by_height[&4] + .manifest + .is_some()); } } diff --git a/rs/state_tool/src/main.rs b/rs/state_tool/src/main.rs index 4542b97226d..0529654deea 100644 --- a/rs/state_tool/src/main.rs +++ b/rs/state_tool/src/main.rs @@ -28,6 +28,7 @@ enum Opt { }, /// Imports replicated state from an external location. + /// Deprecated: use `copy` instead. #[clap(name = "import")] ImportState { /// Path to the state to import. 
diff --git a/rs/test_utilities/execution_environment/src/lib.rs b/rs/test_utilities/execution_environment/src/lib.rs index 6cc0ced08f1..389af42f7dd 100644 --- a/rs/test_utilities/execution_environment/src/lib.rs +++ b/rs/test_utilities/execution_environment/src/lib.rs @@ -2115,6 +2115,13 @@ impl ExecutionTestBuilder { self } + pub fn with_max_wasm64_memory_size(mut self, wasm_memory_size: NumBytes) -> Self { + self.execution_config + .embedders_config + .max_wasm64_memory_size = wasm_memory_size; + self + } + pub fn with_metering_type(mut self, metering_type: MeteringType) -> Self { self.execution_config.embedders_config.metering_type = metering_type; self diff --git a/rs/tests/BUILD.bazel b/rs/tests/BUILD.bazel index 6a2a583cc3f..47c31dae65a 100644 --- a/rs/tests/BUILD.bazel +++ b/rs/tests/BUILD.bazel @@ -239,18 +239,6 @@ symlink_dir( }, ) -symlink_dir( - name = "backup/binaries", - testonly = True, - targets = { - "//rs/backup:ic-backup": "ic-backup", - "//rs/replay:ic-replay": "ic-replay", - "//rs/canister_sandbox:compiler_sandbox": "compiler_sandbox", - "//rs/canister_sandbox:sandbox_launcher": "sandbox_launcher", - "//rs/canister_sandbox:canister_sandbox": "canister_sandbox", - }, -) - symlink_dir_test( name = "cup_compatibility/binaries", targets = { diff --git a/rs/tests/boundary_nodes/api_bn_decentralization_test.rs b/rs/tests/boundary_nodes/api_bn_decentralization_test.rs index b5c0e7f6d28..4b98b9d8fdb 100644 --- a/rs/tests/boundary_nodes/api_bn_decentralization_test.rs +++ b/rs/tests/boundary_nodes/api_bn_decentralization_test.rs @@ -511,14 +511,10 @@ async fn assert_api_bns_present_in_state_tree( .sorted_by(|a, b| Ord::cmp(&a.domain, &b.domain)) .collect::>(); - let are_expected_bns = api_bns_sorted - .iter() - .enumerate() - .map(|(idx, bn)| { - bn.domain == expected_api_bns[idx].domain - && bn.ipv6_address == expected_api_bns[idx].ipv6_address - }) - .all(|is_match| is_match); + let are_expected_bns = api_bns_sorted.iter().enumerate().all(|(idx, bn)| { 
+ bn.domain == expected_api_bns[idx].domain + && bn.ipv6_address == expected_api_bns[idx].ipv6_address + }); if !are_expected_bns { bail!("Expected API BNs haven't yet appeared in the state tree ..."); diff --git a/rs/tests/ckbtc/src/lib.rs b/rs/tests/ckbtc/src/lib.rs index 93746c04726..896e5920f6f 100644 --- a/rs/tests/ckbtc/src/lib.rs +++ b/rs/tests/ckbtc/src/lib.rs @@ -524,7 +524,7 @@ pub async fn install_bitcoin_canister_with_network( bitcoin_canister.canister_id() } -pub async fn install_icrc1_ledger<'a>(canister: &mut Canister<'a>, args: &LedgerArgument) { +pub async fn install_icrc1_ledger(canister: &mut Canister<'_>, args: &LedgerArgument) { install_rust_canister_from_path( canister, get_dependency_path(env::var("LEDGER_WASM_PATH").expect("LEDGER_WASM_PATH not set")), diff --git a/rs/tests/ckbtc/src/utils.rs b/rs/tests/ckbtc/src/utils.rs index 25b2674f871..9d5c1ed9f69 100644 --- a/rs/tests/ckbtc/src/utils.rs +++ b/rs/tests/ckbtc/src/utils.rs @@ -85,8 +85,8 @@ pub fn generate_blocks(btc_client: &Client, logger: &Logger, nb_blocks: u64, add /// Wait for the expected balance to be available at the given btc address. /// Timeout after SHORT_TIMEOUT if the expected balance is not reached. -pub async fn wait_for_bitcoin_balance<'a>( - canister: &UniversalCanister<'a>, +pub async fn wait_for_bitcoin_balance( + canister: &UniversalCanister<'_>, logger: &Logger, expected_balance_in_satoshis: u64, btc_address: &Address, @@ -107,7 +107,7 @@ pub async fn wait_for_bitcoin_balance<'a>( /// Wait for the expected balance to be available at the given account. /// Timeout after SHORT_TIMEOUT if the expected balance is not reached. 
-pub async fn wait_for_ledger_balance<'a>( +pub async fn wait_for_ledger_balance( ledger_agent: &Icrc1Agent, logger: &Logger, expected_balance: Nat, @@ -422,10 +422,7 @@ pub fn get_btc_client(env: &TestEnv) -> Client { .unwrap() } -pub async fn get_bitcoin_balance<'a>( - canister: &UniversalCanister<'a>, - btc_address: &Address, -) -> u64 { +pub async fn get_bitcoin_balance(canister: &UniversalCanister<'_>, btc_address: &Address) -> u64 { canister .update(wasm().call(management::bitcoin_get_balance( btc_address.to_string(), diff --git a/rs/tests/consensus/backup/BUILD.bazel b/rs/tests/consensus/backup/BUILD.bazel index 31b6987208e..53720a8d63b 100644 --- a/rs/tests/consensus/backup/BUILD.bazel +++ b/rs/tests/consensus/backup/BUILD.bazel @@ -29,10 +29,20 @@ rust_library( BACKUP_RUNTIME_DEPS = UNIVERSAL_CANISTER_RUNTIME_DEPS + [ # Keep sorted. - "//rs/tests:backup/binaries", + "//rs/backup:ic-backup", + "//rs/canister_sandbox:canister_sandbox", + "//rs/canister_sandbox:compiler_sandbox", + "//rs/canister_sandbox:sandbox_launcher", + "//rs/replay:ic-replay", ] -BACKUP_ENV = UNIVERSAL_CANISTER_ENV +BACKUP_ENV = UNIVERSAL_CANISTER_ENV | { + "IC_BACKUP_PATH": "$(rootpath //rs/backup:ic-backup)", + "IC_REPLAY_PATH": "$(rootpath //rs/replay:ic-replay)", + "COMPILER_SANDBOX_PATH": "$(rootpath //rs/canister_sandbox:compiler_sandbox)", + "SANDBOX_LAUNCHER_PATH": "$(rootpath //rs/canister_sandbox:sandbox_launcher)", + "CANISTER_SANDBOX_PATH": "$(rootpath //rs/canister_sandbox:canister_sandbox)", +} system_test_nns( name = "backup_manager_downgrade_test", @@ -44,6 +54,7 @@ system_test_nns( "long_test", # since it takes longer than 5 minutes. 
], target_compatible_with = ["@platforms//os:linux"], # requires libssh that does not build on Mac OS + test_timeout = "eternal", # this test often times out with the default 15 minute timeout so we allow more time uses_guestos_dev_test = True, runtime_deps = GUESTOS_RUNTIME_DEPS + @@ -68,6 +79,7 @@ system_test_nns( "long_test", # since it takes longer than 5 minutes. ], target_compatible_with = ["@platforms//os:linux"], # requires libssh that does not build on Mac OS + test_timeout = "eternal", # this test often times out with the default 15 minute timeout so we allow more time uses_guestos_dev_test = True, runtime_deps = GUESTOS_RUNTIME_DEPS + diff --git a/rs/tests/consensus/backup/backup_manager_downgrade_test.rs b/rs/tests/consensus/backup/backup_manager_downgrade_test.rs index 7cabb0ee90c..9af6ee77fbd 100644 --- a/rs/tests/consensus/backup/backup_manager_downgrade_test.rs +++ b/rs/tests/consensus/backup/backup_manager_downgrade_test.rs @@ -8,7 +8,7 @@ use std::time::Duration; fn main() -> Result<()> { SystemTestGroup::new() .with_setup(setup_downgrade) - .with_timeout_per_test(Duration::from_secs(15 * 60)) + .with_timeout_per_test(Duration::from_secs(25 * 60)) .add_test(systest!(test_downgrade)) .execute_from_args()?; diff --git a/rs/tests/consensus/backup/backup_manager_upgrade_test.rs b/rs/tests/consensus/backup/backup_manager_upgrade_test.rs index d2b440138a8..bde1b83dbb5 100644 --- a/rs/tests/consensus/backup/backup_manager_upgrade_test.rs +++ b/rs/tests/consensus/backup/backup_manager_upgrade_test.rs @@ -8,7 +8,7 @@ use std::time::Duration; fn main() -> Result<()> { SystemTestGroup::new() .with_setup(setup_upgrade) - .with_timeout_per_test(Duration::from_secs(15 * 60)) + .with_timeout_per_test(Duration::from_secs(25 * 60)) .add_test(systest!(test_upgrade)) .execute_from_args()?; diff --git a/rs/tests/consensus/backup/common.rs b/rs/tests/consensus/backup/common.rs index 79192665cda..5ad31158aef 100644 --- a/rs/tests/consensus/backup/common.rs +++ 
b/rs/tests/consensus/backup/common.rs @@ -181,12 +181,32 @@ fn test(env: TestEnv, binary_version: String, target_version: String) { fs::create_dir_all(&backup_binaries_dir).expect("failure creating backup binaries directory"); // Copy all the binaries needed for the replay of the current version in order to avoid downloading them - let testing_dir = get_dependency_path("rs/tests"); - let binaries_path = testing_dir.join("backup/binaries"); - copy_file(&binaries_path, &backup_binaries_dir, "ic-replay"); - copy_file(&binaries_path, &backup_binaries_dir, "sandbox_launcher"); - copy_file(&binaries_path, &backup_binaries_dir, "canister_sandbox"); - copy_file(&binaries_path, &backup_binaries_dir, "compiler_sandbox"); + copy_file( + &get_dependency_path(std::env::var("IC_REPLAY_PATH").expect("IC_REPLAY_PATH not set")), + &backup_binaries_dir, + "ic-replay", + ); + copy_file( + &get_dependency_path( + std::env::var("SANDBOX_LAUNCHER_PATH").expect("SANDBOX_LAUNCHER_PATH not set"), + ), + &backup_binaries_dir, + "sandbox_launcher", + ); + copy_file( + &get_dependency_path( + std::env::var("CANISTER_SANDBOX_PATH").expect("CANISTER_SANDBOX_PATH not set"), + ), + &backup_binaries_dir, + "canister_sandbox", + ); + copy_file( + &get_dependency_path( + std::env::var("COMPILER_SANDBOX_PATH").expect("COMPILER_SANDBOX_PATH not set"), + ), + &backup_binaries_dir, + "compiler_sandbox", + ); // Generate keypair and store the private key info!(log, "Create backup user credentials"); @@ -282,8 +302,9 @@ fn test(env: TestEnv, binary_version: String, target_version: String) { write!(f, "{}", config_str).expect("Should be able to write the config file"); info!(log, "Start the backup process in a separate thread"); - let ic_backup_path = binaries_path.join("ic-backup"); - let mut command = Command::new(&ic_backup_path); + let ic_backup_path = + &get_dependency_path(std::env::var("IC_BACKUP_PATH").expect("IC_BACKUP_PATH not set")); + let mut command = Command::new(ic_backup_path); command 
.arg("--config-file") .arg(&config_file) @@ -362,6 +383,7 @@ fn test(env: TestEnv, binary_version: String, target_version: String) { "Restart and wait for cold storage and divergence to happen" ); child.kill().expect("Error killing backup process"); + child.wait().expect("Error waiting for backup process"); let checkpoint = some_checkpoint_dir(&backup_dir, &subnet_id).expect("Checkpoint doesn't exist"); @@ -379,7 +401,7 @@ fn test(env: TestEnv, binary_version: String, target_version: String) { modify_byte_in_file(memory_artifact_path).expect("Modifying a byte failed"); info!(log, "Start again the backup process in a separate thread"); - let mut command = Command::new(&ic_backup_path); + let mut command = Command::new(ic_backup_path); command .arg("--config-file") .arg(&config_file) @@ -432,6 +454,7 @@ fn test(env: TestEnv, binary_version: String, target_version: String) { info!(log, "Kill child process"); child.kill().expect("Error killing backup process"); + child.wait().expect("Error waiting for backup process"); assert!(hash_mismatch); info!(log, "There was a divergence of the state"); @@ -498,12 +521,8 @@ fn dir_exists_and_have_file(log: &Logger, dir: &PathBuf) -> bool { have_file } -fn copy_file(binaries_path: &Path, backup_binaries_dir: &Path, file_name: &str) { - fs::copy( - binaries_path.join(file_name), - backup_binaries_dir.join(file_name), - ) - .expect("failed to copy file"); +fn copy_file(binary_path: &Path, backup_binaries_dir: &Path, file_name: &str) { + fs::copy(binary_path, backup_binaries_dir.join(file_name)).expect("failed to copy file"); } fn highest_dir_entry(dir: &PathBuf, radix: u32) -> u64 { diff --git a/rs/tests/consensus/catch_up_test_common/src/lib.rs b/rs/tests/consensus/catch_up_test_common/src/lib.rs index 8470db27b08..55e0dfb101f 100644 --- a/rs/tests/consensus/catch_up_test_common/src/lib.rs +++ b/rs/tests/consensus/catch_up_test_common/src/lib.rs @@ -1,4 +1,4 @@ -/// Common test function for a couple of catch up tests. +//! 
Common test function for a couple of catch up tests. const DKG_INTERVAL: u64 = 150; const CATCH_UP_RETRIES: u64 = 40; diff --git a/rs/tests/consensus/orchestrator/rotate_ecdsa_idkg_key_test.rs b/rs/tests/consensus/orchestrator/rotate_ecdsa_idkg_key_test.rs index 69bf064dea9..86a5eb0aa8c 100644 --- a/rs/tests/consensus/orchestrator/rotate_ecdsa_idkg_key_test.rs +++ b/rs/tests/consensus/orchestrator/rotate_ecdsa_idkg_key_test.rs @@ -242,7 +242,7 @@ fn test(env: TestEnv) { assert!(last_rotation .duration_since(first_rotation) - .map_or(false, |d| d + gamma <= delta)); + .is_ok_and(|d| d + gamma <= delta)); // Ensure signing still works for (key_id, public_key) in public_keys { diff --git a/rs/tests/consensus/orchestrator/unstuck_subnet_test.rs b/rs/tests/consensus/orchestrator/unstuck_subnet_test.rs index 97cb79cd63b..6759722b0c9 100644 --- a/rs/tests/consensus/orchestrator/unstuck_subnet_test.rs +++ b/rs/tests/consensus/orchestrator/unstuck_subnet_test.rs @@ -205,7 +205,7 @@ fn test(test_env: TestEnv) { fn have_sha_errors(session: &Session) -> bool { let cmd = "journalctl | grep -c 'FileHashMismatchError'".to_string(); - execute_bash_command(session, cmd).map_or(false, |res| res.trim().parse::().unwrap() > 0) + execute_bash_command(session, cmd).is_ok_and(|res| res.trim().parse::().unwrap() > 0) } fn main() -> Result<()> { diff --git a/rs/tests/consensus/subnet_recovery/common.rs b/rs/tests/consensus/subnet_recovery/common.rs index 4106012edd8..4d824815b05 100644 --- a/rs/tests/consensus/subnet_recovery/common.rs +++ b/rs/tests/consensus/subnet_recovery/common.rs @@ -573,7 +573,7 @@ fn halt_subnet( message.cursor ), ); - if res.map_or(false, |r| r.trim().parse::().unwrap() > 0) { + if res.is_ok_and(|r| r.trim().parse::().unwrap() > 0) { Ok(()) } else { bail!("Did not find log entry that consensus is halted.") @@ -660,7 +660,7 @@ fn corrupt_latest_cup(subnet: &SubnetSnapshot, recovery: &Recovery, logger: &Log message.cursor ), ); - if res.map_or(false, |r| 
r.trim().parse::().unwrap() > 0) { + if res.is_ok_and( |r| r.trim().parse::().unwrap() > 0) { Ok(()) } else { bail!("Did not find log entry that cup is corrupted.") diff --git a/rs/tests/cross_chain/BUILD.bazel b/rs/tests/cross_chain/BUILD.bazel index 77c73ae7fb8..7aa0b3ee321 100644 --- a/rs/tests/cross_chain/BUILD.bazel +++ b/rs/tests/cross_chain/BUILD.bazel @@ -53,6 +53,7 @@ system_test_nns( "LEDGER_WASM_PATH": "$(rootpath //rs/ledger_suite/icrc1/ledger:ledger_canister_u256.wasm.gz)", "LEDGER_SUITE_ORCHESTRATOR_WASM_PATH": "$(rootpath //rs/ethereum/ledger-suite-orchestrator:ledger_suite_orchestrator_canister.wasm.gz)", }, + tags = ["long_test"], target_compatible_with = ["@platforms//os:linux"], # requires libssh that does not build on Mac OS runtime_deps = BOUNDARY_NODE_GUESTOS_RUNTIME_DEPS + diff --git a/rs/tests/cross_chain/ic_xc_cketh_test.rs b/rs/tests/cross_chain/ic_xc_cketh_test.rs index 9c03cb331f9..4f0d3ee2436 100644 --- a/rs/tests/cross_chain/ic_xc_cketh_test.rs +++ b/rs/tests/cross_chain/ic_xc_cketh_test.rs @@ -871,7 +871,7 @@ struct LedgerCanister<'a> { canister: Canister<'a>, } -impl<'a> LedgerCanister<'a> { +impl LedgerCanister<'_> { fn principal(&self) -> Principal { self.canister.canister_id().get().0 } @@ -881,7 +881,7 @@ struct CkEthMinterCanister<'a> { canister: Canister<'a>, } -impl<'a> CkEthMinterCanister<'a> { +impl CkEthMinterCanister<'_> { async fn minter_address(&self) -> String { self.canister .update_("minter_address", candid, ()) @@ -919,7 +919,7 @@ struct LedgerSuiteOrchestratorCanister<'a> { canister: Canister<'a>, } -impl<'a> LedgerSuiteOrchestratorCanister<'a> { +impl LedgerSuiteOrchestratorCanister<'_> { async fn upgrade(&mut self, arg: LedgerSuiteOrchestratorUpgradeArg) { self.canister .upgrade_to_self_binary(Encode!(&OrchestratorArg::UpgradeArg(arg)).unwrap()) diff --git a/rs/tests/cross_chain/ic_xc_ledger_suite_orchestrator_test.rs b/rs/tests/cross_chain/ic_xc_ledger_suite_orchestrator_test.rs index 43b52e7de9c..29031a79058 
100644 --- a/rs/tests/cross_chain/ic_xc_ledger_suite_orchestrator_test.rs +++ b/rs/tests/cross_chain/ic_xc_ledger_suite_orchestrator_test.rs @@ -251,12 +251,12 @@ async fn install_nns_controlled_canister<'a>( canister } -async fn upgrade_ledger_suite_orchestrator_by_nns_proposal<'a>( +async fn upgrade_ledger_suite_orchestrator_by_nns_proposal( logger: &slog::Logger, governance_canister: &Canister<'_>, root_canister: &Canister<'_>, canister_wasm: CanisterModule, - orchestrator: &LedgerOrchestratorCanister<'a>, + orchestrator: &LedgerOrchestratorCanister<'_>, upgrade_arg: OrchestratorArg, ) { use ic_canister_client::Sender; @@ -431,7 +431,7 @@ struct LedgerOrchestratorCanister<'a> { canister: Canister<'a>, } -impl<'a> LedgerOrchestratorCanister<'a> { +impl LedgerOrchestratorCanister<'_> { async fn call_canister_ids(&self, contract: Erc20Contract) -> Option { self.canister .query_("canister_ids", dfn_candid::candid, (contract,)) diff --git a/rs/tests/crypto/canister_sig_verification_cache_test.rs b/rs/tests/crypto/canister_sig_verification_cache_test.rs index e925fac44b6..1c960a0b0cc 100644 --- a/rs/tests/crypto/canister_sig_verification_cache_test.rs +++ b/rs/tests/crypto/canister_sig_verification_cache_test.rs @@ -58,9 +58,9 @@ const RETRY_DELAY: Duration = Duration::from_secs(1); const NUM_RETRIES: usize = 100; /// Range for the random initialization of the number of users in this test -const NUM_USERS_RANGE: RangeInclusive = 5..=15; +const NUM_USERS_RANGE: RangeInclusive = 5..=10; /// Range for the random initialization of the number of calls per user in this test -const NUM_CALLS_PER_USER_RANGE: RangeInclusive = 5..=15; +const NUM_CALLS_PER_USER_RANGE: RangeInclusive = 5..=10; fn main() -> Result<()> { SystemTestGroup::new() diff --git a/rs/tests/driver/src/driver/bootstrap.rs b/rs/tests/driver/src/driver/bootstrap.rs index cda693871e4..e0ce073c985 100644 --- a/rs/tests/driver/src/driver/bootstrap.rs +++ b/rs/tests/driver/src/driver/bootstrap.rs @@ -17,8 +17,6 @@ 
use crate::driver::{ }, test_setup::InfraProvider, }; -use crate::k8s::datavolume::DataVolumeContentType; -use crate::k8s::images::*; use crate::k8s::tnet::{TNet, TNode}; use crate::util::block_on; use anyhow::{bail, Result}; @@ -286,24 +284,12 @@ pub fn setup_and_start_vms( let conf_img_path = PathBuf::from(&node.node_path).join(CONF_IMG_FNAME); match InfraProvider::read_attribute(&t_env) { InfraProvider::K8s => { - let url = format!( - "{}/{}", - tnet_node.config_url.clone().expect("missing config_url"), - CONF_IMG_FNAME - ); - info!( - t_env.logger(), - "Uploading image {} to {}", - conf_img_path.clone().display().to_string(), - url.clone() - ); - block_on(upload_image(conf_img_path.as_path(), &url)) - .expect("Failed to upload config image"); - block_on(tnet_node.deploy_config_image( - CONF_IMG_FNAME, - "config", - DataVolumeContentType::Kubevirt, - )) + block_on( + tnet_node.build_oci_config_image( + &conf_img_path, + &tnet_node.name.clone().unwrap(), + ), + ) .expect("deploying config image failed"); block_on(tnet_node.start()).expect("starting vm failed"); } diff --git a/rs/tests/driver/src/driver/boundary_node.rs b/rs/tests/driver/src/driver/boundary_node.rs index 9c639dae589..681e75c62d5 100644 --- a/rs/tests/driver/src/driver/boundary_node.rs +++ b/rs/tests/driver/src/driver/boundary_node.rs @@ -27,8 +27,6 @@ use crate::{ }, test_setup::{GroupSetup, InfraProvider}, }, - k8s::datavolume::DataVolumeContentType, - k8s::images::upload_image, k8s::tnet::TNet, retry_with_msg, util::{block_on, create_agent, create_agent_mapping}, @@ -345,19 +343,10 @@ impl BoundaryNodeWithVm { } else { let tnet = TNet::read_attribute(env); let tnet_node = tnet.nodes.last().expect("no nodes"); - block_on(upload_image( - compressed_img_path, - &format!( - "{}/{}", - tnet_node.config_url.clone().expect("missing config url"), - &mk_compressed_img_path() - ), - ))?; - block_on(tnet_node.deploy_config_image( - &mk_compressed_img_path(), - "config", - DataVolumeContentType::Kubevirt, - 
)) + block_on( + tnet_node + .build_oci_config_image(&compressed_img_path, &tnet_node.name.clone().unwrap()), + ) .expect("deploying config image failed"); block_on(tnet_node.start()).expect("starting vm failed"); } diff --git a/rs/tests/driver/src/driver/simulate_network.rs b/rs/tests/driver/src/driver/simulate_network.rs index 006961bc3af..2947e35578f 100644 --- a/rs/tests/driver/src/driver/simulate_network.rs +++ b/rs/tests/driver/src/driver/simulate_network.rs @@ -192,7 +192,7 @@ impl ProductionSubnetTopology { /// Query: /// https://victoria.ch1-obs1.dfinity.network/select/0/vmui/#/?g0.expr=sum+by+%28ic_node%2Cpeer%29+%28quic_transport_quinn_path_rtt_seconds%7Bic_subnet%3D%22uzr34-akd3s-xrdag-3ql62-ocgoh-ld2ao-tamcv-54e7j-krwgb-2gm4z-oqe%22%7D%29&g0.range_input=21h24m51s870ms&g0.end_input=2024-08-20T08%3A22%3A07&g0.relative_time=none&g0.tenantID=0 -pub const UZR_34_RTT: [(u64, u64, f64); 756] = [ +pub static UZR_34_RTT: [(u64, u64, f64); 756] = [ (1, 2, 0.15598), (1, 3, 0.37478), (1, 4, 0.13583), @@ -952,7 +952,7 @@ pub const UZR_34_RTT: [(u64, u64, f64); 756] = [ ]; /// Query: /// https://victoria.ch1-obs1.dfinity.network/select/0/vmui/#/?g0.expr=sum+by+%28ic_node%2Cpeer%29+%28%0A++rate%28quic_transport_quinn_path_lost_packets%7Bic_subnet%3D%22uzr34-akd3s-xrdag-3ql62-ocgoh-ld2ao-tamcv-54e7j-krwgb-2gm4z-oqe%22%7D%5B7d%5D%29+%2F%0A++rate%28quic_transport_quinn_path_sent_packets%7Bic_subnet%3D%22uzr34-akd3s-xrdag-3ql62-ocgoh-ld2ao-tamcv-54e7j-krwgb-2gm4z-oqe%22%7D%5B7d%5D%29%0A%29&g0.range_input=13d14h36m59s549ms&g0.end_input=2024-06-23T20%3A59%3A19&g0.tab=1&g0.relative_time=none&g0.tenantID=0 -pub const UZR_34_PACKET_LOSS: [(u64, u64, f64); 756] = [ +pub static UZR_34_PACKET_LOSS: [(u64, u64, f64); 756] = [ (1, 2, 0.0015710290841178466), (1, 3, 0.000604650481546998), (1, 4, 0.0014703598418393934), diff --git a/rs/tests/driver/src/k8s/tnet.rs b/rs/tests/driver/src/k8s/tnet.rs index 7c6b1c22a20..39695b93ace 100644 --- a/rs/tests/driver/src/k8s/tnet.rs +++ 
b/rs/tests/driver/src/k8s/tnet.rs @@ -3,10 +3,12 @@ use regex::Regex; use slog::Logger; use std::collections::BTreeMap; use std::net::{Ipv4Addr, Ipv6Addr}; +use std::path::Path; +use std::process::Command; use std::str::FromStr; use url::Url; -use anyhow::Result; +use anyhow::{bail, Result}; use backon::Retryable; use backon::{ConstantBuilder, ExponentialBuilder}; use k8s_openapi::api::core::v1::{ @@ -115,6 +117,42 @@ impl TNode { } } + pub async fn build_oci_config_image(&self, file_path: &Path, tag: &str) -> Result<()> { + // https://kubevirt.io/user-guide/storage/disks_and_volumes/#containerdisk + // build ctr disk that holds config fat disk for guestos & push it to local ctr registry + // uncompress zst disk (the case with boundary node image) + let command = format!( + "set -xe; \ + mkdir -p /var/sysimage/tnet; \ + if echo {0} | grep -q '.zst'; then \ + uncompressed_file=$(echo {0} | sed 's/.zst$//'); \ + rm -f $uncompressed_file; \ + unzstd -o $uncompressed_file {0}; \ + file_to_copy=$uncompressed_file; \ + else \ + file_to_copy={0}; \ + fi; \ + ctr=$(sudo buildah --root /var/sysimage/tnet from scratch); \ + sudo buildah --root /var/sysimage/tnet copy --chown=107:107 $ctr $file_to_copy /disk/; \ + sudo buildah --root /var/sysimage/tnet commit $ctr harbor-core.harbor.svc.cluster.local/tnet/config:{1}; \ + sudo buildah --root /var/sysimage/tnet push --tls-verify=false --creds 'robot$tnet+tnet:TestingPOC1' harbor-core.harbor.svc.cluster.local/tnet/config:{1}", + file_path.display(), tag + ); + let output = Command::new("bash") + .arg("-c") + .arg(command) + .output() + .expect("Failed to execute command"); + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + bail!( + "Error building and pushing config container config image: {}", + stderr + ); + } + Ok(()) + } + pub async fn deploy_config_image( &self, image_name: &str, diff --git a/rs/tests/driver/src/k8s/virtualmachine.rs b/rs/tests/driver/src/k8s/virtualmachine.rs index 
78e4d31cfb7..01754f15d9d 100644 --- a/rs/tests/driver/src/k8s/virtualmachine.rs +++ b/rs/tests/driver/src/k8s/virtualmachine.rs @@ -31,6 +31,7 @@ spec: domain: cpu: cores: {cpus} + model: host-passthrough firmware: bootloader: efi: @@ -47,23 +48,6 @@ spec: - name: default binding: name: passt - ports: - - port: 22 - - port: 8100 - - port: 8101 - - port: 8102 - - port: 8103 - - port: 8104 - - port: 8105 - - port: 8106 - - port: 8107 - - port: 8108 - - port: 8109 - - port: 8110 - - port: 8111 - - port: 8332 - - port: 18444 - - port: 20443 resources: overcommitGuestOverhead: true requests: @@ -133,6 +117,7 @@ spec: domain: cpu: cores: {cpus} + model: host-passthrough firmware: bootloader: efi: @@ -150,23 +135,6 @@ spec: - name: default binding: name: passt - ports: - - port: 22 - - port: 80 - - port: 443 - - port: 2497 - - port: 4100 - protocol: UDP - - port: 4444 - - port: 7070 - - port: 8080 - - port: 8332 - - port: 9090 - - port: 9091 - - port: 9100 - - port: 18444 - - port: 19100 - - port: 19531 resources: overcommitGuestOverhead: true requests: @@ -180,8 +148,8 @@ spec: - dataVolume: name: "{name}-guestos" name: disk0 - - dataVolume: - name: "{name}-config" + - containerDisk: + image: "harbor.ln1-idx1.dfinity.network/tnet/config:{name}" name: disk1 "#; diff --git a/rs/tests/driver/src/util/delegations.rs b/rs/tests/driver/src/util/delegations.rs index a8a779a5552..e6e0e003959 100644 --- a/rs/tests/driver/src/util/delegations.rs +++ b/rs/tests/driver/src/util/delegations.rs @@ -124,7 +124,7 @@ pub struct AgentWithDelegation<'a> { pub polling_timeout: Duration, } -impl<'a> AgentWithDelegation<'a> { +impl AgentWithDelegation<'_> { async fn send_http_request( &self, method: &str, diff --git a/rs/tests/financial_integrations/icrc1_agent_test.rs b/rs/tests/financial_integrations/icrc1_agent_test.rs index 8eda3c764e4..7dc06bb5aaf 100644 --- a/rs/tests/financial_integrations/icrc1_agent_test.rs +++ b/rs/tests/financial_integrations/icrc1_agent_test.rs @@ -400,7 +400,7 @@ 
fn mleaf>(blob: B) -> MixedHashTree { MixedHashTree::Leaf(blob.as_ref().to_vec()) } -pub async fn install_icrc1_ledger<'a>(canister: &mut Canister<'a>, args: &LedgerArgument) { +pub async fn install_icrc1_ledger(canister: &mut Canister<'_>, args: &LedgerArgument) { install_rust_canister_from_path( canister, get_dependency_path(env::var("LEDGER_WASM_PATH").expect("LEDGER_WASM_PATH not set")), diff --git a/rs/tests/nns/sns/lib/src/sns_aggregator.rs b/rs/tests/nns/sns/lib/src/sns_aggregator.rs index 8e1e1bd5397..bafb8e76c3b 100644 --- a/rs/tests/nns/sns/lib/src/sns_aggregator.rs +++ b/rs/tests/nns/sns/lib/src/sns_aggregator.rs @@ -143,9 +143,9 @@ impl AggregatorClient { format!("/{AGGREGATOR_CANISTER_VERSION}{AGGREGATOR_CANISTER_PATH}") } - async fn http_get_request<'agent>( + async fn http_get_request( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, relative_url: String, ) -> Result { let (response,) = canister @@ -171,9 +171,9 @@ impl AggregatorClient { } } - pub async fn http_get_favicon<'agent>( + pub async fn http_get_favicon( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, ) -> Result> { let url = "/favicon.ico".to_string(); Self::http_get_request(log, canister, url) @@ -181,9 +181,9 @@ impl AggregatorClient { .map(|res| res.body) } - pub async fn http_get_asset<'agent>( + pub async fn http_get_asset( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, ) -> Result> { let url = Self::aggregator_http_endpoint(); Self::http_get_request(log, canister, url) @@ -191,9 +191,9 @@ impl AggregatorClient { .map(|res| res.body) } - pub async fn extract_first_sns_sale_config<'agent>( + pub async fn extract_first_sns_sale_config( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, ) -> Result { let asset_bytes = Self::http_get_asset(log, canister).await.unwrap(); let asset: Value = 
serde_json::from_slice(asset_bytes.as_slice())?; @@ -219,9 +219,9 @@ impl AggregatorClient { } } - async fn sub_asset<'agent, P>( + async fn sub_asset

( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, extract_sub_asset: &P, timeout: Duration, ) -> RequestOutcome @@ -263,9 +263,9 @@ impl AggregatorClient { ) } - pub async fn first_sns_asset<'agent>( + pub async fn first_sns_asset( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, timeout: Duration, ) -> RequestOutcome { let extract_sub_asset = move |asset| { @@ -275,9 +275,9 @@ impl AggregatorClient { Self::sub_asset(log, canister, &extract_sub_asset, timeout).await } - pub async fn first_swap_params<'agent>( + pub async fn first_swap_params( log: &Logger, - canister: &HttpRequestCanister<'agent>, + canister: &HttpRequestCanister<'_>, timeout: Duration, ) -> RequestOutcome { let extract_sub_asset = move |asset| { diff --git a/rs/tests/system_tests.bzl b/rs/tests/system_tests.bzl index e5a9806b866..d37e6fe7c0c 100644 --- a/rs/tests/system_tests.bzl +++ b/rs/tests/system_tests.bzl @@ -276,6 +276,12 @@ def system_test( if uses_boundary_guestos: icos_images["ENV_DEPS__BOUNDARY_GUESTOS_DISK_IMG_TAR_ZST_CAS_URL"] = "//ic-os/boundary-guestos/envs/dev:disk-img.tar.zst" + # set "local" tag for k8s system tests due to rootful container image builds + is_k8s = select({ + "//rs/tests:k8s": True, + "//conditions:default": False, + }) + run_system_test( name = name, src = test_driver_target, @@ -284,7 +290,7 @@ def system_test( env = env, icos_images = icos_images, env_inherit = env_inherit, - tags = tags + ["requires-network", "system_test"] + + tags = tags + ["requires-network", "system_test"] + (["local"] if is_k8s else []) + (["manual"] if "experimental_system_test_colocation" in tags else []), target_compatible_with = ["@platforms//os:linux"], timeout = test_timeout, diff --git a/rs/tla_instrumentation/local_key/src/lib.rs b/rs/tla_instrumentation/local_key/src/lib.rs index 803bb734775..c63de2a48c7 100644 --- a/rs/tla_instrumentation/local_key/src/lib.rs +++ 
b/rs/tla_instrumentation/local_key/src/lib.rs @@ -185,7 +185,7 @@ impl LocalKey { slot: &'a mut Option, } - impl<'a, T: 'static> Drop for Guard<'a, T> { + impl Drop for Guard<'_, T> { fn drop(&mut self) { // This should not panic. // @@ -414,7 +414,7 @@ where struct TransparentOption<'a, T> { value: &'a Option, } - impl<'a, T: fmt::Debug> fmt::Debug for TransparentOption<'a, T> { + impl fmt::Debug for TransparentOption<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.value.as_ref() { Some(value) => value.fmt(f), diff --git a/rs/tree_deserializer/src/types.rs b/rs/tree_deserializer/src/types.rs index bcd8b5a1c69..80dbd207a10 100644 --- a/rs/tree_deserializer/src/types.rs +++ b/rs/tree_deserializer/src/types.rs @@ -52,7 +52,7 @@ impl<'de> serde::Deserialize<'de> for Leb128EncodedU64 { { struct LebU64Visitor; - impl<'de> serde::de::Visitor<'de> for LebU64Visitor { + impl serde::de::Visitor<'_> for LebU64Visitor { type Value = Leb128EncodedU64; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/rs/types/management_canister_types/BUILD.bazel b/rs/types/management_canister_types/BUILD.bazel index a415f88fecc..39e3c90bd16 100644 --- a/rs/types/management_canister_types/BUILD.bazel +++ b/rs/types/management_canister_types/BUILD.bazel @@ -1,7 +1,63 @@ load("@rules_rust//rust:defs.bzl", "rust_library", "rust_test") load("//bazel:fuzz_testing.bzl", "DEFAULT_RUSTC_FLAGS_FOR_FUZZING") -package(default_visibility = ["//visibility:public"]) +# This library is private to the execution environment. +# These crates are considered direct users of the management +# types and may always depend on this crate directly. 
+permanent_whitelist = [ + "//rs/execution_environment:__subpackages__", + "//rs/cycles_account_manager:__subpackages__", + "//rs/messaging:__subpackages__", + "//rs/management_canister_types/fuzz:__subpackages__", +] + +# These crates depend on this library directly for historical reasons +# and must move to the published version of this library on crates.io. +temporary_whitelist = [ + "//rs/artifact_pool:__subpackages__", + "//rs/bitcoin:__subpackages__", + "//rs/canister_client:__subpackages__", + "//rs/canister_sandbox:__subpackages__", # permanent? + "//rs/canonical_state:__subpackages__", # permanent? + "//rs/consensus:__subpackages__", + "//rs/crypto:__subpackages__", + "//rs/determinism_test:__subpackages__", # permanent? + "//rs/drun:__subpackages__", + "//rs/ethereum:__subpackages__", + "//rs/https_outcalls:__subpackages__", # permanent? + "//rs/ingress_manager:__subpackages__", # permanent? + "//rs/interfaces:__subpackages__", + "//rs/ledger_suite:__subpackages__", + "//rs/nervous_system:__subpackages__", + "//rs/nns/cmc:__subpackages__", + "//rs/nns/governance:__subpackages__", + "//rs/nns/handlers:__subpackages__", + "//rs/nns/integration_tests:__subpackages__", + "//rs/nns/sns-wasm:__subpackages__", + "//rs/nns/test_utils:__subpackages__", + "//rs/orchestrator:__subpackages__", + "//rs/pocket_ic_server:__subpackages__", + "//rs/prep:__subpackages__", + "//rs/recovery:__subpackages__", + "//rs/registry:__subpackages__", + "//rs/replica:__subpackages__", + "//rs/replica_tests:__subpackages__", + "//rs/replicated_state:__subpackages__", # permanent? + "//rs/rust_canisters:__subpackages__", + "//rs/artifact_pool:__subpackages__", + "//rs/sns:__subpackages__", + "//rs/starter:__subpackages__", + "//rs/state_layout:__subpackages__", # permanent? + "//rs/state_machine_tests:__subpackages__", # permanent? + "//rs/state_manager:__subpackages__", # permanent? + "//rs/system_api:__subpackages__", # permanent? 
+ "//rs/test_utilities:__subpackages__", # some subpackages permanent? + "//rs/tests:__subpackages__", # some subpackages permanent? + "//rs/types:__subpackages__", + "//rs/workload_generator:__subpackages__", +] + +package(default_visibility = permanent_whitelist + temporary_whitelist) rust_library( name = "management_canister_types", diff --git a/rs/types/management_canister_types/src/lib.rs b/rs/types/management_canister_types/src/lib.rs index 0997a188ce3..7dcadcb9333 100644 --- a/rs/types/management_canister_types/src/lib.rs +++ b/rs/types/management_canister_types/src/lib.rs @@ -1220,9 +1220,11 @@ pub enum CanisterInstallMode { pub enum WasmMemoryPersistence { /// Retain the main memory across upgrades. /// Used for enhanced orthogonal persistence, as implemented in Motoko + #[serde(rename = "keep")] Keep, /// Reinitialize the main memory on upgrade. /// Default behavior without enhanced orthogonal persistence. + #[serde(rename = "replace")] Replace, } diff --git a/rs/types/types/src/consensus/idkg.rs b/rs/types/types/src/consensus/idkg.rs index 27aefe53f93..e5908ea7968 100644 --- a/rs/types/types/src/consensus/idkg.rs +++ b/rs/types/types/src/consensus/idkg.rs @@ -251,7 +251,7 @@ impl IDkgPayload { pub fn iter_pre_signature_ids<'a>( &'a self, key_id: &'a IDkgMasterPublicKeyId, - ) -> impl Iterator + '_ { + ) -> impl Iterator + 'a { let available_pre_signature_ids = self .available_pre_signatures .iter() diff --git a/rs/types/types/src/crypto/hash/domain_separator.rs b/rs/types/types/src/crypto/hash/domain_separator.rs index 228e3c29981..2dfce357301 100644 --- a/rs/types/types/src/crypto/hash/domain_separator.rs +++ b/rs/types/types/src/crypto/hash/domain_separator.rs @@ -162,6 +162,6 @@ fn ic_request_domain_variable_is_sound_and_consistent_with_the_enum_variant() { ); assert_eq!( DOMAIN_IC_REQUEST[0] as usize, - DomainSeparator::IcRequest.as_str().as_bytes().len() + DomainSeparator::IcRequest.as_str().len() ); } diff --git a/rs/types/types/src/ingress.rs 
b/rs/types/types/src/ingress.rs index 3985fcd80ab..02b88bd81cf 100644 --- a/rs/types/types/src/ingress.rs +++ b/rs/types/types/src/ingress.rs @@ -108,7 +108,7 @@ impl IngressStatus { match self { IngressStatus::Known { state, .. } => match state { IngressState::Completed(result) => result.memory_bytes(), - IngressState::Failed(error) => error.description().as_bytes().len(), + IngressState::Failed(error) => error.description().len(), _ => 0, }, IngressStatus::Unknown => 0, @@ -180,7 +180,7 @@ impl MemoryDiskBytes for WasmResult { fn memory_bytes(&self) -> usize { match self { WasmResult::Reply(bytes) => bytes.len(), - WasmResult::Reject(string) => string.as_bytes().len(), + WasmResult::Reject(string) => string.len(), } } diff --git a/rs/types/types/src/lib.rs b/rs/types/types/src/lib.rs index aa2f743e5ea..bfdf41a0f30 100644 --- a/rs/types/types/src/lib.rs +++ b/rs/types/types/src/lib.rs @@ -489,9 +489,14 @@ pub const MAX_STABLE_MEMORY_IN_BYTES: u64 = 500 * GIB; /// it is public and `u64` (`NumBytes` cannot be used in const expressions). pub const MAX_WASM_MEMORY_IN_BYTES: u64 = 4 * GIB; +/// The upper limit on the Wasm64 canister memory size. +/// This constant is used by other crates to define other constants, that's why +/// it is public and `u64` (`NumBytes` cannot be used in const expressions). 
+pub const MAX_WASM64_MEMORY_IN_BYTES: u64 = 4 * GIB; + const MIN_MEMORY_ALLOCATION: NumBytes = NumBytes::new(0); pub const MAX_MEMORY_ALLOCATION: NumBytes = - NumBytes::new(MAX_STABLE_MEMORY_IN_BYTES + MAX_WASM_MEMORY_IN_BYTES); + NumBytes::new(MAX_STABLE_MEMORY_IN_BYTES + MAX_WASM64_MEMORY_IN_BYTES); impl InvalidMemoryAllocationError { pub fn new(given: candid::Nat) -> Self { diff --git a/rs/types/types/src/messages/ingress_messages.rs b/rs/types/types/src/messages/ingress_messages.rs index 212bb2d285b..a4c3184e5f8 100644 --- a/rs/types/types/src/messages/ingress_messages.rs +++ b/rs/types/types/src/messages/ingress_messages.rs @@ -235,7 +235,7 @@ impl<'de> Deserialize<'de> for SignedIngress { fn deserialize>(deserializer: D) -> Result { struct BytesVisitor; - impl<'de> serde::de::Visitor<'de> for BytesVisitor { + impl serde::de::Visitor<'_> for BytesVisitor { type Value = Vec; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/rs/types/types/src/messages/message_id.rs b/rs/types/types/src/messages/message_id.rs index 2a7829614df..37eb6f4cea1 100644 --- a/rs/types/types/src/messages/message_id.rs +++ b/rs/types/types/src/messages/message_id.rs @@ -32,7 +32,7 @@ impl<'a> Deserialize<'a> for MessageId { fn deserialize>(deserializer: D) -> Result { struct MessageIdVisitor; - impl<'de> serde::de::Visitor<'de> for MessageIdVisitor { + impl serde::de::Visitor<'_> for MessageIdVisitor { type Value = MessageId; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/rs/utils/src/thread.rs b/rs/utils/src/thread.rs index 54c6a5459f5..41e9485c842 100644 --- a/rs/utils/src/thread.rs +++ b/rs/utils/src/thread.rs @@ -11,7 +11,7 @@ where { let mut items: Vec<(S, Option)> = items.map(|i| (i, None)).collect(); let threads = thread_pool.thread_count() as usize; - let items_per_thread = ((items.len() + threads - 1) / threads).max(1); + let items_per_thread = items.len().div_ceil(threads).max(1); 
thread_pool.scoped(|scope| { for items in items.chunks_mut(items_per_thread) { scope.execute(move || { diff --git a/rs/utils/validate_eq/src/lib.rs b/rs/utils/validate_eq/src/lib.rs index 0795f8c2368..f95750c2ec4 100644 --- a/rs/utils/validate_eq/src/lib.rs +++ b/rs/utils/validate_eq/src/lib.rs @@ -6,7 +6,6 @@ use std::fmt::Debug; /// /// Gigantic fields such as PageMaps that are unfeasible to compare in production are meant to /// be exempted from comparison. - pub trait ValidateEq { fn validate_eq(&self, rhs: &Self) -> Result<(), String>; } diff --git a/rs/utils/validate_eq_derive/src/lib.rs b/rs/utils/validate_eq_derive/src/lib.rs index 1b9f87e7513..cc07d8788c7 100644 --- a/rs/utils/validate_eq_derive/src/lib.rs +++ b/rs/utils/validate_eq_derive/src/lib.rs @@ -13,7 +13,6 @@ use syn::Data::Struct; /// - Ignore ignores the field. /// - None (default) compares fields using PartialEq and reports their name in case of /// divergence. - enum ValidateEqFieldAttr { /// Compare using .eq() and return field name if it diverges. 
CompareWithPartialEq, diff --git a/rs/workload_generator/src/content_length.rs b/rs/workload_generator/src/content_length.rs index dcecd4f3fac..80081ed8003 100644 --- a/rs/workload_generator/src/content_length.rs +++ b/rs/workload_generator/src/content_length.rs @@ -51,7 +51,7 @@ impl Add for ContentLength { } } -impl<'a> Add for &'a ContentLength { +impl Add for &ContentLength { type Output = ContentLength; fn add(self, rhs: &ContentLength) -> ContentLength { @@ -59,7 +59,7 @@ impl<'a> Add for &'a ContentLength { } } -impl<'a> Add<&'a ContentLength> for ContentLength { +impl Add<&ContentLength> for ContentLength { type Output = ContentLength; fn add(self, rhs: &ContentLength) -> ContentLength { @@ -67,7 +67,7 @@ impl<'a> Add<&'a ContentLength> for ContentLength { } } -impl<'a> Add for &'a ContentLength { +impl Add for &ContentLength { type Output = ContentLength; fn add(self, rhs: ContentLength) -> ContentLength { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 34a2c4859a4..f1f8df773bb 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.82.0" +channel = "1.84.0" targets = ["wasm32-unknown-unknown"] profile = "default" components = ["rls"] diff --git a/toolchains/sysimage/build_container_base_image.py b/toolchains/sysimage/build_container_base_image.py index 43432034272..2102a7eb31c 100644 --- a/toolchains/sysimage/build_container_base_image.py +++ b/toolchains/sysimage/build_container_base_image.py @@ -79,9 +79,6 @@ def save_image(container_cmd: str, image_tag: str, output_file: str): log.info("Image saved successfully") -# TODO def upload_to_docker_io() - - def main(): parser = ArgumentParser() parser.add_arguments(Args, dest="fancy")