diff --git a/.editorconfig b/.editorconfig index 163cb8b5518..9edf92eff03 100644 --- a/.editorconfig +++ b/.editorconfig @@ -13,7 +13,7 @@ indent_size = 2 indent_style = space indent_size = 2 -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +[{go.mod,go.sum,*.go,.gitmodules}] indent_style = tab indent_size = 4 diff --git a/.github/workflows/autoupdate.yml b/.github/workflows/autoupdate.yml deleted file mode 100644 index de45e651c8e..00000000000 --- a/.github/workflows/autoupdate.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: autoupdate -on: - # This will trigger on all pushes to all branches. - push: {} - # Alternatively, you can only trigger if commits are pushed to certain branches, e.g.: - # push: - # branches: - # - master - # - unstable -jobs: - autoupdate: - name: autoupdate - runs-on: ubuntu-22.04 - steps: - - uses: docker://chinthakagodawita/autoupdate-action:v1 - env: - GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' - PR_LABELS: "auto-update" - MERGE_MSG: "Branch was auto-updated." - RETRY_COUNT: "5" - RETRY_SLEEP: "300" - MERGE_CONFLICT_ACTION: "fail" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000000..5465d1ac1b0 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,56 @@ +name: Build + +on: + workflow_call: + +jobs: + prepare: + runs-on: ubuntu-latest-16-cores + if: github.repository_owner == 'armadaproject' + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set up Go with Cache + uses: actions/setup-go@v4 + with: + go-version: '1.20' + + - name: Cache GOBIN + uses: actions/cache@v3 + with: + path: /home/runner/go/bin + key: ${{ runner.os }}-gobin-${{ hashFiles('**/tools.yaml') }} + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - uses: goreleaser/goreleaser-action@v3 + with: + distribution: goreleaser + version: v1.20.0 + args: release --snapshot --skip-sbom --skip-sign --clean + env: + DOCKER_REPO: "gresearch" + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + DOCKER_BUILDX_BUILDER: "${{ steps.buildx.outputs.name }}" + DOCKER_BUILDX_CACHE_FROM: "type=gha" + DOCKER_BUILDX_CACHE_TO: "type=gha,mode=max" + + - name: Output full commit sha + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + run: echo "sha_full=$(git rev-parse HEAD)" >> $GITHUB_ENV + + - name: Save Docker image tarballs + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + run: | + scripts/docker-save.sh -t ${{ env.sha_full }} -o /tmp/imgs + + - name: Save Docker image tarballs as artifacts + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + uses: actions/upload-artifact@v3 + with: + name: armada-image-tarballs + path: /tmp/imgs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 125b18d3096..0b5ac50bd82 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,16 +27,22 @@ permissions: jobs: lint: + if: github.event_name == 'schedule' || github.event_name == 'push' || github.event.pull_request.head.repo.id != github.event.pull_request.base.repo.id uses: ./.github/workflows/lint.yml test: + if: github.event_name == 'schedule' || github.event_name == 'push' || github.event.pull_request.head.repo.id != github.event.pull_request.base.repo.id uses: ./.github/workflows/test.yml + build: + if: github.event_name == 'schedule' || github.event_name == 'push' || github.event.pull_request.head.repo.id != github.event.pull_request.base.repo.id + uses: ./.github/workflows/build.yml # Virtual job that can be configured as 
a required check before a PR can be merged. all-required-checks-done: name: All required checks done - if: ${{ always() }} + if: github.event_name == 'schedule' || github.event_name == 'push' || github.event.pull_request.head.repo.id != github.event.pull_request.base.repo.id needs: - lint - test + - build runs-on: ubuntu-22.04 steps: - uses: actions/github-script@v6 @@ -48,3 +54,4 @@ jobs: } else { core.setFailed('Some required checks failed'); } + diff --git a/.github/workflows/not-airflow-operator.yml b/.github/workflows/not-airflow-operator.yml deleted file mode 100644 index 298cb79c0fd..00000000000 --- a/.github/workflows/not-airflow-operator.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Python Airflow Operator - -on: - push: - branches-ignore: - - master - paths-ignore: - - 'client/python/**' - - 'build/python-client/**' - - 'pkg/api/*.proto' - - '.github/workflows/python-client.yml' - - 'docs/python_armada_client.md' - - 'scripts/build-python-client.sh' - - 'third_party/airflow/**' - - 'build/airflow-operator/**' - - 'pkg/api/jobservice/*.proto' - - '.github/workflows/airflow-operator.yml' - - 'docs/python_airflow_operator.md' - - 'scripts/build-airflow-operator.sh' - - '.github/workflows/python-tests/*' - - pull_request: - branches-ignore: - - gh-pages - paths-ignore: - - 'client/python/**' - - 'build/python-client/**' - - 'pkg/api/*.proto' - - '.github/workflows/python-client.yml' - - 'docs/python_armada_client.md' - - 'scripts/build-python-client.sh' - - 'third_party/airflow/**' - - 'build/airflow-operator/**' - - 'pkg/api/jobservice/*.proto' - - '.github/workflows/airflow-operator.yml' - - 'docs/python_airflow_operator.md' - - 'scripts/build-airflow-operator.sh' - - '.github/workflows/python-tests/*' - -jobs: - airflow-tox: - strategy: - matrix: - go: [ '1.20' ] - runs-on: ubuntu-latest - steps: - - run: 'echo "No airflow operator code modified, not running airflow operator jobs"' diff --git a/.github/workflows/not-python-client.yml b/.github/workflows/not-python-client.yml deleted file mode 100644 index 61443321df8..00000000000 --- a/.github/workflows/not-python-client.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Python Client - -on: - push: - branches-ignore: - - master - paths-ignore: - - 'client/python/**' - - 'build/python-client/**' - - 'pkg/api/*.proto' - - '.github/workflows/python-client.yml' - - 'docs/python_armada_client.md' - - 'scripts/build-python-client.sh' - - 'makefile' - - '.github/workflows/python-tests/*' - - pull_request: - branches-ignore: - - gh-pages - paths-ignore: - - 'client/python/**' - - 'build/python-client/**' - - 'pkg/api/*.proto' - - '.github/workflows/python-client.yml' - - 'docs/python_armada_client.md' - - 'scripts/build-python-client.sh' - - 'makefile' - - '.github/workflows/python-tests/*' - -jobs: - python-client-tox: - strategy: - matrix: - go: [ '1.20' ] - runs-on: ubuntu-latest - steps: - - run: 'echo "No python modified, not running python jobs"' - python-client-integration-tests: - strategy: - matrix: - go: [ '1.20' ] - runs-on: ubuntu-latest - steps: - - run: 'echo "No python modified, not running python jobs"' diff --git a/.github/workflows/release-rc.yml b/.github/workflows/release-rc.yml index de70a33b22b..f220c7b2bf2 100644 --- a/.github/workflows/release-rc.yml +++ b/.github/workflows/release-rc.yml @@ -49,33 +49,16 @@ jobs: with: fetch-depth: 0 - - name: Setup Golang with Cache - uses: magnetikonline/action-golang-cache@v4 - with: - go-version: "1.20" - - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v2 - 
- name: "Docker login" uses: "docker/login-action@v2" with: username: "${{ secrets.DOCKERHUB_USER }}" password: "${{ secrets.DOCKERHUB_PASS }}" - - name: "Run GoReleaser" - uses: "goreleaser/goreleaser-action@v4" - with: - distribution: "goreleaser" - version: v1.19.2 - args: "-f ./.goreleaser.yml release --snapshot --skip-sbom --skip-sign --clean" + - name: Download Docker image tarballs artifact + run: gh run download ${{ github.event.workflow_run.id }} --name armada-image-tarballs env: - DOCKER_REPO: "gresearch" - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - DOCKER_BUILDX_BUILDER: "${{ steps.buildx.outputs.name }}" - DOCKER_BUILDX_CACHE_FROM: "type=gha" - DOCKER_BUILDX_CACHE_TO: "type=gha,mode=max" + GH_TOKEN: ${{ github.token }} - name: Run Docker push script - run: ./scripts/docker-push.sh -t '${{ github.event.workflow_run.head_sha }}' + run: ./scripts/docker-push.sh --tag '${{ github.event.workflow_run.head_sha }}' --images-dir . --use-tarballs "true" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4b0ea22a381..d51d66d4e10 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,4 @@ -name: Code Build and Tests +name: Tests on: workflow_call: @@ -10,7 +10,7 @@ permissions: jobs: ts-unit-tests: name: TypeScript Unit Tests - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest-4-cores steps: - name: Checkout @@ -48,7 +48,7 @@ jobs: go-unit-tests: name: Golang Unit Tests - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest-4-cores steps: - name: Checkout @@ -87,7 +87,7 @@ jobs: go-integration-tests: name: Golang Integration Tests - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest-4-cores env: ARMADA_EXECUTOR_INGRESS_URL: "http://localhost" @@ -131,6 +131,22 @@ jobs: path: junit.xml if-no-files-found: error + - name: Store Docker Logs + if: always() + run: | + mkdir -p ./logs + docker compose logs --no-color > ./logs/docker-compose.log + docker logs pulsar > ./logs/pulsar.log 2>&1 + + - name: Upload Docker Compose Logs + uses: actions/upload-artifact@v3 + if: always() + with: + name: docker-compose-logs + path: | + ./logs/ + if-no-files-found: error + - name: Publish JUnit Report uses: mikepenz/action-junit-report@v3 if: always() @@ -143,7 +159,7 @@ jobs: go-mod-up-to-date: name: Golang Mod Up To Date - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest-4-cores steps: - name: Checkout code @@ -182,7 +198,7 @@ jobs: proto-up-to-date: name: Proto Up To Date - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest-4-cores steps: - name: Checkout @@ -214,7 +230,7 @@ jobs: echo -e "### Git status" >> $GITHUB_STEP_SUMMARY if [[ "$changed" -gt 0 ]]; then - echo -e "Generated proto files are out of date. Please run 'make proto' and commit the changes." >> $GITHUB_STEP_SUMMARY + echo -e "Generated proto files are out of date. Please run 'mage proto' and commit the changes." 
>> $GITHUB_STEP_SUMMARY git status -s -uno >> $GITHUB_STEP_SUMMARY diff --git a/.goreleaser.yml b/.goreleaser.yml index d66bb74201b..39f15ffb28d 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -26,6 +26,7 @@ env: # https://github.com/moby/buildkit#export-cache - DOCKER_BUILDX_CACHE_FROM={{ if index .Env "DOCKER_BUILDX_CACHE_FROM" }}{{ .Env.DOCKER_BUILDX_CACHE_FROM }}{{ else }}type=inline{{ end }} - DOCKER_BUILDX_CACHE_TO={{ if index .Env "DOCKER_BUILDX_CACHE_TO" }}{{ .Env.DOCKER_BUILDX_CACHE_TO }}{{ else }}type=inline{{ end }} + - GOVERSION={{ if index .Env "GOVERSION" }}{{ .Env.GOVERSION }}{{ else }}go1.20{{ end }} builds: - env: [CGO_ENABLED=0] @@ -168,6 +169,11 @@ builds: binary: armadactl main: ./cmd/armadactl/main.go mod_timestamp: '{{ .CommitTimestamp }}' + ldflags: + - -X github.com/armadaproject/armada/internal/armadactl/build.ReleaseVersion={{.Version}} + - -X github.com/armadaproject/armada/internal/armadactl/build.GitCommit={{.FullCommit}} + - -X github.com/armadaproject/armada/internal/armadactl/build.BuildTime={{.Date}} + - -X github.com/armadaproject/armada/internal/armadactl/build.GoVersion={{.Env.GOVERSION}} goos: - windows - darwin diff --git a/.mergify.yml b/.mergify.yml index 1f49c428ff2..ff0e1a219e2 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -9,6 +9,6 @@ pull_request_rules: - "#approved-reviews-by>=2" - and: - "#approved-reviews-by>=1" - - "author~=^(JamesMurkin|severinson|d80tb7|carlocamurri|dejanzele|Sharpz7|ClifHouck|robertdavidsmith|theAntiYeti|richscott|suprjinx|zuqq)" + - "author~=^(JamesMurkin|severinson|d80tb7|carlocamurri|dejanzele|Sharpz7|ClifHouck|robertdavidsmith|theAntiYeti|richscott|suprjinx|zuqq|msumner91|mustafai)" title: Two are checks required. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index adec709fb56..78cfb8ca6ba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,6 +53,42 @@ Note the names of the branch must follow proper docker names: >A tag name must be valid ASCII and may contain lowercase and uppercase letters, digits, underscores, periods and dashes. A tag name may not start with a period or a dash and may contain a maximum of 128 characters. +#### Signing Off Commits + +To enhance the integrity of contributions to the Armada repository, we've adopted the use of the DCO (Developer Certificate of Origin) plug-in. This means that for every commit you contribute via Pull Requests, you'll need to sign off your commits to certify that you have the right to submit it under the open source license used by this project. + +**Every commit in your PRs must have a "Signed-Off" attribute.** + +When committing to the repository, ensure you use the `--signoff` option with `git commit`. This will append a sign-off message at the end of the commit log to indicate that the commit has your signature. + +You sign-off by adding the following to your commit messages: + +``` +Author: Your Name +Date: Thu Feb 2 11:41:15 2018 -0800 + + This is my commit message + + Signed-off-by: Your Name +``` + +Notice the `Author` and `Signed-off-by` lines match. If they don't, the PR will +be rejected by the automated DCO check. + +Git has a `-s` command line option to do this automatically: + + git commit -s -m 'This is my commit message' + +If you forgot to do this and have not yet pushed your changes to the remote +repository, you can amend your commit with the sign-off by running + + git commit --amend -s + +This command will modify the latest commit and add the required sign-off. 
+ +For more details, check out [DCO](https://github.com/apps/dco) + + ## Chat & Discussions Sometimes, it's good to hash things out in real time. diff --git a/client/DotNet/Armada.Client/ClientGenerated.cs b/client/DotNet/Armada.Client/ClientGenerated.cs index 132cbb2d685..e71be22e0f6 100644 --- a/client/DotNet/Armada.Client/ClientGenerated.cs +++ b/client/DotNet/Armada.Client/ClientGenerated.cs @@ -1222,6 +1222,10 @@ public partial class ApiJob [Newtonsoft.Json.JsonProperty("queueOwnershipUserGroups", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public System.Collections.Generic.ICollection QueueOwnershipUserGroups { get; set; } + /// Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + [Newtonsoft.Json.JsonProperty("queueTtlSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] + public string QueueTtlSeconds { get; set; } + [Newtonsoft.Json.JsonProperty("requiredNodeLabels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public System.Collections.Generic.IDictionary RequiredNodeLabels { get; set; } @@ -1834,6 +1838,10 @@ public partial class ApiJobSubmitRequestItem [Newtonsoft.Json.JsonProperty("priority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public double? Priority { get; set; } + /// Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + [Newtonsoft.Json.JsonProperty("queueTtlSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] + public string QueueTtlSeconds { get; set; } + [Newtonsoft.Json.JsonProperty("requiredNodeLabels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public System.Collections.Generic.IDictionary RequiredNodeLabels { get; set; } diff --git a/client/python/CONTRIBUTING.md b/client/python/CONTRIBUTING.md index ca3c0f1f90d..ff015d4284e 100644 --- a/client/python/CONTRIBUTING.md +++ b/client/python/CONTRIBUTING.md @@ -26,7 +26,7 @@ workflow for contributing. First time contributors can follow the guide below to Unlike most python projects, the Armada python client contains a large quantity of generated code. This code must be generated in order to compile and develop against the client. -From the root of the repository, run `make python`. This will generate python code needed to build +From the root of the repository, run `mage buildPython`. This will generate python code needed to build and use the client. This command needs to be re-run anytime an API change is committed (e.g. a change to a `*.proto` file). diff --git a/client/python/README.md b/client/python/README.md index 92ed96b26b8..ea4f1409fb2 100644 --- a/client/python/README.md +++ b/client/python/README.md @@ -26,5 +26,5 @@ Before beginning, ensure you have: - Network access to fetch docker images and go dependencies. To generate all needed code, and install the python client: -1) From the root of the repository, run `make python` +1) From the root of the repository, run `mage buildPython` 2) Install the client using `pip install client/python`. It's strongly recommended you do this inside a virtualenv. 
diff --git a/client/python/docs/README.md b/client/python/docs/README.md index 056327c87ae..d8a7abfe1a0 100644 --- a/client/python/docs/README.md +++ b/client/python/docs/README.md @@ -9,13 +9,13 @@ Usage Easy way: - Ensure all protobufs files needed for the client are generated by running - `make python` from the repository root. + `mage buildPython` from the repository root. - `tox -e docs` will create a valid virtual environment and use it to generate documentation. The generated files will be placed under `build/jekyll/*.md`. Manual way: - Ensure all protobufs files needed for the client are generated by running - `make python` from the repository root. + `mage buildPython` from the repository root. - Create a virtual environment containing all the deps listed in `tox.ini` under `[testenv:docs]`. - Run `poetry install -v` from inside `client/python` to install the client diff --git a/cmd/armada/main.go b/cmd/armada/main.go index 688fd78c029..43577218507 100644 --- a/cmd/armada/main.go +++ b/cmd/armada/main.go @@ -1,7 +1,6 @@ package main import ( - "context" "fmt" "net/http" _ "net/http/pprof" @@ -13,11 +12,11 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/spf13/viper" - "golang.org/x/sync/errgroup" "github.com/armadaproject/armada/internal/armada" "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" gateway "github.com/armadaproject/armada/internal/common/grpc" "github.com/armadaproject/armada/internal/common/health" "github.com/armadaproject/armada/internal/common/logging" @@ -67,7 +66,7 @@ func main() { } // Run services within an errgroup to propagate errors between services. - g, ctx := errgroup.WithContext(context.Background()) + g, ctx := armadacontext.ErrGroup(armadacontext.Background()) // Cancel the errgroup context on SIGINT and SIGTERM, // which shuts everything down gracefully. @@ -97,7 +96,11 @@ func main() { // register gRPC API handlers in mux // TODO: Run in errgroup shutdownGateway := gateway.CreateGatewayHandler( - config.GrpcPort, mux, "/", + config.GrpcPort, + mux, + config.GrpcGatewayPath, + true, + config.Grpc.Tls.Enabled, config.CorsAllowedOrigins, api.SwaggerJsonTemplate(), api.RegisterSubmitHandler, @@ -107,7 +110,12 @@ func main() { // start HTTP server // TODO: Run in errgroup - shutdownHttpServer := common.ServeHttp(config.HttpPort, mux) + var shutdownHttpServer func() + if config.Grpc.Tls.Enabled { + shutdownHttpServer = common.ServeHttps(config.HttpPort, mux, config.Grpc.Tls.CertPath, config.Grpc.Tls.KeyPath) + } else { + shutdownHttpServer = common.ServeHttp(config.HttpPort, mux) + } defer shutdownHttpServer() // Start Armada server diff --git a/cmd/binoculars/main.go b/cmd/binoculars/main.go index 652e279a9d9..fd318ec78a0 100644 --- a/cmd/binoculars/main.go +++ b/cmd/binoculars/main.go @@ -80,7 +80,7 @@ func serveHttp( spec string, handlers ...func(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error, ) (shutdown func()) { - shutdownGateway := gateway.CreateGatewayHandler(grpcPort, mux, "/", corsAllowedOrigins, spec, handlers...) + shutdownGateway := gateway.CreateGatewayHandler(grpcPort, mux, "/", false, false, corsAllowedOrigins, spec, handlers...) 
cancel := common.ServeHttp(port, mux) return func() { diff --git a/cmd/eventsprinter/logic/logic.go b/cmd/eventsprinter/logic/logic.go index 34de61b4d61..b7a9dab8ea7 100644 --- a/cmd/eventsprinter/logic/logic.go +++ b/cmd/eventsprinter/logic/logic.go @@ -1,7 +1,6 @@ package logic import ( - "context" "fmt" "time" @@ -9,6 +8,7 @@ import ( "github.com/gogo/protobuf/proto" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -18,7 +18,7 @@ func PrintEvents(url, topic, subscription string, verbose bool) error { fmt.Println("URL:", url) fmt.Println("Topic:", topic) fmt.Println("Subscription", subscription) - return withSetup(url, topic, subscription, func(ctx context.Context, producer pulsar.Producer, consumer pulsar.Consumer) error { + return withSetup(url, topic, subscription, func(ctx *armadacontext.Context, producer pulsar.Producer, consumer pulsar.Consumer) error { // Number of active jobs. numJobs := 0 @@ -199,7 +199,7 @@ func stripPodSpec(spec *v1.PodSpec) *v1.PodSpec { } // Run action with an Armada submit client and a Pulsar producer and consumer. -func withSetup(url, topic, subscription string, action func(ctx context.Context, producer pulsar.Producer, consumer pulsar.Consumer) error) error { +func withSetup(url, topic, subscription string, action func(ctx *armadacontext.Context, producer pulsar.Producer, consumer pulsar.Consumer) error) error { pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{ URL: url, }) @@ -225,5 +225,5 @@ func withSetup(url, topic, subscription string, action func(ctx context.Context, } defer consumer.Close() - return action(context.Background(), producer, consumer) + return action(armadacontext.Background(), producer, consumer) } diff --git a/cmd/executor/main.go b/cmd/executor/main.go index ed8444fbdb4..ac6374a186c 100644 --- a/cmd/executor/main.go +++ b/cmd/executor/main.go @@ -1,7 +1,6 @@ package main import ( - "context" "net/http" "os" "os/signal" @@ -13,6 +12,7 @@ import ( "github.com/spf13/viper" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/health" "github.com/armadaproject/armada/internal/executor" "github.com/armadaproject/armada/internal/executor/configuration" @@ -55,7 +55,7 @@ func main() { ) defer shutdownMetricServer() - shutdown, wg := executor.StartUp(context.Background(), logrus.NewEntry(logrus.New()), config) + shutdown, wg := executor.StartUp(armadacontext.Background(), logrus.NewEntry(logrus.New()), config) go func() { <-shutdownChannel shutdown() diff --git a/cmd/lookout/main.go b/cmd/lookout/main.go index 485b200051b..0603f156f17 100644 --- a/cmd/lookout/main.go +++ b/cmd/lookout/main.go @@ -98,6 +98,8 @@ func main() { config.GrpcPort, mux, "/api/", + false, + config.Grpc.Tls.Enabled, []string{}, lookoutApi.SwaggerJsonTemplate(), lookoutApi.RegisterLookoutHandler) @@ -110,7 +112,12 @@ func main() { // server static UI files mux.Handle("/", http.FileServer(serve.CreateDirWithIndexFallback("./internal/lookout/ui/build"))) - shutdownServer := common.ServeHttp(config.HttpPort, mux) + var shutdownServer func() = nil + if config.Grpc.Tls.Enabled { + shutdownServer = common.ServeHttps(config.HttpPort, mux, config.Grpc.Tls.CertPath, config.Grpc.Tls.KeyPath) + } else { + shutdownServer = common.ServeHttp(config.HttpPort, mux) + } shutdown, wg := lookout.StartUp(config, 
healthChecks) go func() { diff --git a/cmd/lookoutv2/main.go b/cmd/lookoutv2/main.go index 3ba4a865e4d..a2d5f6be90e 100644 --- a/cmd/lookoutv2/main.go +++ b/cmd/lookoutv2/main.go @@ -1,7 +1,6 @@ package main import ( - "context" "os" "os/signal" "syscall" @@ -12,6 +11,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/lookoutv2" "github.com/armadaproject/armada/internal/lookoutv2/configuration" @@ -36,9 +36,9 @@ func init() { pflag.Parse() } -func makeContext() (context.Context, func()) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) +func makeContext() (*armadacontext.Context, func()) { + ctx := armadacontext.Background() + ctx, cancel := armadacontext.WithCancel(ctx) c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) @@ -57,7 +57,7 @@ func makeContext() (context.Context, func()) { } } -func migrate(ctx context.Context, config configuration.LookoutV2Configuration) { +func migrate(ctx *armadacontext.Context, config configuration.LookoutV2Configuration) { db, err := database.OpenPgxPool(config.Postgres) if err != nil { panic(err) @@ -74,7 +74,7 @@ func migrate(ctx context.Context, config configuration.LookoutV2Configuration) { } } -func prune(ctx context.Context, config configuration.LookoutV2Configuration) { +func prune(ctx *armadacontext.Context, config configuration.LookoutV2Configuration) { db, err := database.OpenPgxConn(config.Postgres) if err != nil { panic(err) @@ -92,7 +92,7 @@ func prune(ctx context.Context, config configuration.LookoutV2Configuration) { log.Infof("expireAfter: %v, batchSize: %v, timeout: %v", config.PrunerConfig.ExpireAfter, config.PrunerConfig.BatchSize, config.PrunerConfig.Timeout) - ctxTimeout, cancel := context.WithTimeout(ctx, config.PrunerConfig.Timeout) + ctxTimeout, cancel := armadacontext.WithTimeout(ctx, config.PrunerConfig.Timeout) defer cancel() err = pruner.PruneDb(ctxTimeout, db, config.PrunerConfig.ExpireAfter, config.PrunerConfig.BatchSize, clock.RealClock{}) if err != nil { diff --git a/cmd/scheduler/cmd/migrate_database.go b/cmd/scheduler/cmd/migrate_database.go index 1564bffb9fd..22d6dc12dc3 100644 --- a/cmd/scheduler/cmd/migrate_database.go +++ b/cmd/scheduler/cmd/migrate_database.go @@ -1,13 +1,13 @@ package cmd import ( - "context" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" schedulerdb "github.com/armadaproject/armada/internal/scheduler/database" ) @@ -43,7 +43,7 @@ func migrateDatabase(cmd *cobra.Command, _ []string) error { return errors.WithMessagef(err, "Failed to connect to database") } - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), timeout) defer cancel() return schedulerdb.Migrate(ctx, db) } diff --git a/cmd/scheduler/cmd/prune_database.go b/cmd/scheduler/cmd/prune_database.go index 3b2250d1661..4ed7aee426e 100644 --- a/cmd/scheduler/cmd/prune_database.go +++ b/cmd/scheduler/cmd/prune_database.go @@ -1,13 +1,13 @@ package cmd import ( - "context" "time" "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" 
"github.com/armadaproject/armada/internal/common/database" schedulerdb "github.com/armadaproject/armada/internal/scheduler/database" ) @@ -57,7 +57,7 @@ func pruneDatabase(cmd *cobra.Command, _ []string) error { return errors.WithMessagef(err, "Failed to connect to database") } - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), timeout) defer cancel() return schedulerdb.PruneDb(ctx, db, batchSize, expireAfter, clock.RealClock{}) } diff --git a/cmd/testsuite/cmd/test.go b/cmd/testsuite/cmd/test.go index 72eef04d966..ccf8ecdf9fd 100644 --- a/cmd/testsuite/cmd/test.go +++ b/cmd/testsuite/cmd/test.go @@ -31,6 +31,8 @@ func testCmd(app *testsuite.App) *cobra.Command { cmd.Flags().String("tests", "", "Test file pattern, e.g., './testcases/*.yaml'.") cmd.Flags().String("junit", "", "Write a JUnit test report to this path.") cmd.Flags().String("benchmark", "", "Write a benchmark test report to this path.") + cmd.Flags().String("prometheusPushgatewayUrl", "", "Push metrics to Prometheus pushgateway at this url.") + cmd.Flags().String("prometheusPushgatewayJobName", "armada-testsuite", "Metrics are annotated with with job=prometheusPushGatewayJobName.") return cmd } @@ -64,6 +66,18 @@ func testCmdRunE(app *testsuite.App) func(cmd *cobra.Command, args []string) err return errors.New("benchmark report not currently supported") } + prometheusPushgatewayUrl, err := cmd.Flags().GetString("prometheusPushgatewayUrl") + if err != nil { + return errors.WithStack(err) + } + app.Params.PrometheusPushGatewayUrl = prometheusPushgatewayUrl + + prometheusPushgatewayJobName, err := cmd.Flags().GetString("prometheusPushgatewayJobName") + if err != nil { + return errors.WithStack(err) + } + app.Params.PrometheusPushGatewayJobName = prometheusPushgatewayJobName + // Create a context that is cancelled on SIGINT/SIGTERM. // Ensures test jobs are cancelled on ctrl-C. 
ctx, cancel := context.WithCancel(context.Background()) diff --git a/config/armada/config.yaml b/config/armada/config.yaml index cb9b8e56681..bf3a17039e6 100644 --- a/config/armada/config.yaml +++ b/config/armada/config.yaml @@ -5,6 +5,7 @@ corsAllowedOrigins: - http://localhost:3000 - http://localhost:8089 - http://localhost:10000 +grpcGatewayPath: "/" cancelJobsBatchSize: 1000 pulsarSchedulerEnabled: false probabilityOfUsingPulsarScheduler: 0 @@ -67,6 +68,10 @@ scheduling: maximumResourceFractionToSchedule: memory: 1.0 cpu: 1.0 + maximumSchedulingRate: 100.0 + maximumSchedulingBurst: 1000 + maximumPerQueueSchedulingRate: 50.0 + maximumPerQueueSchedulingBurst: 1000 maxJobSchedulingContextsPerExecutor: 10000 lease: expireAfter: 15m diff --git a/config/lookout/config.yaml b/config/lookout/config.yaml index ba73eb36977..507c3513072 100644 --- a/config/lookout/config.yaml +++ b/config/lookout/config.yaml @@ -2,6 +2,10 @@ grpcPort: 50059 httpPort: 8080 metricsPort: 9000 grpc: + tls: + enabled: false + certPath: /certs/tls.crt + keyPath: /certs/tls.key keepaliveParams: maxConnectionIdle: 5m time: 2h diff --git a/config/lookoutv2/config.yaml b/config/lookoutv2/config.yaml index b525ecdaffc..718b60d6c26 100644 --- a/config/lookoutv2/config.yaml +++ b/config/lookoutv2/config.yaml @@ -3,6 +3,10 @@ corsAllowedOrigins: - "http://localhost:3000" - "http://localhost:8089" - "http://localhost:10000" +tls: + enabled: false + certPath: /certs/tls.crt + keyPath: /certs/tls.key postgres: maxOpenConns: 100 maxIdleConns: 25 diff --git a/config/scheduler/config.yaml b/config/scheduler/config.yaml index 531f4c6a78d..dab4f2780a6 100644 --- a/config/scheduler/config.yaml +++ b/config/scheduler/config.yaml @@ -78,6 +78,7 @@ scheduling: enabled: true nodeEvictionProbability: 1.0 nodeOversubscriptionEvictionProbability: 1.0 + protectedFractionOfFairShare: 1.0 nodeIdLabel: kubernetes.io/hostname priorityClasses: armada-default: @@ -96,7 +97,10 @@ scheduling: maximumResourceFractionToSchedule: memory: 1.0 cpu: 1.0 - maximumJobsToSchedule: 5000 + maximumSchedulingRate: 100.0 + maximumSchedulingBurst: 1000 + maximumPerQueueSchedulingRate: 50.0 + maximumPerQueueSchedulingBurst: 1000 maxUnacknowledgedJobsPerExecutor: 2500 maxJobSchedulingContextsPerExecutor: 10000 defaultJobLimits: diff --git a/deployment/armada/templates/deployment.yaml b/deployment/armada/templates/deployment.yaml index 6564273df98..67763486468 100644 --- a/deployment/armada/templates/deployment.yaml +++ b/deployment/armada/templates/deployment.yaml @@ -100,6 +100,7 @@ spec: httpGet: path: /health port: rest + scheme: {{ if .Values.applicationConfig.grpc.tls.enabled }}HTTPS{{ else }}HTTP{{ end }} initialDelaySeconds: 5 timeoutSeconds: 5 failureThreshold: 2 @@ -107,6 +108,7 @@ spec: httpGet: path: /health port: rest + scheme: {{ if .Values.applicationConfig.grpc.tls.enabled }}HTTPS{{ else }}HTTP{{ end }} initialDelaySeconds: 10 timeoutSeconds: 10 failureThreshold: 3 diff --git a/deployment/armada/templates/routableservice.yaml b/deployment/armada/templates/routableservice.yaml new file mode 100644 index 00000000000..68af2c2d22c --- /dev/null +++ b/deployment/armada/templates/routableservice.yaml @@ -0,0 +1,43 @@ +{{ if .Values.routableService.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "armada.name" . }}-routable + namespace: {{ .Release.Namespace }} + labels: + {{- include "armada.labels.all" . 
| nindent 4 }} +spec: + type: LoadBalancer + allocateLoadBalancerNodePorts: false + {{ if .Values.routableService.grpcLoadBalancerIP }} + loadBalancerIP: {{.Values.routableService.grpcLoadBalancerIP}} + {{ end }} + selector: + {{- include "armada.labels.identity" . | nindent 4 }} + ports: + - name: grpc + protocol: TCP + port: {{ if .Values.applicationConfig.grpc.tls.enabled }}443{{ else }}80{{ end }} + targetPort: {{ .Values.applicationConfig.grpcPort }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "armada.name" . }}-rest-routable + namespace: {{ .Release.Namespace }} + labels: + {{- include "armada.labels.all" . | nindent 4 }} +spec: + type: LoadBalancer + allocateLoadBalancerNodePorts: false + {{ if .Values.routableService.restLoadBalancerIP }} + loadBalancerIP: {{.Values.routableService.restLoadBalancerIP}} + {{ end }} + selector: + {{- include "armada.labels.identity" . | nindent 4 }} + ports: + - name: grpc + protocol: TCP + port: {{ if .Values.applicationConfig.grpc.tls.enabled }}443{{ else }}80{{ end }} + targetPort: {{ .Values.applicationConfig.httpPort }} +{{ end }} diff --git a/deployment/armada/values.yaml b/deployment/armada/values.yaml index 384bdac3131..5b8d6d4f3b2 100644 --- a/deployment/armada/values.yaml +++ b/deployment/armada/values.yaml @@ -70,10 +70,17 @@ prometheus: podDisruptionBudget: {} -# -- If a port number is specified, the Service will be of type NodePort and the gRPC server will be exposed on that node port -#nodePort: 8080 -# -- If a port number is specified, REST server will be exposed on that node port -#httpNodePort: 50051 +# Service type. May be NodePort, ClusterIP, LoadBalancer +serviceType: ClusterIP + +# -- If Service is of type NodePort this will be the port that the gRPC server will be exposed on +#nodePort: 50051 +# -- If Service is of type NodePort this will be the port that the rest server will be exposed on +#httpNodePort: 8080 + +## Internal and external traffic policy for the armada service # externalTrafficPolicy: Local # internalTrafficPolicy: Local # -- If specified, custom ServiceAccount name will be attached to Armada Server Deployment resource and the default ServiceAccount will not be created customServiceAccount: "" @@ -81,6 +88,9 @@ customServiceAccount: "" # -- Additional ServiceAccount properties (e.g. automountServiceAccountToken, imagePullSecrets, etc.) serviceAccount: {} +routableService: + enabled: false + applicationConfig: # -- Armada Server gRPC port grpcPort: 50051 diff --git a/deployment/lookout-v2/templates/deployment.yaml b/deployment/lookout-v2/templates/deployment.yaml index 214b9f25622..14bda0b2954 100644 --- a/deployment/lookout-v2/templates/deployment.yaml +++ b/deployment/lookout-v2/templates/deployment.yaml @@ -53,6 +53,11 @@ spec: mountPath: /config/application_config.yaml subPath: {{ include "lookout_v2.config.filename" . 
}} readOnly: true + {{- if .Values.applicationConfig.tls.enabled }} + - name: tls-certs + mountPath: /certs + readOnly: true + {{- end }} {{- if .Values.additionalVolumeMounts }} {{- toYaml .Values.additionalVolumeMounts | nindent 12 -}} {{- end }} @@ -62,6 +67,7 @@ spec: httpGet: path: /health port: web + scheme: {{ if .Values.applicationConfig.tls.enabled }}HTTPS{{ else }}HTTP{{ end }} initialDelaySeconds: 5 timeoutSeconds: 5 failureThreshold: 2 @@ -69,6 +75,7 @@ spec: httpGet: path: /health port: web + scheme: {{ if .Values.applicationConfig.tls.enabled }}HTTPS{{ else }}HTTP{{ end }} initialDelaySeconds: 10 timeoutSeconds: 10 failureThreshold: 3 @@ -88,6 +95,11 @@ spec: - name: user-config secret: secretName: {{ include "lookout_v2.config.name" . }} + {{- if .Values.applicationConfig.tls.enabled }} + - name: tls-certs + secret: + secretName: lookout-service-tls-v2 + {{- end }} {{- if .Values.additionalVolumes }} {{- toYaml .Values.additionalVolumes | nindent 8 }} {{- end }} diff --git a/deployment/lookout-v2/templates/routableservice.yaml b/deployment/lookout-v2/templates/routableservice.yaml new file mode 100644 index 00000000000..8edca965ea9 --- /dev/null +++ b/deployment/lookout-v2/templates/routableservice.yaml @@ -0,0 +1,22 @@ +{{ if .Values.routableService.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lookout_v2.name" . }}-routable + namespace: {{ .Release.Namespace }} + labels: + {{- include "lookout_v2.labels.all" . | nindent 4 }} +spec: + type: LoadBalancer + allocateLoadBalancerNodePorts: false + {{ if .Values.routableService.loadBalancerIP }} + loadBalancerIP: {{.Values.routableService.loadBalancerIP}} + {{ end }} + selector: + {{- include "lookout_v2.labels.identity" . | nindent 4 }} + ports: + - name: web + protocol: TCP + port: {{ if .Values.applicationConfig.tls.enabled }}443{{ else }}80{{ end }} + targetPort: {{ .Values.applicationConfig.apiPort }} +{{ end }} diff --git a/deployment/lookout-v2/values.yaml b/deployment/lookout-v2/values.yaml index 8aac6799dd9..d5212768e83 100644 --- a/deployment/lookout-v2/values.yaml +++ b/deployment/lookout-v2/values.yaml @@ -9,7 +9,7 @@ resources: memory: 512Mi cpu: 200m # -- Tolerations -tolerations: [] +tolerations: [] additionalLabels: {} additionalVolumeMounts: [] additionalVolumes: [] @@ -30,5 +30,10 @@ prometheus: customServiceAccount: null serviceAccount: null +routableService: + enabled: false + applicationConfig: + tls: + enabled: false apiPort: 10000 diff --git a/deployment/lookout/templates/deployment.yaml b/deployment/lookout/templates/deployment.yaml index ba5028c6f6a..df28974ef92 100644 --- a/deployment/lookout/templates/deployment.yaml +++ b/deployment/lookout/templates/deployment.yaml @@ -59,6 +59,11 @@ spec: mountPath: /config/application_config.yaml subPath: {{ include "lookout.config.filename" . 
}} readOnly: true + {{- if .Values.applicationConfig.grpc.tls.enabled }} + - name: tls-certs + mountPath: /certs + readOnly: true + {{- end }} {{- if .Values.additionalVolumeMounts }} {{- toYaml .Values.additionalVolumeMounts | nindent 12 -}} {{- end }} @@ -68,6 +73,7 @@ spec: httpGet: path: /health port: web + scheme: {{ if .Values.applicationConfig.grpc.tls.enabled }}HTTPS{{ else }}HTTP{{ end }} initialDelaySeconds: 5 timeoutSeconds: 5 failureThreshold: 2 @@ -75,6 +81,7 @@ spec: httpGet: path: /health port: web + scheme: {{ if .Values.applicationConfig.grpc.tls.enabled }}HTTPS{{ else }}HTTP{{ end }} initialDelaySeconds: 10 timeoutSeconds: 10 failureThreshold: 3 @@ -94,6 +101,11 @@ spec: - name: user-config secret: secretName: {{ include "lookout.config.name" . }} + {{- if .Values.applicationConfig.grpc.tls.enabled }} + - name: tls-certs + secret: + secretName: lookout-service-tls + {{- end }} {{- if .Values.additionalVolumes }} {{- toYaml .Values.additionalVolumes | nindent 8 }} {{- end }} diff --git a/deployment/lookout/templates/routableservice.yaml b/deployment/lookout/templates/routableservice.yaml new file mode 100644 index 00000000000..6e6046b70d3 --- /dev/null +++ b/deployment/lookout/templates/routableservice.yaml @@ -0,0 +1,22 @@ +{{ if .Values.routableService.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lookout.name" . }}-routable + namespace: {{ .Release.Namespace }} + labels: + {{- include "lookout.labels.all" . | nindent 4 }} +spec: + type: LoadBalancer + allocateLoadBalancerNodePorts: false + {{ if .Values.routableService.loadBalancerIP }} + loadBalancerIP: {{.Values.routableService.loadBalancerIP}} + {{ end }} + selector: + {{- include "lookout.labels.identity" . | nindent 4 }} + ports: + - name: grpc + protocol: TCP + port: {{ if .Values.applicationConfig.grpc.tls.enabled }}443{{ else }}80{{ end }} + targetPort: {{ .Values.applicationConfig.httpPort }} +{{ end }} diff --git a/deployment/lookout/values.yaml b/deployment/lookout/values.yaml index f4a5323cacd..adce0aa1e25 100644 --- a/deployment/lookout/values.yaml +++ b/deployment/lookout/values.yaml @@ -9,7 +9,7 @@ resources: memory: 512Mi cpu: 200m # -- Tolerations -tolerations: [] +tolerations: [] additionalLabels: {} additionalVolumeMounts: [] additionalVolumes: [] @@ -29,6 +29,9 @@ prometheus: customServiceAccount: null serviceAccount: null +routableService: + enabled: false + applicationConfig: grpcPort: 50051 httpPort: 8080 diff --git a/developer/dependencies/postgres-init.sh b/developer/dependencies/postgres-init.sh old mode 100644 new mode 100755 diff --git a/docker-compose.yaml b/docker-compose.yaml index fd242bde29c..105d382f253 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -65,7 +65,7 @@ services: working_dir: /app env_file: - ./developer/env/server.env - command: ./server --config /config/insecure-armada.yaml + command: sh -c "sleep 30 && ./server --config /config/insecure-armada.yaml" server-pulsar: container_name: server @@ -141,8 +141,6 @@ services: ports: - 9001:9001 - 4001:4000 - depends_on: - - server volumes: - ./.kube/internal:/.kube - "go-cache:/root/.cache/go-build:rw" diff --git a/docs/developer/manual-localdev.md b/docs/developer/manual-localdev.md index 236995857c7..65c19e2faad 100644 --- a/docs/developer/manual-localdev.md +++ b/docs/developer/manual-localdev.md @@ -28,7 +28,7 @@ mage BootstrapTools # Compile .pb.go files from .proto files # (only necessary after changing a .proto file). 
mage proto -make dotnet +mage dotnet # Build the Docker images containing all Armada components. # Only the main "bundle" is needed for quickly testing Armada. diff --git a/docs/python_airflow_operator.md b/docs/python_airflow_operator.md index 1d820856344..048667a2562 100644 --- a/docs/python_airflow_operator.md +++ b/docs/python_airflow_operator.md @@ -239,9 +239,27 @@ Reports the result of the job and returns. +#### serialize() +Get a serialized version of this object. + + +* **Returns** + + A dict of keyword arguments used when instantiating + + + +* **Return type** + + dict + + +this object. + + #### template_fields(_: Sequence[str_ _ = ('job_request_items',_ ) -### _class_ armada.operators.armada_deferrable.ArmadaJobCompleteTrigger(job_id, job_service_channel_args, armada_queue, job_set_id, airflow_task_name) +### _class_ armada.operators.armada_deferrable.ArmadaJobCompleteTrigger(job_id, job_service_channel_args, armada_queue, job_set_id, airflow_task_name, poll_interval=30) Bases: `BaseTrigger` An airflow trigger that monitors the job state of an armada job. @@ -269,6 +287,9 @@ Triggers when the job is complete. belongs. + * **poll_interval** (*int*) – How often to poll jobservice to get status. + + * **Returns** @@ -281,7 +302,7 @@ Runs the trigger. Meant to be called by an airflow triggerer process. #### serialize() -Returns the information needed to reconstruct this Trigger. +Return the information needed to reconstruct this Trigger. * **Returns** @@ -664,7 +685,7 @@ A terminated event is SUCCEEDED, FAILED or CANCELLED -### _async_ armada.operators.utils.search_for_job_complete_async(armada_queue, job_set_id, airflow_task_name, job_id, job_service_client, log, time_out_for_failure=7200) +### _async_ armada.operators.utils.search_for_job_complete_async(armada_queue, job_set_id, airflow_task_name, job_id, job_service_client, log, poll_interval, time_out_for_failure=7200) Poll JobService cache asyncronously until you get a terminated event. A terminated event is SUCCEEDED, FAILED or CANCELLED @@ -689,6 +710,9 @@ A terminated event is SUCCEEDED, FAILED or CANCELLED It is optional only for testing + * **poll_interval** (*int*) – How often to poll jobservice to get status. + + * **time_out_for_failure** (*int*) – The amount of time a job can be in job_id_not_found before we decide it was a invalid job diff --git a/go.mod b/go.mod index 2a1bc9d559d..794a9f8ff4c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 // athenz@v1.10.5 and onwards bundle encrypted signing keys with the source code. // Because corporate proxies may block go get commands that pull in encrypted data, -// we replace athenz@v1.10.5 or later with athenz@v1.10.4. 
+// we replace athenz@v1.10.5 or later with athenz@v1.10.4 replace github.com/AthenZ/athenz v1.10.39 => github.com/AthenZ/athenz v1.10.4 require ( @@ -53,12 +53,12 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.3 + github.com/stretchr/testify v1.8.4 github.com/weaveworks/promrus v1.2.0 golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 - golang.org/x/net v0.9.0 - golang.org/x/oauth2 v0.7.0 - golang.org/x/sync v0.1.0 + golang.org/x/net v0.15.0 + golang.org/x/oauth2 v0.12.0 + golang.org/x/sync v0.3.0 golang.org/x/tools v0.6.0 // indirect google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect google.golang.org/grpc v1.57.0 @@ -70,19 +70,19 @@ require ( k8s.io/component-helpers v0.22.4 k8s.io/kubelet v0.22.4 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 - modernc.org/sqlite v1.20.0 + modernc.org/sqlite v1.26.0 sigs.k8s.io/yaml v1.3.0 ) require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/benbjohnson/immutable v0.4.3 - github.com/caarlos0/log v0.2.1 + github.com/caarlos0/log v0.4.2 github.com/go-openapi/errors v0.20.3 github.com/go-openapi/strfmt v0.21.7 - github.com/go-openapi/swag v0.22.3 + github.com/go-openapi/swag v0.22.4 github.com/go-openapi/validate v0.22.1 - github.com/go-playground/validator/v10 v10.14.1 + github.com/go-playground/validator/v10 v10.15.4 github.com/gogo/status v1.1.1 github.com/golang/mock v1.6.0 github.com/goreleaser/goreleaser v1.15.2 @@ -94,6 +94,7 @@ require ( github.com/prometheus/common v0.37.0 github.com/sanity-io/litter v1.5.5 github.com/segmentio/fasthash v1.0.3 + golang.org/x/time v0.3.0 google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 ) @@ -106,16 +107,18 @@ require ( github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aymanbagabas/go-osc52 v1.2.1 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.4.0 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/charmbracelet/lipgloss v0.6.1-0.20220911181249-6304a734e792 // indirect + github.com/charmbracelet/lipgloss v0.7.1 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/elliotchance/orderedmap/v2 v2.2.0 // indirect github.com/evanphx/json-patch v4.11.0+incompatible // indirect github.com/fatih/color v1.13.0 // indirect github.com/fortytw2/leaktest v1.3.0 // indirect @@ -164,21 +167,21 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/muesli/reflow v0.3.0 // indirect - 
github.com/muesli/termenv v0.14.0 // indirect + github.com/muesli/termenv v0.15.2 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pierrec/lz4 v2.0.5+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -192,15 +195,14 @@ require ( github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/crypto v0.7.0 // indirect + golang.org/x/crypto v0.13.0 // indirect golang.org/x/mod v0.8.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/term v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/term v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect @@ -210,9 +212,9 @@ require ( lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.21.5 // indirect + modernc.org/libc v1.24.1 // indirect modernc.org/mathutil v1.5.0 // indirect - modernc.org/memory v1.4.0 // indirect + modernc.org/memory v1.6.0 // indirect modernc.org/opt v0.1.3 // indirect modernc.org/strutil v1.1.3 // indirect modernc.org/token v1.0.1 // indirect diff --git a/go.sum b/go.sum index 54b4013ff0d..a91f26a3628 100644 --- a/go.sum +++ b/go.sum @@ -24,7 +24,7 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= @@ -97,9 +97,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.30.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= -github.com/aymanbagabas/go-osc52 v1.2.1 
h1:q2sWUyDcozPLcLabEMd+a+7Ea2DitxZVN9hTxab9L4E= -github.com/aymanbagabas/go-osc52 v1.2.1/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/immutable v0.4.3 h1:GYHcksoJ9K6HyAUpGxwZURrbTkXA0Dh4otXGqbhdrjA= github.com/benbjohnson/immutable v0.4.3/go.mod h1:qJIKKSmdqz1tVzNtst1DZzvaqOU1onk1rc03IeM3Owk= @@ -114,8 +113,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boynton/repl v0.0.0-20170116235056-348863958e3e/go.mod h1:Crc/GCZ3NXDVCio7Yr0o+SSrytpcFhLmVCIzi0s49t4= -github.com/caarlos0/log v0.2.1 h1:E5vf0Sg24tUbrGanknDu2UH0CZq6cCColThb8gTQnHQ= -github.com/caarlos0/log v0.2.1/go.mod h1:BLxpdZKXvWBjB6fshua4c8d7ApdYjypEDok6ibt+pXk= +github.com/caarlos0/log v0.4.2 h1:Zi5DNvCJLU0zJAI7B3sYf2zRfHW3xS8ahKQg1eh5/LQ= +github.com/caarlos0/log v0.4.2/go.mod h1:xwKkgWnQMD39Cb/HgTWrhsG3l3MTTGwf2UZqbki2eqM= github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= @@ -125,8 +124,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/lipgloss v0.6.1-0.20220911181249-6304a734e792 h1:VfX981snWr7d4yvFAJYCN3S2sOsweiM6BsqZgFPY65c= -github.com/charmbracelet/lipgloss v0.6.1-0.20220911181249-6304a734e792/go.mod h1:sOPE4igPEyZ5Q75T0PYIMqA40cL+r0NrLlMJxr01aiE= +github.com/charmbracelet/lipgloss v0.7.1 h1:17WMwi7N1b1rVWOjMT+rCh7sQkvDU75B2hbZpc5Kc1E= +github.com/charmbracelet/lipgloss v0.7.1/go.mod h1:yG0k3giv8Qj8edTCbbg6AlQ5e8KNWpFujkNawKNhE2c= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -165,10 +164,13 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/doug-martin/goqu/v9 v9.18.0 h1:/6bcuEtAe6nsSMVK/M+fOiXUNfyFF3yYtE07DBPFMYY= github.com/doug-martin/goqu/v9 v9.18.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod 
h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk= +github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -249,8 +251,9 @@ github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KA github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= @@ -258,8 +261,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= -github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.15.4 h1:zMXza4EpOdooxPel5xDqXEdXG5r+WggpvnAKMsalBjs= +github.com/go-playground/validator/v10 v10.15.4/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -624,16 +627,13 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= 
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM= github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -668,12 +668,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= -github.com/muesli/reflow v0.2.1-0.20210115123740-9e1d0d53df68/go.mod h1:Xk+z4oIWdQqJzsxyjgl3P22oYZnHdZ8FFTHAQQt5BMQ= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.12.1-0.20220901123159-d729275e0977/go.mod h1:bN6sPNtkiahdhHv2Xm6RGU16LSCxfbIZvMfqjOCfrR4= -github.com/muesli/termenv v0.14.0 h1:8x9NFfOe8lmIWK4pgy3IfVEy47f+ppe3tUqdPZG2Uy0= -github.com/muesli/termenv v0.14.0/go.mod h1:kG/pF1E7fh949Xhe156crRUrHNyK221IuGO7Ez60Uc8= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -755,8 +753,9 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/renstrom/shortuuid v3.0.0+incompatible h1:F6T1U7bWlI3FTV+JE8HyeR7bkTeYZJntqQLA9ST4HOQ= 
github.com/renstrom/shortuuid v3.0.0+incompatible/go.mod h1:n18Ycpn8DijG+h/lLBQVnGKv1BCtTeXo8KKSbBOrQ8c= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -845,8 +844,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= @@ -940,8 +939,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1029,8 +1028,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1042,8 +1041,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1056,8 +1055,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180821044426-4ea2f632f6e9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1130,19 +1129,17 @@ golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20180810153555-6e3c4e7365dd/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1153,8 +1150,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1342,8 +1339,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1422,22 +1419,22 @@ modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/libc v1.21.5 h1:xBkU9fnHV+hvZuPSRszN0AXDG4M7nwPLwTWwkYcvLCI= -modernc.org/libc v1.21.5/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM= +modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= modernc.org/mathutil v1.5.0/go.mod 
h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk= -modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o= +modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.20.0 h1:80zmD3BGkm8BZ5fUi/4lwJQHiO3GXgIUvZRXpoIfROY= -modernc.org/sqlite v1.20.0/go.mod h1:EsYz8rfOvLCiYTy5ZFsOYzoCcRMu98YYkwAcCw5YIYw= +modernc.org/sqlite v1.26.0 h1:SocQdLRSYlA8W99V8YH0NES75thx19d9sB/aFc4R8Lw= +modernc.org/sqlite v1.26.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/armada/configuration/constants.go b/internal/armada/configuration/constants.go index 51669f2aec6..5275264e2c8 100644 --- a/internal/armada/configuration/constants.go +++ b/internal/armada/configuration/constants.go @@ -5,8 +5,11 @@ const ( // All jobs in a gang are guaranteed to be scheduled onto the same cluster at the same time. GangIdAnnotation = "armadaproject.io/gangId" // GangCardinalityAnnotation All jobs in a gang must specify the total number of jobs in the gang via this annotation. - // The cardinality should be expressed as an integer, e.g., "3". + // The cardinality should be expressed as a positive integer, e.g., "3". GangCardinalityAnnotation = "armadaproject.io/gangCardinality" + // GangMinimumCardinalityAnnotation All jobs in a gang must specify the minimum size for the gang to be schedulable via this annotation. + // The cardinality should be expressed as a positive integer, e.g., "3". + GangMinimumCardinalityAnnotation = "armadaproject.io/gangMinimumCardinality" // The jobs that make up a gang may be constrained to be scheduled across a set of uniform nodes. // Specifically, if provided, all gang jobs are scheduled onto nodes for which the value of the provided label is equal. // Used to ensure, e.g., that all gang jobs are scheduled onto the same cluster or rack. diff --git a/internal/armada/configuration/types.go b/internal/armada/configuration/types.go index 77fc34a548c..f6b6ec985ba 100644 --- a/internal/armada/configuration/types.go +++ b/internal/armada/configuration/types.go @@ -26,6 +26,7 @@ type ArmadaConfig struct { PprofPort *uint16 CorsAllowedOrigins []string + GrpcGatewayPath string Grpc grpcconfig.GrpcConfig @@ -124,10 +125,33 @@ type SchedulingConfig struct { MaximumResourceFractionToSchedule map[string]float64 // Overrides MaximalClusterFractionToSchedule if set for the current pool. 
 	MaximumResourceFractionToScheduleByPool map[string]map[string]float64
-	// Max number of jobs to schedule in each invocation of the scheduler.
-	MaximumJobsToSchedule uint
-	// Max number of gangs to schedule in each invocation of the scheduler.
-	MaximumGangsToSchedule uint
+	// The rate at which Armada schedules jobs is rate-limited using a token bucket approach.
+	// Specifically, there is a token bucket that persists between scheduling rounds.
+	// The bucket fills up at a rate of MaximumSchedulingRate tokens per second and has capacity MaximumSchedulingBurst.
+	// A token is removed from the bucket when scheduling a job, and scheduling stops while the bucket is empty.
+	//
+	// Hence, MaximumSchedulingRate controls the maximum number of jobs scheduled per second in steady-state,
+	// i.e., once the burst capacity has been exhausted.
+	//
+	// Rate-limiting is based on the number of tokens available at the start of each scheduling round,
+	// i.e., tokens accumulated while scheduling become available at the start of the next scheduling round.
+	//
+	// For more information about the rate-limiter, see:
+	// https://pkg.go.dev/golang.org/x/time/rate#Limiter
+	MaximumSchedulingRate float64 `validate:"gt=0"`
+	// MaximumSchedulingBurst controls the burst capacity of the rate-limiter.
+	//
+	// There are two important implications:
+	// - Armada will never schedule more than MaximumSchedulingBurst jobs per scheduling round.
+	// - Gang jobs with cardinality greater than MaximumSchedulingBurst can never be scheduled.
+	MaximumSchedulingBurst int `validate:"gt=0"`
+	// In addition to the global rate-limiter, there is a separate rate-limiter for each queue.
+	// These work the same as the global rate-limiter, except they apply only to jobs scheduled from a specific queue.
+	//
+	// Per-queue version of MaximumSchedulingRate.
+	MaximumPerQueueSchedulingRate float64 `validate:"gt=0"`
+	// Per-queue version of MaximumSchedulingBurst.
+	MaximumPerQueueSchedulingBurst int `validate:"gt=0"`
 	// Armada stores contexts associated with recent job scheduling attempts.
 	// This setting limits the number of such contexts to store.
 	// Contexts associated with the most recent scheduling attempt for each queue and cluster are always stored.
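To make the token-bucket semantics above concrete, here is a minimal Go sketch using golang.org/x/time/rate (the package the comment links to). It is illustrative only: the schedulingConfig struct and scheduleRound function are assumptions for this example, not Armada code, and the real scheduler works from the tokens available at the start of a round, whereas this sketch simply consumes one token per scheduled job.

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

// Stand-in for the new config fields introduced above (illustrative only).
type schedulingConfig struct {
	MaximumSchedulingRate  float64 // tokens added to the bucket per second
	MaximumSchedulingBurst int     // bucket capacity
}

// scheduleRound tries to schedule up to `queued` jobs, consuming one token per
// job and stopping as soon as the bucket is empty.
func scheduleRound(limiter *rate.Limiter, queued int) int {
	scheduled := 0
	for i := 0; i < queued; i++ {
		if !limiter.Allow() { // no token available: stop scheduling this round
			break
		}
		scheduled++
	}
	return scheduled
}

func main() {
	cfg := schedulingConfig{MaximumSchedulingRate: 100, MaximumSchedulingBurst: 1000}
	limiter := rate.NewLimiter(rate.Limit(cfg.MaximumSchedulingRate), cfg.MaximumSchedulingBurst)

	// The first round can schedule up to the burst capacity (at most 1000 here);
	// later rounds are bounded by tokens refilled at MaximumSchedulingRate per second.
	fmt.Println(scheduleRound(limiter, 5000))
	fmt.Println(scheduleRound(limiter, 5000))
}
```

Because a single bucket can never hold more than MaximumSchedulingBurst tokens, a gang larger than the burst can never be scheduled in one round, which is what the comment above calls out.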
diff --git a/internal/armada/repository/event.go b/internal/armada/repository/event.go index 2e05ba377c6..9df6d7a1a05 100644 --- a/internal/armada/repository/event.go +++ b/internal/armada/repository/event.go @@ -14,6 +14,7 @@ import ( "github.com/armadaproject/armada/internal/armada/repository/apimessages" "github.com/armadaproject/armada/internal/armada/repository/sequence" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" @@ -48,7 +49,7 @@ func NewEventRepository(db redis.UniversalClient) *RedisEventRepository { NumTestsPerEvictionRun: 10, } - decompressorPool := pool.NewObjectPool(context.Background(), pool.NewPooledObjectFactorySimple( + decompressorPool := pool.NewObjectPool(armadacontext.Background(), pool.NewPooledObjectFactorySimple( func(context.Context) (interface{}, error) { return compress.NewZlibDecompressor(), nil }), &poolConfig) @@ -134,16 +135,16 @@ func (repo *RedisEventRepository) GetLastMessageId(queue, jobSetId string) (stri func (repo *RedisEventRepository) extractEvents(msg redis.XMessage, queue, jobSetId string) ([]*api.EventMessage, error) { data := msg.Values[dataKey] bytes := []byte(data.(string)) - decompressor, err := repo.decompressorPool.BorrowObject(context.Background()) + decompressor, err := repo.decompressorPool.BorrowObject(armadacontext.Background()) if err != nil { return nil, errors.WithStack(err) } - defer func(decompressorPool *pool.ObjectPool, ctx context.Context, object interface{}) { + defer func(decompressorPool *pool.ObjectPool, ctx *armadacontext.Context, object interface{}) { err := decompressorPool.ReturnObject(ctx, object) if err != nil { log.WithError(err).Errorf("Error returning decompressor to pool") } - }(repo.decompressorPool, context.Background(), decompressor) + }(repo.decompressorPool, armadacontext.Background(), decompressor) decompressedData, err := decompressor.(compress.Decompressor).Decompress(bytes) if err != nil { return nil, errors.WithStack(err) diff --git a/internal/armada/repository/event_store.go b/internal/armada/repository/event_store.go index 7241cba02ef..248a405b6a4 100644 --- a/internal/armada/repository/event_store.go +++ b/internal/armada/repository/event_store.go @@ -1,10 +1,9 @@ package repository import ( - "context" - "github.com/apache/pulsar-client-go/pulsar" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/internal/common/schedulers" @@ -12,14 +11,14 @@ import ( ) type EventStore interface { - ReportEvents(context.Context, []*api.EventMessage) error + ReportEvents(*armadacontext.Context, []*api.EventMessage) error } type TestEventStore struct { ReceivedEvents []*api.EventMessage } -func (es *TestEventStore) ReportEvents(_ context.Context, message []*api.EventMessage) error { +func (es *TestEventStore) ReportEvents(_ *armadacontext.Context, message []*api.EventMessage) error { es.ReceivedEvents = append(es.ReceivedEvents, message...) 
return nil } @@ -35,7 +34,7 @@ func NewEventStore(producer pulsar.Producer, maxAllowedMessageSize uint) *Stream } } -func (n *StreamEventStore) ReportEvents(ctx context.Context, apiEvents []*api.EventMessage) error { +func (n *StreamEventStore) ReportEvents(ctx *armadacontext.Context, apiEvents []*api.EventMessage) error { if len(apiEvents) == 0 { return nil } diff --git a/internal/armada/scheduling/lease_manager.go b/internal/armada/scheduling/lease_manager.go index 9b34786af9c..6e1c6385f9f 100644 --- a/internal/armada/scheduling/lease_manager.go +++ b/internal/armada/scheduling/lease_manager.go @@ -1,12 +1,12 @@ package scheduling import ( - "context" "time" log "github.com/sirupsen/logrus" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api" ) @@ -55,7 +55,7 @@ func (l *LeaseManager) ExpireLeases() { if e != nil { log.Error(e) } else { - e := l.eventStore.ReportEvents(context.Background(), []*api.EventMessage{event}) + e := l.eventStore.ReportEvents(armadacontext.Background(), []*api.EventMessage{event}) if e != nil { log.Error(e) } diff --git a/internal/armada/server.go b/internal/armada/server.go index e60567583bc..7f77b26b0d9 100644 --- a/internal/armada/server.go +++ b/internal/armada/server.go @@ -1,7 +1,6 @@ package armada import ( - "context" "fmt" "net" "time" @@ -13,7 +12,6 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" "google.golang.org/grpc" "github.com/armadaproject/armada/internal/armada/cache" @@ -22,6 +20,7 @@ import ( "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/armada/scheduling" "github.com/armadaproject/armada/internal/armada/server" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/database" @@ -39,7 +38,7 @@ import ( "github.com/armadaproject/armada/pkg/client" ) -func Serve(ctx context.Context, config *configuration.ArmadaConfig, healthChecks *health.MultiChecker) error { +func Serve(ctx *armadacontext.Context, config *configuration.ArmadaConfig, healthChecks *health.MultiChecker) error { log.Info("Armada server starting") log.Infof("Armada priority classes: %v", config.Scheduling.Preemption.PriorityClasses) log.Infof("Default priority class: %s", config.Scheduling.Preemption.DefaultPriorityClass) @@ -51,9 +50,9 @@ func Serve(ctx context.Context, config *configuration.ArmadaConfig, healthChecks // Run all services within an errgroup to propagate errors between services. // Defer cancelling the parent context to ensure the errgroup is cancelled on return. - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := armadacontext.WithCancel(ctx) defer cancel() - g, ctx := errgroup.WithContext(ctx) + g, ctx := armadacontext.ErrGroup(ctx) // List of services to run concurrently. 
// Because we want to start services only once all input validation has been completed, diff --git a/internal/armada/server/authorization.go b/internal/armada/server/authorization.go index 1d11253d3c7..434771afcbf 100644 --- a/internal/armada/server/authorization.go +++ b/internal/armada/server/authorization.go @@ -1,10 +1,10 @@ package server import ( - "context" "fmt" "strings" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/pkg/client/queue" @@ -60,7 +60,7 @@ func MergePermissionErrors(errs ...*ErrUnauthorized) *ErrUnauthorized { // permissions required to perform some action. The error returned is of type ErrUnauthorized. // After recovering the error (using errors.As), the caller can obtain the name of the user and the // requested permission programatically via this error type. -func checkPermission(p authorization.PermissionChecker, ctx context.Context, permission permission.Permission) error { +func checkPermission(p authorization.PermissionChecker, ctx *armadacontext.Context, permission permission.Permission) error { if !p.UserHasPermission(ctx, permission) { return &ErrUnauthorized{ Principal: authorization.GetPrincipal(ctx), @@ -74,7 +74,7 @@ func checkPermission(p authorization.PermissionChecker, ctx context.Context, per func checkQueuePermission( p authorization.PermissionChecker, - ctx context.Context, + ctx *armadacontext.Context, q queue.Queue, globalPermission permission.Permission, verb queue.PermissionVerb, diff --git a/internal/armada/server/event.go b/internal/armada/server/event.go index 484f1a3a9f9..14ea0d58e18 100644 --- a/internal/armada/server/event.go +++ b/internal/armada/server/event.go @@ -13,6 +13,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/armada/repository/sequence" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client/queue" @@ -42,7 +43,8 @@ func NewEventServer( } } -func (s *EventServer) Report(ctx context.Context, message *api.EventMessage) (*types.Empty, error) { +func (s *EventServer) Report(grpcCtx context.Context, message *api.EventMessage) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if err := checkPermission(s.permissions, ctx, permissions.ExecuteJobs); err != nil { return nil, status.Errorf(codes.PermissionDenied, "[Report] error: %s", err) } @@ -50,7 +52,8 @@ func (s *EventServer) Report(ctx context.Context, message *api.EventMessage) (*t return &types.Empty{}, s.eventStore.ReportEvents(ctx, []*api.EventMessage{message}) } -func (s *EventServer) ReportMultiple(ctx context.Context, message *api.EventList) (*types.Empty, error) { +func (s *EventServer) ReportMultiple(grpcCtx context.Context, message *api.EventList) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if err := checkPermission(s.permissions, ctx, permissions.ExecuteJobs); err != nil { return nil, status.Errorf(codes.PermissionDenied, "[ReportMultiple] error: %s", err) } @@ -116,6 +119,7 @@ func (s *EventServer) enrichPreemptedEvent(event *api.EventMessage_Preempted, jo // GetJobSetEvents streams back all events associated with a particular job set. 
func (s *EventServer) GetJobSetEvents(request *api.JobSetRequest, stream api.Event_GetJobSetEventsServer) error { + ctx := armadacontext.FromGrpcCtx(stream.Context()) q, err := s.queueRepository.GetQueue(request.Queue) var expected *repository.ErrQueueNotFound if errors.As(err, &expected) { @@ -124,7 +128,7 @@ func (s *EventServer) GetJobSetEvents(request *api.JobSetRequest, stream api.Eve return err } - err = validateUserHasWatchPermissions(stream.Context(), s.permissions, q, request.Id) + err = validateUserHasWatchPermissions(ctx, s.permissions, q, request.Id) if err != nil { return status.Errorf(codes.PermissionDenied, "[GetJobSetEvents] %s", err) } @@ -142,7 +146,7 @@ func (s *EventServer) GetJobSetEvents(request *api.JobSetRequest, stream api.Eve return s.serveEventsFromRepository(request, s.eventRepository, stream) } -func (s *EventServer) Health(ctx context.Context, cont_ *types.Empty) (*api.HealthCheckResponse, error) { +func (s *EventServer) Health(_ context.Context, _ *types.Empty) (*api.HealthCheckResponse, error) { return &api.HealthCheckResponse{Status: api.HealthCheckResponse_SERVING}, nil } @@ -222,7 +226,7 @@ func (s *EventServer) serveEventsFromRepository(request *api.JobSetRequest, even } } -func validateUserHasWatchPermissions(ctx context.Context, permsChecker authorization.PermissionChecker, q queue.Queue, jobSetId string) error { +func validateUserHasWatchPermissions(ctx *armadacontext.Context, permsChecker authorization.PermissionChecker, q queue.Queue, jobSetId string) error { err := checkPermission(permsChecker, ctx, permissions.WatchAllEvents) var globalPermErr *ErrUnauthorized if errors.As(err, &globalPermErr) { diff --git a/internal/armada/server/event_test.go b/internal/armada/server/event_test.go index a31f24965dc..18d77478f1c 100644 --- a/internal/armada/server/event_test.go +++ b/internal/armada/server/event_test.go @@ -18,6 +18,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/internal/common/compress" @@ -30,7 +31,7 @@ func TestEventServer_Health(t *testing.T) { withEventServer( t, func(s *EventServer) { - health, err := s.Health(context.Background(), &types.Empty{}) + health, err := s.Health(armadacontext.Background(), &types.Empty{}) assert.Equal(t, health.Status, api.HealthCheckResponse_SERVING) require.NoError(t, err) }, @@ -274,7 +275,7 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { assert.NoError(t, err) principal := authorization.NewStaticPrincipal("alice", []string{}) - ctx := authorization.WithPrincipal(context.Background(), principal) + ctx := authorization.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -298,7 +299,7 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { assert.NoError(t, err) principal := authorization.NewStaticPrincipal("alice", []string{"watch-all-events-group"}) - ctx := authorization.WithPrincipal(context.Background(), principal) + ctx := authorization.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -322,7 +323,7 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { assert.NoError(t, err) 
principal := authorization.NewStaticPrincipal("alice", []string{"watch-queue-group"}) - ctx := authorization.WithPrincipal(context.Background(), principal) + ctx := authorization.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -344,7 +345,7 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { assert.NoError(t, err) principal := authorization.NewStaticPrincipal("alice", []string{"watch-events-group", "watch-queue-group"}) - ctx := authorization.WithPrincipal(context.Background(), principal) + ctx := authorization.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -426,7 +427,7 @@ func (s *eventStreamMock) Send(m *api.EventStreamMessage) error { func (s *eventStreamMock) Context() context.Context { if s.ctx == nil { - return context.Background() + return armadacontext.Background() } return s.ctx } diff --git a/internal/armada/server/eventsprinter.go b/internal/armada/server/eventsprinter.go index 90bbca97f83..d2ba150d6e4 100644 --- a/internal/armada/server/eventsprinter.go +++ b/internal/armada/server/eventsprinter.go @@ -9,6 +9,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/pulsarutils/pulsarrequestid" @@ -29,7 +30,7 @@ type EventsPrinter struct { } // Run the service that reads from Pulsar and updates Armada until the provided context is cancelled. -func (srv *EventsPrinter) Run(ctx context.Context) error { +func (srv *EventsPrinter) Run(ctx *armadacontext.Context) error { // Get the configured logger, or the standard logger if none is provided. var log *logrus.Entry if srv.Logger != nil { @@ -74,7 +75,7 @@ func (srv *EventsPrinter) Run(ctx context.Context) error { default: // Get a message from Pulsar, which consists of a sequence of events (i.e., state transitions). 
- ctxWithTimeout, cancel := context.WithTimeout(ctx, 10*time.Second) + ctxWithTimeout, cancel := armadacontext.WithTimeout(ctx, 10*time.Second) msg, err := consumer.Receive(ctxWithTimeout) cancel() if errors.Is(err, context.DeadlineExceeded) { // expected @@ -85,7 +86,7 @@ func (srv *EventsPrinter) Run(ctx context.Context) error { break } util.RetryUntilSuccess( - context.Background(), + armadacontext.Background(), func() error { return consumer.Ack(msg) }, func(err error) { logging.WithStacktrace(log, err).Warnf("acking pulsar message failed") diff --git a/internal/armada/server/lease.go b/internal/armada/server/lease.go index 68d703d82fa..42da0d16ebd 100644 --- a/internal/armada/server/lease.go +++ b/internal/armada/server/lease.go @@ -10,14 +10,13 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/gogo/protobuf/types" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/hashicorp/go-multierror" pool "github.com/jolestar/go-commons-pool" "github.com/pkg/errors" - "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" + "golang.org/x/time/rate" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/utils/clock" @@ -26,6 +25,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/armada/scheduling" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/compress" @@ -59,6 +59,10 @@ type AggregatedQueueServer struct { schedulingInfoRepository repository.SchedulingInfoRepository decompressorPool *pool.ObjectPool clock clock.Clock + // Global job scheduling rate-limiter. + limiter *rate.Limiter + // Per-queue job scheduling rate-limiters. + limiterByQueue map[string]*rate.Limiter // For storing reports of scheduling attempts. SchedulingContextRepository *scheduler.SchedulingContextRepository // Stores the most recent NodeDb for each executor. @@ -98,12 +102,17 @@ func NewAggregatedQueueServer( return compress.NewZlibDecompressor(), nil }), &poolConfig) return &AggregatedQueueServer{ - permissions: permissions, - schedulingConfig: schedulingConfig, - jobRepository: jobRepository, - queueRepository: queueRepository, - usageRepository: usageRepository, - eventStore: eventStore, + permissions: permissions, + schedulingConfig: schedulingConfig, + jobRepository: jobRepository, + queueRepository: queueRepository, + usageRepository: usageRepository, + eventStore: eventStore, + limiter: rate.NewLimiter( + rate.Limit(schedulingConfig.MaximumSchedulingRate), + schedulingConfig.MaximumSchedulingBurst, + ), + limiterByQueue: make(map[string]*rate.Limiter), schedulingInfoRepository: schedulingInfoRepository, decompressorPool: decompressorPool, executorRepository: executorRepository, @@ -119,7 +128,7 @@ func NewAggregatedQueueServer( // // This function should be used instead of the LeaseJobs function in most cases. 
func (q *AggregatedQueueServer) StreamingLeaseJobs(stream api.AggregatedQueue_StreamingLeaseJobsServer) error { - if err := checkPermission(q.permissions, stream.Context(), permissions.ExecuteJobs); err != nil { + if err := checkPermission(q.permissions, armadacontext.FromGrpcCtx(stream.Context()), permissions.ExecuteJobs); err != nil { return err } @@ -142,7 +151,7 @@ func (q *AggregatedQueueServer) StreamingLeaseJobs(stream api.AggregatedQueue_St } // Get jobs to be leased. - jobs, err := q.getJobs(stream.Context(), req) + jobs, err := q.getJobs(armadacontext.FromGrpcCtx(stream.Context()), req) if err != nil { return err } @@ -253,14 +262,12 @@ func (repo *SchedulerJobRepositoryAdapter) GetExistingJobsByIds(ids []string) ([ return rv, nil } -func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingLeaseRequest) ([]*api.Job, error) { - log := ctxlogrus.Extract(ctx) - log = log.WithFields(logrus.Fields{ - "function": "getJobs", - "cluster": req.ClusterId, - "pool": req.Pool, - }) - ctx = ctxlogrus.ToContext(ctx, log) +func (q *AggregatedQueueServer) getJobs(ctx *armadacontext.Context, req *api.StreamingLeaseRequest) ([]*api.Job, error) { + ctx = armadacontext. + WithLogFields(ctx, map[string]interface{}{ + "cluster": req.ClusterId, + "pool": req.Pool, + }) // Get the total capacity available across all clusters. usageReports, err := q.usageRepository.GetClusterUsageReports() @@ -337,7 +344,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL lastSeen, ) if err != nil { - logging.WithStacktrace(log, err).Warnf( + logging.WithStacktrace(ctx, err).Warnf( "skipping node %s from executor %s", nodeInfo.GetName(), req.GetClusterId(), ) continue @@ -380,7 +387,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL // Group gangs. for _, job := range jobs { - gangId, _, isGangJob, err := scheduler.GangIdAndCardinalityFromLegacySchedulerJob(job) + gangId, _, _, isGangJob, err := scheduler.GangIdAndCardinalityFromLegacySchedulerJob(job) if err != nil { return nil, err } @@ -465,7 +472,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL // Give Schedule() a 3 second shorter deadline than ctx to give it a chance to finish up before ctx deadline. if deadline, ok := ctx.Deadline(); ok { var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, deadline.Add(-3*time.Second)) + ctx, cancel = armadacontext.WithDeadline(ctx, deadline.Add(-3*time.Second)) defer cancel() } @@ -491,6 +498,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL q.schedulingConfig.Preemption.PriorityClasses, q.schedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + q.limiter, totalResources, ) for queue, priorityFactor := range priorityFactorByQueue { @@ -502,7 +510,16 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL if priorityFactor > 0 { weight = 1 / priorityFactor } - if err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClassForPool[queue]); err != nil { + queueLimiter, ok := q.limiterByQueue[queue] + if !ok { + // Create per-queue limiters lazily. 
+ queueLimiter = rate.NewLimiter( + rate.Limit(q.schedulingConfig.MaximumPerQueueSchedulingRate), + q.schedulingConfig.MaximumPerQueueSchedulingBurst, + ) + q.limiterByQueue[queue] = queueLimiter + } + if err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClassForPool[queue], queueLimiter); err != nil { return nil, err } } @@ -539,12 +556,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL "starting scheduling with total resources %s", schedulerobjects.ResourceList{Resources: totalCapacity}.CompactString(), ) - result, err := sch.Schedule( - ctxlogrus.ToContext( - ctx, - logrus.NewEntry(logrus.New()), - ), - ) + result, err := sch.Schedule(ctx) if err != nil { return nil, err } @@ -554,7 +566,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL if q.SchedulingContextRepository != nil { sctx.ClearJobSpecs() if err := q.SchedulingContextRepository.AddSchedulingContext(sctx); err != nil { - logging.WithStacktrace(log, err).Error("failed to store scheduling context") + logging.WithStacktrace(ctx, err).Error("failed to store scheduling context") } } @@ -629,7 +641,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL jobIdsToDelete := util.Map(jobsToDelete, func(job *api.Job) string { return job.Id }) log.Infof("deleting preempted jobs: %v", jobIdsToDelete) if deletionResult, err := q.jobRepository.DeleteJobs(jobsToDelete); err != nil { - logging.WithStacktrace(log, err).Error("failed to delete preempted jobs from Redis") + logging.WithStacktrace(ctx, err).Error("failed to delete preempted jobs from Redis") } else { deleteErrorByJobId := armadamaps.MapKeys(deletionResult, func(job *api.Job) string { return job.Id }) for jobId := range preemptedApiJobsById { @@ -692,7 +704,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL } } if err := q.usageRepository.UpdateClusterQueueResourceUsage(req.ClusterId, currentExecutorReport); err != nil { - logging.WithStacktrace(log, err).Errorf("failed to update cluster usage") + logging.WithStacktrace(ctx, err).Errorf("failed to update cluster usage") } allocatedByQueueAndPriorityClassForPool = q.aggregateAllocationAcrossExecutor(reportsByExecutor, req.Pool) @@ -716,7 +728,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL } node, err := nodeDb.GetNode(nodeId) if err != nil { - logging.WithStacktrace(log, err).Warnf("failed to set node id selector on job %s: node with id %s not found", apiJob.Id, nodeId) + logging.WithStacktrace(ctx, err).Warnf("failed to set node id selector on job %s: node with id %s not found", apiJob.Id, nodeId) continue } v := node.Labels[q.schedulingConfig.Preemption.NodeIdLabel] @@ -752,7 +764,7 @@ func (q *AggregatedQueueServer) getJobs(ctx context.Context, req *api.StreamingL } node, err := nodeDb.GetNode(nodeId) if err != nil { - logging.WithStacktrace(log, err).Warnf("failed to set node name on job %s: node with id %s not found", apiJob.Id, nodeId) + logging.WithStacktrace(ctx, err).Warnf("failed to set node name on job %s: node with id %s not found", apiJob.Id, nodeId) continue } podSpec.NodeName = node.Name @@ -861,22 +873,23 @@ func (q *AggregatedQueueServer) decompressJobOwnershipGroups(jobs []*api.Job) er } func (q *AggregatedQueueServer) decompressOwnershipGroups(compressedOwnershipGroups []byte) ([]string, error) { - decompressor, err := q.decompressorPool.BorrowObject(context.Background()) + decompressor, err := 
q.decompressorPool.BorrowObject(armadacontext.Background()) if err != nil { return nil, fmt.Errorf("failed to borrow decompressior because %s", err) } - defer func(decompressorPool *pool.ObjectPool, ctx context.Context, object interface{}) { + defer func(decompressorPool *pool.ObjectPool, ctx *armadacontext.Context, object interface{}) { err := decompressorPool.ReturnObject(ctx, object) if err != nil { log.WithError(err).Errorf("Error returning decompressorPool to pool") } - }(q.decompressorPool, context.Background(), decompressor) + }(q.decompressorPool, armadacontext.Background(), decompressor) return compress.DecompressStringArray(compressedOwnershipGroups, decompressor.(compress.Decompressor)) } -func (q *AggregatedQueueServer) RenewLease(ctx context.Context, request *api.RenewLeaseRequest) (*api.IdList, error) { +func (q *AggregatedQueueServer) RenewLease(grpcCtx context.Context, request *api.RenewLeaseRequest) (*api.IdList, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if err := checkPermission(q.permissions, ctx, permissions.ExecuteJobs); err != nil { return nil, status.Errorf(codes.PermissionDenied, err.Error()) } @@ -884,7 +897,8 @@ func (q *AggregatedQueueServer) RenewLease(ctx context.Context, request *api.Ren return &api.IdList{Ids: renewed}, e } -func (q *AggregatedQueueServer) ReturnLease(ctx context.Context, request *api.ReturnLeaseRequest) (*types.Empty, error) { +func (q *AggregatedQueueServer) ReturnLease(grpcCtx context.Context, request *api.ReturnLeaseRequest) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if err := checkPermission(q.permissions, ctx, permissions.ExecuteJobs); err != nil { return nil, status.Errorf(codes.PermissionDenied, err.Error()) } @@ -983,7 +997,8 @@ func (q *AggregatedQueueServer) addAvoidNodeAffinity( return res[0].Error } -func (q *AggregatedQueueServer) ReportDone(ctx context.Context, idList *api.IdList) (*api.IdList, error) { +func (q *AggregatedQueueServer) ReportDone(grpcCtx context.Context, idList *api.IdList) (*api.IdList, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if err := checkPermission(q.permissions, ctx, permissions.ExecuteJobs); err != nil { return nil, status.Errorf(codes.PermissionDenied, "[ReportDone] error: %s", err) } @@ -1008,7 +1023,7 @@ func (q *AggregatedQueueServer) ReportDone(ctx context.Context, idList *api.IdLi return &api.IdList{Ids: cleanedIds}, returnedError } -func (q *AggregatedQueueServer) reportLeaseReturned(ctx context.Context, leaseReturnRequest *api.ReturnLeaseRequest) error { +func (q *AggregatedQueueServer) reportLeaseReturned(ctx *armadacontext.Context, leaseReturnRequest *api.ReturnLeaseRequest) error { job, err := q.getJobById(leaseReturnRequest.JobId) if err != nil { return err diff --git a/internal/armada/server/lease_test.go b/internal/armada/server/lease_test.go index 7f3f8470491..554282c546a 100644 --- a/internal/armada/server/lease_test.go +++ b/internal/armada/server/lease_test.go @@ -1,7 +1,6 @@ package server import ( - "context" "fmt" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client/queue" @@ -25,7 +25,7 @@ func TestAggregatedQueueServer_ReturnLeaseCallsRepositoryMethod(t *testing.T) { _, addJobsErr := 
mockJobRepository.AddJobs([]*api.Job{job}) assert.Nil(t, addJobsErr) - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, }) @@ -54,7 +54,7 @@ func TestAggregatedQueueServer_ReturnLeaseCallsSendsJobLeaseReturnedEvent(t *tes _, addJobsErr := mockJobRepository.AddJobs([]*api.Job{job}) assert.Nil(t, addJobsErr) - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, Reason: reason, @@ -84,7 +84,7 @@ func TestAggregatedQueueServer_ReturningLeaseMoreThanMaxRetriesDeletesJob(t *tes assert.Nil(t, addJobsErr) for i := 0; i < maxRetries; i++ { - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, JobRunAttempted: true, @@ -96,7 +96,7 @@ func TestAggregatedQueueServer_ReturningLeaseMoreThanMaxRetriesDeletesJob(t *tes assert.Equal(t, jobId, mockJobRepository.returnLeaseArg2) } - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, }) @@ -125,7 +125,7 @@ func TestAggregatedQueueServer_ReturningLeaseMoreThanMaxRetriesSendsJobFailedEve assert.Nil(t, addJobsErr) for i := 0; i < maxRetries; i++ { - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, JobRunAttempted: true, @@ -136,7 +136,7 @@ func TestAggregatedQueueServer_ReturningLeaseMoreThanMaxRetriesSendsJobFailedEve fakeEventStore.events = []*api.EventMessage{} } - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, }) @@ -169,7 +169,7 @@ func TestAggregatedQueueServer_ReturningLease_IncrementsRetries(t *testing.T) { assert.Nil(t, addJobsErr) // Does not count towards retries if JobRunAttempted is false - _, err := aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err := aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, JobRunAttempted: false, @@ -180,7 +180,7 @@ func TestAggregatedQueueServer_ReturningLease_IncrementsRetries(t *testing.T) { assert.Equal(t, 0, numberOfRetries) // Does count towards reties if JobRunAttempted is true - _, err = aggregatedQueueClient.ReturnLease(context.TODO(), &api.ReturnLeaseRequest{ + _, err = aggregatedQueueClient.ReturnLease(armadacontext.TODO(), &api.ReturnLeaseRequest{ ClusterId: clusterId, JobId: jobId, JobRunAttempted: true, @@ -452,7 +452,7 @@ type fakeEventStore struct { events []*api.EventMessage } -func (es *fakeEventStore) ReportEvents(_ context.Context, message []*api.EventMessage) error { +func (es *fakeEventStore) ReportEvents(_ *armadacontext.Context, message []*api.EventMessage) error { es.events = append(es.events, message...) 
return nil } @@ -469,14 +469,14 @@ func (repo *fakeSchedulingInfoRepository) UpdateClusterSchedulingInfo(report *ap type fakeExecutorRepository struct{} -func (f fakeExecutorRepository) GetExecutors(ctx context.Context) ([]*schedulerobjects.Executor, error) { +func (f fakeExecutorRepository) GetExecutors(ctx *armadacontext.Context) ([]*schedulerobjects.Executor, error) { return nil, nil } -func (f fakeExecutorRepository) GetLastUpdateTimes(ctx context.Context) (map[string]time.Time, error) { +func (f fakeExecutorRepository) GetLastUpdateTimes(ctx *armadacontext.Context) (map[string]time.Time, error) { return nil, nil } -func (f fakeExecutorRepository) StoreExecutor(ctx context.Context, executor *schedulerobjects.Executor) error { +func (f fakeExecutorRepository) StoreExecutor(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error { return nil } diff --git a/internal/armada/server/reporting.go b/internal/armada/server/reporting.go index d3a5eae180b..73afc3d3c17 100644 --- a/internal/armada/server/reporting.go +++ b/internal/armada/server/reporting.go @@ -1,13 +1,13 @@ package server import ( - "context" "fmt" "time" log "github.com/sirupsen/logrus" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api" ) @@ -27,7 +27,7 @@ func reportQueued(repository repository.EventStore, jobs []*api.Job) error { events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportQueued] error reporting events: %w", err) } @@ -52,7 +52,7 @@ func reportDuplicateDetected(repository repository.EventStore, results []*reposi events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportDuplicateDetected] error reporting events: %w", err) } @@ -77,7 +77,7 @@ func reportSubmitted(repository repository.EventStore, jobs []*api.Job) error { events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportSubmitted] error reporting events: %w", err) } @@ -106,7 +106,7 @@ func reportJobsLeased(repository repository.EventStore, jobs []*api.Job, cluster } } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { err = fmt.Errorf("[reportJobsLeased] error reporting events: %w", err) log.Error(err) @@ -128,7 +128,7 @@ func reportJobLeaseReturned(repository repository.EventStore, job *api.Job, leas return fmt.Errorf("error wrapping event: %w", err) } - err = repository.ReportEvents(context.Background(), []*api.EventMessage{event}) + err = repository.ReportEvents(armadacontext.Background(), []*api.EventMessage{event}) if err != nil { return fmt.Errorf("error reporting lease returned event: %w", err) } @@ -154,7 +154,7 @@ func reportJobsCancelling(repository repository.EventStore, requestorName string events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportJobsCancelling] error reporting events: %w", err) } @@ -180,7 +180,7 @@ func 
reportJobsReprioritizing(repository repository.EventStore, requestorName st events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportJobsReprioritizing] error reporting events: %w", err) } @@ -206,7 +206,7 @@ func reportJobsReprioritized(repository repository.EventStore, requestorName str events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportJobsReprioritized] error reporting events: %w", err) } @@ -232,7 +232,7 @@ func reportJobsUpdated(repository repository.EventStore, requestorName string, j events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportJobsUpdated] error reporting events: %w", err) } @@ -259,7 +259,7 @@ func reportJobsCancelled(repository repository.EventStore, requestorName string, events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportJobsCancelled] error reporting events: %w", err) } @@ -293,7 +293,7 @@ func reportFailed(repository repository.EventStore, clusterId string, jobFailure events = append(events, event) } - err := repository.ReportEvents(context.Background(), events) + err := repository.ReportEvents(armadacontext.Background(), events) if err != nil { return fmt.Errorf("[reportFailed] error reporting events: %w", err) } diff --git a/internal/armada/server/submit.go b/internal/armada/server/submit.go index 189cd2b62f6..b43e818d248 100644 --- a/internal/armada/server/submit.go +++ b/internal/armada/server/submit.go @@ -20,6 +20,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/repository" servervalidation "github.com/armadaproject/armada/internal/armada/validation" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/compress" @@ -77,7 +78,7 @@ func NewSubmitServer( NumTestsPerEvictionRun: 10, } - compressorPool := pool.NewObjectPool(context.Background(), pool.NewPooledObjectFactorySimple( + compressorPool := pool.NewObjectPool(armadacontext.Background(), pool.NewPooledObjectFactorySimple( func(context.Context) (interface{}, error) { return compress.NewZlibCompressor(512) }), &poolConfig) @@ -100,7 +101,8 @@ func (server *SubmitServer) Health(ctx context.Context, _ *types.Empty) (*api.He return &api.HealthCheckResponse{Status: api.HealthCheckResponse_SERVING}, nil } -func (server *SubmitServer) GetQueueInfo(ctx context.Context, req *api.QueueInfoRequest) (*api.QueueInfo, error) { +func (server *SubmitServer) GetQueueInfo(grpcCtx context.Context, req *api.QueueInfoRequest) (*api.QueueInfo, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) q, err := server.queueRepository.GetQueue(req.Name) var expected *repository.ErrQueueNotFound if errors.Is(err, expected) { @@ -136,7 +138,7 @@ func (server *SubmitServer) GetQueueInfo(ctx context.Context, req *api.QueueInfo }, nil } -func (server *SubmitServer) 
GetQueue(ctx context.Context, req *api.QueueGetRequest) (*api.Queue, error) { +func (server *SubmitServer) GetQueue(grpcCtx context.Context, req *api.QueueGetRequest) (*api.Queue, error) { queue, err := server.queueRepository.GetQueue(req.Name) var e *repository.ErrQueueNotFound if errors.As(err, &e) { @@ -147,7 +149,8 @@ func (server *SubmitServer) GetQueue(ctx context.Context, req *api.QueueGetReque return queue.ToAPI(), nil } -func (server *SubmitServer) CreateQueue(ctx context.Context, request *api.Queue) (*types.Empty, error) { +func (server *SubmitServer) CreateQueue(grpcCtx context.Context, request *api.Queue) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) err := checkPermission(server.permissions, ctx, permissions.CreateQueue) var ep *ErrUnauthorized if errors.As(err, &ep) { @@ -177,9 +180,9 @@ func (server *SubmitServer) CreateQueue(ctx context.Context, request *api.Queue) return &types.Empty{}, nil } -func (server *SubmitServer) CreateQueues(ctx context.Context, request *api.QueueList) (*api.BatchQueueCreateResponse, error) { +func (server *SubmitServer) CreateQueues(grpcCtx context.Context, request *api.QueueList) (*api.BatchQueueCreateResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) var failedQueues []*api.QueueCreateResponse - // Create a queue for each element of the request body and return the failures. for _, queue := range request.Queues { _, err := server.CreateQueue(ctx, queue) @@ -196,7 +199,8 @@ func (server *SubmitServer) CreateQueues(ctx context.Context, request *api.Queue }, nil } -func (server *SubmitServer) UpdateQueue(ctx context.Context, request *api.Queue) (*types.Empty, error) { +func (server *SubmitServer) UpdateQueue(grpcCtx context.Context, request *api.Queue) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) err := checkPermission(server.permissions, ctx, permissions.CreateQueue) var ep *ErrUnauthorized if errors.As(err, &ep) { @@ -221,7 +225,8 @@ func (server *SubmitServer) UpdateQueue(ctx context.Context, request *api.Queue) return &types.Empty{}, nil } -func (server *SubmitServer) UpdateQueues(ctx context.Context, request *api.QueueList) (*api.BatchQueueUpdateResponse, error) { +func (server *SubmitServer) UpdateQueues(grpcCtx context.Context, request *api.QueueList) (*api.BatchQueueUpdateResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) var failedQueues []*api.QueueUpdateResponse // Create a queue for each element of the request body and return the failures. 
@@ -240,7 +245,8 @@ func (server *SubmitServer) UpdateQueues(ctx context.Context, request *api.Queue }, nil } -func (server *SubmitServer) DeleteQueue(ctx context.Context, request *api.QueueDeleteRequest) (*types.Empty, error) { +func (server *SubmitServer) DeleteQueue(grpcCtx context.Context, request *api.QueueDeleteRequest) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) err := checkPermission(server.permissions, ctx, permissions.DeleteQueue) var ep *ErrUnauthorized if errors.As(err, &ep) { @@ -265,7 +271,8 @@ func (server *SubmitServer) DeleteQueue(ctx context.Context, request *api.QueueD return &types.Empty{}, nil } -func (server *SubmitServer) SubmitJobs(ctx context.Context, req *api.JobSubmitRequest) (*api.JobSubmitResponse, error) { +func (server *SubmitServer) SubmitJobs(grpcCtx context.Context, req *api.JobSubmitRequest) (*api.JobSubmitResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) principal := authorization.GetPrincipal(ctx) jobs, responseItems, e := server.createJobs(req, principal.GetName(), principal.GetGroupNames()) @@ -443,7 +450,8 @@ func (server *SubmitServer) countQueuedJobs(q queue.Queue) (int64, error) { // CancelJobs cancels jobs identified by the request. // If the request contains a job ID, only the job with that ID is cancelled. // If the request contains a queue name and a job set ID, all jobs matching those are cancelled. -func (server *SubmitServer) CancelJobs(ctx context.Context, request *api.JobCancelRequest) (*api.CancellationResult, error) { +func (server *SubmitServer) CancelJobs(grpcCtx context.Context, request *api.JobCancelRequest) (*api.CancellationResult, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if request.JobId != "" { return server.cancelJobsById(ctx, request.JobId, request.Reason) } else if request.JobSetId != "" && request.Queue != "" { @@ -452,7 +460,8 @@ func (server *SubmitServer) CancelJobs(ctx context.Context, request *api.JobCanc return nil, status.Errorf(codes.InvalidArgument, "[CancelJobs] specify either job ID or both queue name and job set ID") } -func (server *SubmitServer) CancelJobSet(ctx context.Context, request *api.JobSetCancelRequest) (*types.Empty, error) { +func (server *SubmitServer) CancelJobSet(grpcCtx context.Context, request *api.JobSetCancelRequest) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) err := servervalidation.ValidateJobSetFilter(request.Filter) if err != nil { return nil, err @@ -483,7 +492,7 @@ func createJobSetFilter(filter *api.JobSetFilter) *repository.JobSetFilter { } // cancels a job with a given ID -func (server *SubmitServer) cancelJobsById(ctx context.Context, jobId string, reason string) (*api.CancellationResult, error) { +func (server *SubmitServer) cancelJobsById(ctx *armadacontext.Context, jobId string, reason string) (*api.CancellationResult, error) { jobs, err := server.jobRepository.GetExistingJobsByIds([]string{jobId}) if err != nil { return nil, status.Errorf(codes.Unavailable, "[cancelJobsById] error getting job with ID %s: %s", jobId, err) @@ -505,7 +514,7 @@ func (server *SubmitServer) cancelJobsById(ctx context.Context, jobId string, re // cancels all jobs part of a particular job set and queue func (server *SubmitServer) cancelJobsByQueueAndSet( - ctx context.Context, + ctx *armadacontext.Context, queue string, jobSetId string, filter *repository.JobSetFilter, @@ -548,7 +557,7 @@ func (server *SubmitServer) cancelJobsByQueueAndSet( return &api.CancellationResult{CancelledIds: cancelledIds}, nil } -func (server 
*SubmitServer) cancelJobs(ctx context.Context, jobs []*api.Job, reason string) (*api.CancellationResult, error) { +func (server *SubmitServer) cancelJobs(ctx *armadacontext.Context, jobs []*api.Job, reason string) (*api.CancellationResult, error) { principal := authorization.GetPrincipal(ctx) err := server.checkCancelPerms(ctx, jobs) @@ -590,7 +599,7 @@ func (server *SubmitServer) cancelJobs(ctx context.Context, jobs []*api.Job, rea return &api.CancellationResult{CancelledIds: cancelledIds}, nil } -func (server *SubmitServer) checkCancelPerms(ctx context.Context, jobs []*api.Job) error { +func (server *SubmitServer) checkCancelPerms(ctx *armadacontext.Context, jobs []*api.Job) error { queueNames := make(map[string]struct{}) for _, job := range jobs { queueNames[job.Queue] = struct{}{} @@ -620,7 +629,8 @@ func (server *SubmitServer) checkCancelPerms(ctx context.Context, jobs []*api.Jo // ReprioritizeJobs updates the priority of one of more jobs. // Returns a map from job ID to any error (or nil if the call succeeded). -func (server *SubmitServer) ReprioritizeJobs(ctx context.Context, request *api.JobReprioritizeRequest) (*api.JobReprioritizeResponse, error) { +func (server *SubmitServer) ReprioritizeJobs(grpcCtx context.Context, request *api.JobReprioritizeRequest) (*api.JobReprioritizeResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) var jobs []*api.Job if len(request.JobIds) > 0 { existingJobs, err := server.jobRepository.GetExistingJobsByIds(request.JobIds) @@ -713,7 +723,7 @@ func (server *SubmitServer) reportReprioritizedJobEvents(reprioritizedJobs []*ap return nil } -func (server *SubmitServer) checkReprioritizePerms(ctx context.Context, jobs []*api.Job) error { +func (server *SubmitServer) checkReprioritizePerms(ctx *armadacontext.Context, jobs []*api.Job) error { queueNames := make(map[string]struct{}) for _, job := range jobs { queueNames[job.Queue] = struct{}{} @@ -741,7 +751,7 @@ func (server *SubmitServer) checkReprioritizePerms(ctx context.Context, jobs []* return nil } -func (server *SubmitServer) getQueueOrCreate(ctx context.Context, queueName string) (*queue.Queue, error) { +func (server *SubmitServer) getQueueOrCreate(ctx *armadacontext.Context, queueName string) (*queue.Queue, error) { q, e := server.queueRepository.GetQueue(queueName) if e == nil { return &q, nil @@ -792,16 +802,16 @@ func (server *SubmitServer) createJobs(request *api.JobSubmitRequest, owner stri func (server *SubmitServer) createJobsObjects(request *api.JobSubmitRequest, owner string, ownershipGroups []string, getTime func() time.Time, getUlid func() string, ) ([]*api.Job, []*api.JobSubmitResponseItem, error) { - compressor, err := server.compressorPool.BorrowObject(context.Background()) + compressor, err := server.compressorPool.BorrowObject(armadacontext.Background()) if err != nil { return nil, nil, err } - defer func(compressorPool *pool.ObjectPool, ctx context.Context, object interface{}) { + defer func(compressorPool *pool.ObjectPool, ctx *armadacontext.Context, object interface{}) { err := compressorPool.ReturnObject(ctx, object) if err != nil { log.WithError(err).Errorf("Error returning compressor to pool") } - }(server.compressorPool, context.Background(), compressor) + }(server.compressorPool, armadacontext.Background(), compressor) compressedOwnershipGroups, err := compress.CompressStringArray(ownershipGroups, compressor.(compress.Compressor)) if err != nil { return nil, nil, err @@ -892,6 +902,7 @@ func (server *SubmitServer) createJobsObjects(request *api.JobSubmitRequest, own 
Owner: owner, QueueOwnershipUserGroups: nil, CompressedQueueOwnershipUserGroups: compressedOwnershipGroups, + QueueTtlSeconds: item.QueueTtlSeconds, } jobs = append(jobs, j) } diff --git a/internal/armada/server/submit_from_log.go b/internal/armada/server/submit_from_log.go index 13acbf9904a..995e9785d5b 100644 --- a/internal/armada/server/submit_from_log.go +++ b/internal/armada/server/submit_from_log.go @@ -7,19 +7,17 @@ import ( "time" "github.com/apache/pulsar-client-go/pulsar" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/hashicorp/go-multierror" pool "github.com/jolestar/go-commons-pool" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/logging" - "github.com/armadaproject/armada/internal/common/pulsarutils/pulsarrequestid" - "github.com/armadaproject/armada/internal/common/requestid" "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/pkg/api" @@ -38,7 +36,7 @@ type SubmitFromLog struct { } // Run the service that reads from Pulsar and updates Armada until the provided context is cancelled. -func (srv *SubmitFromLog) Run(ctx context.Context) error { +func (srv *SubmitFromLog) Run(ctx *armadacontext.Context) error { // Get the configured logger, or the standard logger if none is provided. log := srv.getLogger() log.Info("service started") @@ -95,7 +93,7 @@ func (srv *SubmitFromLog) Run(ctx context.Context) error { default: // Get a message from Pulsar, which consists of a sequence of events (i.e., state transitions). - ctxWithTimeout, cancel := context.WithTimeout(ctx, 10*time.Second) + ctxWithTimeout, cancel := armadacontext.WithTimeout(ctx, 10*time.Second) msg, err := srv.Consumer.Receive(ctxWithTimeout) cancel() if errors.Is(err, context.DeadlineExceeded) { @@ -121,29 +119,18 @@ func (srv *SubmitFromLog) Run(ctx context.Context) error { lastPublishTime = msg.PublishTime() numReceived++ - // Incoming gRPC requests are annotated with a unique id, - // which is included with the corresponding Pulsar message. - requestId := pulsarrequestid.FromMessageOrMissing(msg) - - // Put the requestId into a message-specific context and logger, - // which are passed on to sub-functions. - messageCtx, ok := requestid.AddToIncomingContext(ctx, requestId) - if !ok { - messageCtx = ctx - } - messageLogger := log.WithFields(logrus.Fields{"messageId": msg.ID(), requestid.MetadataKey: requestId}) - ctxWithLogger := ctxlogrus.ToContext(messageCtx, messageLogger) + ctxWithLogger := armadacontext.WithLogField(ctx, "messageId", msg.ID()) // Unmarshal and validate the message. sequence, err := eventutil.UnmarshalEventSequence(ctxWithLogger, msg.Payload()) if err != nil { srv.ack(ctx, msg) - logging.WithStacktrace(messageLogger, err).Warnf("processing message failed; ignoring") + logging.WithStacktrace(ctxWithLogger, err).Warnf("processing message failed; ignoring") numErrored++ break } - messageLogger.WithField("numEvents", len(sequence.Events)).Info("processing sequence") + ctxWithLogger.WithField("numEvents", len(sequence.Events)).Info("processing sequence") // TODO: Improve retry logic. 
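[Editor's aside, not part of the diff: the consume-loop change above drops the requestid/ctxlogrus plumbing and instead attaches the Pulsar message id straight to the context's logger, so downstream code logs through the context itself. A minimal sketch of that downstream pattern, assuming the armadacontext package introduced later in this PR; the helper name logNoProgress is invented for illustration only.]

```go
package example

import (
	"github.com/sirupsen/logrus"

	"github.com/armadaproject/armada/internal/common/armadacontext"
)

// logNoProgress is a hypothetical helper showing how code below Run now logs:
// the *armadacontext.Context doubles as a logrus.FieldLogger, so there is no
// separate ctxlogrus.Extract step before emitting a log line.
func logNoProgress(ctx *armadacontext.Context, lower, upper int) {
	ctx.WithFields(logrus.Fields{"lowerIndex": lower, "upperIndex": upper}).Info("made no progress")
}
```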
srv.ProcessSequence(ctxWithLogger, sequence) srv.ack(ctx, msg) @@ -155,9 +142,7 @@ func (srv *SubmitFromLog) Run(ctx context.Context) error { // For efficiency, we may process several events at a time. // To maintain ordering, we only do so for subsequences of consecutive events of equal type. // The returned bool indicates if the corresponding Pulsar message should be ack'd or not. -func (srv *SubmitFromLog) ProcessSequence(ctx context.Context, sequence *armadaevents.EventSequence) bool { - log := ctxlogrus.Extract(ctx) - +func (srv *SubmitFromLog) ProcessSequence(ctx *armadacontext.Context, sequence *armadaevents.EventSequence) bool { // Sub-functions should always increment the events index unless they experience a transient error. // However, if a permanent error is mis-categorised as transient, we may get stuck forever. // To avoid that issue, we return immediately if timeout time has passed @@ -170,11 +155,11 @@ func (srv *SubmitFromLog) ProcessSequence(ctx context.Context, sequence *armadae for i < len(sequence.Events) && time.Since(lastProgress) < timeout { j, err := srv.ProcessSubSequence(ctx, i, sequence) if err != nil { - logging.WithStacktrace(log, err).WithFields(logrus.Fields{"lowerIndex": i, "upperIndex": j}).Warnf("processing subsequence failed; ignoring") + logging.WithStacktrace(ctx, err).WithFields(logrus.Fields{"lowerIndex": i, "upperIndex": j}).Warnf("processing subsequence failed; ignoring") } if j == i { - log.WithFields(logrus.Fields{"lowerIndex": i, "upperIndex": j}).Info("made no progress") + ctx.WithFields(logrus.Fields{"lowerIndex": i, "upperIndex": j}).Info("made no progress") // We should only get here if a transient error occurs. // Sleep for a bit before retrying. @@ -200,7 +185,7 @@ func (srv *SubmitFromLog) ProcessSequence(ctx context.Context, sequence *armadae // Events are processed by calling into the embedded srv.SubmitServer. // // Not all events are handled by this processor since the legacy scheduler writes some transitions directly to the db. -func (srv *SubmitFromLog) ProcessSubSequence(ctx context.Context, i int, sequence *armadaevents.EventSequence) (j int, err error) { +func (srv *SubmitFromLog) ProcessSubSequence(ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) (j int, err error) { j = i // Initially, the next event to be processed is i. if i < 0 || i >= len(sequence.Events) { err = &armadaerrors.ErrInvalidArgument{ @@ -272,7 +257,7 @@ func (srv *SubmitFromLog) ProcessSubSequence(ctx context.Context, i int, sequenc // collectJobSubmitEvents (and the corresponding functions for other types below) // return a slice of events starting at index i in the sequence with equal type. 
-func collectJobSubmitEvents(ctx context.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.SubmitJob { +func collectJobSubmitEvents(ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.SubmitJob { result := make([]*armadaevents.SubmitJob, 0) for j := i; j < len(sequence.Events); j++ { if e, ok := sequence.Events[j].Event.(*armadaevents.EventSequence_Event_SubmitJob); ok { @@ -284,7 +269,7 @@ func collectJobSubmitEvents(ctx context.Context, i int, sequence *armadaevents.E return result } -func collectCancelJobEvents(ctx context.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.CancelJob { +func collectCancelJobEvents(ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.CancelJob { result := make([]*armadaevents.CancelJob, 0) for j := i; j < len(sequence.Events); j++ { if e, ok := sequence.Events[j].Event.(*armadaevents.EventSequence_Event_CancelJob); ok { @@ -296,7 +281,7 @@ func collectCancelJobEvents(ctx context.Context, i int, sequence *armadaevents.E return result } -func collectCancelJobSetEvents(ctx context.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.CancelJobSet { +func collectCancelJobSetEvents(ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.CancelJobSet { result := make([]*armadaevents.CancelJobSet, 0) for j := i; j < len(sequence.Events); j++ { if e, ok := sequence.Events[j].Event.(*armadaevents.EventSequence_Event_CancelJobSet); ok { @@ -308,7 +293,7 @@ func collectCancelJobSetEvents(ctx context.Context, i int, sequence *armadaevent return result } -func collectReprioritiseJobEvents(ctx context.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.ReprioritiseJob { +func collectReprioritiseJobEvents(ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.ReprioritiseJob { result := make([]*armadaevents.ReprioritiseJob, 0) for j := i; j < len(sequence.Events); j++ { if e, ok := sequence.Events[j].Event.(*armadaevents.EventSequence_Event_ReprioritiseJob); ok { @@ -320,7 +305,7 @@ func collectReprioritiseJobEvents(ctx context.Context, i int, sequence *armadaev return result } -func collectReprioritiseJobSetEvents(ctx context.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.ReprioritiseJobSet { +func collectReprioritiseJobSetEvents(ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.ReprioritiseJobSet { result := make([]*armadaevents.ReprioritiseJobSet, 0) for j := i; j < len(sequence.Events); j++ { if e, ok := sequence.Events[j].Event.(*armadaevents.EventSequence_Event_ReprioritiseJobSet); ok { @@ -332,7 +317,7 @@ func collectReprioritiseJobSetEvents(ctx context.Context, i int, sequence *armad return result } -func collectEvents[T any](ctx context.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.EventSequence_Event { +func collectEvents[T any](ctx *armadacontext.Context, i int, sequence *armadaevents.EventSequence) []*armadaevents.EventSequence_Event { events := make([]*armadaevents.EventSequence_Event, 0) for j := i; j < len(sequence.Events); j++ { if _, ok := sequence.Events[j].Event.(T); ok { @@ -359,7 +344,7 @@ func (srv *SubmitFromLog) getLogger() *logrus.Entry { // Specifically, events are not processed if writing to the database results in a network-related error. // For any other error, the jobs are marked as failed and the events are considered to have been processed. 
func (srv *SubmitFromLog) SubmitJobs( - ctx context.Context, + ctx *armadacontext.Context, userId string, groups []string, queueName string, @@ -376,16 +361,16 @@ func (srv *SubmitFromLog) SubmitJobs( } log := srv.getLogger() - compressor, err := srv.SubmitServer.compressorPool.BorrowObject(context.Background()) + compressor, err := srv.SubmitServer.compressorPool.BorrowObject(armadacontext.Background()) if err != nil { return false, err } - defer func(compressorPool *pool.ObjectPool, ctx context.Context, object interface{}) { + defer func(compressorPool *pool.ObjectPool, ctx *armadacontext.Context, object interface{}) { err := compressorPool.ReturnObject(ctx, object) if err != nil { log.WithError(err).Errorf("Error returning compressor to pool") } - }(srv.SubmitServer.compressorPool, context.Background(), compressor) + }(srv.SubmitServer.compressorPool, armadacontext.Background(), compressor) compressedOwnershipGroups, err := compress.CompressStringArray(groups, compressor.(compress.Compressor)) if err != nil { @@ -455,7 +440,7 @@ type CancelJobPayload struct { } // CancelJobs cancels all jobs specified by the provided events in a single operation. -func (srv *SubmitFromLog) CancelJobs(ctx context.Context, userId string, es []*armadaevents.CancelJob) (bool, error) { +func (srv *SubmitFromLog) CancelJobs(ctx *armadacontext.Context, userId string, es []*armadaevents.CancelJob) (bool, error) { cancelJobPayloads := make([]*CancelJobPayload, len(es)) for i, e := range es { id, err := armadaevents.UlidStringFromProtoUuid(e.JobId) @@ -475,7 +460,7 @@ func (srv *SubmitFromLog) CancelJobs(ctx context.Context, userId string, es []*a // Because event sequences are specific to queue and job set, all CancelJobSet events in a sequence are equivalent, // and we only need to call CancelJobSet once. func (srv *SubmitFromLog) CancelJobSets( - ctx context.Context, + ctx *armadacontext.Context, userId string, queueName string, jobSetName string, @@ -489,7 +474,7 @@ func (srv *SubmitFromLog) CancelJobSets( return srv.CancelJobSet(ctx, userId, queueName, jobSetName, reason) } -func (srv *SubmitFromLog) CancelJobSet(ctx context.Context, userId string, queueName string, jobSetName string, reason string) (bool, error) { +func (srv *SubmitFromLog) CancelJobSet(ctx *armadacontext.Context, userId string, queueName string, jobSetName string, reason string) (bool, error) { jobIds, err := srv.SubmitServer.jobRepository.GetActiveJobIds(queueName, jobSetName) if armadaerrors.IsNetworkError(err) { return false, err @@ -505,7 +490,7 @@ func (srv *SubmitFromLog) CancelJobSet(ctx context.Context, userId string, queue return srv.BatchedCancelJobsById(ctx, userId, cancelJobPayloads) } -func (srv *SubmitFromLog) BatchedCancelJobsById(ctx context.Context, userId string, cancelJobPayloads []*CancelJobPayload) (bool, error) { +func (srv *SubmitFromLog) BatchedCancelJobsById(ctx *armadacontext.Context, userId string, cancelJobPayloads []*CancelJobPayload) (bool, error) { // Split IDs into batches and process one batch at a time. // To reduce the number of jobs stored in memory. // @@ -538,7 +523,7 @@ type CancelledJobPayload struct { } // CancelJobsById cancels all jobs with the specified ids. 
-func (srv *SubmitFromLog) CancelJobsById(ctx context.Context, userId string, cancelJobPayloads []*CancelJobPayload) ([]string, error) { +func (srv *SubmitFromLog) CancelJobsById(ctx *armadacontext.Context, userId string, cancelJobPayloads []*CancelJobPayload) ([]string, error) { jobIdReasonMap := make(map[string]string) jobIds := util.Map(cancelJobPayloads, func(payload *CancelJobPayload) string { jobIdReasonMap[payload.JobId] = payload.Reason @@ -588,7 +573,7 @@ func (srv *SubmitFromLog) CancelJobsById(ctx context.Context, userId string, can } // ReprioritizeJobs updates the priority of one or more jobs. -func (srv *SubmitFromLog) ReprioritizeJobs(ctx context.Context, userId string, es []*armadaevents.ReprioritiseJob) (bool, error) { +func (srv *SubmitFromLog) ReprioritizeJobs(ctx *armadacontext.Context, userId string, es []*armadaevents.ReprioritiseJob) (bool, error) { if len(es) == 0 { return true, nil } @@ -635,7 +620,7 @@ func (srv *SubmitFromLog) ReprioritizeJobs(ctx context.Context, userId string, e return true, nil } -func (srv *SubmitFromLog) DeleteFailedJobs(ctx context.Context, es []*armadaevents.EventSequence_Event) (bool, error) { +func (srv *SubmitFromLog) DeleteFailedJobs(ctx *armadacontext.Context, es []*armadaevents.EventSequence_Event) (bool, error) { jobIdsToDelete := make([]string, 0, len(es)) for _, event := range es { jobErrors := event.GetJobErrors() @@ -664,7 +649,7 @@ func (srv *SubmitFromLog) DeleteFailedJobs(ctx context.Context, es []*armadaeven } // UpdateJobStartTimes records the start time (in Redis) of one or more jobs. -func (srv *SubmitFromLog) UpdateJobStartTimes(ctx context.Context, es []*armadaevents.EventSequence_Event) (bool, error) { +func (srv *SubmitFromLog) UpdateJobStartTimes(ctx *armadacontext.Context, es []*armadaevents.EventSequence_Event) (bool, error) { jobStartsInfos := make([]*repository.JobStartInfo, 0, len(es)) for _, event := range es { jobRun := event.GetJobRunRunning() @@ -713,7 +698,7 @@ func (srv *SubmitFromLog) UpdateJobStartTimes(ctx context.Context, es []*armadae // Since repeating this operation is safe (setting the priority is idempotent), // the bool indicating if events were processed is set to false if any job set failed.
func (srv *SubmitFromLog) ReprioritizeJobSets( - ctx context.Context, + ctx *armadacontext.Context, userId string, queueName string, jobSetName string, @@ -730,7 +715,7 @@ func (srv *SubmitFromLog) ReprioritizeJobSets( } func (srv *SubmitFromLog) ReprioritizeJobSet( - ctx context.Context, + ctx *armadacontext.Context, userId string, queueName string, jobSetName string, @@ -767,7 +752,7 @@ func (srv *SubmitFromLog) ReprioritizeJobSet( return true, nil } -func (srv *SubmitFromLog) ack(ctx context.Context, msg pulsar.Message) { +func (srv *SubmitFromLog) ack(ctx *armadacontext.Context, msg pulsar.Message) { util.RetryUntilSuccess( ctx, func() error { diff --git a/internal/armada/server/submit_from_log_test.go b/internal/armada/server/submit_from_log_test.go index c3479888d06..45368bfe7e2 100644 --- a/internal/armada/server/submit_from_log_test.go +++ b/internal/armada/server/submit_from_log_test.go @@ -1,13 +1,13 @@ package server import ( - ctx "context" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/ingest/testfixtures" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -42,7 +42,7 @@ func TestUpdateJobStartTimes(t *testing.T) { }, } - ok, err := s.UpdateJobStartTimes(ctx.Background(), events) + ok, err := s.UpdateJobStartTimes(armadacontext.Background(), events) assert.NoError(t, err) assert.True(t, ok) @@ -59,7 +59,7 @@ func TestUpdateJobStartTimes_NonExistentJob(t *testing.T) { jobRepository: jobRepo, }, } - ok, err := s.UpdateJobStartTimes(ctx.Background(), events) + ok, err := s.UpdateJobStartTimes(armadacontext.Background(), events) assert.Nil(t, err) assert.True(t, ok) @@ -75,7 +75,7 @@ func TestUpdateJobStartTimes_RedisError(t *testing.T) { jobRepository: jobRepo, }, } - ok, err := s.UpdateJobStartTimes(ctx.Background(), events) + ok, err := s.UpdateJobStartTimes(armadacontext.Background(), events) assert.Error(t, err) assert.False(t, ok) diff --git a/internal/armada/server/submit_to_log.go b/internal/armada/server/submit_to_log.go index 709152cbf04..13c5ac8d679 100644 --- a/internal/armada/server/submit_to_log.go +++ b/internal/armada/server/submit_to_log.go @@ -20,6 +20,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/armada/validation" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" @@ -68,7 +69,8 @@ type PulsarSubmitServer struct { IgnoreJobSubmitChecks bool } -func (srv *PulsarSubmitServer) SubmitJobs(ctx context.Context, req *api.JobSubmitRequest) (*api.JobSubmitResponse, error) { +func (srv *PulsarSubmitServer) SubmitJobs(grpcCtx context.Context, req *api.JobSubmitRequest) (*api.JobSubmitResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) userId, groups, err := srv.Authorize(ctx, req.Queue, permissions.SubmitAnyJobs, queue.PermissionVerbSubmit) if err != nil { return nil, err @@ -257,7 +259,9 @@ func (srv *PulsarSubmitServer) SubmitJobs(ctx context.Context, req *api.JobSubmi return &api.JobSubmitResponse{JobResponseItems: responses}, nil } -func (srv *PulsarSubmitServer) CancelJobs(ctx context.Context, req *api.JobCancelRequest) 
(*api.CancellationResult, error) { +func (srv *PulsarSubmitServer) CancelJobs(grpcCtx context.Context, req *api.JobCancelRequest) (*api.CancellationResult, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) + // separate code path for multiple jobs if len(req.JobIds) > 0 { return srv.cancelJobsByIdsQueueJobset(ctx, req.JobIds, req.Queue, req.JobSetId, req.Reason) @@ -345,7 +349,8 @@ func (srv *PulsarSubmitServer) CancelJobs(ctx context.Context, req *api.JobCance } // Assumes all Job IDs are in the queue and job set provided -func (srv *PulsarSubmitServer) cancelJobsByIdsQueueJobset(ctx context.Context, jobIds []string, q, jobSet string, reason string) (*api.CancellationResult, error) { +func (srv *PulsarSubmitServer) cancelJobsByIdsQueueJobset(grpcCtx context.Context, jobIds []string, q, jobSet string, reason string) (*api.CancellationResult, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if q == "" { return nil, &armadaerrors.ErrInvalidArgument{ Name: "Queue", @@ -407,7 +412,8 @@ func eventSequenceForJobIds(jobIds []string, q, jobSet, userId string, groups [] return sequence, validIds } -func (srv *PulsarSubmitServer) CancelJobSet(ctx context.Context, req *api.JobSetCancelRequest) (*types.Empty, error) { +func (srv *PulsarSubmitServer) CancelJobSet(grpcCtx context.Context, req *api.JobSetCancelRequest) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if req.Queue == "" { return nil, &armadaerrors.ErrInvalidArgument{ Name: "Queue", @@ -509,7 +515,9 @@ func (srv *PulsarSubmitServer) CancelJobSet(ctx context.Context, req *api.JobSet return &types.Empty{}, err } -func (srv *PulsarSubmitServer) ReprioritizeJobs(ctx context.Context, req *api.JobReprioritizeRequest) (*api.JobReprioritizeResponse, error) { +func (srv *PulsarSubmitServer) ReprioritizeJobs(grpcCtx context.Context, req *api.JobReprioritizeRequest) (*api.JobReprioritizeResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) + // If either queue or jobSetId is missing, we get the job set and queue associated // with the first job id in the request. // @@ -629,7 +637,7 @@ func (srv *PulsarSubmitServer) ReprioritizeJobs(ctx context.Context, req *api.Jo // Checks that the user has either anyPerm (e.g., permissions.SubmitAnyJobs) or perm (e.g., PermissionVerbSubmit) for this queue. // Returns the userId and groups extracted from the context. func (srv *PulsarSubmitServer) Authorize( - ctx context.Context, + ctx *armadacontext.Context, queueName string, anyPerm permission.Permission, perm queue.PermissionVerb, @@ -711,7 +719,7 @@ func (srv *PulsarSubmitServer) GetQueueInfo(ctx context.Context, req *api.QueueI } // PublishToPulsar sends pulsar messages async -func (srv *PulsarSubmitServer) publishToPulsar(ctx context.Context, sequences []*armadaevents.EventSequence, scheduler schedulers.Scheduler) error { +func (srv *PulsarSubmitServer) publishToPulsar(ctx *armadacontext.Context, sequences []*armadaevents.EventSequence, scheduler schedulers.Scheduler) error { // Reduce the number of sequences to send to the minimum possible, // and then break up any sequences larger than srv.MaxAllowedMessageSize. sequences = eventutil.CompactEventSequences(sequences) @@ -731,7 +739,7 @@ func jobKey(j *api.Job) string { // getOriginalJobIds returns the mapping between jobId and originalJobId. If the job (or more specifically the clientId // on the job) has not been seen before then jobId -> jobId. 
If the job has been seen before then jobId -> originalJobId // Note that if srv.KVStore is nil then this function simply returns jobId -> jobId -func (srv *PulsarSubmitServer) getOriginalJobIds(ctx context.Context, apiJobs []*api.Job) (map[string]string, error) { +func (srv *PulsarSubmitServer) getOriginalJobIds(ctx *armadacontext.Context, apiJobs []*api.Job) (map[string]string, error) { // Default is the current id ret := make(map[string]string, len(apiJobs)) for _, apiJob := range apiJobs { @@ -770,7 +778,7 @@ func (srv *PulsarSubmitServer) getOriginalJobIds(ctx context.Context, apiJobs [] return ret, nil } -func (srv *PulsarSubmitServer) storeOriginalJobIds(ctx context.Context, apiJobs []*api.Job) error { +func (srv *PulsarSubmitServer) storeOriginalJobIds(ctx *armadacontext.Context, apiJobs []*api.Job) error { if srv.KVStore == nil { return nil } diff --git a/internal/armada/server/usage.go b/internal/armada/server/usage.go index 92fe54abd45..9c6e1e7800e 100644 --- a/internal/armada/server/usage.go +++ b/internal/armada/server/usage.go @@ -12,6 +12,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/armada/scheduling" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client/queue" @@ -41,7 +42,8 @@ func NewUsageServer( } } -func (s *UsageServer) ReportUsage(ctx context.Context, report *api.ClusterUsageReport) (*types.Empty, error) { +func (s *UsageServer) ReportUsage(grpcCtx context.Context, report *api.ClusterUsageReport) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) if err := checkPermission(s.permissions, ctx, permissions.ExecuteJobs); err != nil { return nil, status.Errorf(codes.PermissionDenied, "[ReportUsage] error: %s", err) } diff --git a/internal/armada/server/usage_test.go b/internal/armada/server/usage_test.go index 6464b154880..8f1fa88b30b 100644 --- a/internal/armada/server/usage_test.go +++ b/internal/armada/server/usage_test.go @@ -1,7 +1,6 @@ package server import ( - "context" "testing" "time" @@ -12,6 +11,7 @@ import ( "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/armada/repository" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaresource "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client/queue" @@ -26,14 +26,14 @@ func TestUsageServer_ReportUsage(t *testing.T) { err := s.queueRepository.CreateQueue(queue.Queue{Name: "q1", PriorityFactor: 1}) assert.Nil(t, err) - _, err = s.ReportUsage(context.Background(), oneQueueReport(now, cpu, memory)) + _, err = s.ReportUsage(armadacontext.Background(), oneQueueReport(now, cpu, memory)) assert.Nil(t, err) priority, err := s.usageRepository.GetClusterPriority("clusterA") assert.Nil(t, err) assert.Equal(t, 10.0, priority["q1"], "Priority should be updated for the new cluster.") - _, err = s.ReportUsage(context.Background(), oneQueueReport(now.Add(time.Minute), cpu, memory)) + _, err = s.ReportUsage(armadacontext.Background(), oneQueueReport(now.Add(time.Minute), cpu, memory)) assert.Nil(t, err) priority, err = s.usageRepository.GetClusterPriority("clusterA") @@ -51,14 +51,14 @@ func TestUsageServer_ReportUsageWithDefinedScarcity(t *testing.T) { err 
:= s.queueRepository.CreateQueue(queue.Queue{Name: "q1", PriorityFactor: 1}) assert.Nil(t, err) - _, err = s.ReportUsage(context.Background(), oneQueueReport(now, cpu, memory)) + _, err = s.ReportUsage(armadacontext.Background(), oneQueueReport(now, cpu, memory)) assert.Nil(t, err) priority, err := s.usageRepository.GetClusterPriority("clusterA") assert.Nil(t, err) assert.Equal(t, 5.0, priority["q1"], "Priority should be updated for the new cluster.") - _, err = s.ReportUsage(context.Background(), oneQueueReport(now.Add(time.Minute), cpu, memory)) + _, err = s.ReportUsage(armadacontext.Background(), oneQueueReport(now.Add(time.Minute), cpu, memory)) assert.Nil(t, err) priority, err = s.usageRepository.GetClusterPriority("clusterA") diff --git a/internal/armadactl/analyze.go b/internal/armadactl/analyze.go index de9d29fb5dc..650c0861684 100644 --- a/internal/armadactl/analyze.go +++ b/internal/armadactl/analyze.go @@ -1,11 +1,11 @@ package armadactl import ( - "context" "encoding/json" "fmt" "reflect" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client" "github.com/armadaproject/armada/pkg/client/domain" @@ -17,7 +17,7 @@ func (a *App) Analyze(queue string, jobSetId string) error { events := map[string][]*api.Event{} var jobState *domain.WatchContext - client.WatchJobSet(ec, queue, jobSetId, false, true, false, false, context.Background(), func(state *domain.WatchContext, e api.Event) bool { + client.WatchJobSet(ec, queue, jobSetId, false, true, false, false, armadacontext.Background(), func(state *domain.WatchContext, e api.Event) bool { events[e.GetJobId()] = append(events[e.GetJobId()], &e) jobState = state return false diff --git a/internal/armadactl/kube.go b/internal/armadactl/kube.go index ef466f7e6b8..d9b63a0399a 100644 --- a/internal/armadactl/kube.go +++ b/internal/armadactl/kube.go @@ -1,10 +1,10 @@ package armadactl import ( - "context" "fmt" "strings" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client" ) @@ -14,7 +14,7 @@ import ( func (a *App) Kube(jobId string, queueName string, jobSetId string, podNumber int, args []string) error { verb := strings.Join(args, " ") return client.WithEventClient(a.Params.ApiConnectionDetails, func(c api.EventClient) error { - state := client.GetJobSetState(c, queueName, jobSetId, context.Background(), true, false, false) + state := client.GetJobSetState(c, queueName, jobSetId, armadacontext.Background(), true, false, false) jobInfo := state.GetJobInfo(jobId) if jobInfo == nil { diff --git a/internal/armadactl/resources.go b/internal/armadactl/resources.go index 4cf4faa653c..8a7f018bc0d 100644 --- a/internal/armadactl/resources.go +++ b/internal/armadactl/resources.go @@ -1,9 +1,9 @@ package armadactl import ( - "context" "fmt" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client" ) @@ -11,7 +11,7 @@ import ( // Resources prints the resources used by the jobs in job set with ID jobSetId in the given queue. 
func (a *App) Resources(queueName string, jobSetId string) error { return client.WithEventClient(a.Params.ApiConnectionDetails, func(c api.EventClient) error { - state := client.GetJobSetState(c, queueName, jobSetId, context.Background(), true, false, false) + state := client.GetJobSetState(c, queueName, jobSetId, armadacontext.Background(), true, false, false) for _, job := range state.GetCurrentState() { fmt.Fprintf(a.Out, "Job ID: %v, maximum used resources: %v\n", job.Job.Id, job.MaxUsedResources) diff --git a/internal/armadactl/watch.go b/internal/armadactl/watch.go index fd0d842d5cf..872a01388c8 100644 --- a/internal/armadactl/watch.go +++ b/internal/armadactl/watch.go @@ -1,12 +1,12 @@ package armadactl import ( - "context" "encoding/json" "fmt" "reflect" "time" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client" "github.com/armadaproject/armada/pkg/client/domain" @@ -16,7 +16,7 @@ import ( func (a *App) Watch(queue string, jobSetId string, raw bool, exitOnInactive bool, forceNewEvents bool, forceLegacyEvents bool) error { fmt.Fprintf(a.Out, "Watching job set %s\n", jobSetId) return client.WithEventClient(a.Params.ApiConnectionDetails, func(c api.EventClient) error { - client.WatchJobSet(c, queue, jobSetId, true, true, forceNewEvents, forceLegacyEvents, context.Background(), func(state *domain.WatchContext, event api.Event) bool { + client.WatchJobSet(c, queue, jobSetId, true, true, forceNewEvents, forceLegacyEvents, armadacontext.Background(), func(state *domain.WatchContext, event api.Event) bool { if raw { data, err := json.Marshal(event) if err != nil { diff --git a/internal/binoculars/server/binoculars.go b/internal/binoculars/server/binoculars.go index 4497573a04d..0a08237058f 100644 --- a/internal/binoculars/server/binoculars.go +++ b/internal/binoculars/server/binoculars.go @@ -8,6 +8,7 @@ import ( "github.com/armadaproject/armada/internal/binoculars/service" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/pkg/api/binoculars" ) @@ -27,7 +28,7 @@ func NewBinocularsServer(logService service.LogService, cordonService service.Co func (b *BinocularsServer) Logs(ctx context.Context, request *binoculars.LogRequest) (*binoculars.LogResponse, error) { principal := authorization.GetPrincipal(ctx) - logLines, err := b.logService.GetLogs(ctx, &service.LogParams{ + logLines, err := b.logService.GetLogs(armadacontext.FromGrpcCtx(ctx), &service.LogParams{ Principal: principal, Namespace: request.PodNamespace, PodName: common.PodNamePrefix + request.JobId + "-" + strconv.Itoa(int(request.PodNumber)), @@ -42,7 +43,7 @@ func (b *BinocularsServer) Logs(ctx context.Context, request *binoculars.LogRequ } func (b *BinocularsServer) Cordon(ctx context.Context, request *binoculars.CordonRequest) (*types.Empty, error) { - err := b.cordonService.CordonNode(ctx, request) + err := b.cordonService.CordonNode(armadacontext.FromGrpcCtx(ctx), request) if err != nil { return nil, err } diff --git a/internal/binoculars/service/cordon.go b/internal/binoculars/service/cordon.go index 8d850bca8ec..584da9bf4ca 100644 --- a/internal/binoculars/service/cordon.go +++ b/internal/binoculars/service/cordon.go @@ -1,7 +1,6 @@ package service import ( - "context" "encoding/json" "fmt" "strings" @@ -14,6 +13,7 @@ import ( 
"github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/binoculars/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/internal/common/cluster" @@ -23,7 +23,7 @@ import ( const userTemplate = "" type CordonService interface { - CordonNode(ctx context.Context, request *binoculars.CordonRequest) error + CordonNode(ctx *armadacontext.Context, request *binoculars.CordonRequest) error } type KubernetesCordonService struct { @@ -44,7 +44,7 @@ func NewKubernetesCordonService( } } -func (c *KubernetesCordonService) CordonNode(ctx context.Context, request *binoculars.CordonRequest) error { +func (c *KubernetesCordonService) CordonNode(ctx *armadacontext.Context, request *binoculars.CordonRequest) error { err := checkPermission(c.permissionChecker, ctx, permissions.CordonNodes) if err != nil { return status.Errorf(codes.PermissionDenied, err.Error()) @@ -91,7 +91,7 @@ func GetPatchBytes(patchData *nodePatch) ([]byte, error) { return json.Marshal(patchData) } -func checkPermission(p authorization.PermissionChecker, ctx context.Context, permission permission.Permission) error { +func checkPermission(p authorization.PermissionChecker, ctx *armadacontext.Context, permission permission.Permission) error { if !p.UserHasPermission(ctx, permission) { return fmt.Errorf("user %s does not have permission %s", authorization.GetPrincipal(ctx).GetName(), permission) } diff --git a/internal/binoculars/service/cordon_test.go b/internal/binoculars/service/cordon_test.go index 5a1cce961b9..eadac72fd8e 100644 --- a/internal/binoculars/service/cordon_test.go +++ b/internal/binoculars/service/cordon_test.go @@ -6,6 +6,7 @@ import ( "fmt" "testing" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -19,6 +20,7 @@ import ( clientTesting "k8s.io/client-go/testing" "github.com/armadaproject/armada/internal/binoculars/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/pkg/api/binoculars" @@ -79,7 +81,7 @@ func TestCordonNode(t *testing.T) { cordonService, client := setupTest(t, cordonConfig, FakePermissionChecker{ReturnValue: true}) ctx := authorization.WithPrincipal(context.Background(), principal) - err := cordonService.CordonNode(ctx, &binoculars.CordonRequest{ + err := cordonService.CordonNode(armadacontext.New(ctx, logrus.NewEntry(logrus.New())), &binoculars.CordonRequest{ NodeName: defaultNode.Name, }) assert.Nil(t, err) @@ -96,7 +98,7 @@ func TestCordonNode(t *testing.T) { assert.Equal(t, patch, tc.expectedPatch) // Assert resulting node is in expected state - node, err := client.CoreV1().Nodes().Get(context.Background(), defaultNode.Name, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(armadacontext.Background(), defaultNode.Name, metav1.GetOptions{}) assert.Nil(t, err) assert.Equal(t, node.Spec.Unschedulable, true) assert.Equal(t, node.Labels, tc.expectedLabels) @@ -107,7 +109,7 @@ func TestCordonNode(t *testing.T) { func TestCordonNode_InvalidNodeName(t *testing.T) { cordonService, _ := setupTest(t, defaultCordonConfig, FakePermissionChecker{ReturnValue: true}) - err := 
cordonService.CordonNode(context.Background(), &binoculars.CordonRequest{ + err := cordonService.CordonNode(armadacontext.Background(), &binoculars.CordonRequest{ NodeName: "non-existent-node", }) @@ -117,7 +119,7 @@ func TestCordonNode_InvalidNodeName(t *testing.T) { func TestCordonNode_Unauthenticated(t *testing.T) { cordonService, _ := setupTest(t, defaultCordonConfig, FakePermissionChecker{ReturnValue: false}) - err := cordonService.CordonNode(context.Background(), &binoculars.CordonRequest{ + err := cordonService.CordonNode(armadacontext.Background(), &binoculars.CordonRequest{ NodeName: defaultNode.Name, }) @@ -131,7 +133,7 @@ func setupTest(t *testing.T, config configuration.CordonConfiguration, permissio client := fake.NewSimpleClientset() clientProvider := &FakeClientProvider{FakeClient: client} - _, err := client.CoreV1().Nodes().Create(context.Background(), defaultNode, metav1.CreateOptions{}) + _, err := client.CoreV1().Nodes().Create(armadacontext.Background(), defaultNode, metav1.CreateOptions{}) require.NoError(t, err) client.Fake.ClearActions() diff --git a/internal/binoculars/service/logs.go b/internal/binoculars/service/logs.go index 49801758292..ac72215f67e 100644 --- a/internal/binoculars/service/logs.go +++ b/internal/binoculars/service/logs.go @@ -1,7 +1,6 @@ package service import ( - "context" "fmt" "strings" "time" @@ -10,13 +9,14 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/cluster" "github.com/armadaproject/armada/pkg/api/binoculars" ) type LogService interface { - GetLogs(ctx context.Context, params *LogParams) ([]*binoculars.LogLine, error) + GetLogs(ctx *armadacontext.Context, params *LogParams) ([]*binoculars.LogLine, error) } type LogParams struct { @@ -37,7 +37,7 @@ func NewKubernetesLogService(clientProvider cluster.KubernetesClientProvider) *K return &KubernetesLogService{clientProvider: clientProvider} } -func (l *KubernetesLogService) GetLogs(ctx context.Context, params *LogParams) ([]*binoculars.LogLine, error) { +func (l *KubernetesLogService) GetLogs(ctx *armadacontext.Context, params *LogParams) ([]*binoculars.LogLine, error) { client, err := l.clientProvider.ClientForUser(params.Principal.GetName(), params.Principal.GetGroupNames()) if err != nil { return nil, err diff --git a/internal/common/app/app.go b/internal/common/app/app.go index bd35f7a5a8f..25ce1e828b0 100644 --- a/internal/common/app/app.go +++ b/internal/common/app/app.go @@ -1,15 +1,16 @@ package app import ( - "context" "os" "os/signal" "syscall" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // CreateContextWithShutdown returns a context that will report done when a SIGTERM is received -func CreateContextWithShutdown() context.Context { - ctx, cancel := context.WithCancel(context.Background()) +func CreateContextWithShutdown() *armadacontext.Context { + ctx, cancel := armadacontext.WithCancel(armadacontext.Background()) c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) go func() { diff --git a/internal/common/armadacontext/armada_context.go b/internal/common/armadacontext/armada_context.go new file mode 100644 index 00000000000..0e41a66a1e4 --- /dev/null +++ b/internal/common/armadacontext/armada_context.go @@ -0,0 +1,107 @@ +package armadacontext + +import ( + "context" + "time" + + 
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +// Context is an extension of Go's context which also includes a logger. This allows us to pass round a contextual logger +// while retaining type-safety +type Context struct { + context.Context + logrus.FieldLogger +} + +// Background creates an empty context with a default logger. It is analogous to context.Background() +func Background() *Context { + return &Context{ + Context: context.Background(), + FieldLogger: logrus.NewEntry(logrus.New()), + } +} + +// TODO creates an empty context with a default logger. It is analogous to context.TODO() +func TODO() *Context { + return &Context{ + Context: context.TODO(), + FieldLogger: logrus.NewEntry(logrus.New()), + } +} + +// FromGrpcCtx creates a context where the logger is extracted via ctxlogrus's Extract() method. +// Note that this will result in a no-op logger if a logger hasn't already been inserted into the context via ctxlogrus +func FromGrpcCtx(ctx context.Context) *Context { + log := ctxlogrus.Extract(ctx) + return New(ctx, log) +} + +// New returns an armada context that encapsulates both a go context and a logger +func New(ctx context.Context, log *logrus.Entry) *Context { + return &Context{ + Context: ctx, + FieldLogger: log, + } +} + +// WithCancel returns a copy of parent with a new Done channel. It is analogous to context.WithCancel() +func WithCancel(parent *Context) (*Context, context.CancelFunc) { + c, cancel := context.WithCancel(parent.Context) + return &Context{ + Context: c, + FieldLogger: parent.FieldLogger, + }, cancel +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted to be no later than d. +// It is analogous to context.WithDeadline() +func WithDeadline(parent *Context, d time.Time) (*Context, context.CancelFunc) { + c, cancel := context.WithDeadline(parent.Context, d) + return &Context{ + Context: c, + FieldLogger: parent.FieldLogger, + }, cancel +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). It is analogous to context.WithTimeout() +func WithTimeout(parent *Context, timeout time.Duration) (*Context, context.CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithLogField returns a copy of parent with the supplied key-value added to the logger +func WithLogField(parent *Context, key string, val interface{}) *Context { + return &Context{ + Context: parent.Context, + FieldLogger: parent.FieldLogger.WithField(key, val), + } +} + +// WithLogFields returns a copy of parent with the supplied key-values added to the logger +func WithLogFields(parent *Context, fields logrus.Fields) *Context { + return &Context{ + Context: parent.Context, + FieldLogger: parent.FieldLogger.WithFields(fields), + } +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. It is analogous to context.WithValue() +func WithValue(parent *Context, key, val any) *Context { + return &Context{ + Context: context.WithValue(parent, key, val), + FieldLogger: parent.FieldLogger, + } +} + +// ErrGroup returns a new Error Group and an associated Context derived from ctx. 
+// It is analogous to errgroup.WithContext(ctx) +func ErrGroup(ctx *Context) (*errgroup.Group, *Context) { + group, goctx := errgroup.WithContext(ctx) + return group, &Context{ + Context: goctx, + FieldLogger: ctx.FieldLogger, + } +} diff --git a/internal/common/armadacontext/armada_context_test.go b/internal/common/armadacontext/armada_context_test.go new file mode 100644 index 00000000000..4cda401c1b1 --- /dev/null +++ b/internal/common/armadacontext/armada_context_test.go @@ -0,0 +1,89 @@ +package armadacontext + +import ( + "context" + "testing" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +var defaultLogger = logrus.WithField("foo", "bar") + +func TestNew(t *testing.T) { + ctx := New(context.Background(), defaultLogger) + require.Equal(t, defaultLogger, ctx.FieldLogger) + require.Equal(t, context.Background(), ctx.Context) +} + +func TestFromGrpcContext(t *testing.T) { + grpcCtx := ctxlogrus.ToContext(context.Background(), defaultLogger) + ctx := FromGrpcCtx(grpcCtx) + require.Equal(t, grpcCtx, ctx.Context) + require.Equal(t, defaultLogger, ctx.FieldLogger) +} + +func TestBackground(t *testing.T) { + ctx := Background() + require.Equal(t, ctx.Context, context.Background()) +} + +func TestTODO(t *testing.T) { + ctx := TODO() + require.Equal(t, ctx.Context, context.TODO()) +} + +func TestWithLogField(t *testing.T) { + ctx := WithLogField(Background(), "fish", "chips") + require.Equal(t, context.Background(), ctx.Context) + require.Equal(t, logrus.Fields{"fish": "chips"}, ctx.FieldLogger.(*logrus.Entry).Data) +} + +func TestWithLogFields(t *testing.T) { + ctx := WithLogFields(Background(), logrus.Fields{"fish": "chips", "salt": "pepper"}) + require.Equal(t, context.Background(), ctx.Context) + require.Equal(t, logrus.Fields{"fish": "chips", "salt": "pepper"}, ctx.FieldLogger.(*logrus.Entry).Data) +} + +func TestWithTimeout(t *testing.T) { + ctx, _ := WithTimeout(Background(), 100*time.Millisecond) + testDeadline(t, ctx) +} + +func TestWithDeadline(t *testing.T) { + ctx, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + testDeadline(t, ctx) +} + +func TestWithValue(t *testing.T) { + ctx := WithValue(Background(), "foo", "bar") + require.Equal(t, "bar", ctx.Value("foo")) +} + +func testDeadline(t *testing.T, c *Context) { + t.Helper() + d := quiescent(t) + timer := time.NewTimer(d) + defer timer.Stop() + select { + case <-timer.C: + t.Fatalf("context not timed out after %v", d) + case <-c.Done(): + } + if e := c.Err(); e != context.DeadlineExceeded { + t.Errorf("c.Err() == %v; want %v", e, context.DeadlineExceeded) + } +} + +func quiescent(t *testing.T) time.Duration { + deadline, ok := t.Deadline() + if !ok { + return 5 * time.Second + } + + const arbitraryCleanupMargin = 1 * time.Second + return time.Until(deadline) - arbitraryCleanupMargin +} diff --git a/internal/common/auth/authorization/kubernetes_test.go b/internal/common/auth/authorization/kubernetes_test.go index 9493c71f80a..eef827f9add 100644 --- a/internal/common/auth/authorization/kubernetes_test.go +++ b/internal/common/auth/authorization/kubernetes_test.go @@ -10,11 +10,10 @@ import ( "time" "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" - authv1 "k8s.io/api/authentication/v1" - "k8s.io/apimachinery/pkg/util/clock" - "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" + authv1 "k8s.io/api/authentication/v1" + "k8s.io/apimachinery/pkg/util/clock" 
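[Editor's aside, not part of the diff: since the armadacontext package added above is the backbone of this PR, a brief usage sketch may help reviewers. It is grounded only in the API shown above; the handler name handle and the field values are invented for the example.]

```go
package main

import (
	"context"
	"time"

	"github.com/sirupsen/logrus"

	"github.com/armadaproject/armada/internal/common/armadacontext"
)

// handle is a hypothetical gRPC-style handler illustrating the boundary
// pattern used throughout this diff: accept a plain context.Context from
// gRPC, wrap it once with FromGrpcCtx, then pass the logger-carrying
// context to internal code.
func handle(grpcCtx context.Context) {
	ctx := armadacontext.FromGrpcCtx(grpcCtx)
	ctx.Info("handling request")
}

func main() {
	// Root context with a default logger, analogous to context.Background().
	ctx := armadacontext.Background()

	// Derived contexts keep the logger while adding deadlines or log fields.
	ctx, cancel := armadacontext.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	ctx = armadacontext.WithLogField(ctx, "component", "example")

	// The context satisfies both context.Context and logrus.FieldLogger.
	ctx.WithFields(logrus.Fields{"attempt": 1}).Info("starting work")

	// ErrGroup mirrors errgroup.WithContext while preserving the logger.
	g, gctx := armadacontext.ErrGroup(ctx)
	g.Go(func() error {
		gctx.Info("doing work in a goroutine")
		return nil
	})
	if err := g.Wait(); err != nil {
		ctx.WithError(err).Error("work failed")
	}
}
```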
"github.com/armadaproject/armada/internal/common/auth/configuration" ) diff --git a/internal/common/certs/cached_certificate.go b/internal/common/certs/cached_certificate.go index 2588d0f5b50..72b7f6ea250 100644 --- a/internal/common/certs/cached_certificate.go +++ b/internal/common/certs/cached_certificate.go @@ -1,13 +1,14 @@ package certs import ( - "context" "crypto/tls" "os" "sync" "time" log "github.com/sirupsen/logrus" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) type CachedCertificateService struct { @@ -52,7 +53,7 @@ func (c *CachedCertificateService) updateCertificate(certificate *tls.Certificat c.certificate = certificate } -func (c *CachedCertificateService) Run(ctx context.Context) { +func (c *CachedCertificateService) Run(ctx *armadacontext.Context) { ticker := time.NewTicker(c.refreshInterval) for { select { diff --git a/internal/common/certs/cached_certificate_test.go b/internal/common/certs/cached_certificate_test.go index 7687c80fd63..4edd3efd376 100644 --- a/internal/common/certs/cached_certificate_test.go +++ b/internal/common/certs/cached_certificate_test.go @@ -2,7 +2,6 @@ package certs import ( "bytes" - "context" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -16,6 +15,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) const ( @@ -96,7 +97,7 @@ func TestCachedCertificateService_ReloadsCertPeriodically_WhenUsingRun(t *testin assert.Equal(t, cert, cachedCertService.GetCertificate()) go func() { - cachedCertService.Run(context.Background()) + cachedCertService.Run(armadacontext.Background()) }() newCert, certData, keyData := createCerts(t) diff --git a/internal/common/client.go b/internal/common/client.go index 0b44c374d0b..afc5bb5c597 100644 --- a/internal/common/client.go +++ b/internal/common/client.go @@ -3,8 +3,10 @@ package common import ( "context" "time" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) -func ContextWithDefaultTimeout() (context.Context, context.CancelFunc) { - return context.WithTimeout(context.Background(), 10*time.Second) +func ContextWithDefaultTimeout() (*armadacontext.Context, context.CancelFunc) { + return armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) } diff --git a/internal/common/database/db_testutil.go b/internal/common/database/db_testutil.go index a36affdef73..416b348d7d8 100644 --- a/internal/common/database/db_testutil.go +++ b/internal/common/database/db_testutil.go @@ -1,7 +1,6 @@ package database import ( - "context" "fmt" "github.com/jackc/pgx/v5" @@ -10,6 +9,7 @@ import ( "github.com/pkg/errors" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" ) @@ -17,7 +17,7 @@ import ( // migrations: perform the list of migrations before entering the action callback // action: callback for client code func WithTestDb(migrations []Migration, action func(db *pgxpool.Pool) error) error { - ctx := context.Background() + ctx := armadacontext.Background() // Connect and create a dedicated database for the test dbName := "test_" + util.NewULID() @@ -67,7 +67,7 @@ func WithTestDb(migrations []Migration, action func(db *pgxpool.Pool) error) err // config: PostgresConfig to specify connection details to database // action: callback for client code func WithTestDbCustom(migrations []Migration, config configuration.PostgresConfig, action func(db 
*pgxpool.Pool) error) error { - ctx := context.Background() + ctx := armadacontext.Background() testDbPool, err := OpenPgxPool(config) if err != nil { diff --git a/internal/common/database/functions.go b/internal/common/database/functions.go index 5446f7cd0e1..17f3334efab 100644 --- a/internal/common/database/functions.go +++ b/internal/common/database/functions.go @@ -1,7 +1,6 @@ package database import ( - "context" "database/sql" "fmt" "strings" @@ -13,6 +12,7 @@ import ( "github.com/pkg/errors" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" ) func CreateConnectionString(values map[string]string) string { @@ -26,20 +26,20 @@ func CreateConnectionString(values map[string]string) string { } func OpenPgxConn(config configuration.PostgresConfig) (*pgx.Conn, error) { - db, err := pgx.Connect(context.Background(), CreateConnectionString(config.Connection)) + db, err := pgx.Connect(armadacontext.Background(), CreateConnectionString(config.Connection)) if err != nil { return nil, err } - err = db.Ping(context.Background()) + err = db.Ping(armadacontext.Background()) return db, err } func OpenPgxPool(config configuration.PostgresConfig) (*pgxpool.Pool, error) { - db, err := pgxpool.New(context.Background(), CreateConnectionString(config.Connection)) + db, err := pgxpool.New(armadacontext.Background(), CreateConnectionString(config.Connection)) if err != nil { return nil, err } - err = db.Ping(context.Background()) + err = db.Ping(armadacontext.Background()) return db, err } diff --git a/internal/common/database/migrations.go b/internal/common/database/migrations.go index 164c75b313d..b515c94f7fb 100644 --- a/internal/common/database/migrations.go +++ b/internal/common/database/migrations.go @@ -2,7 +2,6 @@ package database import ( "bytes" - "context" "io/fs" "path" "sort" @@ -11,6 +10,8 @@ import ( stakikfs "github.com/rakyll/statik/fs" log "github.com/sirupsen/logrus" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // Migration represents a single, versioned database migration script @@ -28,7 +29,7 @@ func NewMigration(id int, name string, sql string) Migration { } } -func UpdateDatabase(ctx context.Context, db Querier, migrations []Migration) error { +func UpdateDatabase(ctx *armadacontext.Context, db Querier, migrations []Migration) error { log.Info("Updating postgres...") version, err := readVersion(ctx, db) if err != nil { @@ -55,7 +56,7 @@ func UpdateDatabase(ctx context.Context, db Querier, migrations []Migration) err return nil } -func readVersion(ctx context.Context, db Querier) (int, error) { +func readVersion(ctx *armadacontext.Context, db Querier) (int, error) { _, err := db.Exec(ctx, `CREATE SEQUENCE IF NOT EXISTS database_version START WITH 0 MINVALUE 0;`) if err != nil { @@ -75,7 +76,7 @@ func readVersion(ctx context.Context, db Querier) (int, error) { return version, err } -func setVersion(ctx context.Context, db Querier, version int) error { +func setVersion(ctx *armadacontext.Context, db Querier, version int) error { _, err := db.Exec(ctx, `SELECT setval('database_version', $1)`, version) return err } diff --git a/internal/common/database/types/types.go b/internal/common/database/types/types.go index eb4f8d426be..2171d10bad1 100644 --- a/internal/common/database/types/types.go +++ b/internal/common/database/types/types.go @@ -1,10 +1,10 @@ package types import ( - "context" "database/sql" "github.com/armadaproject/armada/internal/armada/configuration" + 
"github.com/armadaproject/armada/internal/common/armadacontext" ) type DatabaseConnection interface { @@ -16,25 +16,25 @@ type DatabaseConnection interface { // executing queries, and starting transactions. type DatabaseConn interface { // Close closes the database connection. It returns any error encountered during the closing operation. - Close(context.Context) error + Close(*armadacontext.Context) error // Ping pings the database to check the connection. It returns any error encountered during the ping operation. - Ping(context.Context) error + Ping(*armadacontext.Context) error // Exec executes a query that doesn't return rows. It returns any error encountered. - Exec(context.Context, string, ...any) (any, error) + Exec(*armadacontext.Context, string, ...any) (any, error) // Query executes a query that returns multiple rows. It returns a DatabaseRows interface that allows you to iterate over the result set, and any error encountered. - Query(context.Context, string, ...any) (DatabaseRows, error) + Query(*armadacontext.Context, string, ...any) (DatabaseRows, error) // QueryRow executes a query that returns one row. It returns a DatabaseRow interface representing the result row, and any error encountered. - QueryRow(context.Context, string, ...any) DatabaseRow + QueryRow(*armadacontext.Context, string, ...any) DatabaseRow // BeginTx starts a transcation with the given DatabaseTxOptions, or returns an error if any occurred. - BeginTx(context.Context, DatabaseTxOptions) (DatabaseTx, error) + BeginTx(*armadacontext.Context, DatabaseTxOptions) (DatabaseTx, error) // BeginTxFunc starts a transaction and executes the given function within the transaction. It the function runs successfully, BeginTxFunc commits the transaction, otherwise it rolls back and return an errorr. - BeginTxFunc(context.Context, DatabaseTxOptions, func(DatabaseTx) error) error + BeginTxFunc(*armadacontext.Context, DatabaseTxOptions, func(DatabaseTx) error) error } type DatabaseTxOptions struct { @@ -47,52 +47,52 @@ type DatabaseTxOptions struct { // managing transactions, and performing bulk insertions. type DatabaseTx interface { // Exec executes a query that doesn't return rows. It returns any error encountered. - Exec(context.Context, string, ...any) (any, error) + Exec(*armadacontext.Context, string, ...any) (any, error) // Query executes a query that returns multiple rows. // It returns a DatabaseRows interface that allows you to iterate over the result set, and any error encountered. - Query(context.Context, string, ...any) (DatabaseRows, error) + Query(*armadacontext.Context, string, ...any) (DatabaseRows, error) // QueryRow executes a query that returns one row. // It returns a DatabaseRow interface representing the result row, and any error encountered. - QueryRow(context.Context, string, ...any) DatabaseRow + QueryRow(*armadacontext.Context, string, ...any) DatabaseRow // CopyFrom performs a bulk insertion of data into a specified table. // It accepts the table name, column names, and a slice of rows representing the data to be inserted. It returns the number of rows inserted and any error encountered. - CopyFrom(ctx context.Context, tableName string, columnNames []string, rows [][]any) (int64, error) + CopyFrom(ctx *armadacontext.Context, tableName string, columnNames []string, rows [][]any) (int64, error) // Commit commits the transaction. It returns any error encountered during the commit operation. - Commit(context.Context) error + Commit(*armadacontext.Context) error // Rollback rolls back the transaction. 
It returns any error encountered during the rollback operation. - Rollback(context.Context) error + Rollback(*armadacontext.Context) error } // DatabasePool represents a database connection pool interface that provides methods for acquiring and managing database connections. type DatabasePool interface { // Acquire acquires a database connection from the pool. // It takes a context and returns a DatabaseConn representing the acquired connection and any encountered error. - Acquire(context.Context) (DatabaseConn, error) + Acquire(*armadacontext.Context) (DatabaseConn, error) // Ping pings the database to check the connection. It returns any error encountered during the ping operation. - Ping(context.Context) error + Ping(*armadacontext.Context) error // Close closes the database connection. It returns any error encountered during the closing operation. Close() // Exec executes a query that doesn't return rows. It returns any error encountered. - Exec(context.Context, string, ...any) (any, error) + Exec(*armadacontext.Context, string, ...any) (any, error) // Query executes a query that returns multiple rows. // It returns a DatabaseRows interface that allows you to iterate over the result set, and any error encountered. - Query(context.Context, string, ...any) (DatabaseRows, error) + Query(*armadacontext.Context, string, ...any) (DatabaseRows, error) // BeginTx starts a transcation with the given DatabaseTxOptions, or returns an error if any occurred. - BeginTx(context.Context, DatabaseTxOptions) (DatabaseTx, error) + BeginTx(*armadacontext.Context, DatabaseTxOptions) (DatabaseTx, error) // BeginTxFunc starts a transaction and executes the given function within the transaction. // It the function runs successfully, BeginTxFunc commits the transaction, otherwise it rolls back and return an error. - BeginTxFunc(context.Context, DatabaseTxOptions, func(DatabaseTx) error) error + BeginTxFunc(*armadacontext.Context, DatabaseTxOptions, func(DatabaseTx) error) error } // DatabaseRow represents a single row in a result set. diff --git a/internal/common/database/upsert.go b/internal/common/database/upsert.go index 23f27164f9b..5df05c67918 100644 --- a/internal/common/database/upsert.go +++ b/internal/common/database/upsert.go @@ -1,19 +1,19 @@ package database import ( - "context" "fmt" "reflect" "strings" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/google/uuid" "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) -func UpsertWithTransaction[T any](ctx context.Context, db *pgxpool.Pool, tableName string, records []T) error { +func UpsertWithTransaction[T any](ctx *armadacontext.Context, db *pgxpool.Pool, tableName string, records []T) error { if len(records) == 0 { return nil } @@ -50,7 +50,7 @@ func UpsertWithTransaction[T any](ctx context.Context, db *pgxpool.Pool, tableNa // // ) // I.e., it should omit everything before and after the "(" and ")", respectively. 
-func Upsert[T any](ctx context.Context, tx pgx.Tx, tableName string, records []T) error { +func Upsert[T any](ctx *armadacontext.Context, tx pgx.Tx, tableName string, records []T) error { if len(records) < 1 { return nil } diff --git a/internal/common/database/upsert_test.go b/internal/common/database/upsert_test.go index b1329921c1e..638d15ac494 100644 --- a/internal/common/database/upsert_test.go +++ b/internal/common/database/upsert_test.go @@ -1,7 +1,6 @@ package database import ( - "context" "fmt" "testing" "time" @@ -9,6 +8,8 @@ import ( "github.com/google/uuid" "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // Used for tests. @@ -55,7 +56,7 @@ func TestNamesValuesFromRecordPointer(t *testing.T) { } func TestUpsertWithTransaction(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Hour) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Hour) defer cancel() err := withDb(func(db *pgxpool.Pool) error { // Insert rows, read them back, and compare. @@ -90,7 +91,7 @@ func TestUpsertWithTransaction(t *testing.T) { } func TestConcurrency(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() err := withDb(func(db *pgxpool.Pool) error { // Each thread inserts non-overlapping rows, reads them back, and compares. @@ -125,7 +126,7 @@ func TestConcurrency(t *testing.T) { } func TestAutoIncrement(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() err := withDb(func(db *pgxpool.Pool) error { // Insert two rows. These should automatically get auto-incrementing serial numbers. 
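A usage sketch of the updated generic upsert signature above: the caller builds an *armadacontext.Context (which still satisfies context.Context) and passes it straight through. The record type, table name, and pool setup below are hypothetical and chosen only to show the call shape.

package example

import (
	"time"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/armadaproject/armada/internal/common/armadacontext"
	"github.com/armadaproject/armada/internal/common/database"
)

// widget is a hypothetical record type; the upsert helper derives the column
// names from the struct fields via reflection.
type widget struct {
	Id      string
	Message string
	Value   int
}

// upsertWidgets bounds the whole transaction with a wrapped timeout context,
// mirroring the pattern used in upsert_test.go.
func upsertWidgets(db *pgxpool.Pool, ws []widget) error {
	ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second)
	defer cancel()
	return database.UpsertWithTransaction(ctx, db, "widget", ws)
}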
@@ -207,7 +208,7 @@ func setMessageToExecutor(runs []Record, executor string) { } func selectRecords(db *pgxpool.Pool) ([]Record, error) { - rows, err := db.Query(context.Background(), fmt.Sprintf("SELECT id, message, value, serial FROM %s order by value", TABLE_NAME)) + rows, err := db.Query(armadacontext.Background(), fmt.Sprintf("SELECT id, message, value, serial FROM %s order by value", TABLE_NAME)) if err != nil { return nil, err } diff --git a/internal/common/etcdhealth/etcdhealth.go b/internal/common/etcdhealth/etcdhealth.go index 804a89542f4..49be27a22fe 100644 --- a/internal/common/etcdhealth/etcdhealth.go +++ b/internal/common/etcdhealth/etcdhealth.go @@ -1,7 +1,6 @@ package etcdhealth import ( - "context" "sync" "time" @@ -9,6 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/healthmonitor" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/metrics" @@ -184,7 +184,7 @@ func (srv *EtcdReplicaHealthMonitor) sizeFraction() float64 { return srv.etcdSizeBytes / srv.etcdCapacityBytes } -func (srv *EtcdReplicaHealthMonitor) Run(ctx context.Context, log *logrus.Entry) error { +func (srv *EtcdReplicaHealthMonitor) Run(ctx *armadacontext.Context, log *logrus.Entry) error { log = log.WithField("service", "EtcdHealthMonitor") log.Info("starting etcd health monitor") defer log.Info("stopping etcd health monitor") @@ -264,7 +264,7 @@ func (srv *EtcdReplicaHealthMonitor) setCapacityBytesFromMetrics(metrics map[str // BlockUntilNextMetricsCollection blocks until the next metrics collection has completed, // or until ctx is cancelled, whichever occurs first. -func (srv *EtcdReplicaHealthMonitor) BlockUntilNextMetricsCollection(ctx context.Context) { +func (srv *EtcdReplicaHealthMonitor) BlockUntilNextMetricsCollection(ctx *armadacontext.Context) { c := make(chan struct{}) srv.mu.Lock() srv.watchers = append(srv.watchers, c) diff --git a/internal/common/etcdhealth/etcdhealth_test.go b/internal/common/etcdhealth/etcdhealth_test.go index 22435861a61..474d4df0e3a 100644 --- a/internal/common/etcdhealth/etcdhealth_test.go +++ b/internal/common/etcdhealth/etcdhealth_test.go @@ -1,14 +1,13 @@ package etcdhealth import ( - "context" "testing" "time" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "golang.org/x/sync/errgroup" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/healthmonitor" "github.com/armadaproject/armada/internal/common/metrics" ) @@ -24,9 +23,9 @@ func TestEtcdReplicaHealthMonitor(t *testing.T) { assert.NoError(t, err) // Start the metrics collection service. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := armadacontext.WithCancel(armadacontext.Background()) defer cancel() - g, ctx := errgroup.WithContext(ctx) + g, ctx := armadacontext.ErrGroup(ctx) g.Go(func() error { return hm.Run(ctx, logrus.NewEntry(logrus.New())) }) // Should still be unavailable due to missing metrics. 
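For orientation, a minimal sketch of the ErrGroup and WithLogField helpers introduced in armadacontext, following the pattern exercised in the test above; the two callbacks and the log field values are hypothetical placeholders.

package example

import (
	"github.com/armadaproject/armada/internal/common/armadacontext"
)

// runPair runs two long-lived loops under one error group. ErrGroup wraps
// errgroup.WithContext, so the derived *armadacontext.Context keeps the
// parent's FieldLogger and is cancelled as soon as either callback fails.
func runPair(parent *armadacontext.Context, a, b func(*armadacontext.Context) error) error {
	// Attach an extra log field; the contexts derived below carry it too.
	ctx := armadacontext.WithLogField(parent, "component", "demo")
	g, ctx := armadacontext.ErrGroup(ctx)
	g.Go(func() error { return a(ctx) })
	g.Go(func() error { return b(ctx) })
	return g.Wait()
}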
diff --git a/internal/common/eventutil/eventutil.go b/internal/common/eventutil/eventutil.go index 05ee5d473c9..6d253dacc35 100644 --- a/internal/common/eventutil/eventutil.go +++ b/internal/common/eventutil/eventutil.go @@ -1,7 +1,6 @@ package eventutil import ( - "context" "fmt" "math" "time" @@ -14,6 +13,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" @@ -25,7 +25,7 @@ import ( // UnmarshalEventSequence returns an EventSequence object contained in a byte buffer // after validating that the resulting EventSequence is valid. -func UnmarshalEventSequence(ctx context.Context, payload []byte) (*armadaevents.EventSequence, error) { +func UnmarshalEventSequence(ctx *armadacontext.Context, payload []byte) (*armadaevents.EventSequence, error) { sequence := &armadaevents.EventSequence{} err := proto.Unmarshal(payload, sequence) if err != nil { @@ -190,6 +190,7 @@ func ApiJobFromLogSubmitJob(ownerId string, groups []string, queueName string, j Created: time, Owner: ownerId, QueueOwnershipUserGroups: groups, + QueueTtlSeconds: e.QueueTtlSeconds, }, nil } @@ -224,9 +225,10 @@ func LogSubmitJobFromApiJob(job *api.Job) (*armadaevents.SubmitJob, error) { Annotations: job.GetAnnotations(), Labels: job.GetLabels(), }, - MainObject: mainObject, - Objects: objects, - Scheduler: job.Scheduler, + MainObject: mainObject, + Objects: objects, + Scheduler: job.Scheduler, + QueueTtlSeconds: job.QueueTtlSeconds, }, nil } diff --git a/internal/common/eventutil/sequence_from_message.go b/internal/common/eventutil/sequence_from_message.go deleted file mode 100644 index cc1749c392e..00000000000 --- a/internal/common/eventutil/sequence_from_message.go +++ /dev/null @@ -1,193 +0,0 @@ -package eventutil - -import ( - "context" - "time" - - "github.com/apache/pulsar-client-go/pulsar" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" - - "github.com/armadaproject/armada/internal/common/logging" - "github.com/armadaproject/armada/pkg/armadaevents" -) - -// PulsarToChannel is a service for receiving messages from Pulsar and forwarding those on C. -type SequenceFromMessage struct { - In chan pulsar.Message - Out chan *EventSequenceWithMessageIds -} - -// EventSequenceWithMessageIds bundles an event sequence with -// all the ids of all Pulsar messages that were consumed to produce it. 
-type EventSequenceWithMessageIds struct { - Sequence *armadaevents.EventSequence - MessageIds []pulsar.MessageID -} - -func NewSequenceFromMessage(in chan pulsar.Message) *SequenceFromMessage { - return &SequenceFromMessage{ - In: in, - Out: make(chan *EventSequenceWithMessageIds), - } -} - -func (srv *SequenceFromMessage) Run(ctx context.Context) error { - log := ctxlogrus.Extract(ctx) - for { - select { - case <-ctx.Done(): - return ctx.Err() - case msg := <-srv.In: - if msg == nil { - break - } - sequence, err := UnmarshalEventSequence(ctx, msg.Payload()) - if err != nil { - logging.WithStacktrace(log, err).WithField("messageid", msg.ID()).Error("failed to unmarshal event sequence") - break - } - - sequenceWithMessageIds := &EventSequenceWithMessageIds{ - Sequence: sequence, - MessageIds: []pulsar.MessageID{msg.ID()}, - } - select { - case <-ctx.Done(): - case srv.Out <- sequenceWithMessageIds: - } - } - } -} - -// SequenceCompacter reads sequences and produces compacted sequences. -// Compacted sequences are created by combining events in sequences with the -type SequenceCompacter struct { - In chan *EventSequenceWithMessageIds - Out chan *EventSequenceWithMessageIds - // Buffer messages for at most this long before forwarding on the outgoing channel. - Interval time.Duration - // Max number of events to buffer. - MaxEvents int - // Buffer of events to be compacted and sent. - buffer []*EventSequenceWithMessageIds - // Number of events collected so far. - numEvents int -} - -func NewSequenceCompacter(in chan *EventSequenceWithMessageIds) *SequenceCompacter { - return &SequenceCompacter{ - In: in, - Out: make(chan *EventSequenceWithMessageIds), - Interval: 5 * time.Second, - MaxEvents: 10000, - } -} - -func (srv *SequenceCompacter) Run(ctx context.Context) error { - ticker := time.NewTicker(srv.Interval) - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - err := srv.compactAndSend(ctx) - if err != nil { - return err - } - case sequenceWithIds := <-srv.In: - if sequenceWithIds == nil || sequenceWithIds.Sequence == nil { - break - } - srv.buffer = append(srv.buffer, sequenceWithIds) - srv.numEvents += len(sequenceWithIds.Sequence.Events) - if srv.numEvents > srv.MaxEvents { - err := srv.compactAndSend(ctx) - if err != nil { - return err - } - } - } - } -} - -func (srv *SequenceCompacter) compactAndSend(ctx context.Context) error { - if len(srv.buffer) == 0 { - return nil - } - - // Compact the event sequences. - // Note that we can't be sure of the number of message ids. - messageIds := make([]pulsar.MessageID, 0, len(srv.buffer)) - sequences := make([]*armadaevents.EventSequence, len(srv.buffer)) - for i, sequenceWithIds := range srv.buffer { - messageIds = append(messageIds, sequenceWithIds.MessageIds...) - sequences[i] = sequenceWithIds.Sequence - } - sequences = CompactEventSequences(sequences) - - for i, sequence := range sequences { - sequenceWithIds := &EventSequenceWithMessageIds{ - Sequence: sequence, - } - - // Add all message ids to the last sequence to be produced. - // To avoid later ack'ing messages the data of which has not yet been processed. - if i == len(sequences)-1 { - sequenceWithIds.MessageIds = messageIds - } - - select { - case <-ctx.Done(): - return ctx.Err() - case srv.Out <- sequenceWithIds: - } - } - - // Empty the buffer. - srv.buffer = nil - srv.numEvents = 0 - - return nil -} - -// EventFilter calls filter once for each event, -// and events for which filter returns false are discarded. 
-type EventFilter struct { - In chan *EventSequenceWithMessageIds - Out chan *EventSequenceWithMessageIds - // Filter function. Discard on returning false. - filter func(*armadaevents.EventSequence_Event) bool -} - -func NewEventFilter(in chan *EventSequenceWithMessageIds, filter func(*armadaevents.EventSequence_Event) bool) *EventFilter { - return &EventFilter{ - In: in, - Out: make(chan *EventSequenceWithMessageIds), - filter: filter, - } -} - -func (srv *EventFilter) Run(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - case sequenceWithIds := <-srv.In: - if sequenceWithIds == nil { - break - } - events := make([]*armadaevents.EventSequence_Event, 0, len(sequenceWithIds.Sequence.Events)) - for _, event := range sequenceWithIds.Sequence.Events { - if srv.filter(event) { - events = append(events, event) - } - } - sequenceWithIds.Sequence.Events = events - - select { - case <-ctx.Done(): - case srv.Out <- sequenceWithIds: - } - } - } -} diff --git a/internal/common/eventutil/sequence_from_message_test.go b/internal/common/eventutil/sequence_from_message_test.go deleted file mode 100644 index a4a1812b207..00000000000 --- a/internal/common/eventutil/sequence_from_message_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package eventutil - -// import ( -// "context" -// "testing" -// "time" - -// "github.com/armadaproject/armada/internal/pulsarutils" -// "github.com/armadaproject/armada/pkg/armadaevents" -// "github.com/apache/pulsar-client-go/pulsar" -// ) - -// func TestSequenceCompacter(t *testing.T) { - -// } - -// func TestEventFilter(t *testing.T) { -// tests := map[string]struct { -// filter func(*armadaevents.EventSequence_Event) bool -// n int // Number of event expected to pass the filter -// }{ -// "filter all": { -// filter: func(a *armadaevents.EventSequence_Event) bool { -// return false -// }, -// n: 0, -// }, -// "filter none": { -// filter: func(a *armadaevents.EventSequence_Event) bool { -// return true -// }, -// n: 1, -// }, -// } -// for name, tc := range tests { -// t.Run(name, func(t *testing.T) { -// C := make(chan *EventSequenceWithMessageIds, 1) -// eventFilter := NewEventFilter(C, tc.filter) -// ctx, _ := context.WithTimeout(context.Background(), time.Second) -// sequence := &EventSequenceWithMessageIds{ -// Sequence: &armadaevents.EventSequence{ -// Events: []*armadaevents.EventSequence_Event{ -// {Event: nil}, -// {Event: &armadaevents.EventSequence_Event_SubmitJob{}}, -// }, -// }, -// MessageIds: []pulsar.MessageID{pulsarutils.New(0, i, 0, 0)}, -// } -// C <- sequence - -// }) -// } -// } - -// func generateEvents(ctx context.Context, out chan *EventSequenceWithMessageIds) error { -// var i int64 -// for { -// sequence := EventSequenceWithMessageIds{ -// Sequence: &armadaevents.EventSequence{ -// Events: []*armadaevents.EventSequence_Event{ -// {Event: nil}, -// {Event: &armadaevents.EventSequence_Event_SubmitJob{}}, -// }, -// }, -// MessageIds: []pulsar.MessageID{pulsarutils.New(0, i, 0, 0)}, -// } -// select { -// case <-ctx.Done(): -// return ctx.Err() -// case out <- &sequence: -// } -// } -// } diff --git a/internal/common/grpc/gateway.go b/internal/common/grpc/gateway.go index 39c7b4c175a..c4b3922ff93 100644 --- a/internal/common/grpc/gateway.go +++ b/internal/common/grpc/gateway.go @@ -2,17 +2,18 @@ package grpc import ( "context" + "crypto/tls" "fmt" "net/http" "path" "strings" - "google.golang.org/grpc/credentials/insecure" - "github.com/go-openapi/runtime/middleware" "github.com/grpc-ecosystem/grpc-gateway/runtime" 
"github.com/jcmturner/gokrb5/v8/spnego" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" protoutil "github.com/armadaproject/armada/internal/common/grpc/protoutils" "github.com/armadaproject/armada/internal/common/util" @@ -24,6 +25,8 @@ func CreateGatewayHandler( grpcPort uint16, mux *http.ServeMux, apiBasePath string, + stripPrefix bool, + ssl bool, corsAllowedOrigins []string, spec string, handlers ...func(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error, @@ -42,7 +45,12 @@ func CreateGatewayHandler( return fmt.Sprintf("%s%s", runtime.MetadataHeaderPrefix, key), true })) - conn, err := grpc.DialContext(connectionCtx, grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + transportCreds := insecure.NewCredentials() + if ssl { + transportCreds = credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + } + + conn, err := grpc.DialContext(connectionCtx, grpcAddress, grpc.WithTransportCredentials(transportCreds)) if err != nil { panic(err) } @@ -55,7 +63,12 @@ func CreateGatewayHandler( } } - mux.Handle(apiBasePath, allowCORS(gw, corsAllowedOrigins)) + if stripPrefix { + prefixToStrip := strings.TrimSuffix(apiBasePath, "/") + mux.Handle(apiBasePath, http.StripPrefix(prefixToStrip, allowCORS(gw, corsAllowedOrigins))) + } else { + mux.Handle(apiBasePath, allowCORS(gw, corsAllowedOrigins)) + } mux.Handle(path.Join(apiBasePath, "swagger.json"), middleware.Spec(apiBasePath, []byte(spec), nil)) return func() { diff --git a/internal/common/grpc/grpc.go b/internal/common/grpc/grpc.go index 5f73c3801c0..43707dffadf 100644 --- a/internal/common/grpc/grpc.go +++ b/internal/common/grpc/grpc.go @@ -1,7 +1,6 @@ package grpc import ( - "context" "crypto/tls" "fmt" "net" @@ -23,6 +22,7 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/certs" @@ -91,7 +91,7 @@ func CreateGrpcServer( if tlsConfig.Enabled { cachedCertificateService := certs.NewCachedCertificateService(tlsConfig.CertPath, tlsConfig.KeyPath, time.Minute) go func() { - cachedCertificateService.Run(context.Background()) + cachedCertificateService.Run(armadacontext.Background()) }() tlsCreds := credentials.NewTLS(&tls.Config{ GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { @@ -130,7 +130,7 @@ func Listen(port uint16, grpcServer *grpc.Server, wg *sync.WaitGroup) { // CreateShutdownHandler returns a function that shuts down the grpcServer when the context is closed. 
// The server is given gracePeriod to perform a graceful showdown and is then forcably stopped if necessary -func CreateShutdownHandler(ctx context.Context, gracePeriod time.Duration, grpcServer *grpc.Server) func() error { +func CreateShutdownHandler(ctx *armadacontext.Context, gracePeriod time.Duration, grpcServer *grpc.Server) func() error { return func() error { <-ctx.Done() go func() { diff --git a/internal/common/healthmonitor/healthmonitor.go b/internal/common/healthmonitor/healthmonitor.go index aa196aaffda..d5c6b151c1e 100644 --- a/internal/common/healthmonitor/healthmonitor.go +++ b/internal/common/healthmonitor/healthmonitor.go @@ -1,10 +1,10 @@ package healthmonitor import ( - "context" - "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) const ( @@ -25,5 +25,5 @@ type HealthMonitor interface { // Run initialises and starts the health checker. // Run may be blocking and should be run within a separate goroutine. // Must be called before IsHealthy() or any prometheus.Collector interface methods. - Run(context.Context, *logrus.Entry) error + Run(*armadacontext.Context, *logrus.Entry) error } diff --git a/internal/common/healthmonitor/manualhealthmonitor.go b/internal/common/healthmonitor/manualhealthmonitor.go index 1bc8a6d5b62..7aa2f525068 100644 --- a/internal/common/healthmonitor/manualhealthmonitor.go +++ b/internal/common/healthmonitor/manualhealthmonitor.go @@ -1,11 +1,12 @@ package healthmonitor import ( - "context" "sync" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // ManualHealthMonitor is a manually controlled health monitor. @@ -46,7 +47,7 @@ func (srv *ManualHealthMonitor) IsHealthy() (bool, string, error) { } } -func (srv *ManualHealthMonitor) Run(ctx context.Context, log *logrus.Entry) error { +func (srv *ManualHealthMonitor) Run(_ *armadacontext.Context, _ *logrus.Entry) error { return nil } diff --git a/internal/common/healthmonitor/multihealthmonitor.go b/internal/common/healthmonitor/multihealthmonitor.go index 8d9790fd91e..a9f03643d10 100644 --- a/internal/common/healthmonitor/multihealthmonitor.go +++ b/internal/common/healthmonitor/multihealthmonitor.go @@ -1,7 +1,6 @@ package healthmonitor import ( - "context" "fmt" "sync" @@ -9,7 +8,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" "golang.org/x/exp/maps" - "golang.org/x/sync/errgroup" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // MultiHealthMonitor wraps multiple HealthMonitors and itself implements the HealthMonitor interface. @@ -100,8 +100,8 @@ func (srv *MultiHealthMonitor) IsHealthy() (ok bool, reason string, err error) { } // Run initialises prometheus metrics and starts any child health checkers. 
-func (srv *MultiHealthMonitor) Run(ctx context.Context, log *logrus.Entry) error { - g, ctx := errgroup.WithContext(ctx) +func (srv *MultiHealthMonitor) Run(ctx *armadacontext.Context, log *logrus.Entry) error { + g, ctx := armadacontext.ErrGroup(ctx) for _, healthMonitor := range srv.healthMonitorsByName { healthMonitor := healthMonitor g.Go(func() error { return healthMonitor.Run(ctx, log) }) diff --git a/internal/common/ingest/batch.go b/internal/common/ingest/batch.go index 7f07c915855..f099f646fae 100644 --- a/internal/common/ingest/batch.go +++ b/internal/common/ingest/batch.go @@ -1,12 +1,13 @@ package ingest import ( - "context" "sync" "time" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/clock" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // Batcher batches up events from a channel. Batches are created whenever maxItems have been @@ -32,7 +33,7 @@ func NewBatcher[T any](input chan T, maxItems int, maxTimeout time.Duration, cal } } -func (b *Batcher[T]) Run(ctx context.Context) { +func (b *Batcher[T]) Run(ctx *armadacontext.Context) { for { b.buffer = []T{} expire := b.clock.After(b.maxTimeout) diff --git a/internal/common/ingest/batch_test.go b/internal/common/ingest/batch_test.go index 4c9fee650a1..a906dbc8258 100644 --- a/internal/common/ingest/batch_test.go +++ b/internal/common/ingest/batch_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "golang.org/x/net/context" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/clock" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) const ( @@ -42,7 +42,7 @@ func (r *resultHolder) resultLength() int { } func TestBatch_MaxItems(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) testClock := clock.NewFakeClock(time.Now()) inputChan := make(chan int) result := newResultHolder() @@ -67,7 +67,7 @@ func TestBatch_MaxItems(t *testing.T) { } func TestBatch_Time(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) testClock := clock.NewFakeClock(time.Now()) inputChan := make(chan int) result := newResultHolder() @@ -89,7 +89,7 @@ func TestBatch_Time(t *testing.T) { } func TestBatch_Time_WithIntialQuiet(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) testClock := clock.NewFakeClock(time.Now()) inputChan := make(chan int) result := newResultHolder() @@ -120,7 +120,7 @@ func TestBatch_Time_WithIntialQuiet(t *testing.T) { cancel() } -func waitForBufferLength(ctx context.Context, batcher *Batcher[int], numEvents int) error { +func waitForBufferLength(ctx *armadacontext.Context, batcher *Batcher[int], numEvents int) error { ticker := time.NewTicker(5 * time.Millisecond) for { select { @@ -134,7 +134,7 @@ func waitForBufferLength(ctx context.Context, batcher *Batcher[int], numEvents i } } -func waitForExpectedEvents(ctx context.Context, rh *resultHolder, numEvents int) { +func waitForExpectedEvents(ctx *armadacontext.Context, rh *resultHolder, numEvents int) { done := false ticker := time.NewTicker(5 * time.Millisecond) for !done { diff --git a/internal/common/ingest/ingestion_pipeline.go b/internal/common/ingest/ingestion_pipeline.go index 2b5e9a9e783..4236473d360 
100644 --- a/internal/common/ingest/ingestion_pipeline.go +++ b/internal/common/ingest/ingestion_pipeline.go @@ -1,16 +1,17 @@ package ingest import ( + "context" "sync" "time" "github.com/apache/pulsar-client-go/pulsar" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "golang.org/x/net/context" "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" commonmetrics "github.com/armadaproject/armada/internal/common/ingest/metrics" "github.com/armadaproject/armada/internal/common/pulsarutils" @@ -27,7 +28,7 @@ type HasPulsarMessageIds interface { // InstructionConverter should be implemented by structs that can convert a batch of event sequences into an object // suitable for passing to the sink type InstructionConverter[T HasPulsarMessageIds] interface { - Convert(ctx context.Context, msg *EventSequencesWithIds) T + Convert(ctx *armadacontext.Context, msg *EventSequencesWithIds) T } // Sink should be implemented by the struct responsible for putting the data in its final resting place, e.g. a @@ -35,7 +36,7 @@ type InstructionConverter[T HasPulsarMessageIds] interface { type Sink[T HasPulsarMessageIds] interface { // Store should persist the sink. The store is responsible for retrying failed attempts and should only return an error // When it is satisfied that operation cannot be retries. - Store(ctx context.Context, msg T) error + Store(ctx *armadacontext.Context, msg T) error } // EventSequencesWithIds consists of a batch of Event Sequences along with the corresponding Pulsar Message Ids @@ -122,7 +123,7 @@ func NewFilteredMsgIngestionPipeline[T HasPulsarMessageIds]( } // Run will run the ingestion pipeline until the supplied context is shut down -func (ingester *IngestionPipeline[T]) Run(ctx context.Context) error { +func (ingester *IngestionPipeline[T]) Run(ctx *armadacontext.Context) error { shutdownMetricServer := common.ServeMetrics(ingester.metricsConfig.Port) defer shutdownMetricServer() @@ -147,7 +148,7 @@ func (ingester *IngestionPipeline[T]) Run(ctx context.Context) error { // Set up a context that n seconds after ctx // This gives the rest of the pipeline a chance to flush pending messages - pipelineShutdownContext, cancel := context.WithCancel(context.Background()) + pipelineShutdownContext, cancel := armadacontext.WithCancel(armadacontext.Background()) go func() { for { select { @@ -206,7 +207,7 @@ func (ingester *IngestionPipeline[T]) Run(ctx context.Context) error { } else { for _, msgId := range msg.GetMessageIDs() { util.RetryUntilSuccess( - context.Background(), + armadacontext.Background(), func() error { return ingester.consumer.AckID(msgId) }, func(err error) { log.WithError(err).Warnf("Pulsar ack failed; backing off for %s", ingester.pulsarConfig.BackoffTime) @@ -265,7 +266,7 @@ func unmarshalEventSequences(batch []pulsar.Message, msgFilter func(msg pulsar.M } // Try and unmarshall the proto - es, err := eventutil.UnmarshalEventSequence(context.Background(), msg.Payload()) + es, err := eventutil.UnmarshalEventSequence(armadacontext.Background(), msg.Payload()) if err != nil { metrics.RecordPulsarMessageError(commonmetrics.PulsarMessageErrorDeserialization) log.WithError(err).Warnf("Could not unmarshal proto for msg %s", msg.ID()) diff --git a/internal/common/ingest/ingestion_pipeline_test.go b/internal/common/ingest/ingestion_pipeline_test.go index da0d653b39a..53dd6a7a39b 
100644 --- a/internal/common/ingest/ingestion_pipeline_test.go +++ b/internal/common/ingest/ingestion_pipeline_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/ingest/metrics" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/pkg/armadaevents" @@ -191,7 +192,7 @@ func newSimpleConverter(t *testing.T) InstructionConverter[*simpleMessages] { return &simpleConverter{t} } -func (s *simpleConverter) Convert(_ context.Context, msg *EventSequencesWithIds) *simpleMessages { +func (s *simpleConverter) Convert(_ *armadacontext.Context, msg *EventSequencesWithIds) *simpleMessages { s.t.Helper() assert.Len(s.t, msg.EventSequences, len(msg.MessageIds)) var converted []*simpleMessage @@ -218,7 +219,7 @@ func newSimpleSink(t *testing.T) *simpleSink { } } -func (s *simpleSink) Store(_ context.Context, msg *simpleMessages) error { +func (s *simpleSink) Store(_ *armadacontext.Context, msg *simpleMessages) error { for _, simpleMessage := range msg.msgs { s.simpleMessages[simpleMessage.id] = simpleMessage } @@ -236,7 +237,7 @@ func (s *simpleSink) assertDidProcess(messages []pulsar.Message) { } func TestRun_HappyPath_SingleMessage(t *testing.T) { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) + ctx, cancel := armadacontext.WithDeadline(armadacontext.Background(), time.Now().Add(10*time.Second)) messages := []pulsar.Message{ pulsarutils.NewPulsarMessage(1, baseTime, marshal(t, succeeded)), } @@ -257,7 +258,7 @@ func TestRun_HappyPath_SingleMessage(t *testing.T) { } func TestRun_HappyPath_MultipleMessages(t *testing.T) { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) + ctx, cancel := armadacontext.WithDeadline(armadacontext.Background(), time.Now().Add(10*time.Second)) messages := []pulsar.Message{ pulsarutils.NewPulsarMessage(1, baseTime, marshal(t, succeeded)), pulsarutils.NewPulsarMessage(2, baseTime.Add(1*time.Second), marshal(t, pendingAndRunning)), diff --git a/internal/common/logging/stacktrace.go b/internal/common/logging/stacktrace.go index cdcf4aef525..7d546915b31 100644 --- a/internal/common/logging/stacktrace.go +++ b/internal/common/logging/stacktrace.go @@ -10,9 +10,9 @@ type stackTracer interface { StackTrace() errors.StackTrace } -// WithStacktrace returns a new logrus.Entry obtained by adding error information and, if available, a stack trace -// as fields to the provided logrus.Entry. -func WithStacktrace(logger *logrus.Entry, err error) *logrus.Entry { +// WithStacktrace returns a new logrus.FieldLogger obtained by adding error information and, if available, a stack trace +// as fields to the provided logrus.FieldLogger. 
+func WithStacktrace(logger logrus.FieldLogger, err error) logrus.FieldLogger { logger = logger.WithError(err) if stackErr, ok := err.(stackTracer); ok { return logger.WithField("stacktrace", stackErr.StackTrace()) diff --git a/internal/common/pgkeyvalue/pgkeyvalue.go b/internal/common/pgkeyvalue/pgkeyvalue.go index 8476146d727..d3f5f7d9401 100644 --- a/internal/common/pgkeyvalue/pgkeyvalue.go +++ b/internal/common/pgkeyvalue/pgkeyvalue.go @@ -1,7 +1,6 @@ package pgkeyvalue import ( - "context" "fmt" "time" @@ -10,6 +9,7 @@ import ( "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/logging" @@ -34,7 +34,7 @@ type PGKeyValueStore struct { clock clock.Clock } -func New(ctx context.Context, db *pgxpool.Pool, tableName string) (*PGKeyValueStore, error) { +func New(ctx *armadacontext.Context, db *pgxpool.Pool, tableName string) (*PGKeyValueStore, error) { if db == nil { return nil, errors.WithStack(&armadaerrors.ErrInvalidArgument{ Name: "db", @@ -60,7 +60,7 @@ func New(ctx context.Context, db *pgxpool.Pool, tableName string) (*PGKeyValueSt }, nil } -func (c *PGKeyValueStore) Load(ctx context.Context, keys []string) (map[string][]byte, error) { +func (c *PGKeyValueStore) Load(ctx *armadacontext.Context, keys []string) (map[string][]byte, error) { rows, err := c.db.Query(ctx, fmt.Sprintf("SELECT KEY, VALUE FROM %s WHERE KEY = any($1)", c.tableName), keys) if err != nil { return nil, errors.WithStack(err) @@ -78,7 +78,7 @@ func (c *PGKeyValueStore) Load(ctx context.Context, keys []string) (map[string][ return kv, nil } -func (c *PGKeyValueStore) Store(ctx context.Context, kvs map[string][]byte) error { +func (c *PGKeyValueStore) Store(ctx *armadacontext.Context, kvs map[string][]byte) error { data := make([]KeyValue, 0, len(kvs)) for k, v := range kvs { data = append(data, KeyValue{ @@ -90,7 +90,7 @@ func (c *PGKeyValueStore) Store(ctx context.Context, kvs map[string][]byte) erro return database.UpsertWithTransaction(ctx, c.db, c.tableName, data) } -func createTableIfNotExists(ctx context.Context, db *pgxpool.Pool, tableName string) error { +func createTableIfNotExists(ctx *armadacontext.Context, db *pgxpool.Pool, tableName string) error { _, err := db.Exec(ctx, fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s ( key TEXT PRIMARY KEY, @@ -101,7 +101,7 @@ func createTableIfNotExists(ctx context.Context, db *pgxpool.Pool, tableName str } // Cleanup removes all key-value pairs older than lifespan. -func (c *PGKeyValueStore) cleanup(ctx context.Context, lifespan time.Duration) error { +func (c *PGKeyValueStore) cleanup(ctx *armadacontext.Context, lifespan time.Duration) error { sql := fmt.Sprintf("DELETE FROM %s WHERE (inserted <= $1);", c.tableName) _, err := c.db.Exec(ctx, sql, c.clock.Now().Add(-lifespan)) if err != nil { @@ -112,7 +112,7 @@ func (c *PGKeyValueStore) cleanup(ctx context.Context, lifespan time.Duration) e // PeriodicCleanup starts a goroutine that automatically runs the cleanup job // every interval until the provided context is cancelled. 
-func (c *PGKeyValueStore) PeriodicCleanup(ctx context.Context, interval time.Duration, lifespan time.Duration) error { +func (c *PGKeyValueStore) PeriodicCleanup(ctx *armadacontext.Context, interval time.Duration, lifespan time.Duration) error { log := logrus.StandardLogger().WithField("service", "PGKeyValueStoreCleanup") log.Info("service started") ticker := c.clock.NewTicker(interval) diff --git a/internal/common/pgkeyvalue/pgkeyvalue_test.go b/internal/common/pgkeyvalue/pgkeyvalue_test.go index c8a9beeb175..aa70c4ed7b9 100644 --- a/internal/common/pgkeyvalue/pgkeyvalue_test.go +++ b/internal/common/pgkeyvalue/pgkeyvalue_test.go @@ -1,7 +1,6 @@ package pgkeyvalue import ( - "context" "testing" "time" @@ -11,11 +10,12 @@ import ( "golang.org/x/exp/maps" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/lookout/testutil" ) func TestLoadStore(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { kvStore, err := New(ctx, db, "cachetable") @@ -47,7 +47,7 @@ func TestLoadStore(t *testing.T) { } func TestCleanup(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { baseTime := time.Now() diff --git a/internal/common/pulsarutils/async.go b/internal/common/pulsarutils/async.go index 8f71781d558..9040eed5fe9 100644 --- a/internal/common/pulsarutils/async.go +++ b/internal/common/pulsarutils/async.go @@ -7,11 +7,11 @@ import ( "sync" "time" - commonmetrics "github.com/armadaproject/armada/internal/common/ingest/metrics" - "github.com/apache/pulsar-client-go/pulsar" "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" + commonmetrics "github.com/armadaproject/armada/internal/common/ingest/metrics" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/util" ) @@ -36,7 +36,7 @@ type ConsumerMessage struct { var msgLogger = logrus.NewEntry(logrus.StandardLogger()) func Receive( - ctx context.Context, + ctx *armadacontext.Context, consumer pulsar.Consumer, receiveTimeout time.Duration, backoffTime time.Duration, @@ -76,7 +76,7 @@ func Receive( return default: // Get a message from Pulsar, which consists of a sequence of events (i.e., state transitions). - ctxWithTimeout, cancel := context.WithTimeout(ctx, receiveTimeout) + ctxWithTimeout, cancel := armadacontext.WithTimeout(ctx, receiveTimeout) msg, err := consumer.Receive(ctxWithTimeout) if errors.Is(err, context.DeadlineExceeded) { msgLogger.Debugf("No message received") @@ -109,7 +109,7 @@ func Receive( // Ack will ack all pulsar messages coming in on the msgs channel. The incoming messages contain a consumer id which // corresponds to the index of the consumer that should be used to perform the ack. 
In theory, the acks could be done // in parallel, however its unlikely that they will be a performance bottleneck -func Ack(ctx context.Context, consumers []pulsar.Consumer, msgs chan []*ConsumerMessageId, backoffTime time.Duration, wg *sync.WaitGroup) { +func Ack(ctx *armadacontext.Context, consumers []pulsar.Consumer, msgs chan []*ConsumerMessageId, backoffTime time.Duration, wg *sync.WaitGroup) { for msg := range msgs { for _, id := range msg { if id.ConsumerId < 0 || id.ConsumerId >= len(consumers) { diff --git a/internal/common/pulsarutils/async_test.go b/internal/common/pulsarutils/async_test.go index d47151c660d..bb8739254df 100644 --- a/internal/common/pulsarutils/async_test.go +++ b/internal/common/pulsarutils/async_test.go @@ -1,16 +1,16 @@ package pulsarutils import ( - ctx "context" + "context" "sync" "testing" "time" - "github.com/armadaproject/armada/internal/common/ingest/metrics" - "github.com/apache/pulsar-client-go/pulsar" "github.com/stretchr/testify/assert" - "golang.org/x/net/context" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/ingest/metrics" ) var m = metrics.NewMetrics("test_pulsarutils_") @@ -46,8 +46,8 @@ func TestReceive(t *testing.T) { consumer := &mockConsumer{ msgs: msgs, } - context, cancel := ctx.WithCancel(ctx.Background()) - outputChan := Receive(context, consumer, 10*time.Millisecond, 10*time.Millisecond, m) + ctx, cancel := armadacontext.WithCancel(armadacontext.Background()) + outputChan := Receive(ctx, consumer, 10*time.Millisecond, 10*time.Millisecond, m) var receivedMsgs []pulsar.Message wg := sync.WaitGroup{} @@ -71,7 +71,7 @@ func TestAcks(t *testing.T) { consumers := []pulsar.Consumer{&mockConsumer} wg := sync.WaitGroup{} wg.Add(1) - go Ack(ctx.Background(), consumers, input, 1*time.Second, &wg) + go Ack(armadacontext.Background(), consumers, input, 1*time.Second, &wg) input <- []*ConsumerMessageId{ {NewMessageId(1), 0, 0}, {NewMessageId(2), 0, 0}, } diff --git a/internal/common/pulsarutils/eventsequence.go b/internal/common/pulsarutils/eventsequence.go index 49325bd0b2b..3750a1b11e8 100644 --- a/internal/common/pulsarutils/eventsequence.go +++ b/internal/common/pulsarutils/eventsequence.go @@ -1,24 +1,23 @@ package pulsarutils import ( - "context" "sync/atomic" - "github.com/armadaproject/armada/internal/common/schedulers" - "github.com/apache/pulsar-client-go/pulsar" "github.com/gogo/protobuf/proto" "github.com/hashicorp/go-multierror" "github.com/pkg/errors" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/requestid" + "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) // CompactAndPublishSequences reduces the number of sequences to the smallest possible, // while respecting per-job set ordering and max Pulsar message size, and then publishes to Pulsar. -func CompactAndPublishSequences(ctx context.Context, sequences []*armadaevents.EventSequence, producer pulsar.Producer, maxMessageSizeInBytes uint, scheduler schedulers.Scheduler) error { +func CompactAndPublishSequences(ctx *armadacontext.Context, sequences []*armadaevents.EventSequence, producer pulsar.Producer, maxMessageSizeInBytes uint, scheduler schedulers.Scheduler) error { // Reduce the number of sequences to send to the minimum possible, // and then break up any sequences larger than maxMessageSizeInBytes. 
sequences = eventutil.CompactEventSequences(sequences) @@ -38,7 +37,7 @@ func CompactAndPublishSequences(ctx context.Context, sequences []*armadaevents.E // and // eventutil.LimitSequencesByteSize(sequences, int(srv.MaxAllowedMessageSize)) // before passing to this function. -func PublishSequences(ctx context.Context, producer pulsar.Producer, sequences []*armadaevents.EventSequence, scheduler schedulers.Scheduler) error { +func PublishSequences(ctx *armadacontext.Context, producer pulsar.Producer, sequences []*armadaevents.EventSequence, scheduler schedulers.Scheduler) error { // Incoming gRPC requests are annotated with a unique id. // Pass this id through the log by adding it to the Pulsar message properties. requestId := requestid.FromContextOrMissing(ctx) diff --git a/internal/common/pulsarutils/eventsequence_test.go b/internal/common/pulsarutils/eventsequence_test.go index 0613a9f3462..0832195beac 100644 --- a/internal/common/pulsarutils/eventsequence_test.go +++ b/internal/common/pulsarutils/eventsequence_test.go @@ -9,19 +9,20 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) func TestPublishSequences_SendAsyncErr(t *testing.T) { producer := &mockProducer{} - err := PublishSequences(context.Background(), producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) + err := PublishSequences(armadacontext.Background(), producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) assert.NoError(t, err) producer = &mockProducer{ sendAsyncErr: errors.New("sendAsyncErr"), } - err = PublishSequences(context.Background(), producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) + err = PublishSequences(armadacontext.Background(), producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) assert.ErrorIs(t, err, producer.sendAsyncErr) } @@ -29,7 +30,7 @@ func TestPublishSequences_RespectTimeout(t *testing.T) { producer := &mockProducer{ sendAsyncDuration: 1 * time.Second, } - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Millisecond) defer cancel() err := PublishSequences(ctx, producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) assert.ErrorIs(t, err, context.DeadlineExceeded) diff --git a/internal/common/startup.go b/internal/common/startup.go index e14fa5a21a7..0c1364c1e98 100644 --- a/internal/common/startup.go +++ b/internal/common/startup.go @@ -1,7 +1,7 @@ package common import ( - "context" + "crypto/tls" "fmt" "net/http" "os" @@ -11,6 +11,8 @@ import ( "strings" "time" + "github.com/armadaproject/armada/internal/common/certs" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" @@ -18,6 +20,7 @@ import ( "github.com/spf13/viper" "github.com/weaveworks/promrus" + "github.com/armadaproject/armada/internal/common/armadacontext" commonconfig "github.com/armadaproject/armada/internal/common/config" "github.com/armadaproject/armada/internal/common/logging" ) @@ -145,23 +148,51 @@ func ServeMetricsFor(port uint16, gatherer prometheus.Gatherer) (shutdown func() // ServeHttp starts an HTTP server listening on the given port. // TODO: Make block until a context passed in is cancelled. 
func ServeHttp(port uint16, mux http.Handler) (shutdown func()) { + return serveHttp(port, mux, false, "", "") +} + +func ServeHttps(port uint16, mux http.Handler, certFile, keyFile string) (shutdown func()) { + return serveHttp(port, mux, true, certFile, keyFile) +} + +func serveHttp(port uint16, mux http.Handler, useTls bool, certFile, keyFile string) (shutdown func()) { srv := &http.Server{ Addr: fmt.Sprintf(":%d", port), Handler: mux, } + scheme := "http" + if useTls { + scheme = "https" + } + go func() { - log.Printf("Starting http server listening on %d", port) - if err := srv.ListenAndServe(); err != http.ErrServerClosed { + log.Printf("Starting %s server listening on %d", scheme, port) + var err error + if useTls { + certWatcher := certs.NewCachedCertificateService(certFile, keyFile, time.Minute) + go func() { + certWatcher.Run(armadacontext.Background()) + }() + srv.TLSConfig = &tls.Config{ + GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + return certWatcher.GetCertificate(), nil + }, + } + err = srv.ListenAndServeTLS("", "") + } else { + err = srv.ListenAndServe() + } + if err != nil && err != http.ErrServerClosed { panic(err) // TODO Don't panic, return an error } }() // TODO There's no need for this function to panic, since the main goroutine will exit. // Instead, just log an error. return func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() - log.Printf("Stopping http server listening on %d", port) + log.Printf("Stopping %s server listening on %d", scheme, port) e := srv.Shutdown(ctx) if e != nil { panic(e) diff --git a/internal/common/util/context.go b/internal/common/util/context.go index c96b4f0adee..1f6fa6519f4 100644 --- a/internal/common/util/context.go +++ b/internal/common/util/context.go @@ -1,11 +1,12 @@ package util import ( - "context" "time" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) -func CloseToDeadline(ctx context.Context, tolerance time.Duration) bool { +func CloseToDeadline(ctx *armadacontext.Context, tolerance time.Duration) bool { deadline, exists := ctx.Deadline() return exists && deadline.Before(time.Now().Add(tolerance)) } diff --git a/internal/common/util/retry.go b/internal/common/util/retry.go index 9f178c037d8..c688614e63e 100644 --- a/internal/common/util/retry.go +++ b/internal/common/util/retry.go @@ -1,8 +1,10 @@ package util -import "golang.org/x/net/context" +import ( + "github.com/armadaproject/armada/internal/common/armadacontext" +) -func RetryUntilSuccess(ctx context.Context, performAction func() error, onError func(error)) { +func RetryUntilSuccess(ctx *armadacontext.Context, performAction func() error, onError func(error)) { for { select { case <-ctx.Done(): diff --git a/internal/common/util/retry_test.go b/internal/common/util/retry_test.go index 43180ac6f39..2ad6ea4b300 100644 --- a/internal/common/util/retry_test.go +++ b/internal/common/util/retry_test.go @@ -1,16 +1,17 @@ package util import ( - "context" "fmt" "testing" "time" "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) func TestRetryDoesntSpin(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 1*time.Second) defer cancel() RetryUntilSuccess( @@ -30,7 +31,7 @@ func TestRetryDoesntSpin(t *testing.T) { } func TestRetryCancel(t 
*testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 1*time.Second) defer cancel() RetryUntilSuccess( @@ -61,7 +62,7 @@ func TestSucceedsAfterFailures(t *testing.T) { errorCount := 0 - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 1*time.Second) defer cancel() RetryUntilSuccess( diff --git a/internal/common/validation/job.go b/internal/common/validation/job.go index 29ac347d030..81c305607de 100644 --- a/internal/common/validation/job.go +++ b/internal/common/validation/job.go @@ -1,8 +1,6 @@ package validation import ( - "fmt" - "github.com/pkg/errors" "github.com/armadaproject/armada/internal/scheduler" @@ -14,8 +12,8 @@ import ( ) func ValidateApiJobs(jobs []*api.Job, config configuration.SchedulingConfig) ([]*api.JobSubmitResponseItem, error) { - if responseItems, err := validateGangs(jobs); err != nil { - return responseItems, err + if _, err := validateGangs(jobs); err != nil { + return nil, err } responseItems := make([]*api.JobSubmitResponseItem, 0, len(jobs)) @@ -35,57 +33,47 @@ func ValidateApiJobs(jobs []*api.Job, config configuration.SchedulingConfig) ([] return nil, nil } -func validateGangs(jobs []*api.Job) ([]*api.JobSubmitResponseItem, error) { - gangDetailsByGangId := make(map[string]struct { - actualCardinality int - expectedCardinality int - expectedPriorityClassName string - expectedNodeUniformityLabel string - }) +type gangDetails = struct { + expectedCardinality int + expectedMinimumCardinality int + expectedPriorityClassName string + expectedNodeUniformityLabel string +} - responseItems := make([]*api.JobSubmitResponseItem, 0, len(jobs)) +func validateGangs(jobs []*api.Job) (map[string]gangDetails, error) { + gangDetailsByGangId := make(map[string]gangDetails) for i, job := range jobs { annotations := job.Annotations - gangId, gangCardinality, isGangJob, err := scheduler.GangIdAndCardinalityFromAnnotations(annotations) + gangId, gangCardinality, gangMinimumCardinality, isGangJob, err := scheduler.GangIdAndCardinalityFromAnnotations(annotations) nodeUniformityLabel := annotations[configuration.GangNodeUniformityLabelAnnotation] if err != nil { - response := &api.JobSubmitResponseItem{ - JobId: job.Id, - Error: errors.WithMessagef(err, "%d-th job with id %s in gang %s", i, job.Id, gangId).Error(), - } - responseItems = append(responseItems, response) + return nil, errors.WithMessagef(err, "%d-th job with id %s in gang %s", i, job.Id, gangId) } if !isGangJob { continue } if gangId == "" { - response := &api.JobSubmitResponseItem{ - JobId: job.Id, - Error: fmt.Sprintf("empty gang id for %d-th job with id %s", i, job.Id), - } - responseItems = append(responseItems, response) + return nil, errors.Errorf("empty gang id for %d-th job with id %s", i, job.Id) } podSpec := util.PodSpecFromJob(job) if details, ok := gangDetailsByGangId[gangId]; ok { if details.expectedCardinality != gangCardinality { - response := &api.JobSubmitResponseItem{ - JobId: job.Id, - Error: fmt.Sprintf( - "inconsistent gang cardinality for %d-th job with id %s in gang %s: expected %d but got %d", - i, job.Id, gangId, details.expectedCardinality, gangCardinality, - ), - } - responseItems = append(responseItems, response) + return nil, errors.Errorf( + "inconsistent gang cardinality for %d-th job with id %s in gang %s: expected %d but got %d", + i, job.Id, gangId, details.expectedCardinality, 
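The `RetryUntilSuccess` signature change is exercised by the tests above: the loop keeps calling the action until it succeeds or the context is cancelled, invoking the error callback between attempts. A simplified stdlib sketch of that loop (not the project's code; here the callback is responsible for any back-off, which is what keeps the loop from spinning):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func retryUntilSuccess(ctx context.Context, action func() error, onError func(error)) {
	for {
		select {
		case <-ctx.Done():
			return // give up once the context is cancelled or times out
		default:
			if err := action(); err != nil {
				onError(err)
			} else {
				return
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	attempts := 0
	retryUntilSuccess(ctx,
		func() error {
			attempts++
			if attempts < 3 {
				return errors.New("transient failure")
			}
			return nil
		},
		func(err error) { time.Sleep(10 * time.Millisecond) }, // back off between attempts
	)
	fmt.Println("attempts:", attempts) // 3
}
```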
gangCardinality, + ) + } + if details.expectedMinimumCardinality != gangMinimumCardinality { + return nil, errors.Errorf( + "inconsistent gang minimum cardinality for %d-th job with id %s in gang %s: expected %d but got %d", + i, job.Id, gangId, details.expectedMinimumCardinality, gangMinimumCardinality, + ) } if podSpec != nil && details.expectedPriorityClassName != podSpec.PriorityClassName { - response := &api.JobSubmitResponseItem{ - JobId: job.Id, - Error: fmt.Sprintf( - "inconsistent PriorityClassName for %d-th job with id %s in gang %s: expected %s but got %s", - i, job.Id, gangId, details.expectedPriorityClassName, podSpec.PriorityClassName, - ), - } - responseItems = append(responseItems, response) + return nil, errors.Errorf( + "inconsistent PriorityClassName for %d-th job with id %s in gang %s: expected %s but got %s", + i, job.Id, gangId, details.expectedPriorityClassName, podSpec.PriorityClassName, + ) } if nodeUniformityLabel != details.expectedNodeUniformityLabel { return nil, errors.Errorf( @@ -93,11 +81,10 @@ func validateGangs(jobs []*api.Job) ([]*api.JobSubmitResponseItem, error) { i, job.Id, gangId, details.expectedNodeUniformityLabel, nodeUniformityLabel, ) } - details.actualCardinality++ gangDetailsByGangId[gangId] = details } else { - details.actualCardinality = 1 details.expectedCardinality = gangCardinality + details.expectedMinimumCardinality = gangMinimumCardinality if podSpec != nil { details.expectedPriorityClassName = podSpec.PriorityClassName } @@ -105,22 +92,8 @@ func validateGangs(jobs []*api.Job) ([]*api.JobSubmitResponseItem, error) { gangDetailsByGangId[gangId] = details } } - - if len(responseItems) > 0 { - return responseItems, errors.New("[createJobs] Failed to validate gang jobs") - } - - for gangId, details := range gangDetailsByGangId { - if details.expectedCardinality != details.actualCardinality { - return nil, errors.Errorf( - "unexpected number of jobs for gang %s: expected %d jobs but got %d", - gangId, details.expectedCardinality, details.actualCardinality, - ) - } - } - return nil, nil + return gangDetailsByGangId, nil } - func ValidateApiJob(job *api.Job, config configuration.SchedulingConfig) error { if err := ValidateApiJobPodSpecs(job); err != nil { return err diff --git a/internal/common/validation/job_test.go b/internal/common/validation/job_test.go index 29b714cb97b..420bea9301f 100644 --- a/internal/common/validation/job_test.go +++ b/internal/common/validation/job_test.go @@ -107,14 +107,15 @@ func Test_ValidateJobSubmitRequestItem_WithPortRepeatedInSeperateConfig(t *testi func TestValidateGangs(t *testing.T) { tests := map[string]struct { - Jobs []*api.Job - ExpectSuccess bool + Jobs []*api.Job + ExpectSuccess bool + ExpectedGangMinimumCardinalityByGangId map[string]int }{ "no gang jobs": { Jobs: []*api.Job{{}, {}}, ExpectSuccess: true, }, - "complete gang job of cardinality 1": { + "complete gang job of cardinality 1 with no minimum cardinality provided": { Jobs: []*api.Job{ { Annotations: map[string]string{ @@ -123,7 +124,21 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: true, + ExpectSuccess: true, + ExpectedGangMinimumCardinalityByGangId: map[string]int{"foo": 1}, + }, + "complete gang job of cardinality 2 with minimum cardinality of 1": { + Jobs: []*api.Job{ + { + Annotations: map[string]string{ + configuration.GangIdAnnotation: "foo", + configuration.GangCardinalityAnnotation: strconv.Itoa(2), + configuration.GangMinimumCardinalityAnnotation: strconv.Itoa(1), + }, + }, + }, + ExpectSuccess: true, + 
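`validateGangs` now returns the per-gang expectations it gathers (including the new minimum cardinality) and fails fast with an error on the first inconsistency, instead of accumulating `JobSubmitResponseItem`s; the old completeness check against an actual member count is gone, which is why the "one complete and one incomplete gang" case in the tests that follow now expects success. A stripped-down sketch of the first-member-fixes-the-expectations pattern, with simplified types that are not the project's:

```go
package main

import "fmt"

type gangJob struct {
	ID             string
	GangID         string
	Cardinality    int
	MinCardinality int
}

// validate records the expectations set by the first member of each gang and
// rejects later members that disagree with them.
func validate(jobs []gangJob) (map[string]gangJob, error) {
	expected := make(map[string]gangJob)
	for i, j := range jobs {
		if j.GangID == "" {
			continue // not a gang job
		}
		if prev, ok := expected[j.GangID]; ok {
			if prev.Cardinality != j.Cardinality || prev.MinCardinality != j.MinCardinality {
				return nil, fmt.Errorf("inconsistent cardinality for %d-th job %s in gang %s", i, j.ID, j.GangID)
			}
			continue
		}
		expected[j.GangID] = j
	}
	return expected, nil
}

func main() {
	details, err := validate([]gangJob{
		{ID: "a", GangID: "foo", Cardinality: 2, MinCardinality: 1},
		{ID: "b", GangID: "foo", Cardinality: 2, MinCardinality: 1},
	})
	fmt.Println(details["foo"].MinCardinality, err) // 1 <nil>
}
```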
ExpectedGangMinimumCardinalityByGangId: map[string]int{"foo": 1}, }, "empty gangId": { Jobs: []*api.Job{ @@ -134,7 +149,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "complete gang job of cardinality 3": { Jobs: []*api.Job{ @@ -157,7 +173,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: true, + ExpectSuccess: true, + ExpectedGangMinimumCardinalityByGangId: map[string]int{"foo": 3}, }, "two complete gangs": { Jobs: []*api.Job{ @@ -192,9 +209,10 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: true, + ExpectSuccess: true, + ExpectedGangMinimumCardinalityByGangId: map[string]int{"foo": 3, "bar": 2}, }, - "one complete and one incomplete gang": { + "one complete and one incomplete gang are passed through": { Jobs: []*api.Job{ { Annotations: map[string]string{ @@ -221,7 +239,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: true, + ExpectedGangMinimumCardinalityByGangId: map[string]int{"foo": 3, "bar": 2}, }, "missing cardinality": { Jobs: []*api.Job{ @@ -237,7 +256,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "invalid cardinality": { Jobs: []*api.Job{ @@ -253,7 +273,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "zero cardinality": { Jobs: []*api.Job{ @@ -264,7 +285,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "negative cardinality": { Jobs: []*api.Job{ @@ -275,7 +297,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "inconsistent cardinality": { Jobs: []*api.Job{ @@ -310,7 +333,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "inconsistent PriorityClassName": { Jobs: []*api.Job{ @@ -333,7 +357,8 @@ func TestValidateGangs(t *testing.T) { }, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, "inconsistent NodeUniformityLabel": { Jobs: []*api.Job{ @@ -354,18 +379,22 @@ func TestValidateGangs(t *testing.T) { PodSpec: &v1.PodSpec{}, }, }, - ExpectSuccess: false, + ExpectSuccess: false, + ExpectedGangMinimumCardinalityByGangId: nil, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - responseItems, err := validateGangs(tc.Jobs) + gangDetailsById, err := validateGangs(tc.Jobs) if tc.ExpectSuccess { - assert.Nil(t, responseItems) assert.NoError(t, err) } else { assert.Error(t, err) } + + for id, e := range gangDetailsById { + assert.Equal(t, tc.ExpectedGangMinimumCardinalityByGangId[id], e.expectedMinimumCardinality) + } }) } } diff --git a/internal/eventingester/convert/conversions.go b/internal/eventingester/convert/conversions.go index cab978e5812..fbb66a0c481 100644 --- a/internal/eventingester/convert/conversions.go +++ b/internal/eventingester/convert/conversions.go @@ -1,12 +1,11 @@ package convert import ( - "context" - "github.com/gogo/protobuf/proto" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" 
"github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/ingest" @@ -30,7 +29,7 @@ func NewEventConverter(compressor compress.Compressor, maxMessageBatchSize uint, } } -func (ec *EventConverter) Convert(ctx context.Context, sequencesWithIds *ingest.EventSequencesWithIds) *model.BatchUpdate { +func (ec *EventConverter) Convert(ctx *armadacontext.Context, sequencesWithIds *ingest.EventSequencesWithIds) *model.BatchUpdate { // Remove all groups as they are potentially quite large for _, es := range sequencesWithIds.EventSequences { es.Groups = nil diff --git a/internal/eventingester/convert/conversions_test.go b/internal/eventingester/convert/conversions_test.go index c716f84815a..24ff9013733 100644 --- a/internal/eventingester/convert/conversions_test.go +++ b/internal/eventingester/convert/conversions_test.go @@ -1,7 +1,6 @@ package convert import ( - "context" "math/rand" "testing" "time" @@ -11,6 +10,7 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/ingest" "github.com/armadaproject/armada/internal/common/pulsarutils" @@ -55,7 +55,7 @@ var cancelled = &armadaevents.EventSequence_Event{ func TestSingle(t *testing.T) { msg := NewMsg(jobRunSucceeded) converter := simpleEventConverter() - batchUpdate := converter.Convert(context.Background(), msg) + batchUpdate := converter.Convert(armadacontext.Background(), msg) expectedSequence := armadaevents.EventSequence{ Events: []*armadaevents.EventSequence_Event{jobRunSucceeded}, } @@ -72,7 +72,7 @@ func TestSingle(t *testing.T) { func TestMultiple(t *testing.T) { msg := NewMsg(cancelled, jobRunSucceeded) converter := simpleEventConverter() - batchUpdate := converter.Convert(context.Background(), msg) + batchUpdate := converter.Convert(armadacontext.Background(), msg) expectedSequence := armadaevents.EventSequence{ Events: []*armadaevents.EventSequence_Event{cancelled, jobRunSucceeded}, } @@ -113,7 +113,7 @@ func TestCancelled(t *testing.T) { }, }) converter := simpleEventConverter() - batchUpdate := converter.Convert(context.Background(), msg) + batchUpdate := converter.Convert(armadacontext.Background(), msg) assert.Equal(t, 1, len(batchUpdate.Events)) event := batchUpdate.Events[0] es, err := extractEventSeq(event.Event) diff --git a/internal/eventingester/store/eventstore.go b/internal/eventingester/store/eventstore.go index 2f9dc7555a2..981e8460c16 100644 --- a/internal/eventingester/store/eventstore.go +++ b/internal/eventingester/store/eventstore.go @@ -1,7 +1,6 @@ package store import ( - "context" "regexp" "time" @@ -9,6 +8,7 @@ import ( "github.com/hashicorp/go-multierror" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/ingest" "github.com/armadaproject/armada/internal/eventingester/configuration" "github.com/armadaproject/armada/internal/eventingester/model" @@ -39,7 +39,7 @@ func NewRedisEventStore(db redis.UniversalClient, eventRetention configuration.E } } -func (repo *RedisEventStore) Store(ctx context.Context, update *model.BatchUpdate) error { +func (repo *RedisEventStore) Store(ctx *armadacontext.Context, update *model.BatchUpdate) error { if len(update.Events) == 0 { return nil } diff --git 
a/internal/eventingester/store/eventstore_test.go b/internal/eventingester/store/eventstore_test.go index 3327a4fff95..1584b56ba15 100644 --- a/internal/eventingester/store/eventstore_test.go +++ b/internal/eventingester/store/eventstore_test.go @@ -1,13 +1,13 @@ package store import ( - "context" "testing" "time" "github.com/go-redis/redis" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/eventingester/configuration" "github.com/armadaproject/armada/internal/eventingester/model" ) @@ -29,7 +29,7 @@ func TestReportEvents(t *testing.T) { }, } - err := r.Store(context.Background(), update) + err := r.Store(armadacontext.Background(), update) assert.NoError(t, err) read1, err := ReadEvent(r.db, "testQueue", "testJobset") diff --git a/internal/executor/application.go b/internal/executor/application.go index 6a15c0f9414..3cb4db15af0 100644 --- a/internal/executor/application.go +++ b/internal/executor/application.go @@ -1,7 +1,6 @@ package executor import ( - "context" "fmt" "net/http" "os" @@ -14,10 +13,10 @@ import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/cluster" "github.com/armadaproject/armada/internal/common/etcdhealth" "github.com/armadaproject/armada/internal/common/healthmonitor" @@ -41,7 +40,7 @@ import ( "github.com/armadaproject/armada/pkg/executorapi" ) -func StartUp(ctx context.Context, log *logrus.Entry, config configuration.ExecutorConfiguration) (func(), *sync.WaitGroup) { +func StartUp(ctx *armadacontext.Context, log *logrus.Entry, config configuration.ExecutorConfiguration) (func(), *sync.WaitGroup) { err := validateConfig(config) if err != nil { log.Errorf("Invalid config: %s", err) @@ -59,7 +58,7 @@ func StartUp(ctx context.Context, log *logrus.Entry, config configuration.Execut } // Create an errgroup to run services in. - g, ctx := errgroup.WithContext(ctx) + g, ctx := armadacontext.ErrGroup(ctx) // Setup etcd health monitoring. 
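`StartUp` used to build its service group with `golang.org/x/sync/errgroup` directly; `armadacontext.ErrGroup` looks like a thin wrapper over the same primitive that keeps the logger on the derived context. For reference, the underlying errgroup pattern is:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 3; i++ {
		i := i
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another service already failed
			default:
				fmt.Println("service", i, "started")
				return nil
			}
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}
```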
etcdClusterHealthMonitoringByName := make(map[string]healthmonitor.HealthMonitor, len(config.Kubernetes.Etcd.EtcdClustersHealthMonitoring)) diff --git a/internal/executor/context/cluster_context.go b/internal/executor/context/cluster_context.go index 79619ea06fd..555303fe9a3 100644 --- a/internal/executor/context/cluster_context.go +++ b/internal/executor/context/cluster_context.go @@ -1,7 +1,6 @@ package context import ( - "context" "encoding/json" "fmt" "time" @@ -26,6 +25,7 @@ import ( "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/cluster" util2 "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" @@ -50,7 +50,7 @@ type ClusterContext interface { GetActiveBatchPods() ([]*v1.Pod, error) GetNodes() ([]*v1.Node, error) GetNode(nodeName string) (*v1.Node, error) - GetNodeStatsSummary(context.Context, *v1.Node) (*v1alpha1.Summary, error) + GetNodeStatsSummary(*armadacontext.Context, *v1.Node) (*v1alpha1.Summary, error) GetPodEvents(pod *v1.Pod) ([]*v1.Event, error) GetServices(pod *v1.Pod) ([]*v1.Service, error) GetIngresses(pod *v1.Pod) ([]*networking.Ingress, error) @@ -223,7 +223,7 @@ func (c *KubernetesClusterContext) GetNode(nodeName string) (*v1.Node, error) { return c.nodeInformer.Lister().Get(nodeName) } -func (c *KubernetesClusterContext) GetNodeStatsSummary(ctx context.Context, node *v1.Node) (*v1alpha1.Summary, error) { +func (c *KubernetesClusterContext) GetNodeStatsSummary(ctx *armadacontext.Context, node *v1.Node) (*v1alpha1.Summary, error) { request := c.kubernetesClient. CoreV1(). RESTClient(). @@ -253,7 +253,7 @@ func (c *KubernetesClusterContext) SubmitPod(pod *v1.Pod, owner string, ownerGro return nil, err } - returnedPod, err := ownerClient.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) + returnedPod, err := ownerClient.CoreV1().Pods(pod.Namespace).Create(armadacontext.Background(), pod, metav1.CreateOptions{}) if err != nil { c.submittedPods.Delete(util.ExtractPodKey(pod)) } @@ -261,11 +261,11 @@ func (c *KubernetesClusterContext) SubmitPod(pod *v1.Pod, owner string, ownerGro } func (c *KubernetesClusterContext) SubmitService(service *v1.Service) (*v1.Service, error) { - return c.kubernetesClient.CoreV1().Services(service.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) + return c.kubernetesClient.CoreV1().Services(service.Namespace).Create(armadacontext.Background(), service, metav1.CreateOptions{}) } func (c *KubernetesClusterContext) SubmitIngress(ingress *networking.Ingress) (*networking.Ingress, error) { - return c.kubernetesClient.NetworkingV1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{}) + return c.kubernetesClient.NetworkingV1().Ingresses(ingress.Namespace).Create(armadacontext.Background(), ingress, metav1.CreateOptions{}) } func (c *KubernetesClusterContext) AddAnnotation(pod *v1.Pod, annotations map[string]string) error { @@ -280,7 +280,7 @@ func (c *KubernetesClusterContext) AddAnnotation(pod *v1.Pod, annotations map[st } _, err = c.kubernetesClient.CoreV1(). Pods(pod.Namespace). 
- Patch(context.Background(), pod.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + Patch(armadacontext.Background(), pod.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { return err } @@ -299,7 +299,7 @@ func (c *KubernetesClusterContext) AddClusterEventAnnotation(event *v1.Event, an } _, err = c.kubernetesClient.CoreV1(). Events(event.Namespace). - Patch(context.Background(), event.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + Patch(armadacontext.Background(), event.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { return err } @@ -318,7 +318,7 @@ func (c *KubernetesClusterContext) DeletePodWithCondition(pod *v1.Pod, condition return err } // Get latest pod state - bypassing cache - timeout, cancel := context.WithTimeout(context.Background(), time.Second*10) + timeout, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Second*10) defer cancel() currentPod, err = c.kubernetesClient.CoreV1().Pods(currentPod.Namespace).Get(timeout, currentPod.Name, metav1.GetOptions{}) if err != nil { @@ -368,7 +368,7 @@ func (c *KubernetesClusterContext) DeletePods(pods []*v1.Pod) { func (c *KubernetesClusterContext) DeleteService(service *v1.Service) error { deleteOptions := createDeleteOptions() - err := c.kubernetesClient.CoreV1().Services(service.Namespace).Delete(context.Background(), service.Name, deleteOptions) + err := c.kubernetesClient.CoreV1().Services(service.Namespace).Delete(armadacontext.Background(), service.Name, deleteOptions) if err != nil && k8s_errors.IsNotFound(err) { return nil } @@ -377,7 +377,7 @@ func (c *KubernetesClusterContext) DeleteService(service *v1.Service) error { func (c *KubernetesClusterContext) DeleteIngress(ingress *networking.Ingress) error { deleteOptions := createDeleteOptions() - err := c.kubernetesClient.NetworkingV1().Ingresses(ingress.Namespace).Delete(context.Background(), ingress.Name, deleteOptions) + err := c.kubernetesClient.NetworkingV1().Ingresses(ingress.Namespace).Delete(armadacontext.Background(), ingress.Name, deleteOptions) if err != nil && k8s_errors.IsNotFound(err) { return nil } @@ -386,7 +386,7 @@ func (c *KubernetesClusterContext) DeleteIngress(ingress *networking.Ingress) er func (c *KubernetesClusterContext) ProcessPodsToDelete() { pods := c.podsToDelete.GetAll() - util.ProcessItemsWithThreadPool(context.Background(), c.deleteThreadCount, pods, func(podToDelete *v1.Pod) { + util.ProcessItemsWithThreadPool(armadacontext.Background(), c.deleteThreadCount, pods, func(podToDelete *v1.Pod) { if podToDelete == nil { return } @@ -438,7 +438,7 @@ func (c *KubernetesClusterContext) doDelete(pod *v1.Pod, force bool) { } func (c *KubernetesClusterContext) deletePod(pod *v1.Pod, deleteOptions metav1.DeleteOptions) error { - return c.kubernetesClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, deleteOptions) + return c.kubernetesClient.CoreV1().Pods(pod.Namespace).Delete(armadacontext.Background(), pod.Name, deleteOptions) } func (c *KubernetesClusterContext) markForDeletion(pod *v1.Pod) (*v1.Pod, error) { diff --git a/internal/executor/context/cluster_context_test.go b/internal/executor/context/cluster_context_test.go index d1836e82168..b382cd0e690 100644 --- a/internal/executor/context/cluster_context_test.go +++ b/internal/executor/context/cluster_context_test.go @@ -1,7 +1,6 @@ package context import ( - ctx "context" "encoding/json" "errors" "testing" @@ -23,6 +22,7 @@ import ( 
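`AddAnnotation` and `AddClusterEventAnnotation` above apply annotations with a strategic merge patch built from a small metadata map. A self-contained sketch of that pattern against client-go's fake clientset (the annotation key is hypothetical):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "job-pod", Namespace: "default"},
	})

	// Only the fields present in the patch are merged into the object.
	patch, _ := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{"example.armadaproject.io/processed": "true"},
		},
	})

	pod, err := client.CoreV1().Pods("default").Patch(
		context.Background(), "job-pod", types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		fmt.Println("patch failed:", err)
		return
	}
	fmt.Println(pod.Annotations)
}
```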
clientTesting "k8s.io/client-go/testing" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" util2 "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" "github.com/armadaproject/armada/internal/executor/domain" @@ -699,7 +699,7 @@ func TestKubernetesClusterContext_GetNodes(t *testing.T) { }, } - _, err := client.CoreV1().Nodes().Create(ctx.Background(), node, metav1.CreateOptions{}) + _, err := client.CoreV1().Nodes().Create(armadacontext.Background(), node, metav1.CreateOptions{}) assert.Nil(t, err) nodeFound := waitForCondition(func() bool { diff --git a/internal/executor/context/fake/sync_cluster_context.go b/internal/executor/context/fake/sync_cluster_context.go index 7a8d26797d0..d4a178920d0 100644 --- a/internal/executor/context/fake/sync_cluster_context.go +++ b/internal/executor/context/fake/sync_cluster_context.go @@ -1,7 +1,6 @@ package fake import ( - "context" "errors" "fmt" @@ -11,6 +10,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/domain" ) @@ -132,7 +132,7 @@ func (c *SyncFakeClusterContext) GetClusterPool() string { return "pool" } -func (c *SyncFakeClusterContext) GetNodeStatsSummary(ctx context.Context, node *v1.Node) (*v1alpha1.Summary, error) { +func (c *SyncFakeClusterContext) GetNodeStatsSummary(ctx *armadacontext.Context, node *v1.Node) (*v1alpha1.Summary, error) { return &v1alpha1.Summary{}, nil } diff --git a/internal/executor/fake/context/context.go b/internal/executor/fake/context/context.go index 0cee687458b..906c23fe85f 100644 --- a/internal/executor/fake/context/context.go +++ b/internal/executor/fake/context/context.go @@ -1,7 +1,6 @@ package context import ( - "context" "fmt" "math/rand" "regexp" @@ -23,6 +22,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaresource "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" @@ -314,7 +314,7 @@ func (c *FakeClusterContext) GetClusterPool() string { return c.pool } -func (c *FakeClusterContext) GetNodeStatsSummary(ctx context.Context, node *v1.Node) (*v1alpha1.Summary, error) { +func (c *FakeClusterContext) GetNodeStatsSummary(ctx *armadacontext.Context, node *v1.Node) (*v1alpha1.Summary, error) { return &v1alpha1.Summary{}, nil } diff --git a/internal/executor/job/job_context.go b/internal/executor/job/job_context.go index 3cc8b36f2b3..bcc5526ce2e 100644 --- a/internal/executor/job/job_context.go +++ b/internal/executor/job/job_context.go @@ -1,7 +1,6 @@ package job import ( - "context" "fmt" "sync" "time" @@ -10,6 +9,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" + "github.com/armadaproject/armada/internal/common/armadacontext" executorContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/podchecks" "github.com/armadaproject/armada/internal/executor/util" @@ -149,7 +149,7 @@ func (c *ClusterJobContext) AddAnnotation(jobs []*RunningJob, annotations map[st } } - util.ProcessItemsWithThreadPool(context.Background(), c.updateThreadCount, podsToAnnotate, + 
util.ProcessItemsWithThreadPool(armadacontext.Background(), c.updateThreadCount, podsToAnnotate, func(pod *v1.Pod) { err := c.clusterContext.AddAnnotation(pod, annotations) if err != nil { diff --git a/internal/executor/job/processors/preempt_runs.go b/internal/executor/job/processors/preempt_runs.go index 9e48adb71a6..c296f7b75f7 100644 --- a/internal/executor/job/processors/preempt_runs.go +++ b/internal/executor/job/processors/preempt_runs.go @@ -1,13 +1,13 @@ package processors import ( - "context" "fmt" "time" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" executorContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/domain" "github.com/armadaproject/armada/internal/executor/job" @@ -46,7 +46,7 @@ func (j *RunPreemptedProcessor) Run() { }) runPodInfos := createRunPodInfos(runsToCancel, managedPods) - util.ProcessItemsWithThreadPool(context.Background(), 20, runPodInfos, + util.ProcessItemsWithThreadPool(armadacontext.Background(), 20, runPodInfos, func(runInfo *runPodInfo) { pod := runInfo.Pod if pod == nil { diff --git a/internal/executor/job/processors/remove_runs.go b/internal/executor/job/processors/remove_runs.go index 83038d8c1e1..37942110605 100644 --- a/internal/executor/job/processors/remove_runs.go +++ b/internal/executor/job/processors/remove_runs.go @@ -1,12 +1,12 @@ package processors import ( - "context" "time" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" executorContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/domain" "github.com/armadaproject/armada/internal/executor/job" @@ -37,7 +37,7 @@ func (j *RemoveRunProcessor) Run() { }) runPodInfos := createRunPodInfos(runsToCancel, managedPods) - util.ProcessItemsWithThreadPool(context.Background(), 20, runPodInfos, + util.ProcessItemsWithThreadPool(armadacontext.Background(), 20, runPodInfos, func(runInfo *runPodInfo) { pod := runInfo.Pod if pod == nil { diff --git a/internal/executor/reporter/event_sender.go b/internal/executor/reporter/event_sender.go index 9dd42a03f9d..d9afe0fa48b 100644 --- a/internal/executor/reporter/event_sender.go +++ b/internal/executor/reporter/event_sender.go @@ -1,13 +1,12 @@ package reporter import ( - "context" - "github.com/gogo/protobuf/proto" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" @@ -63,7 +62,7 @@ func (eventSender *ExecutorApiEventSender) SendEvents(events []EventMessage) err } for _, eventList := range eventLists { - _, err = eventSender.eventClient.ReportEvents(context.Background(), eventList) + _, err = eventSender.eventClient.ReportEvents(armadacontext.Background(), eventList) if err != nil { return err } diff --git a/internal/executor/reporter/event_sender_test.go b/internal/executor/reporter/event_sender_test.go index 1c91d1cd6f0..08e60daa521 100644 --- a/internal/executor/reporter/event_sender_test.go +++ b/internal/executor/reporter/event_sender_test.go @@ -205,13 +205,13 @@ func newFakeExecutorApiClient() *fakeExecutorApiClient { } } -func (fakeClient *fakeExecutorApiClient) LeaseJobRuns(ctx context.Context, opts 
...grpc.CallOption) (executorapi.ExecutorApi_LeaseJobRunsClient, error) { +func (fakeClient *fakeExecutorApiClient) LeaseJobRuns(_ context.Context, opts ...grpc.CallOption) (executorapi.ExecutorApi_LeaseJobRunsClient, error) { // Not implemented return nil, nil } // Reports job run events to the scheduler -func (fakeClient *fakeExecutorApiClient) ReportEvents(ctx context.Context, in *executorapi.EventList, opts ...grpc.CallOption) (*types.Empty, error) { +func (fakeClient *fakeExecutorApiClient) ReportEvents(_ context.Context, in *executorapi.EventList, opts ...grpc.CallOption) (*types.Empty, error) { fakeClient.reportedEvents = append(fakeClient.reportedEvents, in) return nil, nil } diff --git a/internal/executor/service/job_lease.go b/internal/executor/service/job_lease.go index d8165c32a1b..1b18fc0c9d2 100644 --- a/internal/executor/service/job_lease.go +++ b/internal/executor/service/job_lease.go @@ -10,7 +10,6 @@ import ( grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/encoding/gzip" v1 "k8s.io/api/core/v1" @@ -18,6 +17,7 @@ import ( "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common" + "github.com/armadaproject/armada/internal/common/armadacontext" armadamaps "github.com/armadaproject/armada/internal/common/maps" armadaresource "github.com/armadaproject/armada/internal/common/resource" commonUtil "github.com/armadaproject/armada/internal/common/util" @@ -111,10 +111,10 @@ func (jobLeaseService *JobLeaseService) requestJobLeases(leaseRequest *api.Strea // Setup a bidirectional gRPC stream. // The server sends jobs over this stream. // The executor sends back acks to indicate which jobs were successfully received. - ctx := context.Background() + ctx := armadacontext.Background() var cancel context.CancelFunc if jobLeaseService.jobLeaseRequestTimeout != 0 { - ctx, cancel = context.WithTimeout(ctx, jobLeaseService.jobLeaseRequestTimeout) + ctx, cancel = armadacontext.WithTimeout(ctx, jobLeaseService.jobLeaseRequestTimeout) defer cancel() } stream, err := jobLeaseService.queueClient.StreamingLeaseJobs(ctx, grpc_retry.Disable(), grpc.UseCompressor(gzip.Name)) @@ -137,7 +137,7 @@ func (jobLeaseService *JobLeaseService) requestJobLeases(leaseRequest *api.Strea var numJobs uint32 jobs := make([]*api.Job, 0) ch := make(chan *api.StreamingJobLease, 10) - g, ctx := errgroup.WithContext(ctx) + g, ctx := armadacontext.ErrGroup(ctx) g.Go(func() error { // Close channel to ensure sending goroutine exits. 
defer close(ch) diff --git a/internal/executor/service/job_manager.go b/internal/executor/service/job_manager.go index 4b8b1cfe016..496440d0538 100644 --- a/internal/executor/service/job_manager.go +++ b/internal/executor/service/job_manager.go @@ -1,13 +1,13 @@ package service import ( - "context" "fmt" "time" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" context2 "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/domain" "github.com/armadaproject/armada/internal/executor/job" @@ -75,7 +75,7 @@ func (m *JobManager) ManageJobLeases() { } } - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Minute*2) defer cancel() m.handlePodIssues(ctx, jobs) } @@ -108,7 +108,7 @@ func (m *JobManager) reportTerminated(pods []*v1.Pod) { } } -func (m *JobManager) handlePodIssues(ctx context.Context, allRunningJobs []*job.RunningJob) { +func (m *JobManager) handlePodIssues(ctx *armadacontext.Context, allRunningJobs []*job.RunningJob) { util.ProcessItemsWithThreadPool(ctx, 20, allRunningJobs, m.handlePodIssue) } diff --git a/internal/executor/service/job_requester.go b/internal/executor/service/job_requester.go index 217f279639e..53cf83c49a6 100644 --- a/internal/executor/service/job_requester.go +++ b/internal/executor/service/job_requester.go @@ -1,12 +1,12 @@ package service import ( - "context" "time" log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/slices" util2 "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" @@ -56,7 +56,7 @@ func (r *JobRequester) RequestJobsRuns() { log.Errorf("Failed to create lease request because %s", err) return } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 30*time.Second) defer cancel() leaseResponse, err := r.leaseRequester.LeaseJobRuns(ctx, leaseRequest) if err != nil { diff --git a/internal/executor/service/job_requester_test.go b/internal/executor/service/job_requester_test.go index 532e7e4fb0e..f7e3fcbc5b7 100644 --- a/internal/executor/service/job_requester_test.go +++ b/internal/executor/service/job_requester_test.go @@ -1,7 +1,6 @@ package service import ( - "context" "fmt" "testing" @@ -11,6 +10,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaresource "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" @@ -275,7 +275,7 @@ type StubLeaseRequester struct { LeaseJobRunLeaseResponse *LeaseResponse } -func (s *StubLeaseRequester) LeaseJobRuns(ctx context.Context, request *LeaseRequest) (*LeaseResponse, error) { +func (s *StubLeaseRequester) LeaseJobRuns(_ *armadacontext.Context, request *LeaseRequest) (*LeaseResponse, error) { s.ReceivedLeaseRequests = append(s.ReceivedLeaseRequests, request) return s.LeaseJobRunLeaseResponse, s.LeaseJobRunError } diff --git a/internal/executor/service/lease_requester.go b/internal/executor/service/lease_requester.go index 36a29f6e4f2..dc4976d84b5 100644 --- 
a/internal/executor/service/lease_requester.go +++ b/internal/executor/service/lease_requester.go @@ -10,6 +10,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/encoding/gzip" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaresource "github.com/armadaproject/armada/internal/common/resource" clusterContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/pkg/api" @@ -31,7 +32,7 @@ type LeaseResponse struct { } type LeaseRequester interface { - LeaseJobRuns(ctx context.Context, request *LeaseRequest) (*LeaseResponse, error) + LeaseJobRuns(ctx *armadacontext.Context, request *LeaseRequest) (*LeaseResponse, error) } type JobLeaseRequester struct { @@ -52,7 +53,7 @@ func NewJobLeaseRequester( } } -func (requester *JobLeaseRequester) LeaseJobRuns(ctx context.Context, request *LeaseRequest) (*LeaseResponse, error) { +func (requester *JobLeaseRequester) LeaseJobRuns(ctx *armadacontext.Context, request *LeaseRequest) (*LeaseResponse, error) { stream, err := requester.executorApiClient.LeaseJobRuns(ctx, grpcretry.Disable(), grpc.UseCompressor(gzip.Name)) if err != nil { return nil, err diff --git a/internal/executor/service/lease_requester_test.go b/internal/executor/service/lease_requester_test.go index 3f09cf450a7..f6314876c9f 100644 --- a/internal/executor/service/lease_requester_test.go +++ b/internal/executor/service/lease_requester_test.go @@ -1,7 +1,6 @@ package service import ( - "context" "fmt" "io" "testing" @@ -12,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/resource" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/mocks" armadaresource "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/internal/executor/context/fake" @@ -39,7 +39,7 @@ var ( ) func TestLeaseJobRuns(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 30*time.Second) defer cancel() tests := map[string]struct { leaseMessages []*executorapi.JobRunLease @@ -87,7 +87,7 @@ func TestLeaseJobRuns(t *testing.T) { } func TestLeaseJobRuns_Send(t *testing.T) { - shortCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + shortCtx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 30*time.Second) defer cancel() leaseRequest := &LeaseRequest{ @@ -126,7 +126,7 @@ func TestLeaseJobRuns_Send(t *testing.T) { func TestLeaseJobRuns_HandlesNoEndMarkerMessage(t *testing.T) { leaseMessages := []*executorapi.JobRunLease{lease1, lease2} - shortCtx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + shortCtx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 200*time.Millisecond) defer cancel() jobRequester, mockExecutorApiClient, mockStream := setup(t) @@ -146,7 +146,7 @@ func TestLeaseJobRuns_HandlesNoEndMarkerMessage(t *testing.T) { } func TestLeaseJobRuns_Error(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 30*time.Second) defer cancel() tests := map[string]struct { streamError bool diff --git a/internal/executor/service/pod_issue_handler.go b/internal/executor/service/pod_issue_handler.go index b98980df01b..57b323e7146 100644 --- a/internal/executor/service/pod_issue_handler.go +++ 
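`LeaseJobRuns` opens its stream with `grpc.UseCompressor(gzip.Name)` (and `grpcretry.Disable()`), compressing that one call. The same compressor can also be installed as a default call option at dial time if every call on a connection should be compressed; a minimal sketch with a placeholder address:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // importing registers the gzip compressor
)

func main() {
	conn, err := grpc.Dial(
		"localhost:50051", // placeholder executor-api address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Any client created from conn now sends gzip-compressed request messages.
}
```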
b/internal/executor/service/pod_issue_handler.go @@ -1,7 +1,6 @@ package service import ( - "context" "fmt" "sync" "time" @@ -11,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/executor/configuration" executorContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/job" @@ -159,7 +159,7 @@ func (p *IssueHandler) HandlePodIssues() { }) p.detectPodIssues(managedPods) p.detectReconciliationIssues(managedPods) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Minute*2) defer cancel() p.handleKnownIssues(ctx, managedPods) } @@ -225,7 +225,7 @@ func (p *IssueHandler) detectPodIssues(allManagedPods []*v1.Pod) { } } -func (p *IssueHandler) handleKnownIssues(ctx context.Context, allManagedPods []*v1.Pod) { +func (p *IssueHandler) handleKnownIssues(ctx *armadacontext.Context, allManagedPods []*v1.Pod) { // Make issues from pods + issues issues := createIssues(allManagedPods, p.knownPodIssues) util.ProcessItemsWithThreadPool(ctx, 20, issues, p.handleRunIssue) diff --git a/internal/executor/util/process.go b/internal/executor/util/process.go index cc4da52d9a2..a38c316b5fa 100644 --- a/internal/executor/util/process.go +++ b/internal/executor/util/process.go @@ -1,13 +1,13 @@ package util import ( - "context" "sync" + "github.com/armadaproject/armada/internal/common/armadacontext" commonUtil "github.com/armadaproject/armada/internal/common/util" ) -func ProcessItemsWithThreadPool[K any](ctx context.Context, maxThreadCount int, itemsToProcess []K, processFunc func(K)) { +func ProcessItemsWithThreadPool[K any](ctx *armadacontext.Context, maxThreadCount int, itemsToProcess []K, processFunc func(K)) { wg := &sync.WaitGroup{} processChannel := make(chan K) @@ -24,7 +24,7 @@ func ProcessItemsWithThreadPool[K any](ctx context.Context, maxThreadCount int, wg.Wait() } -func poolWorker[K any](ctx context.Context, wg *sync.WaitGroup, podsToProcess chan K, processFunc func(K)) { +func poolWorker[K any](ctx *armadacontext.Context, wg *sync.WaitGroup, podsToProcess chan K, processFunc func(K)) { defer wg.Done() for pod := range podsToProcess { diff --git a/internal/executor/util/process_test.go b/internal/executor/util/process_test.go index cfdb237dea9..f6995106c70 100644 --- a/internal/executor/util/process_test.go +++ b/internal/executor/util/process_test.go @@ -1,12 +1,13 @@ package util import ( - "context" "sync" "testing" "time" "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) func TestProcessItemsWithThreadPool(t *testing.T) { @@ -14,7 +15,7 @@ func TestProcessItemsWithThreadPool(t *testing.T) { output := []string{} outputMutex := &sync.Mutex{} - ProcessItemsWithThreadPool(context.Background(), 2, input, func(item string) { + ProcessItemsWithThreadPool(armadacontext.Background(), 2, input, func(item string) { outputMutex.Lock() defer outputMutex.Unlock() output = append(output, item) @@ -28,7 +29,7 @@ func TestProcessItemsWithThreadPool_HandlesContextCancellation(t *testing.T) { output := []string{} outputMutex := &sync.Mutex{} - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Millisecond*100) defer cancel() ProcessItemsWithThreadPool(ctx, 2, 
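`ProcessItemsWithThreadPool` above fans items out to a fixed number of workers and respects context cancellation, which is what the 100 ms timeout test that follows exercises. A simplified stdlib-only sketch of that shape, under the assumption that cancellation simply stops new items from being fed to the pool (the project's implementation may differ in detail):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func processItems[K any](ctx context.Context, workers int, items []K, process func(K)) {
	ch := make(chan K)
	var wg sync.WaitGroup

	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range ch {
				process(item)
			}
		}()
	}

feed:
	for _, item := range items {
		select {
		case <-ctx.Done():
			break feed // stop feeding work once the context is cancelled
		case ch <- item:
		}
	}
	close(ch)
	wg.Wait()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	processItems(ctx, 2, []string{"a", "b", "c"}, func(s string) { fmt.Println(s) })
}
```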
input, func(item string) { diff --git a/internal/executor/utilisation/pod_utilisation_kubelet_metrics.go b/internal/executor/utilisation/pod_utilisation_kubelet_metrics.go index 258d3740942..d7b32d01c7e 100644 --- a/internal/executor/utilisation/pod_utilisation_kubelet_metrics.go +++ b/internal/executor/utilisation/pod_utilisation_kubelet_metrics.go @@ -1,7 +1,6 @@ package utilisation import ( - "context" "sync" "time" @@ -11,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" clusterContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/domain" ) @@ -28,7 +28,7 @@ func (m *podUtilisationKubeletMetrics) fetch(nodes []*v1.Node, podNameToUtilisat wg.Add(1) go func(node *v1.Node) { defer wg.Done() - ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*15) + ctx, cancelFunc := armadacontext.WithTimeout(armadacontext.Background(), time.Second*15) defer cancelFunc() summary, err := clusterContext.GetNodeStatsSummary(ctx, node) if err != nil { diff --git a/internal/lookout/repository/job_pruner.go b/internal/lookout/repository/job_pruner.go index 6c9b92e60df..a77f1657007 100644 --- a/internal/lookout/repository/job_pruner.go +++ b/internal/lookout/repository/job_pruner.go @@ -1,12 +1,13 @@ package repository import ( - "context" "database/sql" "fmt" "time" log "github.com/sirupsen/logrus" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) const postgresFormat = "2006-01-02 15:04:05.000000" @@ -22,7 +23,7 @@ const postgresFormat = "2006-01-02 15:04:05.000000" // For performance reasons we don't use a transaction here and so an error may indicate that // Some jobs were deleted. 
func DeleteOldJobs(db *sql.DB, batchSizeLimit int, cutoff time.Time) error { - ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 120*time.Second) defer cancel() // This would be much better done as a proper statement with parameters, but postgres doesn't support @@ -30,7 +31,7 @@ func DeleteOldJobs(db *sql.DB, batchSizeLimit int, cutoff time.Time) error { queryText := fmt.Sprintf(` CREATE TEMP TABLE rows_to_delete AS (SELECT job_id FROM job WHERE submitted < '%v' OR submitted IS NULL); CREATE TEMP TABLE batch (job_id varchar(32)); - + DO $do$ DECLARE @@ -52,7 +53,7 @@ func DeleteOldJobs(db *sql.DB, batchSizeLimit int, cutoff time.Time) error { END LOOP; END; $do$; - + DROP TABLE rows_to_delete; DROP TABLE batch; `, cutoff.Format(postgresFormat), batchSizeLimit) diff --git a/internal/lookout/repository/job_sets.go b/internal/lookout/repository/job_sets.go index 70bd187f866..28b60a48179 100644 --- a/internal/lookout/repository/job_sets.go +++ b/internal/lookout/repository/job_sets.go @@ -1,7 +1,6 @@ package repository import ( - "context" "database/sql" "time" @@ -9,6 +8,7 @@ import ( "github.com/doug-martin/goqu/v9/exp" "github.com/gogo/protobuf/types" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/pkg/api/lookout" ) @@ -38,7 +38,7 @@ type jobSetCountsRow struct { QueuedStatsQ3 sql.NullTime `db:"queued_q3"` } -func (r *SQLJobRepository) GetJobSetInfos(ctx context.Context, opts *lookout.GetJobSetsRequest) ([]*lookout.JobSetInfo, error) { +func (r *SQLJobRepository) GetJobSetInfos(ctx *armadacontext.Context, opts *lookout.GetJobSetsRequest) ([]*lookout.JobSetInfo, error) { rows, err := r.queryJobSetInfos(ctx, opts) if err != nil { return nil, err @@ -47,7 +47,7 @@ func (r *SQLJobRepository) GetJobSetInfos(ctx context.Context, opts *lookout.Get return r.rowsToJobSets(rows, opts.Queue), nil } -func (r *SQLJobRepository) queryJobSetInfos(ctx context.Context, opts *lookout.GetJobSetsRequest) ([]*jobSetCountsRow, error) { +func (r *SQLJobRepository) queryJobSetInfos(ctx *armadacontext.Context, opts *lookout.GetJobSetsRequest) ([]*jobSetCountsRow, error) { ds := r.createJobSetsDataset(opts) jobsInQueueRows := make([]*jobSetCountsRow, 0) diff --git a/internal/lookout/repository/jobs.go b/internal/lookout/repository/jobs.go index dc03c6d43c4..3d8cb0994c1 100644 --- a/internal/lookout/repository/jobs.go +++ b/internal/lookout/repository/jobs.go @@ -1,7 +1,6 @@ package repository import ( - "context" "encoding/json" "errors" "fmt" @@ -13,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/duration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/util" @@ -20,7 +20,7 @@ import ( "github.com/armadaproject/armada/pkg/api/lookout" ) -func (r *SQLJobRepository) GetJobs(ctx context.Context, opts *lookout.GetJobsRequest) ([]*lookout.JobInfo, error) { +func (r *SQLJobRepository) GetJobs(ctx *armadacontext.Context, opts *lookout.GetJobsRequest) ([]*lookout.JobInfo, error) { if valid, jobState := validateJobStates(opts.JobStates); !valid { return nil, fmt.Errorf("unknown job state: %q", jobState) } @@ -57,7 +57,7 @@ func isJobState(val string) bool { return false } -func (r *SQLJobRepository) 
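`DeleteOldJobs` keeps its two-minute budget but now derives it from `armadacontext`; the `database/sql` mechanics underneath are unchanged. A standalone sketch of running a pruning statement under such a deadline (the connection string and the single `DELETE` are placeholders; the real pruner batches deletions through temp tables and a `DO $$ ... $$` block):

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/jackc/pgx/v5/stdlib" // postgres driver used elsewhere in the repo
)

func pruneBefore(db *sql.DB, cutoff time.Time) error {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()
	// Placeholder statement: delete everything submitted before the cutoff.
	_, err := db.ExecContext(ctx, `DELETE FROM job WHERE submitted < $1 OR submitted IS NULL`, cutoff)
	return err
}

func main() {
	db, err := sql.Open("pgx", "postgres://localhost/lookout?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := pruneBefore(db, time.Now().AddDate(0, -1, 0)); err != nil {
		log.Fatal(err)
	}
}
```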
queryJobs(ctx context.Context, opts *lookout.GetJobsRequest) ([]*JobRow, error) { +func (r *SQLJobRepository) queryJobs(ctx *armadacontext.Context, opts *lookout.GetJobsRequest) ([]*JobRow, error) { ds := r.createJobsDataset(opts) jobsInQueueRows := make([]*JobRow, 0) diff --git a/internal/lookout/repository/queues.go b/internal/lookout/repository/queues.go index 32b40aeb3b7..0ad1909e849 100644 --- a/internal/lookout/repository/queues.go +++ b/internal/lookout/repository/queues.go @@ -1,7 +1,6 @@ package repository import ( - "context" "database/sql" "sort" "time" @@ -10,6 +9,7 @@ import ( "github.com/gogo/protobuf/types" "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/pkg/api/lookout" ) @@ -25,7 +25,7 @@ type rowsSql struct { LongestRunning string } -func (r *SQLJobRepository) GetQueueInfos(ctx context.Context) ([]*lookout.QueueInfo, error) { +func (r *SQLJobRepository) GetQueueInfos(ctx *armadacontext.Context) ([]*lookout.QueueInfo, error) { queries, err := r.getQueuesSql() if err != nil { return nil, err diff --git a/internal/lookout/repository/sql_repository.go b/internal/lookout/repository/sql_repository.go index 42d72473c5e..af59e92c6ed 100644 --- a/internal/lookout/repository/sql_repository.go +++ b/internal/lookout/repository/sql_repository.go @@ -1,12 +1,12 @@ package repository import ( - "context" "database/sql" "github.com/doug-martin/goqu/v9" _ "github.com/doug-martin/goqu/v9/dialect/postgres" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/pkg/api/lookout" ) @@ -33,9 +33,9 @@ const ( ) type JobRepository interface { - GetQueueInfos(ctx context.Context) ([]*lookout.QueueInfo, error) - GetJobSetInfos(ctx context.Context, opts *lookout.GetJobSetsRequest) ([]*lookout.JobSetInfo, error) - GetJobs(ctx context.Context, opts *lookout.GetJobsRequest) ([]*lookout.JobInfo, error) + GetQueueInfos(ctx *armadacontext.Context) ([]*lookout.QueueInfo, error) + GetJobSetInfos(ctx *armadacontext.Context, opts *lookout.GetJobSetsRequest) ([]*lookout.JobSetInfo, error) + GetJobs(ctx *armadacontext.Context, opts *lookout.GetJobsRequest) ([]*lookout.JobInfo, error) } type SQLJobRepository struct { diff --git a/internal/lookout/repository/utils_test.go b/internal/lookout/repository/utils_test.go index 1c073851d0f..54fb40bcc6a 100644 --- a/internal/lookout/repository/utils_test.go +++ b/internal/lookout/repository/utils_test.go @@ -1,7 +1,6 @@ package repository import ( - "context" "fmt" "testing" "time" @@ -12,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/api/lookout" @@ -29,7 +29,7 @@ var ( node = "node" someTimeUnix = int64(1612546858) someTime = time.Unix(someTimeUnix, 0) - ctx = context.Background() + ctx = armadacontext.Background() ) func AssertJobsAreEquivalent(t *testing.T, expected *api.Job, actual *api.Job) { diff --git a/internal/lookout/server/lookout.go b/internal/lookout/server/lookout.go index df95e7bc2de..cf48fe278aa 100644 --- a/internal/lookout/server/lookout.go +++ b/internal/lookout/server/lookout.go @@ -4,9 +4,11 @@ import ( "context" "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + 
"github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/lookout/repository" "github.com/armadaproject/armada/pkg/api/lookout" ) @@ -20,7 +22,7 @@ func NewLookoutServer(jobRepository repository.JobRepository) *LookoutServer { } func (s *LookoutServer) Overview(ctx context.Context, _ *types.Empty) (*lookout.SystemOverview, error) { - queues, err := s.jobRepository.GetQueueInfos(ctx) + queues, err := s.jobRepository.GetQueueInfos(armadacontext.New(ctx, logrus.NewEntry(logrus.New()))) if err != nil { return nil, status.Errorf(codes.Internal, "failed to query queue stats: %s", err) } @@ -28,7 +30,7 @@ func (s *LookoutServer) Overview(ctx context.Context, _ *types.Empty) (*lookout. } func (s *LookoutServer) GetJobSets(ctx context.Context, opts *lookout.GetJobSetsRequest) (*lookout.GetJobSetsResponse, error) { - jobSets, err := s.jobRepository.GetJobSetInfos(ctx, opts) + jobSets, err := s.jobRepository.GetJobSetInfos(armadacontext.New(ctx, logrus.NewEntry(logrus.New())), opts) if err != nil { return nil, status.Errorf(codes.Internal, "failed to query queue stats: %s", err) } @@ -36,7 +38,7 @@ func (s *LookoutServer) GetJobSets(ctx context.Context, opts *lookout.GetJobSets } func (s *LookoutServer) GetJobs(ctx context.Context, opts *lookout.GetJobsRequest) (*lookout.GetJobsResponse, error) { - jobInfos, err := s.jobRepository.GetJobs(ctx, opts) + jobInfos, err := s.jobRepository.GetJobs(armadacontext.New(ctx, logrus.NewEntry(logrus.New())), opts) if err != nil { return nil, status.Errorf(codes.Internal, "failed to query jobs in queue: %s", err) } diff --git a/internal/lookout/testutil/db_testutil.go b/internal/lookout/testutil/db_testutil.go index 5ce57e8effa..eaba3992c15 100644 --- a/internal/lookout/testutil/db_testutil.go +++ b/internal/lookout/testutil/db_testutil.go @@ -1,7 +1,6 @@ package testutil import ( - "context" "database/sql" "fmt" @@ -9,6 +8,7 @@ import ( _ "github.com/jackc/pgx/v5/stdlib" "github.com/pkg/errors" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookout/repository/schema" ) @@ -61,7 +61,7 @@ func WithDatabase(action func(db *sql.DB) error) error { } func WithDatabasePgx(action func(db *pgxpool.Pool) error) error { - ctx := context.Background() + ctx := armadacontext.Background() // Connect and create a dedicated database for the test // For now use database/sql for this diff --git a/internal/lookout/ui/.eslintrc.js b/internal/lookout/ui/.eslintrc.js index 81f176d2068..133e26f10cd 100644 --- a/internal/lookout/ui/.eslintrc.js +++ b/internal/lookout/ui/.eslintrc.js @@ -12,7 +12,7 @@ module.exports = { version: "detect", }, }, - extends: ["plugin:react/recommended", "plugin:@typescript-eslint/recommended", "prettier"], + extends: ["plugin:@typescript-eslint/recommended", "prettier"], plugins: ["prettier", "eslint-plugin-import"], rules: { "prettier/prettier": [ diff --git a/internal/lookout/ui/README.md b/internal/lookout/ui/README.md index ecf2e9e6b90..09d713756d6 100644 --- a/internal/lookout/ui/README.md +++ b/internal/lookout/ui/README.md @@ -6,6 +6,12 @@ This project was bootstrapped with [Create React App](https://github.com/faceboo In the project directory, you can run: +### `yarn openapi` + +This step requires Docker. + +Generate the OpenAPI client code from the OpenAPI specification. 
This step is required to be run before the first time the application is run, and any time the OpenAPI specification is updated. + ### `yarn start` Runs the app in the development mode.\ diff --git a/internal/lookout/ui/package.json b/internal/lookout/ui/package.json index 6e637fb16c9..26d22c756c6 100644 --- a/internal/lookout/ui/package.json +++ b/internal/lookout/ui/package.json @@ -19,13 +19,13 @@ }, "dependencies": { "@emotion/react": "^11.10.5", - "@emotion/styled": "^11.10.5", + "@emotion/styled": "^11.11.0", "@material-ui/core": "^4.11.4", "@material-ui/icons": "^4.9.1", "@material-ui/lab": "^4.0.0-alpha.58", "@mui/icons-material": "^5.14.3", "@mui/lab": "^5.0.0-alpha.111", - "@mui/material": "^5.13.6", + "@mui/material": "^5.14.4", "@tanstack/react-table": "^8.7.0", "@testing-library/jest-dom": "^5.11.5", "@testing-library/react": "^12.1.5", @@ -34,7 +34,7 @@ "@types/js-yaml": "^4.0.0", "@types/node": "^12.19.3", "@types/react": "^16.14.43", - "@types/react-dom": "^16.9.9", + "@types/react-dom": "^16.9.19", "@types/react-virtualized": "^9.21.10", "@types/uuid": "^8.3.0", "@typescript-eslint/eslint-plugin": "^5.61.0", @@ -48,7 +48,7 @@ "eslint-config-prettier": "^8.10.0", "eslint-plugin-import": "^2.28.0", "eslint-plugin-prettier": "^3.4.0", - "eslint-plugin-react": "^7.31.11", + "eslint-plugin-react": "^7.33.1", "jest-junit": "^16.0.0", "js-yaml": "^4.0.0", "notistack": "^2.0.8", @@ -57,7 +57,7 @@ "query-string": "^6.13.7", "react": "^17.0.1", "react-dom": "^17.0.1", - "react-router-dom": "6.14.1", + "react-router-dom": "6.14.2", "react-scripts": "^5.0.1", "react-truncate": "^2.4.0", "react-virtualized": "^9.22.2", diff --git a/internal/lookout/ui/src/App.css b/internal/lookout/ui/src/App.css index 5abcd5a03aa..65f2d14bb82 100644 --- a/internal/lookout/ui/src/App.css +++ b/internal/lookout/ui/src/App.css @@ -6,6 +6,7 @@ .app-content { height: 100%; + min-height: 0; display: flex; flex-direction: column; } diff --git a/internal/lookout/ui/src/components/Overview.tsx b/internal/lookout/ui/src/components/Overview.tsx index 19643cc928a..b4ba025c88c 100644 --- a/internal/lookout/ui/src/components/Overview.tsx +++ b/internal/lookout/ui/src/components/Overview.tsx @@ -16,10 +16,10 @@ import { import MoreVert from "@material-ui/icons/MoreVert" import { AutoSizer } from "react-virtualized" -import { QueueInfo } from "../services/JobService" -import { RequestStatus } from "../utils" import AutoRefreshToggle from "./AutoRefreshToggle" import RefreshButton from "./RefreshButton" +import { QueueInfo } from "../services/JobService" +import { RequestStatus } from "../utils" import "./Overview.css" diff --git a/internal/lookout/ui/src/components/job-dialog/ContainerDetails.tsx b/internal/lookout/ui/src/components/job-dialog/ContainerDetails.tsx index 4aab3523fd5..2d5952620df 100644 --- a/internal/lookout/ui/src/components/job-dialog/ContainerDetails.tsx +++ b/internal/lookout/ui/src/components/job-dialog/ContainerDetails.tsx @@ -3,8 +3,8 @@ import React, { Fragment } from "react" import { Table, TableBody, TableContainer, Accordion, AccordionSummary, AccordionDetails } from "@material-ui/core" import { ExpandMore } from "@material-ui/icons" -import { getContainerInfoFromYaml } from "../../services/ComputeResourcesService" import DetailRow from "./DetailRow" +import { getContainerInfoFromYaml } from "../../services/ComputeResourcesService" import "./Details.css" diff --git a/internal/lookout/ui/src/components/job-dialog/JobDetails.tsx b/internal/lookout/ui/src/components/job-dialog/JobDetails.tsx 
index 71dc9195bfc..877fd4344f3 100644 --- a/internal/lookout/ui/src/components/job-dialog/JobDetails.tsx +++ b/internal/lookout/ui/src/components/job-dialog/JobDetails.tsx @@ -3,11 +3,11 @@ import React, { useEffect, useState } from "react" import { Accordion, AccordionDetails, AccordionSummary, Table, TableBody, TableContainer } from "@material-ui/core" import { ExpandMore } from "@material-ui/icons" -import { Job } from "../../services/JobService" import ContainerDetails from "./ContainerDetails" import DetailRow from "./DetailRow" import { PreviousRuns } from "./PreviousRuns" import RunDetailsRows from "./RunDetailsRows" +import { Job } from "../../services/JobService" import "./Details.css" diff --git a/internal/lookout/ui/src/components/job-dialog/PreviousRuns.tsx b/internal/lookout/ui/src/components/job-dialog/PreviousRuns.tsx index 76063e62147..7b70485fc8b 100644 --- a/internal/lookout/ui/src/components/job-dialog/PreviousRuns.tsx +++ b/internal/lookout/ui/src/components/job-dialog/PreviousRuns.tsx @@ -3,8 +3,8 @@ import React, { Fragment } from "react" import { Collapse, List, ListItem, ListItemText, Paper, Table, TableBody, TableContainer } from "@material-ui/core" import { ExpandLess, ExpandMore } from "@material-ui/icons" -import { Run } from "../../services/JobService" import RunDetailsRows from "./RunDetailsRows" +import { Run } from "../../services/JobService" import "./PreviousRuns.css" diff --git a/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.test.tsx b/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.test.tsx index 53285cb8f79..5d11a9d6bc9 100644 --- a/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.test.tsx +++ b/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.test.tsx @@ -3,8 +3,8 @@ import React from "react" import { TableContainer, Table, TableBody } from "@material-ui/core" import { render, screen } from "@testing-library/react" -import { Run } from "../../services/JobService" import RunDetailsRows from "./RunDetailsRows" +import { Run } from "../../services/JobService" function SetUpReactTable(run: Run, jobId: string) { return ( diff --git a/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.tsx b/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.tsx index dc8ceed7c19..ee96687e582 100644 --- a/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.tsx +++ b/internal/lookout/ui/src/components/job-dialog/RunDetailsRows.tsx @@ -1,7 +1,7 @@ import React from "react" -import { Run } from "../../services/JobService" import DetailRow from "./DetailRow" +import { Run } from "../../services/JobService" import "./Details.css" diff --git a/internal/lookout/ui/src/components/job-sets/DurationBoxPlot.tsx b/internal/lookout/ui/src/components/job-sets/DurationBoxPlot.tsx index 3f08b8731b4..63eb40dbddd 100644 --- a/internal/lookout/ui/src/components/job-sets/DurationBoxPlot.tsx +++ b/internal/lookout/ui/src/components/job-sets/DurationBoxPlot.tsx @@ -8,9 +8,9 @@ import { TooltipWithBounds, withTooltip } from "@visx/tooltip" import { WithTooltipProvidedProps } from "@visx/tooltip/lib/enhancers/withTooltip" import { defaultStyles as defaultTooltipStyles } from "@visx/tooltip/lib/tooltips/Tooltip" +import DurationTooltip, { DurationTooltipProps } from "./DurationTooltip" import { DurationStats } from "../../services/JobService" import { Padding } from "../../utils" -import DurationTooltip, { DurationTooltipProps } from "./DurationTooltip" type DurationBoxPlotData = { width: number diff --git 
a/internal/lookout/ui/src/components/job-sets/DurationPlotsTable.tsx b/internal/lookout/ui/src/components/job-sets/DurationPlotsTable.tsx index 3e9c8f85a5d..a86f987adbf 100644 --- a/internal/lookout/ui/src/components/job-sets/DurationPlotsTable.tsx +++ b/internal/lookout/ui/src/components/job-sets/DurationPlotsTable.tsx @@ -2,9 +2,9 @@ import React from "react" import { Table, TableHeaderRowProps, TableRowProps } from "react-virtualized" -import { DurationStats } from "../../services/JobService" import DurationAxis from "./DurationAxis" import DurationBoxPlot from "./DurationBoxPlot" +import { DurationStats } from "../../services/JobService" import "./DurationPlotsTable.css" diff --git a/internal/lookout/ui/src/components/job-sets/JobSets.tsx b/internal/lookout/ui/src/components/job-sets/JobSets.tsx index 11303103340..03de28463bc 100644 --- a/internal/lookout/ui/src/components/job-sets/JobSets.tsx +++ b/internal/lookout/ui/src/components/job-sets/JobSets.tsx @@ -17,13 +17,13 @@ import CancelIcon from "@material-ui/icons/Cancel" import LowPriority from "@material-ui/icons/LowPriority" import { AutoSizer } from "react-virtualized" +import DurationPlotsTable from "./DurationPlotsTable" +import JobSetTable from "./JobSetTable" import { JobSetsView, isJobSetsView } from "../../containers/JobSetsContainer" import { DurationStats, JobSet } from "../../services/JobService" import { RequestStatus } from "../../utils" import AutoRefreshToggle from "../AutoRefreshToggle" import RefreshButton from "../RefreshButton" -import DurationPlotsTable from "./DurationPlotsTable" -import JobSetTable from "./JobSetTable" import "./JobSets.css" diff --git a/internal/lookout/ui/src/components/jobs/JobTableColumns.tsx b/internal/lookout/ui/src/components/jobs/JobTableColumns.tsx index 69cb6cf655d..1174b1e98c2 100644 --- a/internal/lookout/ui/src/components/jobs/JobTableColumns.tsx +++ b/internal/lookout/ui/src/components/jobs/JobTableColumns.tsx @@ -2,15 +2,15 @@ import React from "react" import { Column } from "react-virtualized" +import JobStateCell from "./JobStateCell" +import JobStatesHeaderCell from "./JobStatesHeaderCell" +import SearchHeaderCell from "./SearchHeaderCell" import { ColumnSpec } from "../../containers/JobsContainer" import { Job } from "../../services/JobService" import { CHECKBOX_WIDTH } from "../CheckboxRow" import DefaultHeaderCell from "../DefaultHeaderCell" import LinkCell from "../LinkCell" import SortableHeaderCell from "../SortableHeaderCell" -import JobStateCell from "./JobStateCell" -import JobStatesHeaderCell from "./JobStatesHeaderCell" -import SearchHeaderCell from "./SearchHeaderCell" import "./JobTableColumns.css" diff --git a/internal/lookout/ui/src/components/jobs/JobTableHeader.tsx b/internal/lookout/ui/src/components/jobs/JobTableHeader.tsx index 9e4fefbc7ed..67e8b1238b9 100644 --- a/internal/lookout/ui/src/components/jobs/JobTableHeader.tsx +++ b/internal/lookout/ui/src/components/jobs/JobTableHeader.tsx @@ -4,10 +4,10 @@ import { Button } from "@material-ui/core" import CancelIcon from "@material-ui/icons/Cancel" import LowPriority from "@material-ui/icons/LowPriority" +import ColumnSelect from "./ColumnSelect" import { ColumnSpec } from "../../containers/JobsContainer" import AutoRefreshToggle from "../AutoRefreshToggle" import RefreshButton from "../RefreshButton" -import ColumnSelect from "./ColumnSelect" import "./JobTableHeader.css" diff --git a/internal/lookout/ui/src/components/jobs/Jobs.tsx b/internal/lookout/ui/src/components/jobs/Jobs.tsx index 
afe3b2f634a..5139f11192e 100644 --- a/internal/lookout/ui/src/components/jobs/Jobs.tsx +++ b/internal/lookout/ui/src/components/jobs/Jobs.tsx @@ -2,14 +2,14 @@ import React from "react" import { AutoSizer, InfiniteLoader, Table } from "react-virtualized" +import createJobTableColumns from "./JobTableColumns" +import JobTableHeader from "./JobTableHeader" +import LoadingRow from "./LoadingRow" import { ColumnSpec } from "../../containers/JobsContainer" import { Job } from "../../services/JobService" import { RequestStatus } from "../../utils" import CheckboxHeaderRow from "../CheckboxHeaderRow" import CheckboxRow from "../CheckboxRow" -import createJobTableColumns from "./JobTableColumns" -import JobTableHeader from "./JobTableHeader" -import LoadingRow from "./LoadingRow" import "./Jobs.css" diff --git a/internal/lookout/ui/src/components/lookoutV2/CancelDialog.tsx b/internal/lookout/ui/src/components/lookoutV2/CancelDialog.tsx index b35c42a0af9..349a40a3dd4 100644 --- a/internal/lookout/ui/src/components/lookoutV2/CancelDialog.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/CancelDialog.tsx @@ -12,9 +12,9 @@ import { pl, waitMillis, PlatformCancelReason } from "utils" import { getUniqueJobsMatchingFilters } from "utils/jobsDialogUtils" import { formatJobState } from "utils/jobsTableFormatters" -import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" import dialogStyles from "./DialogStyles.module.css" import { JobStatusTable } from "./JobStatusTable" +import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" interface CancelDialogProps { onClose: () => void diff --git a/internal/lookout/ui/src/components/lookoutV2/JobGroupStateCounts.tsx b/internal/lookout/ui/src/components/lookoutV2/JobGroupStateCounts.tsx index 250fa1cc409..c3b7cf6abe4 100644 --- a/internal/lookout/ui/src/components/lookoutV2/JobGroupStateCounts.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/JobGroupStateCounts.tsx @@ -1,6 +1,6 @@ +import styles from "./JobGroupStateCounts.module.css" import { JobState } from "../../models/lookoutV2Models" import { colorForJobState } from "../../utils/jobsTableFormatters" -import styles from "./JobGroupStateCounts.module.css" interface JobGroupStateCountsProps { stateCounts: Record diff --git a/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx b/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx index 48feff21cbc..a8dc3e848f4 100644 --- a/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx @@ -9,11 +9,11 @@ import { IGetJobsService } from "services/lookoutV2/GetJobsService" import { UpdateJobsService } from "services/lookoutV2/UpdateJobsService" import { ColumnId, JobTableColumn } from "utils/jobsTableColumns" -import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" import { CancelDialog } from "./CancelDialog" import { CustomViewPicker } from "./CustomViewPicker" import styles from "./JobsTableActionBar.module.css" import { ReprioritiseDialog } from "./ReprioritiseDialog" +import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" export interface JobsTableActionBarProps { isLoading: boolean diff --git a/internal/lookout/ui/src/components/lookoutV2/JobsTableCell.tsx b/internal/lookout/ui/src/components/lookoutV2/JobsTableCell.tsx index 34c60fb0e31..2c83ac72390 100644 --- a/internal/lookout/ui/src/components/lookoutV2/JobsTableCell.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/JobsTableCell.tsx @@ -7,15 
+7,13 @@ import { JobRow, JobTableRow } from "models/jobsTableModels" import { Match } from "models/lookoutV2Models" import { getColumnMetadata, toColId } from "utils/jobsTableColumns" -import { matchForColumn } from "../../utils/jobsTableUtils" import styles from "./JobsTableCell.module.css" import { JobsTableFilter } from "./JobsTableFilter" +import { matchForColumn } from "../../utils/jobsTableUtils" + const sharedCellStyle = { padding: 0, - "&:hover": { - opacity: 0.85, - }, overflowWrap: "normal", textOverflow: "ellipsis", whiteSpace: "nowrap", @@ -23,6 +21,13 @@ const sharedCellStyle = { borderRight: "1px solid #cccccc", } +const sharedCellStyleWithOpacity = { + ...sharedCellStyle, + "&:hover": { + opacity: 0.85, + }, +} + export interface HeaderCellProps { header: Header columnResizeMode: ColumnResizeMode @@ -70,6 +75,7 @@ export function HeaderCell({ textOverflow: "ellipsis", whiteSpace: "nowrap", overflow: "hidden", + backgroundColor: "#f2f2f2", }} className={styles.headerCell} /> @@ -100,6 +106,7 @@ export function HeaderCell({ justifyContent: "space-between", alignItems: "center", margin: 0, + backgroundColor: "#f2f2f2", }} >
diff --git a/internal/lookout/ui/src/components/lookoutV2/ReprioritiseDialog.tsx b/internal/lookout/ui/src/components/lookoutV2/ReprioritiseDialog.tsx index ed2335e1bf1..14189b771fc 100644 --- a/internal/lookout/ui/src/components/lookoutV2/ReprioritiseDialog.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/ReprioritiseDialog.tsx @@ -19,9 +19,9 @@ import { UpdateJobsService } from "services/lookoutV2/UpdateJobsService" import { pl, waitMillis } from "utils" import { getUniqueJobsMatchingFilters } from "utils/jobsDialogUtils" -import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" import dialogStyles from "./DialogStyles.module.css" import { JobStatusTable } from "./JobStatusTable" +import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" interface ReprioritiseDialogProps { onClose: () => void diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx index 62e9f901858..8180824a6db 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx @@ -2,12 +2,12 @@ import { useMemo, useState } from "react" import { CircularProgress, Collapse, ListItemButton, Typography } from "@mui/material" +import styles from "./ContainerDetails.module.css" +import { KeyValuePairTable } from "./KeyValuePairTable" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { useJobSpec } from "../../../hooks/useJobSpec" import { Job } from "../../../models/lookoutV2Models" import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" -import styles from "./ContainerDetails.module.css" -import { KeyValuePairTable } from "./KeyValuePairTable" export interface ContainerData { name: string diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx index 63b39693c8f..fdc9340e36d 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx @@ -4,11 +4,11 @@ import { Job, JobRunState, JobState } from "models/lookoutV2Models" import { SnackbarProvider } from "notistack" import { makeTestJob } from "utils/fakeJobsUtils" +import { Sidebar } from "./Sidebar" import { FakeCordonService } from "../../../services/lookoutV2/mocks/FakeCordonService" import FakeGetJobSpecService from "../../../services/lookoutV2/mocks/FakeGetJobSpecService" import { FakeGetRunErrorService } from "../../../services/lookoutV2/mocks/FakeGetRunErrorService" import { FakeLogService } from "../../../services/lookoutV2/mocks/FakeLogService" -import { Sidebar } from "./Sidebar" describe("Sidebar", () => { let job: Job, onClose: () => undefined diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx index 5761b091847..9376a140e75 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx @@ -4,16 +4,16 @@ import { TabContext, TabPanel } from "@mui/lab" import { Box, Divider, Drawer, Tab, Tabs } from "@mui/material" import { Job, JobState } from "models/lookoutV2Models" -import { ICordonService } from "../../../services/lookoutV2/CordonService" -import { IGetJobSpecService } from 
"../../../services/lookoutV2/GetJobSpecService" -import { IGetRunErrorService } from "../../../services/lookoutV2/GetRunErrorService" -import { ILogService } from "../../../services/lookoutV2/LogService" import styles from "./Sidebar.module.css" import { SidebarHeader } from "./SidebarHeader" import { SidebarTabJobDetails } from "./SidebarTabJobDetails" import { SidebarTabJobLogs } from "./SidebarTabJobLogs" import { SidebarTabJobRuns } from "./SidebarTabJobRuns" import { SidebarTabJobYaml } from "./SidebarTabJobYaml" +import { ICordonService } from "../../../services/lookoutV2/CordonService" +import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { IGetRunErrorService } from "../../../services/lookoutV2/GetRunErrorService" +import { ILogService } from "../../../services/lookoutV2/LogService" enum SidebarTab { JobDetails = "JobDetails", diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx index 898f82dc1e9..2dc81afcf4f 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx @@ -1,10 +1,10 @@ import { Typography } from "@mui/material" import { Job } from "models/lookoutV2Models" -import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" -import { formatBytes, formatCpu } from "../../../utils/resourceUtils" import { ContainerDetails } from "./ContainerDetails" import { KeyValuePairTable } from "./KeyValuePairTable" +import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { formatBytes, formatCpu } from "../../../utils/resourceUtils" export interface SidebarTabJobDetailsProps { job: Job diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx index 5b1e792d339..49442562ac3 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx @@ -14,12 +14,12 @@ import { } from "@mui/material" import { Job, JobRun } from "models/lookoutV2Models" +import styles from "./SidebarTabJobLogs.module.css" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { useJobSpec } from "../../../hooks/useJobSpec" import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" import { ILogService, LogLine } from "../../../services/lookoutV2/LogService" import { getErrorMessage, RequestStatus } from "../../../utils" -import styles from "./SidebarTabJobLogs.module.css" export interface SidebarTabJobLogsProps { job: Job diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx index ddf089545f6..92e35aa1021 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx @@ -16,13 +16,13 @@ import { Button, Tooltip } from "@mui/material" import { Job, JobRun } from "models/lookoutV2Models" import { formatJobRunState, formatUtcDate } from "utils/jobsTableFormatters" +import { CodeBlock } from "./CodeBlock" +import { KeyValuePairTable } from "./KeyValuePairTable" +import styles from "./SidebarTabJobRuns.module.css" import { 
useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { ICordonService } from "../../../services/lookoutV2/CordonService" import { IGetRunErrorService } from "../../../services/lookoutV2/GetRunErrorService" import { getErrorMessage } from "../../../utils" -import { CodeBlock } from "./CodeBlock" -import { KeyValuePairTable } from "./KeyValuePairTable" -import styles from "./SidebarTabJobRuns.module.css" export interface SidebarTabJobRunsProps { job: Job diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx index 5358f277429..18346c312d4 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx @@ -6,10 +6,10 @@ import { IconButton } from "@mui/material" import yaml from "js-yaml" import { Job } from "models/lookoutV2Models" +import styles from "./SidebarTabJobYaml.module.css" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { useJobSpec } from "../../../hooks/useJobSpec" import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" -import styles from "./SidebarTabJobYaml.module.css" export interface SidebarTabJobYamlProps { job: Job diff --git a/internal/lookout/ui/src/containers/CancelJobsDialog.tsx b/internal/lookout/ui/src/containers/CancelJobsDialog.tsx index 058c8e69136..c6fcfd73079 100644 --- a/internal/lookout/ui/src/containers/CancelJobsDialog.tsx +++ b/internal/lookout/ui/src/containers/CancelJobsDialog.tsx @@ -2,11 +2,11 @@ import React, { useState } from "react" import { Dialog, DialogContent, DialogTitle } from "@material-ui/core" +import { CANCELLABLE_JOB_STATES } from "./JobsContainer" import CancelJobs from "../components/jobs/cancel-jobs/CancelJobs" import CancelJobsOutcome from "../components/jobs/cancel-jobs/CancelJobsOutcome" import { JobService, CancelJobsResponse, Job } from "../services/JobService" import { ApiResult, RequestStatus, PlatformCancelReason } from "../utils" -import { CANCELLABLE_JOB_STATES } from "./JobsContainer" import "../components/Dialog.css" diff --git a/internal/lookout/ui/src/containers/JobDialog.tsx b/internal/lookout/ui/src/containers/JobDialog.tsx index 795b6a2fdf1..c76fdacc288 100644 --- a/internal/lookout/ui/src/containers/JobDialog.tsx +++ b/internal/lookout/ui/src/containers/JobDialog.tsx @@ -2,10 +2,10 @@ import React, { useState } from "react" import { Dialog, Tabs, Tab, DialogContent } from "@material-ui/core" +import JobLogsContainer from "./JobLogsContainer" import JobDetails from "../components/job-dialog/JobDetails" import { Job } from "../services/JobService" import LogService from "../services/LogService" -import JobLogsContainer from "./JobLogsContainer" type JobDetailsModalProps = { isOpen: boolean diff --git a/internal/lookout/ui/src/containers/JobSetsContainer.tsx b/internal/lookout/ui/src/containers/JobSetsContainer.tsx index 1fb7e12992c..37794dfdb31 100644 --- a/internal/lookout/ui/src/containers/JobSetsContainer.tsx +++ b/internal/lookout/ui/src/containers/JobSetsContainer.tsx @@ -1,13 +1,13 @@ import React from "react" +import CancelJobSetsDialog, { getCancellableJobSets } from "./CancelJobSetsDialog" +import ReprioritizeJobSetsDialog, { getReprioritizeableJobSets } from "./ReprioritizeJobSetsDialog" import JobSets from "../components/job-sets/JobSets" import IntervalService from "../services/IntervalService" import { JobService, 
GetJobSetsRequest, JobSet } from "../services/JobService" import JobSetsLocalStorageService from "../services/JobSetsLocalStorageService" import JobSetsQueryParamsService from "../services/JobSetsQueryParamsService" import { ApiResult, debounced, PropsWithRouter, RequestStatus, selectItem, setStateAsync, withRouter } from "../utils" -import CancelJobSetsDialog, { getCancellableJobSets } from "./CancelJobSetsDialog" -import ReprioritizeJobSetsDialog, { getReprioritizeableJobSets } from "./ReprioritizeJobSetsDialog" interface JobSetsContainerProps extends PropsWithRouter { jobService: JobService diff --git a/internal/lookout/ui/src/containers/JobsContainer.tsx b/internal/lookout/ui/src/containers/JobsContainer.tsx index 60f8e7c0247..6654667b39d 100644 --- a/internal/lookout/ui/src/containers/JobsContainer.tsx +++ b/internal/lookout/ui/src/containers/JobsContainer.tsx @@ -2,6 +2,9 @@ import React from "react" import { v4 as uuidv4 } from "uuid" +import CancelJobsDialog from "./CancelJobsDialog" +import JobDialog from "./JobDialog" +import ReprioritizeJobsDialog from "./ReprioritizeJobsDialog" import Jobs from "../components/jobs/Jobs" import IntervalService from "../services/IntervalService" import { JobService, GetJobsRequest, Job } from "../services/JobService" @@ -11,9 +14,6 @@ import JobsQueryParamsService from "../services/JobsQueryParamsService" import LogService from "../services/LogService" import TimerService from "../services/TimerService" import { ApiResult, PropsWithRouter, RequestStatus, selectItem, setStateAsync, withRouter } from "../utils" -import CancelJobsDialog from "./CancelJobsDialog" -import JobDialog from "./JobDialog" -import ReprioritizeJobsDialog from "./ReprioritizeJobsDialog" interface JobsContainerProps extends PropsWithRouter { jobService: JobService diff --git a/internal/lookout/ui/src/containers/OverviewContainer.tsx b/internal/lookout/ui/src/containers/OverviewContainer.tsx index b755823aa7a..1787ac18613 100644 --- a/internal/lookout/ui/src/containers/OverviewContainer.tsx +++ b/internal/lookout/ui/src/containers/OverviewContainer.tsx @@ -1,12 +1,12 @@ import React from "react" +import JobDialog from "./JobDialog" import Overview from "../components/Overview" import IntervalService from "../services/IntervalService" import { JobService, Job, QueueInfo } from "../services/JobService" import LogService from "../services/LogService" import OverviewLocalStorageService from "../services/OverviewLocalStorageService" import { PropsWithRouter, RequestStatus, setStateAsync, withRouter } from "../utils" -import JobDialog from "./JobDialog" interface OverviewContainerProps extends PropsWithRouter { jobService: JobService diff --git a/internal/lookout/ui/src/containers/ReprioritizeJobsDialog.tsx b/internal/lookout/ui/src/containers/ReprioritizeJobsDialog.tsx index 3365d4ffc9c..82c35f68a7a 100644 --- a/internal/lookout/ui/src/containers/ReprioritizeJobsDialog.tsx +++ b/internal/lookout/ui/src/containers/ReprioritizeJobsDialog.tsx @@ -2,11 +2,11 @@ import React, { useState } from "react" import { Dialog, DialogContent, DialogTitle } from "@material-ui/core" +import { REPRIORITIZEABLE_JOB_STATES } from "./JobsContainer" import ReprioritizeJobs from "../components/jobs/reprioritize-jobs/ReprioritizeJobs" import ReprioritizeJobsOutcome from "../components/jobs/reprioritize-jobs/ReprioritizeJobsOutcome" import { JobService, Job, ReprioritizeJobsResponse } from "../services/JobService" import { ApiResult, priorityIsValid, RequestStatus } from "../utils" -import { 
REPRIORITIZEABLE_JOB_STATES } from "./JobsContainer" import "../components/Dialog.css" diff --git a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx index 5611485d67e..64406306f71 100644 --- a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx +++ b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx @@ -10,6 +10,7 @@ import FakeGetJobsService from "services/lookoutV2/mocks/FakeGetJobsService" import FakeGroupJobsService from "services/lookoutV2/mocks/FakeGroupJobsService" import { v4 as uuidv4 } from "uuid" +import { JobsTableContainer } from "./JobsTableContainer" import { IGetJobSpecService } from "../../services/lookoutV2/GetJobSpecService" import { IGetRunErrorService } from "../../services/lookoutV2/GetRunErrorService" import { ILogService } from "../../services/lookoutV2/LogService" @@ -17,7 +18,6 @@ import { FakeCordonService } from "../../services/lookoutV2/mocks/FakeCordonServ import FakeGetJobSpecService from "../../services/lookoutV2/mocks/FakeGetJobSpecService" import { FakeGetRunErrorService } from "../../services/lookoutV2/mocks/FakeGetRunErrorService" import { FakeLogService } from "../../services/lookoutV2/mocks/FakeLogService" -import { JobsTableContainer } from "./JobsTableContainer" // This is quite a heavy component, and tests can timeout on a slower machine jest.setTimeout(30_000) @@ -401,7 +401,7 @@ describe("JobsTableContainer", () => { await waitFor(() => { const rows = getAllByRole("row") // Order should be reversed now - expect(rows[rows.length - 2]).toHaveTextContent(sorted[0].jobId) + expect(rows[rows.length - 1]).toHaveTextContent(sorted[0].jobId) }) }) }) @@ -561,7 +561,7 @@ describe("JobsTableContainer", () => { async () => { const table = await screen.findByRole("table", { name: "Jobs table" }) const rows = await within(table).findAllByRole("row") - expect(rows.length).toBe(nDataRows + 2) // One row per data row, plus the header and footer rows + expect(rows.length).toBe(nDataRows + 1) // One row per data row, plus the header }, { timeout: 3000 }, ) diff --git a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx index 2738c32d62c..3656786f398 100644 --- a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx +++ b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx @@ -9,7 +9,6 @@ import { TableBody, TableCell, TableContainer, - TableFooter, TableHead, TablePagination, TableRow, @@ -68,6 +67,7 @@ import { } from "utils/jobsTableUtils" import { fromRowId, RowId } from "utils/reactTableUtils" +import styles from "./JobsTableContainer.module.css" import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" import { ICordonService } from "../../services/lookoutV2/CordonService" import { CustomViewsService } from "../../services/lookoutV2/CustomViewsService" @@ -75,7 +75,6 @@ import { IGetJobSpecService } from "../../services/lookoutV2/GetJobSpecService" import { ILogService } from "../../services/lookoutV2/LogService" import { getErrorMessage, waitMillis } from "../../utils" import { EmptyInputError, ParseError } from "../../utils/resourceUtils" -import styles from "./JobsTableContainer.module.css" const PAGE_SIZE_OPTIONS = [5, 25, 50, 100] @@ -668,95 +667,96 @@ export const JobsTableContainer = ({ columnsForSelect = columnsForSelect.filter((col) => col.id !== StandardColumnId.Count) } return ( - - - 
0} - allColumns={columnsForSelect} - groupedColumns={grouping} - visibleColumns={visibleColumnIds} - selectedItemFilters={selectedItemsFilters} - customViews={customViews} - activeJobSets={activeJobSets} - onActiveJobSetsChanged={(newVal) => { - setActiveJobSets(newVal) - onRefresh() - }} - onRefresh={onRefresh} - onAddAnnotationColumn={addAnnotationCol} - onRemoveAnnotationColumn={removeAnnotationCol} - onEditAnnotationColumn={editAnnotationCol} - onGroupsChanged={onGroupingChange} - toggleColumnVisibility={onColumnVisibilityChange} - getJobsService={getJobsService} - updateJobsService={updateJobsService} - onClearFilters={clearFilters} - onAddCustomView={addCustomView} - onDeleteCustomView={deleteCustomView} - onLoadCustomView={loadCustomView} - /> - - + + + 0} + allColumns={columnsForSelect} + groupedColumns={grouping} + visibleColumns={visibleColumnIds} + selectedItemFilters={selectedItemsFilters} + customViews={customViews} + activeJobSets={activeJobSets} + onActiveJobSetsChanged={(newVal) => { + setActiveJobSets(newVal) + onRefresh() }} - > - - {table.getHeaderGroups().map((headerGroup) => ( - - {headerGroup.headers.map((header) => ( - { - setTextFieldRef(header.id, ref) - }} - /> - ))} - - ))} - - - 0} - columns={table.getVisibleLeafColumns()} - topLevelRows={topLevelRows} - sidebarJobId={sidebarJobId} - onLoadMoreSubRows={onLoadMoreSubRows} - onClickRowCheckbox={(row) => selectRow(row, false)} - onClickJobRow={toggleSidebarForJobRow} - onClickRow={(row) => selectRow(row, true)} - onShiftClickRow={shiftSelectRow} - onControlClickRow={(row) => selectRow(row, false)} - /> - - - - table.setPageIndex(page)} - onRowsPerPageChange={(e) => table.setPageSize(Number(e.target.value))} - colSpan={table.getVisibleLeafColumns().length} - showFirstButton={true} - showLastButton={true} - /> - - -
-
- - {debug &&
{JSON.stringify(table.getState(), null, 2)}
} + onRefresh={onRefresh} + onAddAnnotationColumn={addAnnotationCol} + onRemoveAnnotationColumn={removeAnnotationCol} + onEditAnnotationColumn={editAnnotationCol} + onGroupsChanged={onGroupingChange} + toggleColumnVisibility={onColumnVisibilityChange} + getJobsService={getJobsService} + updateJobsService={updateJobsService} + onClearFilters={clearFilters} + onAddCustomView={addCustomView} + onDeleteCustomView={deleteCustomView} + onLoadCustomView={loadCustomView} + /> + + + + {table.getHeaderGroups().map((headerGroup) => ( + + {headerGroup.headers.map((header) => ( + { + setTextFieldRef(header.id, ref) + }} + /> + ))} + + ))} + + + 0} + columns={table.getVisibleLeafColumns()} + topLevelRows={topLevelRows} + sidebarJobId={sidebarJobId} + onLoadMoreSubRows={onLoadMoreSubRows} + onClickRowCheckbox={(row) => selectRow(row, false)} + onClickJobRow={toggleSidebarForJobRow} + onClickRow={(row) => selectRow(row, true)} + onShiftClickRow={shiftSelectRow} + onControlClickRow={(row) => selectRow(row, false)} + /> +
+
+ table.setPageIndex(page)} + onRowsPerPageChange={(e) => table.setPageSize(Number(e.target.value))} + colSpan={table.getVisibleLeafColumns().length} + showFirstButton={true} + showLastButton={true} + /> + + {debug &&
{JSON.stringify(table.getState(), null, 2)}
} +
{sidebarJobDetails !== undefined && ( @@ -804,7 +804,9 @@ const JobsTableBody = ({ }: JobsTableBodyProps) => { const canDisplay = !dataIsLoading && topLevelRows.length > 0 return ( - + {!canDisplay && ( {dataIsLoading && topLevelRows.length === 0 && ( diff --git a/internal/lookout/ui/src/hooks/useJobSpec.ts b/internal/lookout/ui/src/hooks/useJobSpec.ts index 12dfca4c1ce..0b1cc654401 100644 --- a/internal/lookout/ui/src/hooks/useJobSpec.ts +++ b/internal/lookout/ui/src/hooks/useJobSpec.ts @@ -1,9 +1,9 @@ import { useEffect, useState } from "react" +import { OpenSnackbarFn } from "./useCustomSnackbar" import { Job } from "../models/lookoutV2Models" import { IGetJobSpecService } from "../services/lookoutV2/GetJobSpecService" import { getErrorMessage, RequestStatus } from "../utils" -import { OpenSnackbarFn } from "./useCustomSnackbar" export type JobSpecState = { jobSpec?: Record diff --git a/internal/lookout/ui/src/services/JobTableService.ts b/internal/lookout/ui/src/services/JobTableService.ts index d23b5788252..8096c31eef6 100644 --- a/internal/lookout/ui/src/services/JobTableService.ts +++ b/internal/lookout/ui/src/services/JobTableService.ts @@ -1,5 +1,5 @@ -import { updateArray } from "../utils" import { GetJobsRequest, Job, JobService } from "./JobService" +import { updateArray } from "../utils" type JobLoadState = "Loading" | "Loaded" diff --git a/internal/lookout/ui/src/services/JobsQueryParamsService.ts b/internal/lookout/ui/src/services/JobsQueryParamsService.ts index d8a13cd6ea7..d9e8b41a443 100644 --- a/internal/lookout/ui/src/services/JobsQueryParamsService.ts +++ b/internal/lookout/ui/src/services/JobsQueryParamsService.ts @@ -1,8 +1,8 @@ import queryString, { ParseOptions, StringifiableRecord, StringifyOptions } from "query-string" +import { JOB_STATES_FOR_DISPLAY } from "./JobService" import { ColumnSpec, JobsContainerState } from "../containers/JobsContainer" import { PropsWithRouter, Router } from "../utils" -import { JOB_STATES_FOR_DISPLAY } from "./JobService" const QUERY_STRING_OPTIONS: ParseOptions | StringifyOptions = { arrayFormat: "comma", diff --git a/internal/lookout/ui/src/services/MockJobService.ts b/internal/lookout/ui/src/services/MockJobService.ts index 1e3dffd04a5..57803dc0075 100644 --- a/internal/lookout/ui/src/services/MockJobService.ts +++ b/internal/lookout/ui/src/services/MockJobService.ts @@ -1,6 +1,5 @@ import { v4 as uuidv4 } from "uuid" -import { ApiJobState } from "../openapi/armada" import { CancelJobSetsResponse, CancelJobsResponse, @@ -13,6 +12,7 @@ import { ReprioritizeJobSetsResponse, ReprioritizeJobsResponse, } from "./JobService" +import { ApiJobState } from "../openapi/armada" type MockJobServiceConfig = { getJobs: { diff --git a/internal/lookout/ui/src/services/lookoutV2/CustomViewsService.ts b/internal/lookout/ui/src/services/lookoutV2/CustomViewsService.ts index e46428eaa95..960e6b2faf0 100644 --- a/internal/lookout/ui/src/services/lookoutV2/CustomViewsService.ts +++ b/internal/lookout/ui/src/services/lookoutV2/CustomViewsService.ts @@ -1,4 +1,3 @@ -import { tryParseJson } from "../../utils" import { DEFAULT_PREFERENCES, ensurePreferencesAreConsistent, @@ -6,6 +5,7 @@ import { KEY_PREFIX, stringIsInvalid, } from "./JobsTablePreferencesService" +import { tryParseJson } from "../../utils" const CUSTOM_KEY_PREFIX = `${KEY_PREFIX}CustomPrefs_` const CUSTOM_VIEWS_LIST_KEY = `${KEY_PREFIX}ListCustomPrefs` diff --git a/internal/lookout/ui/src/services/lookoutV2/JobsTablePreferencesService.test.ts 
b/internal/lookout/ui/src/services/lookoutV2/JobsTablePreferencesService.test.ts index bae8adf34b3..a7b2f8c9ee3 100644 --- a/internal/lookout/ui/src/services/lookoutV2/JobsTablePreferencesService.test.ts +++ b/internal/lookout/ui/src/services/lookoutV2/JobsTablePreferencesService.test.ts @@ -1,8 +1,6 @@ import { Location, NavigateFunction, Params } from "react-router-dom" import { ColumnId, DEFAULT_COLUMN_ORDER, StandardColumnId } from "utils/jobsTableColumns" -import { Match } from "../../models/lookoutV2Models" -import { Router } from "../../utils" import { DEFAULT_PREFERENCES, JobsTablePreferences, @@ -11,6 +9,8 @@ import { QueryStringPrefs, stringifyQueryParams, } from "./JobsTablePreferencesService" +import { Match } from "../../models/lookoutV2Models" +import { Router } from "../../utils" class FakeRouter implements Router { location: Location diff --git a/internal/lookout/ui/src/services/lookoutV2/UpdateJobsService.test.ts b/internal/lookout/ui/src/services/lookoutV2/UpdateJobsService.test.ts index ae92d0a8057..bdcbda6e44f 100644 --- a/internal/lookout/ui/src/services/lookoutV2/UpdateJobsService.test.ts +++ b/internal/lookout/ui/src/services/lookoutV2/UpdateJobsService.test.ts @@ -1,7 +1,7 @@ import _ from "lodash" -import { Job, JobId, JobState } from "../../models/lookoutV2Models" import { createJobBatches } from "./UpdateJobsService" +import { Job, JobId, JobState } from "../../models/lookoutV2Models" describe("UpdateJobsService", () => { describe("createJobBatches", () => { diff --git a/internal/lookout/ui/src/utils/jobsTableColumns.tsx b/internal/lookout/ui/src/utils/jobsTableColumns.tsx index 65afcbd55af..17080e6d349 100644 --- a/internal/lookout/ui/src/utils/jobsTableColumns.tsx +++ b/internal/lookout/ui/src/utils/jobsTableColumns.tsx @@ -6,10 +6,10 @@ import { EnumFilterOption } from "components/lookoutV2/JobsTableFilter" import { isJobGroupRow, JobTableRow } from "models/jobsTableModels" import { JobState, Match } from "models/lookoutV2Models" -import { JobGroupStateCounts } from "../components/lookoutV2/JobGroupStateCounts" -import { LookoutColumnOrder } from "../containers/lookoutV2/JobsTableContainer" import { formatJobState, formatTimeSince, formatUtcDate } from "./jobsTableFormatters" import { formatBytes, formatCpu, parseBytes, parseCpu, parseInteger } from "./resourceUtils" +import { JobGroupStateCounts } from "../components/lookoutV2/JobGroupStateCounts" +import { LookoutColumnOrder } from "../containers/lookoutV2/JobsTableContainer" export type JobTableColumn = ColumnDef diff --git a/internal/lookout/ui/src/utils/jobsTableFormatters.ts b/internal/lookout/ui/src/utils/jobsTableFormatters.ts index 88b335c9b85..8948ac5fea8 100644 --- a/internal/lookout/ui/src/utils/jobsTableFormatters.ts +++ b/internal/lookout/ui/src/utils/jobsTableFormatters.ts @@ -1,7 +1,7 @@ import { cyan, green, grey, orange, pink, purple, red, yellow } from "@mui/material/colors" import { intervalToDuration } from "date-fns" -import { formatInTimeZone } from "date-fns-tz" import { parseISO } from "date-fns/fp" +import { formatInTimeZone } from "date-fns-tz" import { JobRunState, jobRunStateDisplayInfo, JobState, jobStateDisplayInfo } from "models/lookoutV2Models" export const formatJobState = (state?: JobState): string => diff --git a/internal/lookout/ui/src/utils/jobsTableUtils.ts b/internal/lookout/ui/src/utils/jobsTableUtils.ts index 7ffe18fdc22..eac2b3bc9b6 100644 --- a/internal/lookout/ui/src/utils/jobsTableUtils.ts +++ b/internal/lookout/ui/src/utils/jobsTableUtils.ts @@ -5,9 +5,9 @@ import 
{ Job, JobFilter, JobGroup, JobOrder, Match } from "models/lookoutV2Model import { IGetJobsService } from "services/lookoutV2/GetJobsService" import { GroupedField, IGroupJobsService } from "services/lookoutV2/GroupJobsService" -import { LookoutColumnFilter } from "../containers/lookoutV2/JobsTableContainer" import { AnnotationColumnId, DEFAULT_COLUMN_MATCHES, fromAnnotationColId, isStandardColId } from "./jobsTableColumns" import { findRowInData, RowId, RowIdParts, toRowId } from "./reactTableUtils" +import { LookoutColumnFilter } from "../containers/lookoutV2/JobsTableContainer" export interface PendingData { parentRowId: RowId | "ROOT" diff --git a/internal/lookout/ui/yarn.lock b/internal/lookout/ui/yarn.lock index 9c9d1e6fdd1..094e576f2fb 100644 --- a/internal/lookout/ui/yarn.lock +++ b/internal/lookout/ui/yarn.lock @@ -1321,13 +1321,6 @@ dependencies: regenerator-runtime "^0.13.11" -"@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.22.6": - version "7.22.6" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.6.tgz#57d64b9ae3cff1d67eb067ae117dac087f5bd438" - integrity sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ== - dependencies: - regenerator-runtime "^0.13.11" - "@babel/runtime@^7.22.10": version "7.22.11" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.11.tgz#7a9ba3bbe406ad6f9e8dd4da2ece453eb23a77a4" @@ -1528,6 +1521,23 @@ source-map "^0.5.7" stylis "4.1.3" +"@emotion/babel-plugin@^11.11.0": + version "11.11.0" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.11.0.tgz#c2d872b6a7767a9d176d007f5b31f7d504bb5d6c" + integrity sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ== + dependencies: + "@babel/helper-module-imports" "^7.16.7" + "@babel/runtime" "^7.18.3" + "@emotion/hash" "^0.9.1" + "@emotion/memoize" "^0.8.1" + "@emotion/serialize" "^1.1.2" + babel-plugin-macros "^3.1.0" + convert-source-map "^1.5.0" + escape-string-regexp "^4.0.0" + find-root "^1.1.0" + source-map "^0.5.7" + stylis "4.2.0" + "@emotion/cache@^11.10.5": version "11.10.5" resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.10.5.tgz#c142da9351f94e47527ed458f7bbbbe40bb13c12" @@ -1560,6 +1570,11 @@ resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.0.tgz#c5153d50401ee3c027a57a177bc269b16d889cb7" integrity sha512-14FtKiHhy2QoPIzdTcvh//8OyBlknNs2nXRwIhG904opCby3l+9Xaf/wuPvICBF0rc1ZCNBd3nKe9cd2mecVkQ== +"@emotion/hash@^0.9.1": + version "0.9.1" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.1.tgz#4ffb0055f7ef676ebc3a5a91fb621393294e2f43" + integrity sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ== + "@emotion/is-prop-valid@^1.2.0": version "1.2.0" resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-1.2.0.tgz#7f2d35c97891669f7e276eb71c83376a5dc44c83" @@ -1609,6 +1624,17 @@ "@emotion/utils" "^1.2.0" csstype "^3.0.2" +"@emotion/serialize@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.1.2.tgz#017a6e4c9b8a803bd576ff3d52a0ea6fa5a62b51" + integrity sha512-zR6a/fkFP4EAcCMQtLOhIgpprZOwNmCldtpaISpvz348+DP4Mz8ZoKaGGCQpbzepNIUWbq4w6hNZkwDyKoS+HA== + dependencies: + "@emotion/hash" "^0.9.1" + "@emotion/memoize" "^0.8.1" + "@emotion/unitless" "^0.8.1" + "@emotion/utils" "^1.2.1" + csstype "^3.0.2" + "@emotion/sheet@^1.2.1": version "1.2.1" resolved 
"https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.2.1.tgz#0767e0305230e894897cadb6c8df2c51e61a6c2c" @@ -1619,28 +1645,38 @@ resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.2.2.tgz#d58e788ee27267a14342303e1abb3d508b6d0fec" integrity sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA== -"@emotion/styled@^11.10.5": - version "11.10.5" - resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.10.5.tgz#1fe7bf941b0909802cb826457e362444e7e96a79" - integrity sha512-8EP6dD7dMkdku2foLoruPCNkRevzdcBaY6q0l0OsbyJK+x8D9HWjX27ARiSIKNF634hY9Zdoedh8bJCiva8yZw== +"@emotion/styled@^11.11.0": + version "11.11.0" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.11.0.tgz#26b75e1b5a1b7a629d7c0a8b708fbf5a9cdce346" + integrity sha512-hM5Nnvu9P3midq5aaXj4I+lnSfNi7Pmd4EWk1fOZ3pxookaQTNew6bp4JaCBYM4HVFZF9g7UjJmsUmC2JlxOng== dependencies: "@babel/runtime" "^7.18.3" - "@emotion/babel-plugin" "^11.10.5" - "@emotion/is-prop-valid" "^1.2.0" - "@emotion/serialize" "^1.1.1" - "@emotion/use-insertion-effect-with-fallbacks" "^1.0.0" - "@emotion/utils" "^1.2.0" + "@emotion/babel-plugin" "^11.11.0" + "@emotion/is-prop-valid" "^1.2.1" + "@emotion/serialize" "^1.1.2" + "@emotion/use-insertion-effect-with-fallbacks" "^1.0.1" + "@emotion/utils" "^1.2.1" "@emotion/unitless@^0.8.0": version "0.8.0" resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.8.0.tgz#a4a36e9cbdc6903737cd20d38033241e1b8833db" integrity sha512-VINS5vEYAscRl2ZUDiT3uMPlrFQupiKgHz5AA4bCH1miKBg4qtwkim1qPmJj/4WG6TreYMY111rEFsjupcOKHw== +"@emotion/unitless@^0.8.1": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.8.1.tgz#182b5a4704ef8ad91bde93f7a860a88fd92c79a3" + integrity sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ== + "@emotion/use-insertion-effect-with-fallbacks@^1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.0.tgz#ffadaec35dbb7885bd54de3fa267ab2f860294df" integrity sha512-1eEgUGmkaljiBnRMTdksDV1W4kUnmwgp7X9G8B++9GYwl1lUdqSndSriIrTJ0N7LQaoauY9JJ2yhiOYK5+NI4A== +"@emotion/use-insertion-effect-with-fallbacks@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.1.tgz#08de79f54eb3406f9daaf77c76e35313da963963" + integrity sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw== + "@emotion/utils@^1.2.0": version "1.2.0" resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.2.0.tgz#9716eaccbc6b5ded2ea5a90d65562609aab0f561" @@ -1703,6 +1739,33 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" +"@floating-ui/core@^1.4.1": + version "1.4.1" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.4.1.tgz#0d633f4b76052668afb932492ac452f7ebe97f17" + integrity sha512-jk3WqquEJRlcyu7997NtR5PibI+y5bi+LS3hPmguVClypenMsCY3CBa3LAQnozRCtCrYWSEtAdiskpamuJRFOQ== + dependencies: + "@floating-ui/utils" "^0.1.1" + +"@floating-ui/dom@^1.5.1": + version "1.5.1" + resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.5.1.tgz#88b70defd002fe851f17b4a25efb2d3c04d7a8d7" + integrity sha512-KwvVcPSXg6mQygvA1TjbN/gh///36kKtllIF8SUm0qpFj8+rvYrpvlYdL1JoA71SHpDqgSSdGOSoQ0Mp3uY5aw== + dependencies: + "@floating-ui/core" "^1.4.1" + "@floating-ui/utils" "^0.1.1" + +"@floating-ui/react-dom@^2.0.1": + version "2.0.2" + resolved 
"https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-2.0.2.tgz#fab244d64db08e6bed7be4b5fcce65315ef44d20" + integrity sha512-5qhlDvjaLmAst/rKb3VdlCinwTF4EYMiVxuuc/HVUjs46W0zgtbMmAZ1UTsDrRTxRmUEzl92mOtWbeeXL26lSQ== + dependencies: + "@floating-ui/dom" "^1.5.1" + +"@floating-ui/utils@^0.1.1": + version "0.1.1" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.1.1.tgz#1a5b1959a528e374e8037c4396c3e825d6cf4a83" + integrity sha512-m0G6wlnhm/AX0H12IOWtK8gASEMffnX08RtKkCgTdHb9JpHKGloI7icFfLg9ZmQeavcvR0PKmzxClyuFPSjKWw== + "@humanwhocodes/config-array@^0.11.6": version "0.11.7" resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.7.tgz#38aec044c6c828f6ed51d5d7ae3d9b9faf6dbb0f" @@ -2143,24 +2206,25 @@ prop-types "^15.8.1" react-is "^18.2.0" -"@mui/base@5.0.0-beta.8": - version "5.0.0-beta.8" - resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-beta.8.tgz#a0a9531ae9147be92d17e4f0e3b9accc57916841" - integrity sha512-b4vVjMZx5KzzEMf4arXKoeV5ZegAMOoPwoy1vfUBwhvXc2QtaaAyBp50U7OA2L06Leubc1A+lEp3eqwZoFn87g== +"@mui/base@5.0.0-beta.13": + version "5.0.0-beta.13" + resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-beta.13.tgz#3bae94c39752546d84a67d4ca73486b7c4923a89" + integrity sha512-uC0l97pBspfDAp+iz2cJq8YZ8Sd9i73V77+WzUiOAckIVEyCm5dyVDZCCO2/phmzckVEeZCGcytybkjMQuhPQw== dependencies: - "@babel/runtime" "^7.22.6" + "@babel/runtime" "^7.22.10" "@emotion/is-prop-valid" "^1.2.1" + "@floating-ui/react-dom" "^2.0.1" "@mui/types" "^7.2.4" - "@mui/utils" "^5.14.1" + "@mui/utils" "^5.14.7" "@popperjs/core" "^2.11.8" - clsx "^1.2.1" + clsx "^2.0.0" prop-types "^15.8.1" react-is "^18.2.0" -"@mui/core-downloads-tracker@^5.14.1": - version "5.14.1" - resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.14.1.tgz#af156cb3e15b202f5c09f66e7d8b71ca86aef525" - integrity sha512-mIa1WmDmNr1LoupV1Rbxt9bTFKMbIn10RHG1bnZ/FJCkAYpuU/D4n+R+ttiycgcZNngU++zyh/OQeJblzbQPzg== +"@mui/core-downloads-tracker@^5.14.7": + version "5.14.7" + resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.14.7.tgz#95bed2487bf59632125a13b8eb8f4c21e460afec" + integrity sha512-sCWTUNElBPgB30iLvWe3PU7SIlTKZNf6/E/sko85iHVeHCM6WPkDw+y89CrZYjhFNmPqt2fIQM/pZu+rP2lFLA== "@mui/icons-material@^5.14.3": version "5.14.6" @@ -2183,19 +2247,19 @@ prop-types "^15.8.1" react-is "^18.2.0" -"@mui/material@^5.13.6": - version "5.14.1" - resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.14.1.tgz#2711e4ca5c9bdc67b916d01faee650a7a5260bb8" - integrity sha512-WtsgYuageTunLfxH3Ri+o1RuQTFImtRHxMcVNyD0Hhd2/znjW6KODNz0XfjvLRnNCAynBxZNiflcoIBW40h9PQ== +"@mui/material@^5.14.4": + version "5.14.7" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.14.7.tgz#6c2c0de8a625562f789e1bb33cb4cfc8cf20bdb0" + integrity sha512-jIZj9F7zMv6IlyaYDVv5M2Kp20jIX8c0kzuwteySHS/A0IvPVyomQEPtWc51MCbpDNCqzwoZUp3rQtA2lI8k7A== dependencies: - "@babel/runtime" "^7.22.6" - "@mui/base" "5.0.0-beta.8" - "@mui/core-downloads-tracker" "^5.14.1" - "@mui/system" "^5.14.1" + "@babel/runtime" "^7.22.10" + "@mui/base" "5.0.0-beta.13" + "@mui/core-downloads-tracker" "^5.14.7" + "@mui/system" "^5.14.7" "@mui/types" "^7.2.4" - "@mui/utils" "^5.14.1" + "@mui/utils" "^5.14.7" "@types/react-transition-group" "^4.4.6" - clsx "^1.2.1" + clsx "^2.0.0" csstype "^3.1.2" prop-types "^15.8.1" react-is "^18.2.0" @@ -2210,13 +2274,13 @@ "@mui/utils" "^5.10.16" prop-types "^15.8.1" -"@mui/private-theming@^5.13.7": - version "5.13.7" - 
resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.13.7.tgz#2f8ef5da066f3c6c6423bd4260d003a28d10b099" - integrity sha512-qbSr+udcij5F9dKhGX7fEdx2drXchq7htLNr2Qg2Ma+WJ6q0ERlEqGSBiPiVDJkptcjeVL4DGmcf1wl5+vD4EA== +"@mui/private-theming@^5.14.7": + version "5.14.7" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.14.7.tgz#c9fec31e59bf66b12959e724b0e8ec3bb4a3d923" + integrity sha512-Y86+hmDnJab2Ka42PgxKpK3oL7EiacbeeX3X/lG9LGO0wSc45wZjHeTfIlVSkkUCkexiMKEJp5NlSjZhr27NRQ== dependencies: - "@babel/runtime" "^7.22.5" - "@mui/utils" "^5.13.7" + "@babel/runtime" "^7.22.10" + "@mui/utils" "^5.14.7" prop-types "^15.8.1" "@mui/styled-engine@^5.10.16": @@ -2229,12 +2293,12 @@ csstype "^3.1.1" prop-types "^15.8.1" -"@mui/styled-engine@^5.13.2": - version "5.13.2" - resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.13.2.tgz#c87bd61c0ab8086d34828b6defe97c02bcd642ef" - integrity sha512-VCYCU6xVtXOrIN8lcbuPmoG+u7FYuOERG++fpY74hPpEWkyFQG97F+/XfTQVYzlR2m7nPjnwVUgATcTCMEaMvw== +"@mui/styled-engine@^5.14.7": + version "5.14.7" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.14.7.tgz#aaacec6c87bcc9a180b2da062c613213af10f2e3" + integrity sha512-hKBETEDsIAkL8/mBwPiQj/vw28OeIhMXC3Tvj4J2bb9snxAKpiZioR1PwqP+6P41twsC/GKBd0Vr9oaWYaHuMg== dependencies: - "@babel/runtime" "^7.21.0" + "@babel/runtime" "^7.22.10" "@emotion/cache" "^11.11.0" csstype "^3.1.2" prop-types "^15.8.1" @@ -2253,17 +2317,17 @@ csstype "^3.1.1" prop-types "^15.8.1" -"@mui/system@^5.14.1": - version "5.14.1" - resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.14.1.tgz#ec8ae69f63963b5916dad4bca2f8a86a001a2392" - integrity sha512-u+xlsU34Jdkgx1CxmBnIC4Y08uPdVX5iEd3S/1dggDFtOGp+Lj8xmKRJAQ8PJOOJLOh8pDwaZx4AwXikL4l1QA== +"@mui/system@^5.14.7": + version "5.14.7" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.14.7.tgz#b08e23f9151d38186ab12dd618906abd4d73d203" + integrity sha512-jeZtHglc+Pi6qjGoopT6O4RqYXVBMqHVOsjMGP0hxGSSPm1T4gsAu7jU8eqGx9YwwjvvJ0eotTjFqw7iJ6qE2Q== dependencies: - "@babel/runtime" "^7.22.6" - "@mui/private-theming" "^5.13.7" - "@mui/styled-engine" "^5.13.2" + "@babel/runtime" "^7.22.10" + "@mui/private-theming" "^5.14.7" + "@mui/styled-engine" "^5.14.7" "@mui/types" "^7.2.4" - "@mui/utils" "^5.14.1" - clsx "^1.2.1" + "@mui/utils" "^5.14.7" + clsx "^2.0.0" csstype "^3.1.2" prop-types "^15.8.1" @@ -2288,12 +2352,12 @@ prop-types "^15.8.1" react-is "^18.2.0" -"@mui/utils@^5.13.7", "@mui/utils@^5.14.1": - version "5.14.1" - resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.14.1.tgz#29696371016552a6eb3af975bc7af429ec88b29a" - integrity sha512-39KHKK2JeqRmuUcLDLwM+c2XfVC136C5/yUyQXmO2PVbOb2Bol4KxtkssEqCbTwg87PSCG3f1Tb0keRsK7cVGw== +"@mui/utils@^5.14.7": + version "5.14.7" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.14.7.tgz#3677bcabe032f1185e151f57d8c1a166df3ae0a1" + integrity sha512-RtheP/aBoPogVdi8vj8Vo2IFnRa4mZVmnD0RGlVZ49yF60rZs+xP4/KbpIrTr83xVs34QmHQ2aQ+IX7I0a0dDw== dependencies: - "@babel/runtime" "^7.22.6" + "@babel/runtime" "^7.22.10" "@types/prop-types" "^15.7.5" "@types/react-is" "^18.2.1" prop-types "^15.8.1" @@ -2352,10 +2416,10 @@ resolved "https://registry.yarnpkg.com/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" integrity sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A== -"@remix-run/router@1.7.1": - version "1.7.1" - resolved 
"https://registry.yarnpkg.com/@remix-run/router/-/router-1.7.1.tgz#fea7ac35ae4014637c130011f59428f618730498" - integrity sha512-bgVQM4ZJ2u2CM8k1ey70o1ePFXsEzYVZoWghh6WjM8p59jQ7HxzbHW4SbnWFG7V9ig9chLawQxDTZ3xzOF8MkQ== +"@remix-run/router@1.7.2": + version "1.7.2" + resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.7.2.tgz#cba1cf0a04bc04cb66027c51fa600e9cbc388bc8" + integrity sha512-7Lcn7IqGMV+vizMPoEl5F0XDshcdDYtMI6uJLQdQz5CfZAwy3vvGKYSUk789qndt5dEC4HfSjviSYlSoHGL2+A== "@rollup/plugin-babel@^5.2.0": version "5.3.1" @@ -2914,10 +2978,10 @@ dependencies: "@types/react" "*" -"@types/react-dom@^16.9.9": - version "16.9.16" - resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.9.16.tgz#c591f2ed1c6f32e9759dfa6eb4abfd8041f29e39" - integrity sha512-Oqc0RY4fggGA3ltEgyPLc3IV9T73IGoWjkONbsyJ3ZBn+UPPCYpU2ec0i3cEbJuEdZtkqcCF2l1zf2pBdgUGSg== +"@types/react-dom@^16.9.19": + version "16.9.19" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.9.19.tgz#6a139c26b02dec533a7fa131f084561babb10a8f" + integrity sha512-xC8D280Bf6p0zguJ8g62jcEOKZiUbx9sIe6O3tT/lKfR87A7A6g65q13z6D5QUMIa/6yFPkNhqjF5z/VVZEYqQ== dependencies: "@types/react" "^16" @@ -4130,6 +4194,13 @@ async@^3.2.3: resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== +asynciterator.prototype@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/asynciterator.prototype/-/asynciterator.prototype-1.0.0.tgz#8c5df0514936cdd133604dfcc9d3fb93f09b2b62" + integrity sha512-wwHYEIS0Q80f5mosx3L/dfG5t5rjEa9Ft51GTaNt862EnpyGHpgz2RkZvLPp1oF5TnAiTohkEKVEu8pQPJI7Vg== + dependencies: + has-symbols "^1.0.3" + asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -4604,6 +4675,11 @@ clsx@^1.0.4, clsx@^1.1.0, clsx@^1.2.1: resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== +clsx@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.0.0.tgz#12658f3fd98fafe62075595a5c30e43d18f3d00b" + integrity sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q== + co@^4.6.0: version "4.6.0" resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" @@ -5574,7 +5650,7 @@ es-abstract@^1.17.2, es-abstract@^1.19.0, es-abstract@^1.19.1, es-abstract@^1.19 string.prototype.trimstart "^1.0.5" unbox-primitive "^1.0.2" -es-abstract@^1.21.2: +es-abstract@^1.21.2, es-abstract@^1.22.1: version "1.22.1" resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.1.tgz#8b4e5fc5cefd7f1660f0f8e1a52900dfbc9d9ccc" integrity sha512-ioRRcXMO6OFyRpyzV3kE1IIBd4WG5/kltnzdxSCqoP8CMGs/Li+M1uF5o7lOkZVFjDs+NLesthnF66Pg/0q0Lw== @@ -5624,6 +5700,26 @@ es-array-method-boxes-properly@^1.0.0: resolved "https://registry.yarnpkg.com/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz#873f3e84418de4ee19c5be752990b2e44718d09e" integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA== +es-iterator-helpers@^1.0.12: + version "1.0.14" + resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.14.tgz#19cd7903697d97e21198f3293b55e8985791c365" + 
integrity sha512-JgtVnwiuoRuzLvqelrvN3Xu7H9bu2ap/kQ2CrM62iidP8SKuD99rWU3CJy++s7IVL2qb/AjXPGR/E7i9ngd/Cw== + dependencies: + asynciterator.prototype "^1.0.0" + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + es-set-tostringtag "^2.0.1" + function-bind "^1.1.1" + get-intrinsic "^1.2.1" + globalthis "^1.0.3" + has-property-descriptors "^1.0.0" + has-proto "^1.0.1" + has-symbols "^1.0.3" + internal-slot "^1.0.5" + iterator.prototype "^1.1.0" + safe-array-concat "^1.0.0" + es-module-lexer@^0.9.0: version "0.9.3" resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-0.9.3.tgz#6f13db00cc38417137daf74366f535c8eb438f19" @@ -5835,7 +5931,7 @@ eslint-plugin-react-hooks@^4.3.0: resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz#4c3e697ad95b77e93f8646aaa1630c1ba607edd3" integrity sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g== -eslint-plugin-react@^7.27.1, eslint-plugin-react@^7.31.11: +eslint-plugin-react@^7.27.1: version "7.31.11" resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.31.11.tgz#011521d2b16dcf95795df688a4770b4eaab364c8" integrity sha512-TTvq5JsT5v56wPa9OYHzsrOlHzKZKjV+aLgS+55NJP/cuzdiQPC7PfYoUjMoxlffKtvijpk7vA/jmuqRb9nohw== @@ -5856,6 +5952,28 @@ eslint-plugin-react@^7.27.1, eslint-plugin-react@^7.31.11: semver "^6.3.0" string.prototype.matchall "^4.0.8" +eslint-plugin-react@^7.33.1: + version "7.33.2" + resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.33.2.tgz#69ee09443ffc583927eafe86ffebb470ee737608" + integrity sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw== + dependencies: + array-includes "^3.1.6" + array.prototype.flatmap "^1.3.1" + array.prototype.tosorted "^1.1.1" + doctrine "^2.1.0" + es-iterator-helpers "^1.0.12" + estraverse "^5.3.0" + jsx-ast-utils "^2.4.1 || ^3.0.0" + minimatch "^3.1.2" + object.entries "^1.1.6" + object.fromentries "^2.0.6" + object.hasown "^1.1.2" + object.values "^1.1.6" + prop-types "^15.8.1" + resolve "^2.0.0-next.4" + semver "^6.3.1" + string.prototype.matchall "^4.0.8" + eslint-plugin-testing-library@^5.0.1: version "5.9.1" resolved "https://registry.yarnpkg.com/eslint-plugin-testing-library/-/eslint-plugin-testing-library-5.9.1.tgz#12e4bd34c48683ee98af4df2e3318ec9f51dcf8a" @@ -6948,6 +7066,13 @@ is-arrayish@^0.2.1: resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== +is-async-function@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-async-function/-/is-async-function-2.0.0.tgz#8e4418efd3e5d3a6ebb0164c05ef5afb69aa9646" + integrity sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA== + dependencies: + has-tostringtag "^1.0.0" + is-bigint@^1.0.1: version "1.0.4" resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" @@ -6989,7 +7114,7 @@ is-core-module@^2.8.1, is-core-module@^2.9.0: dependencies: has "^1.0.3" -is-date-object@^1.0.1: +is-date-object@^1.0.1, is-date-object@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" integrity 
sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== @@ -7006,6 +7131,13 @@ is-extglob@^2.1.1: resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== +is-finalizationregistry@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz#c8749b65f17c133313e661b1289b95ad3dbd62e6" + integrity sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw== + dependencies: + call-bind "^1.0.2" + is-fullwidth-code-point@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" @@ -7016,6 +7148,13 @@ is-generator-fn@^2.0.0: resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118" integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ== +is-generator-function@^1.0.10: + version "1.0.10" + resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.10.tgz#f1558baf1ac17e0deea7c0415c438351ff2b3c72" + integrity sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A== + dependencies: + has-tostringtag "^1.0.0" + is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: version "4.0.3" resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" @@ -7028,6 +7167,11 @@ is-in-browser@^1.0.2, is-in-browser@^1.1.3: resolved "https://registry.yarnpkg.com/is-in-browser/-/is-in-browser-1.1.3.tgz#56ff4db683a078c6082eb95dad7dc62e1d04f835" integrity sha512-FeXIBgG/CPGd/WUxuEyvgGTEfwiG9Z4EKGxjNMRqviiIIfsmgrpnHLffEDdwUHqNva1VEW91o3xBT/m8Elgl9g== +is-map@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127" + integrity sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg== + is-module@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-module/-/is-module-1.0.0.tgz#3258fb69f78c14d5b815d664336b4cffb6441591" @@ -7088,6 +7232,11 @@ is-root@^2.1.0: resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c" integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg== +is-set@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.2.tgz#90755fa4c2562dc1c5d4024760d6119b94ca18ec" + integrity sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g== + is-shared-array-buffer@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz#8f259c573b60b6a32d4058a1a07430c0a7344c79" @@ -7126,6 +7275,11 @@ is-typedarray@^1.0.0: resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA== +is-weakmap@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.1.tgz#5008b59bdc43b698201d18f62b37b2ca243e8cf2" + integrity 
sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA== + is-weakref@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2" @@ -7133,6 +7287,14 @@ is-weakref@^1.0.2: dependencies: call-bind "^1.0.2" +is-weakset@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-weakset/-/is-weakset-2.0.2.tgz#4569d67a747a1ce5a994dfd4ef6dcea76e7c0a1d" + integrity sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.1" + is-wsl@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" @@ -7197,6 +7359,17 @@ istanbul-reports@^3.1.3: html-escaper "^2.0.0" istanbul-lib-report "^3.0.0" +iterator.prototype@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/iterator.prototype/-/iterator.prototype-1.1.0.tgz#690c88b043d821f783843aaf725d7ac3b62e3b46" + integrity sha512-rjuhAk1AJ1fssphHD0IFV6TWL40CwRZ53FrztKx43yk2v6rguBYsY4Bj1VU4HmoMmKwZUlx7mfnhDf9cOp4YTw== + dependencies: + define-properties "^1.1.4" + get-intrinsic "^1.1.3" + has-symbols "^1.0.3" + has-tostringtag "^1.0.0" + reflect.getprototypeof "^1.0.3" + jake@^10.8.5: version "10.8.5" resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.5.tgz#f2183d2c59382cb274226034543b9c03b8164c46" @@ -9564,20 +9737,20 @@ react-refresh@^0.11.0: resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.11.0.tgz#77198b944733f0f1f1a90e791de4541f9f074046" integrity sha512-F27qZr8uUqwhWZboondsPx8tnC3Ct3SxZA3V5WyEvujRyyNv0VYPhoBg1gZ8/MV5tubQp76Trw8lTv9hzRBa+A== -react-router-dom@6.14.1: - version "6.14.1" - resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.14.1.tgz#0ad7ba7abdf75baa61169d49f096f0494907a36f" - integrity sha512-ssF6M5UkQjHK70fgukCJyjlda0Dgono2QGwqGvuk7D+EDGHdacEN3Yke2LTMjkrpHuFwBfDFsEjGVXBDmL+bWw== +react-router-dom@6.14.2: + version "6.14.2" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.14.2.tgz#88f520118b91aa60233bd08dbd3fdcaea3a68488" + integrity sha512-5pWX0jdKR48XFZBuJqHosX3AAHjRAzygouMTyimnBPOLdY3WjzUSKhus2FVMihUFWzeLebDgr4r8UeQFAct7Bg== dependencies: - "@remix-run/router" "1.7.1" - react-router "6.14.1" + "@remix-run/router" "1.7.2" + react-router "6.14.2" -react-router@6.14.1: - version "6.14.1" - resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.14.1.tgz#5e82bcdabf21add859dc04b1859f91066b3a5810" - integrity sha512-U4PfgvG55LdvbQjg5Y9QRWyVxIdO1LlpYT7x+tMAxd9/vmiPuJhIwdxZuIQLN/9e3O4KFDHYfR9gzGeYMasW8g== +react-router@6.14.2: + version "6.14.2" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.14.2.tgz#1f60994d8c369de7b8ba7a78d8f7ec23df76b300" + integrity sha512-09Zss2dE2z+T1D03IheqAFtK4UzQyX8nFPWx6jkwdYzGLXd5ie06A6ezS2fO6zJfEb/SpG6UocN2O1hfD+2urQ== dependencies: - "@remix-run/router" "1.7.1" + "@remix-run/router" "1.7.2" react-scripts@^5.0.1: version "5.0.1" @@ -9743,6 +9916,18 @@ reduce-function-call@^1.0.1: dependencies: balanced-match "^1.0.0" +reflect.getprototypeof@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.3.tgz#2738fd896fcc3477ffbd4190b40c2458026b6928" + integrity sha512-TTAOZpkJ2YLxl7mVHWrNo3iDMEkYlva/kgFcXndqMgbo/AZUmmavEkdXV+hXtE4P8xdyEKRzalaFqZVuwIk/Nw== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.4" + es-abstract 
"^1.20.4" + get-intrinsic "^1.1.1" + globalthis "^1.0.3" + which-builtin-type "^1.1.3" + regenerate-unicode-properties@^10.1.0: version "10.1.0" resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz#7c3192cab6dd24e21cb4461e5ddd7dd24fa8374c" @@ -9911,7 +10096,7 @@ resolve@^1.22.4: path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" -resolve@^2.0.0-next.3: +resolve@^2.0.0-next.3, resolve@^2.0.0-next.4: version "2.0.0-next.4" resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.4.tgz#3d37a113d6429f496ec4752d2a2e58efb1fd4660" integrity sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ== @@ -11272,7 +11457,35 @@ which-boxed-primitive@^1.0.2: is-string "^1.0.5" is-symbol "^1.0.3" -which-typed-array@^1.1.10, which-typed-array@^1.1.11: +which-builtin-type@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/which-builtin-type/-/which-builtin-type-1.1.3.tgz#b1b8443707cc58b6e9bf98d32110ff0c2cbd029b" + integrity sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw== + dependencies: + function.prototype.name "^1.1.5" + has-tostringtag "^1.0.0" + is-async-function "^2.0.0" + is-date-object "^1.0.5" + is-finalizationregistry "^1.0.2" + is-generator-function "^1.0.10" + is-regex "^1.1.4" + is-weakref "^1.0.2" + isarray "^2.0.5" + which-boxed-primitive "^1.0.2" + which-collection "^1.0.1" + which-typed-array "^1.1.9" + +which-collection@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/which-collection/-/which-collection-1.0.1.tgz#70eab71ebbbd2aefaf32f917082fc62cdcb70906" + integrity sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A== + dependencies: + is-map "^2.0.1" + is-set "^2.0.1" + is-weakmap "^2.0.1" + is-weakset "^2.0.1" + +which-typed-array@^1.1.10, which-typed-array@^1.1.11, which-typed-array@^1.1.9: version "1.1.11" resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.11.tgz#99d691f23c72aab6768680805a271b69761ed61a" integrity sha512-qe9UWWpkeG5yzZ0tNYxDmd7vo58HDBc39mZ0xWWpolAGADdFOzkfamWLDxkOWcvHQKVmdTyQdLD4NOfjLWTKew== diff --git a/internal/lookoutingester/instructions/instructions.go b/internal/lookoutingester/instructions/instructions.go index f49ac049975..2e6b314fc66 100644 --- a/internal/lookoutingester/instructions/instructions.go +++ b/internal/lookoutingester/instructions/instructions.go @@ -1,23 +1,21 @@ package instructions import ( - "context" "sort" "strings" "time" - "github.com/armadaproject/armada/internal/common/ingest/metrics" - "github.com/gogo/protobuf/proto" "github.com/google/uuid" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/utils/pointer" - "github.com/armadaproject/armada/internal/common/ingest" - + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/eventutil" + "github.com/armadaproject/armada/internal/common/ingest" + "github.com/armadaproject/armada/internal/common/ingest/metrics" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookout/repository" "github.com/armadaproject/armada/internal/lookoutingester/model" @@ -42,7 +40,7 @@ func NewInstructionConverter(metrics *metrics.Metrics, userAnnotationPrefix stri } } -func (c *InstructionConverter) Convert(ctx context.Context, sequencesWithIds 
*ingest.EventSequencesWithIds) *model.InstructionSet { +func (c *InstructionConverter) Convert(ctx *armadacontext.Context, sequencesWithIds *ingest.EventSequencesWithIds) *model.InstructionSet { updateInstructions := &model.InstructionSet{ MessageIds: sequencesWithIds.MessageIds, } diff --git a/internal/lookoutingester/instructions/instructions_test.go b/internal/lookoutingester/instructions/instructions_test.go index 5510a30a695..3f4f3043101 100644 --- a/internal/lookoutingester/instructions/instructions_test.go +++ b/internal/lookoutingester/instructions/instructions_test.go @@ -1,7 +1,6 @@ package instructions import ( - "context" "testing" "time" @@ -13,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/ingest" @@ -339,7 +339,7 @@ var expectedJobRunContainer = model.CreateJobRunContainerInstruction{ func TestSubmit(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(submit) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobsToCreate: []*model.CreateJobInstruction{&expectedSubmit}, MessageIds: msg.MessageIds, @@ -351,7 +351,7 @@ func TestSubmit(t *testing.T) { func TestDuplicate(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(testfixtures.SubmitDuplicate) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ MessageIds: msg.MessageIds, } @@ -364,7 +364,7 @@ func TestDuplicate(t *testing.T) { func TestHappyPathSingleUpdate(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(submit, assigned, running, jobRunSucceeded, jobSucceeded) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobsToCreate: []*model.CreateJobInstruction{&expectedSubmit}, JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedRunning, &expectedJobSucceeded}, @@ -384,7 +384,7 @@ func TestHappyPathMultiUpdate(t *testing.T) { svc := SimpleInstructionConverter() // Submit msg1 := NewMsg(submit) - instructions := svc.Convert(context.Background(), msg1) + instructions := svc.Convert(armadacontext.Background(), msg1) expected := &model.InstructionSet{ JobsToCreate: []*model.CreateJobInstruction{&expectedSubmit}, MessageIds: msg1.MessageIds, @@ -393,7 +393,7 @@ func TestHappyPathMultiUpdate(t *testing.T) { // Leased msg2 := NewMsg(assigned) - instructions = svc.Convert(context.Background(), msg2) + instructions = svc.Convert(armadacontext.Background(), msg2) expected = &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased}, JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, @@ -403,7 +403,7 @@ func TestHappyPathMultiUpdate(t *testing.T) { // Running msg3 := NewMsg(running) - instructions = svc.Convert(context.Background(), msg3) + instructions = svc.Convert(armadacontext.Background(), msg3) expected = &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{&expectedRunning}, JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun}, @@ -413,7 +413,7 @@ func TestHappyPathMultiUpdate(t *testing.T) { // Run Succeeded msg4 := 
NewMsg(jobRunSucceeded) - instructions = svc.Convert(context.Background(), msg4) + instructions = svc.Convert(armadacontext.Background(), msg4) expected = &model.InstructionSet{ JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedJobRunSucceeded}, MessageIds: msg4.MessageIds, @@ -422,7 +422,7 @@ func TestHappyPathMultiUpdate(t *testing.T) { // Job Succeeded msg5 := NewMsg(jobSucceeded) - instructions = svc.Convert(context.Background(), msg5) + instructions = svc.Convert(armadacontext.Background(), msg5) expected = &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{&expectedJobSucceeded}, MessageIds: msg5.MessageIds, @@ -433,7 +433,7 @@ func TestHappyPathMultiUpdate(t *testing.T) { func TestCancelled(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(jobCancelled) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{&expectedJobCancelled}, MessageIds: msg.MessageIds, @@ -444,7 +444,7 @@ func TestCancelled(t *testing.T) { func TestReprioritised(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(jobReprioritised) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{&expectedJobReprioritised}, MessageIds: msg.MessageIds, @@ -455,7 +455,7 @@ func TestReprioritised(t *testing.T) { func TestPreempted(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(jobPreempted) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedJobRunPreempted}, MessageIds: msg.MessageIds, @@ -466,7 +466,7 @@ func TestPreempted(t *testing.T) { func TestFailed(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(jobRunFailed) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedFailed}, JobRunContainersToCreate: []*model.CreateJobRunContainerInstruction{&expectedJobRunContainer}, @@ -478,7 +478,7 @@ func TestFailed(t *testing.T) { func TestFailedWithMissingRunId(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(jobLeaseReturned) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) jobRun := instructions.JobRunsToCreate[0] assert.NotEqual(t, eventutil.LEGACY_RUN_ID, jobRun.RunId) expected := &model.InstructionSet{ @@ -534,7 +534,7 @@ func TestHandlePodTerminated(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(podTerminated) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ MessageIds: msg.MessageIds, } @@ -565,7 +565,7 @@ func TestHandleJobLeaseReturned(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(leaseReturned) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobRunsToUpdate: []*model.UpdateJobRunInstruction{{ RunId: runIdString, @@ -616,7 +616,7 @@ func TestHandlePodUnschedulable(t *testing.T) { svc := 
SimpleInstructionConverter() msg := NewMsg(podUnschedulable) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobRunsToUpdate: []*model.UpdateJobRunInstruction{{ RunId: runIdString, @@ -639,7 +639,7 @@ func TestHandleDuplicate(t *testing.T) { svc := SimpleInstructionConverter() msg := NewMsg(duplicate) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{ { @@ -685,7 +685,7 @@ func TestSubmitWithNullChar(t *testing.T) { }) svc := SimpleInstructionConverter() - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) assert.Len(t, instructions.JobsToCreate, 1) assert.NotContains(t, string(instructions.JobsToCreate[0].JobProto), "\\u0000") } @@ -716,7 +716,7 @@ func TestFailedWithNullCharInError(t *testing.T) { }) svc := SimpleInstructionConverter() - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expectedJobRunsToUpdate := []*model.UpdateJobRunInstruction{ { RunId: runIdString, @@ -741,7 +741,7 @@ func TestInvalidEvent(t *testing.T) { // Check that the (valid) Submit is processed, but the invalid message is discarded svc := SimpleInstructionConverter() msg := NewMsg(invalidEvent, submit) - instructions := svc.Convert(context.Background(), msg) + instructions := svc.Convert(armadacontext.Background(), msg) expected := &model.InstructionSet{ JobsToCreate: []*model.CreateJobInstruction{&expectedSubmit}, MessageIds: msg.MessageIds, diff --git a/internal/lookoutingester/lookoutdb/insertion.go b/internal/lookoutingester/lookoutdb/insertion.go index a22b2eab29b..6009f4cadbc 100644 --- a/internal/lookoutingester/lookoutdb/insertion.go +++ b/internal/lookoutingester/lookoutdb/insertion.go @@ -1,7 +1,6 @@ package lookoutdb import ( - "context" "fmt" "sync" "time" @@ -11,6 +10,7 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/ingest" @@ -45,7 +45,7 @@ func NewLookoutDb( // * Job Run Updates, New Job Containers // In each case we first try to bach insert the rows using the postgres copy protocol. If this fails then we try a // slower, serial insert and discard any rows that cannot be inserted. 
-func (l *LookoutDb) Store(ctx context.Context, instructions *model.InstructionSet) error { +func (l *LookoutDb) Store(ctx *armadacontext.Context, instructions *model.InstructionSet) error { jobsToUpdate := instructions.JobsToUpdate jobRunsToUpdate := instructions.JobRunsToUpdate @@ -92,7 +92,7 @@ func (l *LookoutDb) Store(ctx context.Context, instructions *model.InstructionSe return nil } -func (l *LookoutDb) CreateJobs(ctx context.Context, instructions []*model.CreateJobInstruction) { +func (l *LookoutDb) CreateJobs(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) { if len(instructions) == 0 { return } @@ -109,7 +109,7 @@ func (l *LookoutDb) CreateJobs(ctx context.Context, instructions []*model.Create } } -func (l *LookoutDb) UpdateJobs(ctx context.Context, instructions []*model.UpdateJobInstruction) { +func (l *LookoutDb) UpdateJobs(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) { if len(instructions) == 0 { return } @@ -127,7 +127,7 @@ func (l *LookoutDb) UpdateJobs(ctx context.Context, instructions []*model.Update } } -func (l *LookoutDb) CreateJobRuns(ctx context.Context, instructions []*model.CreateJobRunInstruction) { +func (l *LookoutDb) CreateJobRuns(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) { if len(instructions) == 0 { return } @@ -144,7 +144,7 @@ func (l *LookoutDb) CreateJobRuns(ctx context.Context, instructions []*model.Cre } } -func (l *LookoutDb) UpdateJobRuns(ctx context.Context, instructions []*model.UpdateJobRunInstruction) { +func (l *LookoutDb) UpdateJobRuns(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) { if len(instructions) == 0 { return } @@ -161,7 +161,7 @@ func (l *LookoutDb) UpdateJobRuns(ctx context.Context, instructions []*model.Upd } } -func (l *LookoutDb) CreateUserAnnotations(ctx context.Context, instructions []*model.CreateUserAnnotationInstruction) { +func (l *LookoutDb) CreateUserAnnotations(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) { if len(instructions) == 0 { return } @@ -178,7 +178,7 @@ func (l *LookoutDb) CreateUserAnnotations(ctx context.Context, instructions []*m } } -func (l *LookoutDb) CreateJobRunContainers(ctx context.Context, instructions []*model.CreateJobRunContainerInstruction) { +func (l *LookoutDb) CreateJobRunContainers(ctx *armadacontext.Context, instructions []*model.CreateJobRunContainerInstruction) { if len(instructions) == 0 { return } @@ -195,13 +195,13 @@ func (l *LookoutDb) CreateJobRunContainers(ctx context.Context, instructions []* } } -func (l *LookoutDb) CreateJobsBatch(ctx context.Context, instructions []*model.CreateJobInstruction) error { +func (l *LookoutDb) CreateJobsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) error { return withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job") createTmp := func(tx pgx.Tx) error { _, err := tx.Exec(ctx, fmt.Sprintf(` - CREATE TEMPORARY TABLE %s + CREATE TEMPORARY TABLE %s ( job_id varchar(32), queue varchar(512), @@ -258,7 +258,7 @@ func (l *LookoutDb) CreateJobsBatch(ctx context.Context, instructions []*model.C } // CreateJobsScalar will insert jobs one by one into the database -func (l *LookoutDb) CreateJobsScalar(ctx context.Context, instructions []*model.CreateJobInstruction) { +func (l *LookoutDb) CreateJobsScalar(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) { sqlStatement := `INSERT INTO job (job_id, queue, owner, jobset, priority, submitted, 
orig_job_spec, state, job_updated) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) ON CONFLICT DO NOTHING` @@ -276,7 +276,7 @@ func (l *LookoutDb) CreateJobsScalar(ctx context.Context, instructions []*model. } } -func (l *LookoutDb) UpdateJobsBatch(ctx context.Context, instructions []*model.UpdateJobInstruction) error { +func (l *LookoutDb) UpdateJobsBatch(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) error { return withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job") @@ -337,7 +337,7 @@ func (l *LookoutDb) UpdateJobsBatch(ctx context.Context, instructions []*model.U }) } -func (l *LookoutDb) UpdateJobsScalar(ctx context.Context, instructions []*model.UpdateJobInstruction) { +func (l *LookoutDb) UpdateJobsScalar(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) { sqlStatement := `UPDATE job SET priority = coalesce($1, priority), @@ -360,7 +360,7 @@ func (l *LookoutDb) UpdateJobsScalar(ctx context.Context, instructions []*model. } } -func (l *LookoutDb) CreateJobRunsBatch(ctx context.Context, instructions []*model.CreateJobRunInstruction) error { +func (l *LookoutDb) CreateJobRunsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) error { return withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job_run") @@ -410,7 +410,7 @@ func (l *LookoutDb) CreateJobRunsBatch(ctx context.Context, instructions []*mode }) } -func (l *LookoutDb) CreateJobRunsScalar(ctx context.Context, instructions []*model.CreateJobRunInstruction) { +func (l *LookoutDb) CreateJobRunsScalar(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) { sqlStatement := `INSERT INTO job_run (run_id, job_id, created, cluster) VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING` @@ -428,7 +428,7 @@ func (l *LookoutDb) CreateJobRunsScalar(ctx context.Context, instructions []*mod } } -func (l *LookoutDb) UpdateJobRunsBatch(ctx context.Context, instructions []*model.UpdateJobRunInstruction) error { +func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) error { return withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job_run") @@ -499,7 +499,7 @@ func (l *LookoutDb) UpdateJobRunsBatch(ctx context.Context, instructions []*mode }) } -func (l *LookoutDb) UpdateJobRunsScalar(ctx context.Context, instructions []*model.UpdateJobRunInstruction) { +func (l *LookoutDb) UpdateJobRunsScalar(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) { sqlStatement := `UPDATE job_run SET node = coalesce($1, node), @@ -525,7 +525,7 @@ func (l *LookoutDb) UpdateJobRunsScalar(ctx context.Context, instructions []*mod } } -func (l *LookoutDb) CreateUserAnnotationsBatch(ctx context.Context, instructions []*model.CreateUserAnnotationInstruction) error { +func (l *LookoutDb) CreateUserAnnotationsBatch(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) error { return withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("user_annotation_lookup") @@ -573,9 +573,9 @@ func (l *LookoutDb) CreateUserAnnotationsBatch(ctx context.Context, instructions }) } -func (l *LookoutDb) CreateUserAnnotationsScalar(ctx context.Context, instructions []*model.CreateUserAnnotationInstruction) { +func (l *LookoutDb) CreateUserAnnotationsScalar(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) { sqlStatement := `INSERT INTO user_annotation_lookup 
(job_id, key, value) - VALUES ($1, $2, $3) + VALUES ($1, $2, $3) ON CONFLICT DO NOTHING` for _, i := range instructions { err := withDatabaseRetryInsert(func() error { @@ -592,7 +592,7 @@ func (l *LookoutDb) CreateUserAnnotationsScalar(ctx context.Context, instruction } } -func (l *LookoutDb) CreateJobRunContainersBatch(ctx context.Context, instructions []*model.CreateJobRunContainerInstruction) error { +func (l *LookoutDb) CreateJobRunContainersBatch(ctx *armadacontext.Context, instructions []*model.CreateJobRunContainerInstruction) error { return withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job_run_container") createTmp := func(tx pgx.Tx) error { @@ -641,7 +641,7 @@ func (l *LookoutDb) CreateJobRunContainersBatch(ctx context.Context, instruction }) } -func (l *LookoutDb) CreateJobRunContainersScalar(ctx context.Context, instructions []*model.CreateJobRunContainerInstruction) { +func (l *LookoutDb) CreateJobRunContainersScalar(ctx *armadacontext.Context, instructions []*model.CreateJobRunContainerInstruction) { sqlStatement := `INSERT INTO job_run_container (run_id, container_name, exit_code) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING` @@ -659,7 +659,7 @@ func (l *LookoutDb) CreateJobRunContainersScalar(ctx context.Context, instructio } } -func batchInsert(ctx context.Context, db *pgxpool.Pool, createTmp func(pgx.Tx) error, +func batchInsert(ctx *armadacontext.Context, db *pgxpool.Pool, createTmp func(pgx.Tx) error, insertTmp func(pgx.Tx) error, copyToDest func(pgx.Tx) error, ) error { return pgx.BeginTxFunc(ctx, db, pgx.TxOptions{ @@ -776,7 +776,7 @@ func conflateJobRunUpdates(updates []*model.UpdateJobRunInstruction) []*model.Up // in the terminal state. If, however, the database returns a non-retryable error it will give up and simply not // filter out any events as the job state is undetermined. 
func filterEventsForTerminalJobs( - ctx context.Context, + ctx *armadacontext.Context, db *pgxpool.Pool, instructions []*model.UpdateJobInstruction, m *metrics.Metrics, diff --git a/internal/lookoutingester/lookoutdb/insertion_test.go b/internal/lookoutingester/lookoutdb/insertion_test.go index 25a3ff1af03..079912a68c4 100644 --- a/internal/lookoutingester/lookoutdb/insertion_test.go +++ b/internal/lookoutingester/lookoutdb/insertion_test.go @@ -1,21 +1,19 @@ package lookoutdb import ( - "context" "fmt" "sort" "testing" "time" - "github.com/armadaproject/armada/internal/common/database/lookout" - "github.com/apache/pulsar-client-go/pulsar" "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/internal/lookout/configuration" "github.com/armadaproject/armada/internal/lookout/repository" @@ -216,24 +214,24 @@ func TestCreateJobsBatch(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Insert - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // Insert again and test that it's idempotent - err = ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err = ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // If a row is bad then we should return an error and no updates should happen - _, err = db.Exec(context.Background(), "DELETE FROM job") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job") require.NoError(t, err) invalidJob := &model.CreateJobInstruction{ JobId: invalidId, } - err = ldb.CreateJobsBatch(context.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) + err = ldb.CreateJobsBatch(armadacontext.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) assert.Error(t, err) assertNoRows(t, db, "job") return nil @@ -245,29 +243,29 @@ func TestUpdateJobsBatch(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Insert - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) // Update - err = ldb.UpdateJobsBatch(context.Background(), defaultInstructionSet().JobsToUpdate) + err = ldb.UpdateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) require.NoError(t, err) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) - err = ldb.UpdateJobsBatch(context.Background(), defaultInstructionSet().JobsToUpdate) + err = ldb.UpdateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) require.NoError(t, err) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) // If an update is bad then we should return an error and no updates should happen - _, err = db.Exec(context.Background(), "DELETE FROM job") 
+ _, err = db.Exec(armadacontext.Background(), "DELETE FROM job") require.NoError(t, err) - err = ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err = ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) invalidUpdate := &model.UpdateJobInstruction{ JobId: invalidId, } - err = ldb.UpdateJobsBatch(context.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) + err = ldb.UpdateJobsBatch(armadacontext.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) assert.Error(t, err) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) @@ -280,28 +278,28 @@ func TestUpdateJobsScalar(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Insert - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) // Update - ldb.UpdateJobsScalar(context.Background(), defaultInstructionSet().JobsToUpdate) + ldb.UpdateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) // Insert again and test that it's idempotent - ldb.UpdateJobsScalar(context.Background(), defaultInstructionSet().JobsToUpdate) + ldb.UpdateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) // If a update is bad then we should return an error and no updates should happen - _, err = db.Exec(context.Background(), "DELETE FROM job") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job") require.NoError(t, err) - err = ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err = ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) invalidUpdate := &model.UpdateJobInstruction{ JobId: invalidId, } - ldb.UpdateJobsScalar(context.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) + ldb.UpdateJobsScalar(armadacontext.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) return nil @@ -376,13 +374,13 @@ func TestUpdateJobsWithTerminal(t *testing.T) { ldb := getTestLookoutDb(db) // Insert - ldb.CreateJobs(context.Background(), initial) + ldb.CreateJobs(armadacontext.Background(), initial) // Mark the jobs terminal - ldb.UpdateJobs(context.Background(), update1) + ldb.UpdateJobs(armadacontext.Background(), update1) // Update the jobs - these should be discarded - ldb.UpdateJobs(context.Background(), update2) + ldb.UpdateJobs(armadacontext.Background(), update2) // Assert the states are still terminal job := getJob(t, db, jobIdString) @@ -403,22 +401,22 @@ func TestCreateJobsScalar(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Simple create - ldb.CreateJobsScalar(context.Background(), defaultInstructionSet().JobsToCreate) + ldb.CreateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToCreate) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // Insert again and check for idempotency - ldb.CreateJobsScalar(context.Background(), defaultInstructionSet().JobsToCreate) + 
ldb.CreateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToCreate) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // If a row is bad then we should update only the good rows - _, err := ldb.db.Exec(context.Background(), "DELETE FROM job") + _, err := ldb.db.Exec(armadacontext.Background(), "DELETE FROM job") require.NoError(t, err) invalidJob := &model.CreateJobInstruction{ JobId: invalidId, } - ldb.CreateJobsScalar(context.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) + ldb.CreateJobsScalar(armadacontext.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) return nil @@ -430,28 +428,28 @@ func TestCreateJobRunsBatch(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Need to make sure we have a job, so we can satisfy PK - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) // Insert - err = ldb.CreateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) require.NoError(t, err) job := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // Insert again and test that it's idempotent - err = ldb.CreateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) require.NoError(t, err) job = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // If a row is bad then we should return an error and no updates should happen - _, err = ldb.db.Exec(context.Background(), "DELETE FROM job_run") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM job_run") require.NoError(t, err) invalidRun := &model.CreateJobRunInstruction{ RunId: invalidId, } - err = ldb.CreateJobRunsBatch(context.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) assert.Error(t, err) assertNoRows(t, db, "job_run") return nil @@ -463,26 +461,26 @@ func TestCreateJobRunsScalar(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Need to make sure we have a job, so we can satisfy PK - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) // Insert - ldb.CreateJobRunsScalar(context.Background(), defaultInstructionSet().JobRunsToCreate) + ldb.CreateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) job := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // Insert again and test that it's idempotent - ldb.CreateJobRunsScalar(context.Background(), defaultInstructionSet().JobRunsToCreate) + ldb.CreateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) job = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // If a row is bad then we create rows that can be created - _, err = db.Exec(context.Background(), "DELETE FROM job_run") + _, err = 
db.Exec(armadacontext.Background(), "DELETE FROM job_run") require.NoError(t, err) invalidRun := &model.CreateJobRunInstruction{ RunId: invalidId, } - ldb.CreateJobRunsScalar(context.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) + ldb.CreateJobRunsScalar(armadacontext.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) job = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) return nil @@ -494,33 +492,33 @@ func TestUpdateJobRunsBatch(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Need to make sure we have a job and run - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) - err = ldb.CreateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) require.NoError(t, err) // Update - err = ldb.UpdateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToUpdate) + err = ldb.UpdateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) require.NoError(t, err) run := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // Update again and test that it's idempotent - err = ldb.UpdateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToUpdate) + err = ldb.UpdateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) require.NoError(t, err) run = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // If a row is bad then we should return an error and no updates should happen - _, err = db.Exec(context.Background(), "DELETE FROM job_run;") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job_run;") require.NoError(t, err) invalidRun := &model.UpdateJobRunInstruction{ RunId: invalidId, } - err = ldb.CreateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) require.NoError(t, err) - err = ldb.UpdateJobRunsBatch(context.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) + err = ldb.UpdateJobRunsBatch(armadacontext.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) assert.Error(t, err) run = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, run) @@ -533,33 +531,33 @@ func TestUpdateJobRunsScalar(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Need to make sure we have a job and run - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) - err = ldb.CreateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) require.NoError(t, err) // Update - ldb.UpdateJobRunsScalar(context.Background(), defaultInstructionSet().JobRunsToUpdate) + ldb.UpdateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) require.NoError(t, err) run := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // Update again and test that 
it's idempotent - ldb.UpdateJobRunsScalar(context.Background(), defaultInstructionSet().JobRunsToUpdate) + ldb.UpdateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) require.NoError(t, err) run = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // If a row is bad then we should update the rows we can - _, err = ldb.db.Exec(context.Background(), "DELETE FROM job_run;") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM job_run;") require.NoError(t, err) invalidRun := &model.UpdateJobRunInstruction{ RunId: invalidId, } - err = ldb.CreateJobRunsBatch(context.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) - ldb.UpdateJobRunsScalar(context.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) + ldb.UpdateJobRunsScalar(armadacontext.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) run = getJobRun(t, ldb.db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) return nil @@ -571,28 +569,28 @@ func TestCreateUserAnnotationsBatch(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Need to make sure we have a job - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) // Insert - err = ldb.CreateUserAnnotationsBatch(context.Background(), defaultInstructionSet().UserAnnotationsToCreate) + err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) require.NoError(t, err) annotation := getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // Insert again and test that it's idempotent - err = ldb.CreateUserAnnotationsBatch(context.Background(), defaultInstructionSet().UserAnnotationsToCreate) + err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) require.NoError(t, err) annotation = getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // If a row is bad then we should return an error and no updates should happen - _, err = ldb.db.Exec(context.Background(), "DELETE FROM user_annotation_lookup") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM user_annotation_lookup") require.NoError(t, err) invalidAnnotation := &model.CreateUserAnnotationInstruction{ JobId: invalidId, } - err = ldb.CreateUserAnnotationsBatch(context.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) + err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) assert.Error(t, err) assertNoRows(t, ldb.db, "user_annotation_lookup") return nil @@ -603,7 +601,7 @@ func TestCreateUserAnnotationsBatch(t *testing.T) { func TestEmptyUpdate(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) - storeErr := ldb.Store(context.Background(), &model.InstructionSet{}) + storeErr := ldb.Store(armadacontext.Background(), &model.InstructionSet{}) require.NoError(t, storeErr) assertNoRows(t, ldb.db, "job") assertNoRows(t, ldb.db, "job_run") @@ -618,26 +616,26 @@ func TestCreateUserAnnotationsScalar(t *testing.T) { 
err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Need to make sure we have a job - err := ldb.CreateJobsBatch(context.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) require.NoError(t, err) // Insert - ldb.CreateUserAnnotationsScalar(context.Background(), defaultInstructionSet().UserAnnotationsToCreate) + ldb.CreateUserAnnotationsScalar(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) annotation := getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // Insert again and test that it's idempotent - ldb.CreateUserAnnotationsScalar(context.Background(), defaultInstructionSet().UserAnnotationsToCreate) + ldb.CreateUserAnnotationsScalar(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) annotation = getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // If a row is bad then we should update the rows we can - _, err = ldb.db.Exec(context.Background(), "DELETE FROM user_annotation_lookup") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM user_annotation_lookup") require.NoError(t, err) invalidAnnotation := &model.CreateUserAnnotationInstruction{ JobId: invalidId, } - ldb.CreateUserAnnotationsScalar(context.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) + ldb.CreateUserAnnotationsScalar(armadacontext.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) annotation = getUserAnnotationLookup(t, ldb.db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) return nil @@ -649,7 +647,7 @@ func TestUpdate(t *testing.T) { err := testutil.WithDatabasePgx(func(db *pgxpool.Pool) error { ldb := getTestLookoutDb(db) // Do the update - storeErr := ldb.Store(context.Background(), defaultInstructionSet()) + storeErr := ldb.Store(armadacontext.Background(), defaultInstructionSet()) require.NoError(t, storeErr) job := getJob(t, ldb.db, jobIdString) jobRun := getJobRun(t, ldb.db, runIdString) @@ -748,7 +746,7 @@ func TestConflateJobRunUpdates(T *testing.T) { func getJob(t *testing.T, db *pgxpool.Pool, jobId string) JobRow { job := JobRow{} r := db.QueryRow( - context.Background(), + armadacontext.Background(), `SELECT job_id, queue, owner, jobset, priority, submitted, state, duplicate, job_updated, orig_job_spec, cancelled FROM job WHERE job_id = $1`, jobId) err := r.Scan( @@ -771,7 +769,7 @@ func getJob(t *testing.T, db *pgxpool.Pool, jobId string) JobRow { func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { run := JobRunRow{} r := db.QueryRow( - context.Background(), + armadacontext.Background(), `SELECT run_id, job_id, cluster, node, created, started, finished, succeeded, error, pod_number, unable_to_schedule, preempted FROM job_run WHERE run_id = $1`, runId) err := r.Scan( @@ -795,7 +793,7 @@ func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { func getJobRunContainer(t *testing.T, db *pgxpool.Pool, runId string) JobRunContainerRow { container := JobRunContainerRow{} r := db.QueryRow( - context.Background(), + armadacontext.Background(), `SELECT run_id, container_name, exit_code FROM job_run_container WHERE run_id = $1`, runId) err := r.Scan(&container.RunId, &container.ContainerName, &container.ExitCode) @@ -806,7 +804,7 @@ func getJobRunContainer(t *testing.T, db *pgxpool.Pool, runId 
string) JobRunCont func getUserAnnotationLookup(t *testing.T, db *pgxpool.Pool, jobId string) UserAnnotationRow { annotation := UserAnnotationRow{} r := db.QueryRow( - context.Background(), + armadacontext.Background(), `SELECT job_id, key, value FROM user_annotation_lookup WHERE job_id = $1`, jobId) err := r.Scan(&annotation.JobId, &annotation.Key, &annotation.Value) @@ -816,7 +814,7 @@ func getUserAnnotationLookup(t *testing.T, db *pgxpool.Pool, jobId string) UserA func assertNoRows(t *testing.T, db *pgxpool.Pool, table string) { var count int - r := db.QueryRow(context.Background(), fmt.Sprintf("SELECT COUNT(*) FROM %s", table)) + r := db.QueryRow(armadacontext.Background(), fmt.Sprintf("SELECT COUNT(*) FROM %s", table)) err := r.Scan(&count) require.NoError(t, err) assert.Equal(t, 0, count) diff --git a/internal/lookoutingesterv2/benchmark/benchmark.go b/internal/lookoutingesterv2/benchmark/benchmark.go index 6c808ca14f3..953ccb85483 100644 --- a/internal/lookoutingesterv2/benchmark/benchmark.go +++ b/internal/lookoutingesterv2/benchmark/benchmark.go @@ -1,7 +1,6 @@ package benchmark import ( - "context" "fmt" "math" "math/rand" @@ -12,6 +11,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutingesterv2/configuration" @@ -51,7 +51,7 @@ func benchmarkSubmissions1000(b *testing.B, config configuration.LookoutIngester withDbBenchmark(b, config, func(b *testing.B, db *pgxpool.Pool) { ldb := lookoutdb.NewLookoutDb(db, metrics.Get(), 2, 10) b.StartTimer() - err := ldb.Store(context.TODO(), instructions) + err := ldb.Store(armadacontext.TODO(), instructions) if err != nil { panic(err) } @@ -69,7 +69,7 @@ func benchmarkSubmissions10000(b *testing.B, config configuration.LookoutIngeste withDbBenchmark(b, config, func(b *testing.B, db *pgxpool.Pool) { ldb := lookoutdb.NewLookoutDb(db, metrics.Get(), 2, 10) b.StartTimer() - err := ldb.Store(context.TODO(), instructions) + err := ldb.Store(armadacontext.TODO(), instructions) if err != nil { panic(err) } @@ -99,12 +99,12 @@ func benchmarkUpdates1000(b *testing.B, config configuration.LookoutIngesterV2Co withDbBenchmark(b, config, func(b *testing.B, db *pgxpool.Pool) { ldb := lookoutdb.NewLookoutDb(db, metrics.Get(), 2, 10) - err := ldb.Store(context.TODO(), initialInstructions) + err := ldb.Store(armadacontext.TODO(), initialInstructions) if err != nil { panic(err) } b.StartTimer() - err = ldb.Store(context.TODO(), instructions) + err = ldb.Store(armadacontext.TODO(), instructions) if err != nil { panic(err) } @@ -134,12 +134,12 @@ func benchmarkUpdates10000(b *testing.B, config configuration.LookoutIngesterV2C withDbBenchmark(b, config, func(b *testing.B, db *pgxpool.Pool) { ldb := lookoutdb.NewLookoutDb(db, metrics.Get(), 2, 10) - err := ldb.Store(context.TODO(), initialInstructions) + err := ldb.Store(armadacontext.TODO(), initialInstructions) if err != nil { panic(err) } b.StartTimer() - err = ldb.Store(context.TODO(), instructions) + err = ldb.Store(armadacontext.TODO(), instructions) if err != nil { panic(err) } diff --git a/internal/lookoutingesterv2/instructions/instructions.go b/internal/lookoutingesterv2/instructions/instructions.go index 49e519f5eb4..25decf00f2a 100644 --- a/internal/lookoutingesterv2/instructions/instructions.go +++ b/internal/lookoutingesterv2/instructions/instructions.go @@ 
-1,7 +1,6 @@ package instructions import ( - "context" "fmt" "sort" "strings" @@ -14,6 +13,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/eventutil" @@ -65,7 +65,7 @@ func (c *InstructionConverter) IsLegacy() bool { return c.useLegacyEventConversion } -func (c *InstructionConverter) Convert(ctx context.Context, sequencesWithIds *ingest.EventSequencesWithIds) *model.InstructionSet { +func (c *InstructionConverter) Convert(ctx *armadacontext.Context, sequencesWithIds *ingest.EventSequencesWithIds) *model.InstructionSet { updateInstructions := &model.InstructionSet{ MessageIds: sequencesWithIds.MessageIds, } @@ -77,7 +77,7 @@ func (c *InstructionConverter) Convert(ctx context.Context, sequencesWithIds *in } func (c *InstructionConverter) convertSequence( - ctx context.Context, + ctx *armadacontext.Context, sequence *armadaevents.EventSequence, update *model.InstructionSet, ) { diff --git a/internal/lookoutingesterv2/instructions/instructions_test.go b/internal/lookoutingesterv2/instructions/instructions_test.go index d70d7d3900d..36e58983283 100644 --- a/internal/lookoutingesterv2/instructions/instructions_test.go +++ b/internal/lookoutingesterv2/instructions/instructions_test.go @@ -1,7 +1,6 @@ package instructions import ( - "context" "fmt" "strings" "testing" @@ -14,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/eventutil" @@ -560,7 +560,7 @@ func TestConvert(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, tc.useLegacyEventConversion) - instructionSet := converter.Convert(context.TODO(), tc.events) + instructionSet := converter.Convert(armadacontext.TODO(), tc.events) assert.Equal(t, tc.expected.JobsToCreate, instructionSet.JobsToCreate) assert.Equal(t, tc.expected.JobsToUpdate, instructionSet.JobsToUpdate) assert.Equal(t, tc.expected.JobRunsToCreate, instructionSet.JobRunsToCreate) @@ -571,7 +571,7 @@ func TestConvert(t *testing.T) { func TestFailedWithMissingRunId(t *testing.T) { converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) - instructions := converter.Convert(context.Background(), &ingest.EventSequencesWithIds{ + instructions := converter.Convert(armadacontext.Background(), &ingest.EventSequencesWithIds{ EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobLeaseReturned)}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }) @@ -631,7 +631,7 @@ func TestTruncatesStringsThatAreTooLong(t *testing.T) { } converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) - actual := converter.Convert(context.TODO(), events) + actual := converter.Convert(armadacontext.TODO(), events) // String lengths obtained from database schema assert.Len(t, actual.JobsToCreate[0].Queue, 512) diff --git a/internal/lookoutingesterv2/lookoutdb/insertion.go 
b/internal/lookoutingesterv2/lookoutdb/insertion.go index c5378543df0..2e13c453213 100644 --- a/internal/lookoutingesterv2/lookoutdb/insertion.go +++ b/internal/lookoutingesterv2/lookoutdb/insertion.go @@ -1,7 +1,6 @@ package lookoutdb import ( - "context" "fmt" "sync" "time" @@ -11,6 +10,7 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/database/lookout" @@ -36,7 +36,7 @@ func NewLookoutDb(db *pgxpool.Pool, metrics *metrics.Metrics, maxAttempts int, m // * Job Run Updates // In each case we first try to batch insert the rows using the postgres copy protocol. If this fails then we try a // slower, serial insert and discard any rows that cannot be inserted. -func (l *LookoutDb) Store(ctx context.Context, instructions *model.InstructionSet) error { +func (l *LookoutDb) Store(ctx *armadacontext.Context, instructions *model.InstructionSet) error { // We might have multiple updates for the same job or job run // These can be conflated to help performance jobsToUpdate := conflateJobUpdates(instructions.JobsToUpdate) @@ -68,7 +68,7 @@ func (l *LookoutDb) Store(ctx context.Context, instructions *model.InstructionSe return nil } -func (l *LookoutDb) CreateJobs(ctx context.Context, instructions []*model.CreateJobInstruction) { +func (l *LookoutDb) CreateJobs(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) { if len(instructions) == 0 { return } @@ -79,7 +79,7 @@ func (l *LookoutDb) CreateJobs(ctx context.Context, instructions []*model.Create } } -func (l *LookoutDb) UpdateJobs(ctx context.Context, instructions []*model.UpdateJobInstruction) { +func (l *LookoutDb) UpdateJobs(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) { if len(instructions) == 0 { return } @@ -91,7 +91,7 @@ func (l *LookoutDb) UpdateJobs(ctx context.Context, instructions []*model.Update } } -func (l *LookoutDb) CreateJobRuns(ctx context.Context, instructions []*model.CreateJobRunInstruction) { +func (l *LookoutDb) CreateJobRuns(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) { if len(instructions) == 0 { return } @@ -102,7 +102,7 @@ func (l *LookoutDb) CreateJobRuns(ctx context.Context, instructions []*model.Cre } } -func (l *LookoutDb) UpdateJobRuns(ctx context.Context, instructions []*model.UpdateJobRunInstruction) { +func (l *LookoutDb) UpdateJobRuns(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) { if len(instructions) == 0 { return } @@ -113,7 +113,7 @@ func (l *LookoutDb) UpdateJobRuns(ctx context.Context, instructions []*model.Upd } } -func (l *LookoutDb) CreateUserAnnotations(ctx context.Context, instructions []*model.CreateUserAnnotationInstruction) { +func (l *LookoutDb) CreateUserAnnotations(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) { if len(instructions) == 0 { return } @@ -124,7 +124,7 @@ func (l *LookoutDb) CreateUserAnnotations(ctx context.Context, instructions []*m } } -func (l *LookoutDb) CreateJobsBatch(ctx context.Context, instructions []*model.CreateJobInstruction) error { +func (l *LookoutDb) CreateJobsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) error { return l.withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job") @@ -231,7 +231,7 @@ func (l *LookoutDb) 
CreateJobsBatch(ctx context.Context, instructions []*model.C } // CreateJobsScalar will insert jobs one by one into the database -func (l *LookoutDb) CreateJobsScalar(ctx context.Context, instructions []*model.CreateJobInstruction) { +func (l *LookoutDb) CreateJobsScalar(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) { sqlStatement := `INSERT INTO job ( job_id, queue, @@ -279,7 +279,7 @@ func (l *LookoutDb) CreateJobsScalar(ctx context.Context, instructions []*model. } } -func (l *LookoutDb) UpdateJobsBatch(ctx context.Context, instructions []*model.UpdateJobInstruction) error { +func (l *LookoutDb) UpdateJobsBatch(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) error { return l.withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job") @@ -358,7 +358,7 @@ func (l *LookoutDb) UpdateJobsBatch(ctx context.Context, instructions []*model.U }) } -func (l *LookoutDb) UpdateJobsScalar(ctx context.Context, instructions []*model.UpdateJobInstruction) { +func (l *LookoutDb) UpdateJobsScalar(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) { sqlStatement := `UPDATE job SET priority = coalesce($2, priority), @@ -393,7 +393,7 @@ func (l *LookoutDb) UpdateJobsScalar(ctx context.Context, instructions []*model. } } -func (l *LookoutDb) CreateJobRunsBatch(ctx context.Context, instructions []*model.CreateJobRunInstruction) error { +func (l *LookoutDb) CreateJobRunsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) error { return l.withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job_run") @@ -464,7 +464,7 @@ func (l *LookoutDb) CreateJobRunsBatch(ctx context.Context, instructions []*mode }) } -func (l *LookoutDb) CreateJobRunsScalar(ctx context.Context, instructions []*model.CreateJobRunInstruction) { +func (l *LookoutDb) CreateJobRunsScalar(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) { sqlStatement := `INSERT INTO job_run ( run_id, job_id, @@ -496,7 +496,7 @@ func (l *LookoutDb) CreateJobRunsScalar(ctx context.Context, instructions []*mod } } -func (l *LookoutDb) UpdateJobRunsBatch(ctx context.Context, instructions []*model.UpdateJobRunInstruction) error { +func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) error { return l.withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("job_run") @@ -571,7 +571,7 @@ func (l *LookoutDb) UpdateJobRunsBatch(ctx context.Context, instructions []*mode }) } -func (l *LookoutDb) UpdateJobRunsScalar(ctx context.Context, instructions []*model.UpdateJobRunInstruction) { +func (l *LookoutDb) UpdateJobRunsScalar(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) { sqlStatement := `UPDATE job_run SET node = coalesce($2, node), @@ -604,7 +604,7 @@ func (l *LookoutDb) UpdateJobRunsScalar(ctx context.Context, instructions []*mod } } -func (l *LookoutDb) CreateUserAnnotationsBatch(ctx context.Context, instructions []*model.CreateUserAnnotationInstruction) error { +func (l *LookoutDb) CreateUserAnnotationsBatch(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) error { return l.withDatabaseRetryInsert(func() error { tmpTable := database.UniqueTableName("user_annotation_lookup") @@ -667,7 +667,7 @@ func (l *LookoutDb) CreateUserAnnotationsBatch(ctx context.Context, instructions }) } -func (l *LookoutDb) CreateUserAnnotationsScalar(ctx context.Context, instructions 
[]*model.CreateUserAnnotationInstruction) { +func (l *LookoutDb) CreateUserAnnotationsScalar(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) { sqlStatement := `INSERT INTO user_annotation_lookup ( job_id, key, @@ -696,7 +696,7 @@ func (l *LookoutDb) CreateUserAnnotationsScalar(ctx context.Context, instruction } } -func batchInsert(ctx context.Context, db *pgxpool.Pool, createTmp func(pgx.Tx) error, +func batchInsert(ctx *armadacontext.Context, db *pgxpool.Pool, createTmp func(pgx.Tx) error, insertTmp func(pgx.Tx) error, copyToDest func(pgx.Tx) error, ) error { return pgx.BeginTxFunc(ctx, db, pgx.TxOptions{ @@ -834,7 +834,7 @@ type updateInstructionsForJob struct { // in the terminal state. If, however, the database returns a non-retryable error it will give up and simply not // filter out any events as the job state is undetermined. func (l *LookoutDb) filterEventsForTerminalJobs( - ctx context.Context, + ctx *armadacontext.Context, db *pgxpool.Pool, instructions []*model.UpdateJobInstruction, m *metrics.Metrics, diff --git a/internal/lookoutingesterv2/lookoutdb/insertion_test.go b/internal/lookoutingesterv2/lookoutdb/insertion_test.go index 9de584df3fa..13b64c12365 100644 --- a/internal/lookoutingesterv2/lookoutdb/insertion_test.go +++ b/internal/lookoutingesterv2/lookoutdb/insertion_test.go @@ -1,7 +1,6 @@ package lookoutdb import ( - ctx "context" "fmt" "sort" "testing" @@ -12,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/internal/lookoutingesterv2/metrics" @@ -202,24 +202,24 @@ func TestCreateJobsBatch(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Insert - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // Insert again and test that it's idempotent - err = ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err = ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // If a row is bad then we should return an error and no updates should happen - _, err = db.Exec(ctx.Background(), "DELETE FROM job") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job") assert.NoError(t, err) invalidJob := &model.CreateJobInstruction{ JobId: invalidId, } - err = ldb.CreateJobsBatch(ctx.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) + err = ldb.CreateJobsBatch(armadacontext.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) assert.Error(t, err) assertNoRows(t, db, "job") return nil @@ -231,29 +231,29 @@ func TestUpdateJobsBatch(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Insert - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) // Update - err = ldb.UpdateJobsBatch(ctx.Background(), 
defaultInstructionSet().JobsToUpdate) + err = ldb.UpdateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) assert.Nil(t, err) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) - err = ldb.UpdateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToUpdate) + err = ldb.UpdateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) assert.Nil(t, err) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) // If an update is bad then we should return an error and no updates should happen - _, err = db.Exec(ctx.Background(), "DELETE FROM job") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job") assert.NoError(t, err) - err = ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err = ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) invalidUpdate := &model.UpdateJobInstruction{ JobId: invalidId, } - err = ldb.UpdateJobsBatch(ctx.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) + err = ldb.UpdateJobsBatch(armadacontext.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) assert.Error(t, err) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) @@ -266,28 +266,28 @@ func TestUpdateJobsScalar(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Insert - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) // Update - ldb.UpdateJobsScalar(ctx.Background(), defaultInstructionSet().JobsToUpdate) + ldb.UpdateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) // Insert again and test that it's idempotent - ldb.UpdateJobsScalar(ctx.Background(), defaultInstructionSet().JobsToUpdate) + ldb.UpdateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToUpdate) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) // If an update is bad then we should return an error and no updates should happen - _, err = db.Exec(ctx.Background(), "DELETE FROM job") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job") assert.NoError(t, err) - err = ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err = ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) invalidUpdate := &model.UpdateJobInstruction{ JobId: invalidId, } - ldb.UpdateJobsScalar(ctx.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) + ldb.UpdateJobsScalar(armadacontext.Background(), append(defaultInstructionSet().JobsToUpdate, invalidUpdate)) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) return nil @@ -399,13 +399,13 @@ func TestUpdateJobsWithTerminal(t *testing.T) { ldb := NewLookoutDb(db, m, 2, 10) // Insert - ldb.CreateJobs(ctx.Background(), initial) + ldb.CreateJobs(armadacontext.Background(), initial) // Mark the jobs terminal - ldb.UpdateJobs(ctx.Background(), update1) + ldb.UpdateJobs(armadacontext.Background(), update1) // Update the jobs - these should be discarded - ldb.UpdateJobs(ctx.Background(), update2) + ldb.UpdateJobs(armadacontext.Background(), update2) // Assert the states are still terminal job 
:= getJob(t, db, jobIdString) @@ -427,22 +427,22 @@ func TestCreateJobsScalar(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Simple create - ldb.CreateJobsScalar(ctx.Background(), defaultInstructionSet().JobsToCreate) + ldb.CreateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToCreate) job := getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // Insert again and check for idempotency - ldb.CreateJobsScalar(ctx.Background(), defaultInstructionSet().JobsToCreate) + ldb.CreateJobsScalar(armadacontext.Background(), defaultInstructionSet().JobsToCreate) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) // If a row is bad then we should update only the good rows - _, err := ldb.db.Exec(ctx.Background(), "DELETE FROM job") + _, err := ldb.db.Exec(armadacontext.Background(), "DELETE FROM job") assert.NoError(t, err) invalidJob := &model.CreateJobInstruction{ JobId: invalidId, } - ldb.CreateJobsScalar(ctx.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) + ldb.CreateJobsScalar(armadacontext.Background(), append(defaultInstructionSet().JobsToCreate, invalidJob)) job = getJob(t, db, jobIdString) assert.Equal(t, expectedJobAfterSubmit, job) return nil @@ -454,28 +454,28 @@ func TestCreateJobRunsBatch(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Need to make sure we have a job, so we can satisfy PK - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) // Insert - err = ldb.CreateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) job := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // Insert again and test that it's idempotent - err = ldb.CreateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) job = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // If a row is bad then we should return an error and no updates should happen - _, err = ldb.db.Exec(ctx.Background(), "DELETE FROM job_run") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM job_run") assert.NoError(t, err) invalidRun := &model.CreateJobRunInstruction{ RunId: invalidId, } - err = ldb.CreateJobRunsBatch(ctx.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) assert.Error(t, err) assertNoRows(t, db, "job_run") return nil @@ -487,26 +487,26 @@ func TestCreateJobRunsScalar(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Need to make sure we have a job, so we can satisfy PK - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) // Insert - ldb.CreateJobRunsScalar(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + ldb.CreateJobRunsScalar(armadacontext.Background(), 
defaultInstructionSet().JobRunsToCreate) job := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // Insert again and test that it's idempotent - ldb.CreateJobRunsScalar(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + ldb.CreateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) job = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) // If a row is bad then we create rows that can be created - _, err = db.Exec(ctx.Background(), "DELETE FROM job_run") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job_run") assert.NoError(t, err) invalidRun := &model.CreateJobRunInstruction{ RunId: invalidId, } - ldb.CreateJobRunsScalar(ctx.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) + ldb.CreateJobRunsScalar(armadacontext.Background(), append(defaultInstructionSet().JobRunsToCreate, invalidRun)) job = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, job) return nil @@ -518,33 +518,33 @@ func TestUpdateJobRunsBatch(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Need to make sure we have a job and run - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) - err = ldb.CreateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) // Update - err = ldb.UpdateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToUpdate) + err = ldb.UpdateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) assert.Nil(t, err) run := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // Update again and test that it's idempotent - err = ldb.UpdateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToUpdate) + err = ldb.UpdateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) assert.Nil(t, err) run = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // If a row is bad then we should return an error and no updates should happen - _, err = db.Exec(ctx.Background(), "DELETE FROM job_run;") + _, err = db.Exec(armadacontext.Background(), "DELETE FROM job_run;") assert.Nil(t, err) invalidRun := &model.UpdateJobRunInstruction{ RunId: invalidId, } - err = ldb.CreateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) - err = ldb.UpdateJobRunsBatch(ctx.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) + err = ldb.UpdateJobRunsBatch(armadacontext.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) assert.Error(t, err) run = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRun, run) @@ -557,33 +557,33 @@ func TestUpdateJobRunsScalar(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Need to make sure we have a job and run - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) - err = ldb.CreateJobRunsBatch(ctx.Background(), 
defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) // Update - ldb.UpdateJobRunsScalar(ctx.Background(), defaultInstructionSet().JobRunsToUpdate) + ldb.UpdateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) assert.Nil(t, err) run := getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // Update again and test that it's idempotent - ldb.UpdateJobRunsScalar(ctx.Background(), defaultInstructionSet().JobRunsToUpdate) + ldb.UpdateJobRunsScalar(armadacontext.Background(), defaultInstructionSet().JobRunsToUpdate) assert.Nil(t, err) run = getJobRun(t, db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) // If a row is bad then we should update the rows we can - _, err = ldb.db.Exec(ctx.Background(), "DELETE FROM job_run;") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM job_run;") assert.Nil(t, err) invalidRun := &model.UpdateJobRunInstruction{ RunId: invalidId, } - err = ldb.CreateJobRunsBatch(ctx.Background(), defaultInstructionSet().JobRunsToCreate) + err = ldb.CreateJobRunsBatch(armadacontext.Background(), defaultInstructionSet().JobRunsToCreate) assert.Nil(t, err) - ldb.UpdateJobRunsScalar(ctx.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) + ldb.UpdateJobRunsScalar(armadacontext.Background(), append(defaultInstructionSet().JobRunsToUpdate, invalidRun)) run = getJobRun(t, ldb.db, runIdString) assert.Equal(t, expectedJobRunAfterUpdate, run) return nil @@ -595,28 +595,28 @@ func TestCreateUserAnnotationsBatch(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Need to make sure we have a job - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) // Insert - err = ldb.CreateUserAnnotationsBatch(ctx.Background(), defaultInstructionSet().UserAnnotationsToCreate) + err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) assert.Nil(t, err) annotation := getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // Insert again and test that it's idempotent - err = ldb.CreateUserAnnotationsBatch(ctx.Background(), defaultInstructionSet().UserAnnotationsToCreate) + err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) assert.Nil(t, err) annotation = getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // If a row is bad then we should return an error and no updates should happen - _, err = ldb.db.Exec(ctx.Background(), "DELETE FROM user_annotation_lookup") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM user_annotation_lookup") assert.NoError(t, err) invalidAnnotation := &model.CreateUserAnnotationInstruction{ JobId: invalidId, } - err = ldb.CreateUserAnnotationsBatch(ctx.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) + err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) assert.Error(t, err) assertNoRows(t, ldb.db, "user_annotation_lookup") return nil @@ -627,7 +627,7 @@ func TestCreateUserAnnotationsBatch(t *testing.T) { func 
TestStoreWithEmptyInstructionSet(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) - err := ldb.Store(ctx.Background(), &model.InstructionSet{ + err := ldb.Store(armadacontext.Background(), &model.InstructionSet{ MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }) assert.NoError(t, err) @@ -643,26 +643,26 @@ func TestCreateUserAnnotationsScalar(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Need to make sure we have a job - err := ldb.CreateJobsBatch(ctx.Background(), defaultInstructionSet().JobsToCreate) + err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) assert.Nil(t, err) // Insert - ldb.CreateUserAnnotationsScalar(ctx.Background(), defaultInstructionSet().UserAnnotationsToCreate) + ldb.CreateUserAnnotationsScalar(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) annotation := getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // Insert again and test that it's idempotent - ldb.CreateUserAnnotationsScalar(ctx.Background(), defaultInstructionSet().UserAnnotationsToCreate) + ldb.CreateUserAnnotationsScalar(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) annotation = getUserAnnotationLookup(t, db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) // If a row is bad then we should update the rows we can - _, err = ldb.db.Exec(ctx.Background(), "DELETE FROM user_annotation_lookup") + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM user_annotation_lookup") assert.NoError(t, err) invalidAnnotation := &model.CreateUserAnnotationInstruction{ JobId: invalidId, } - ldb.CreateUserAnnotationsScalar(ctx.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) + ldb.CreateUserAnnotationsScalar(armadacontext.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) annotation = getUserAnnotationLookup(t, ldb.db, jobIdString) assert.Equal(t, expectedUserAnnotation, annotation) return nil @@ -674,7 +674,7 @@ func TestStore(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, m, 2, 10) // Do the update - err := ldb.Store(ctx.Background(), defaultInstructionSet()) + err := ldb.Store(armadacontext.Background(), defaultInstructionSet()) assert.NoError(t, err) job := getJob(t, ldb.db, jobIdString) @@ -843,7 +843,7 @@ func TestStoreNullValue(t *testing.T) { ldb := NewLookoutDb(db, m, 2, 10) // Do the update - err := ldb.Store(ctx.Background(), instructions) + err := ldb.Store(armadacontext.Background(), instructions) assert.NoError(t, err) job := getJob(t, ldb.db, jobIdString) @@ -875,7 +875,7 @@ func TestStoreEventsForAlreadyTerminalJobs(t *testing.T) { } // Create the jobs in the DB - err := ldb.Store(ctx.Background(), baseInstructions) + err := ldb.Store(armadacontext.Background(), baseInstructions) assert.NoError(t, err) mutateInstructions := &model.InstructionSet{ @@ -895,7 +895,7 @@ func TestStoreEventsForAlreadyTerminalJobs(t *testing.T) { } // Update the jobs in the DB - err = ldb.Store(ctx.Background(), mutateInstructions) + err = ldb.Store(armadacontext.Background(), mutateInstructions) assert.NoError(t, err) for _, jobId := range []string{"job-1", "job-2", "job-3"} { @@ -941,7 +941,7 @@ func makeUpdateJobInstruction(jobId string, state int32) *model.UpdateJobInstruc func getJob(t 
*testing.T, db *pgxpool.Pool, jobId string) JobRow { job := JobRow{} r := db.QueryRow( - ctx.Background(), + armadacontext.Background(), `SELECT job_id, queue, @@ -992,7 +992,7 @@ func getJob(t *testing.T, db *pgxpool.Pool, jobId string) JobRow { func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { run := JobRunRow{} r := db.QueryRow( - ctx.Background(), + armadacontext.Background(), `SELECT run_id, job_id, @@ -1025,7 +1025,7 @@ func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { func getUserAnnotationLookup(t *testing.T, db *pgxpool.Pool, jobId string) UserAnnotationRow { annotation := UserAnnotationRow{} r := db.QueryRow( - ctx.Background(), + armadacontext.Background(), `SELECT job_id, key, value, queue, jobset FROM user_annotation_lookup WHERE job_id = $1`, jobId) err := r.Scan(&annotation.JobId, &annotation.Key, &annotation.Value, &annotation.Queue, &annotation.JobSet) @@ -1037,7 +1037,7 @@ func assertNoRows(t *testing.T, db *pgxpool.Pool, table string) { t.Helper() var count int query := fmt.Sprintf("SELECT COUNT(*) FROM %s", table) - r := db.QueryRow(ctx.Background(), query) + r := db.QueryRow(armadacontext.Background(), query) err := r.Scan(&count) assert.NoError(t, err) assert.Equal(t, 0, count) diff --git a/internal/lookoutv2/application.go b/internal/lookoutv2/application.go index ca6844f8b32..ec25e9335a9 100644 --- a/internal/lookoutv2/application.go +++ b/internal/lookoutv2/application.go @@ -3,10 +3,13 @@ package lookoutv2 import ( + "github.com/caarlos0/log" "github.com/go-openapi/loads" "github.com/go-openapi/runtime/middleware" - log "github.com/sirupsen/logrus" + "github.com/jessevdk/go-flags" + "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/util" @@ -38,6 +41,8 @@ func Serve(configuration configuration.LookoutV2Configuration) error { // create new service API api := operations.NewLookoutAPI(swaggerSpec) + logger := logrus.NewEntry(logrus.New()) + api.GetHealthHandler = operations.GetHealthHandlerFunc( func(params operations.GetHealthParams) middleware.Responder { return operations.NewGetHealthOK().WithPayload("Health check passed") @@ -53,7 +58,7 @@ func Serve(configuration configuration.LookoutV2Configuration) error { skip = int(*params.GetJobsRequest.Skip) } result, err := getJobsRepo.GetJobs( - params.HTTPRequest.Context(), + armadacontext.New(params.HTTPRequest.Context(), logger), filters, params.GetJobsRequest.ActiveJobSets, order, @@ -78,7 +83,7 @@ func Serve(configuration configuration.LookoutV2Configuration) error { skip = int(*params.GroupJobsRequest.Skip) } result, err := groupJobsRepo.GroupBy( - params.HTTPRequest.Context(), + armadacontext.New(params.HTTPRequest.Context(), logger), filters, params.GroupJobsRequest.ActiveJobSets, order, @@ -98,7 +103,8 @@ func Serve(configuration configuration.LookoutV2Configuration) error { api.GetJobRunErrorHandler = operations.GetJobRunErrorHandlerFunc( func(params operations.GetJobRunErrorParams) middleware.Responder { - result, err := getJobRunErrorRepo.GetJobRunError(params.HTTPRequest.Context(), params.GetJobRunErrorRequest.RunID) + ctx := armadacontext.New(params.HTTPRequest.Context(), logger) + result, err := getJobRunErrorRepo.GetJobRunError(ctx, params.GetJobRunErrorRequest.RunID) if err != nil { return 
operations.NewGetJobRunErrorBadRequest().WithPayload(conversions.ToSwaggerError(err.Error())) } @@ -110,7 +116,8 @@ func Serve(configuration configuration.LookoutV2Configuration) error { api.GetJobSpecHandler = operations.GetJobSpecHandlerFunc( func(params operations.GetJobSpecParams) middleware.Responder { - result, err := getJobSpecRepo.GetJobSpec(params.HTTPRequest.Context(), params.GetJobSpecRequest.JobID) + ctx := armadacontext.New(params.HTTPRequest.Context(), logger) + result, err := getJobSpecRepo.GetJobSpec(ctx, params.GetJobSpecRequest.JobID) if err != nil { return operations.NewGetJobSpecBadRequest().WithPayload(conversions.ToSwaggerError(err.Error())) } @@ -128,7 +135,15 @@ func Serve(configuration configuration.LookoutV2Configuration) error { } }() - server.Port = configuration.ApiPort + if configuration.Tls.Enabled { + server.EnabledListeners = []string{"https"} + server.TLSPort = configuration.ApiPort + server.TLSCertificate = flags.Filename(configuration.Tls.CertPath) + server.TLSCertificateKey = flags.Filename(configuration.Tls.KeyPath) + } else { + server.Port = configuration.ApiPort + } + restapi.SetCorsAllowedOrigins(configuration.CorsAllowedOrigins) // This needs to happen before ConfigureAPI server.ConfigureAPI() if err := server.Serve(); err != nil { diff --git a/internal/lookoutv2/configuration/types.go b/internal/lookoutv2/configuration/types.go index 18239942e82..a22d07c8f4f 100644 --- a/internal/lookoutv2/configuration/types.go +++ b/internal/lookoutv2/configuration/types.go @@ -9,12 +9,19 @@ import ( type LookoutV2Configuration struct { ApiPort int CorsAllowedOrigins []string + Tls TlsConfig Postgres configuration.PostgresConfig PrunerConfig PrunerConfig } +type TlsConfig struct { + Enabled bool + KeyPath string + CertPath string +} + type PrunerConfig struct { ExpireAfter time.Duration Timeout time.Duration diff --git a/internal/lookoutv2/gen/restapi/doc.go b/internal/lookoutv2/gen/restapi/doc.go index 23beb22a1a0..a8686cb04ea 100644 --- a/internal/lookoutv2/gen/restapi/doc.go +++ b/internal/lookoutv2/gen/restapi/doc.go @@ -2,18 +2,18 @@ // Package restapi Lookout v2 API // -// Schemes: -// http -// Host: localhost -// BasePath: / -// Version: 2.0.0 +// Schemes: +// http +// Host: localhost +// BasePath: / +// Version: 2.0.0 // -// Consumes: -// - application/json +// Consumes: +// - application/json // -// Produces: -// - application/json -// - text/plain +// Produces: +// - application/json +// - text/plain // // swagger:meta package restapi diff --git a/internal/lookoutv2/gen/restapi/operations/get_health.go b/internal/lookoutv2/gen/restapi/operations/get_health.go index 16cd6803823..d7c8a7dc5ad 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_health.go +++ b/internal/lookoutv2/gen/restapi/operations/get_health.go @@ -29,10 +29,10 @@ func NewGetHealth(ctx *middleware.Context, handler GetHealthHandler) *GetHealth return &GetHealth{Context: ctx, Handler: handler} } -/* GetHealth swagger:route GET /health getHealth +/* + GetHealth swagger:route GET /health getHealth GetHealth get health API - */ type GetHealth struct { Context *middleware.Context diff --git a/internal/lookoutv2/gen/restapi/operations/get_health_responses.go b/internal/lookoutv2/gen/restapi/operations/get_health_responses.go index 032b8c2cb0d..c54a26244c4 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_health_responses.go +++ b/internal/lookoutv2/gen/restapi/operations/get_health_responses.go @@ -14,7 +14,8 @@ import ( // GetHealthOKCode is the HTTP code returned for type 
GetHealthOK const GetHealthOKCode int = 200 -/*GetHealthOK OK +/* +GetHealthOK OK swagger:response getHealthOK */ @@ -56,7 +57,8 @@ func (o *GetHealthOK) WriteResponse(rw http.ResponseWriter, producer runtime.Pro // GetHealthBadRequestCode is the HTTP code returned for type GetHealthBadRequest const GetHealthBadRequestCode int = 400 -/*GetHealthBadRequest Error response +/* +GetHealthBadRequest Error response swagger:response getHealthBadRequest */ diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_run_error.go b/internal/lookoutv2/gen/restapi/operations/get_job_run_error.go index 537d2663379..f8add74ee45 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_job_run_error.go +++ b/internal/lookoutv2/gen/restapi/operations/get_job_run_error.go @@ -34,10 +34,10 @@ func NewGetJobRunError(ctx *middleware.Context, handler GetJobRunErrorHandler) * return &GetJobRunError{Context: ctx, Handler: handler} } -/* GetJobRunError swagger:route POST /api/v1/jobRunError getJobRunError +/* + GetJobRunError swagger:route POST /api/v1/jobRunError getJobRunError GetJobRunError get job run error API - */ type GetJobRunError struct { Context *middleware.Context diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_run_error_responses.go b/internal/lookoutv2/gen/restapi/operations/get_job_run_error_responses.go index ff1a82e47c2..e8a17e5b37d 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_job_run_error_responses.go +++ b/internal/lookoutv2/gen/restapi/operations/get_job_run_error_responses.go @@ -16,7 +16,8 @@ import ( // GetJobRunErrorOKCode is the HTTP code returned for type GetJobRunErrorOK const GetJobRunErrorOKCode int = 200 -/*GetJobRunErrorOK Returns error for specific job run (if present) +/* +GetJobRunErrorOK Returns error for specific job run (if present) swagger:response getJobRunErrorOK */ @@ -60,7 +61,8 @@ func (o *GetJobRunErrorOK) WriteResponse(rw http.ResponseWriter, producer runtim // GetJobRunErrorBadRequestCode is the HTTP code returned for type GetJobRunErrorBadRequest const GetJobRunErrorBadRequestCode int = 400 -/*GetJobRunErrorBadRequest Error response +/* +GetJobRunErrorBadRequest Error response swagger:response getJobRunErrorBadRequest */ @@ -101,7 +103,8 @@ func (o *GetJobRunErrorBadRequest) WriteResponse(rw http.ResponseWriter, produce } } -/*GetJobRunErrorDefault Error response +/* +GetJobRunErrorDefault Error response swagger:response getJobRunErrorDefault */ diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_spec.go b/internal/lookoutv2/gen/restapi/operations/get_job_spec.go index a0ee4726d38..74055af08f8 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_job_spec.go +++ b/internal/lookoutv2/gen/restapi/operations/get_job_spec.go @@ -34,10 +34,10 @@ func NewGetJobSpec(ctx *middleware.Context, handler GetJobSpecHandler) *GetJobSp return &GetJobSpec{Context: ctx, Handler: handler} } -/* GetJobSpec swagger:route POST /api/v1/jobSpec getJobSpec +/* + GetJobSpec swagger:route POST /api/v1/jobSpec getJobSpec GetJobSpec get job spec API - */ type GetJobSpec struct { Context *middleware.Context diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_spec_responses.go b/internal/lookoutv2/gen/restapi/operations/get_job_spec_responses.go index ccccd693330..8c4776d0f47 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_job_spec_responses.go +++ b/internal/lookoutv2/gen/restapi/operations/get_job_spec_responses.go @@ -16,7 +16,8 @@ import ( // GetJobSpecOKCode is the HTTP code returned for type GetJobSpecOK const 
GetJobSpecOKCode int = 200 -/*GetJobSpecOK Returns raw Job spec +/* +GetJobSpecOK Returns raw Job spec swagger:response getJobSpecOK */ @@ -60,7 +61,8 @@ func (o *GetJobSpecOK) WriteResponse(rw http.ResponseWriter, producer runtime.Pr // GetJobSpecBadRequestCode is the HTTP code returned for type GetJobSpecBadRequest const GetJobSpecBadRequestCode int = 400 -/*GetJobSpecBadRequest Error response +/* +GetJobSpecBadRequest Error response swagger:response getJobSpecBadRequest */ @@ -101,7 +103,8 @@ func (o *GetJobSpecBadRequest) WriteResponse(rw http.ResponseWriter, producer ru } } -/*GetJobSpecDefault Error response +/* +GetJobSpecDefault Error response swagger:response getJobSpecDefault */ diff --git a/internal/lookoutv2/gen/restapi/operations/get_jobs.go b/internal/lookoutv2/gen/restapi/operations/get_jobs.go index 76689ed77d0..b498f593901 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_jobs.go +++ b/internal/lookoutv2/gen/restapi/operations/get_jobs.go @@ -37,10 +37,10 @@ func NewGetJobs(ctx *middleware.Context, handler GetJobsHandler) *GetJobs { return &GetJobs{Context: ctx, Handler: handler} } -/* GetJobs swagger:route POST /api/v1/jobs getJobs +/* + GetJobs swagger:route POST /api/v1/jobs getJobs GetJobs get jobs API - */ type GetJobs struct { Context *middleware.Context diff --git a/internal/lookoutv2/gen/restapi/operations/get_jobs_responses.go b/internal/lookoutv2/gen/restapi/operations/get_jobs_responses.go index 2b1802191f6..5af80b4f316 100644 --- a/internal/lookoutv2/gen/restapi/operations/get_jobs_responses.go +++ b/internal/lookoutv2/gen/restapi/operations/get_jobs_responses.go @@ -16,7 +16,8 @@ import ( // GetJobsOKCode is the HTTP code returned for type GetJobsOK const GetJobsOKCode int = 200 -/*GetJobsOK Returns jobs from API +/* +GetJobsOK Returns jobs from API swagger:response getJobsOK */ @@ -60,7 +61,8 @@ func (o *GetJobsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Produ // GetJobsBadRequestCode is the HTTP code returned for type GetJobsBadRequest const GetJobsBadRequestCode int = 400 -/*GetJobsBadRequest Error response +/* +GetJobsBadRequest Error response swagger:response getJobsBadRequest */ @@ -101,7 +103,8 @@ func (o *GetJobsBadRequest) WriteResponse(rw http.ResponseWriter, producer runti } } -/*GetJobsDefault Error response +/* +GetJobsDefault Error response swagger:response getJobsDefault */ diff --git a/internal/lookoutv2/gen/restapi/operations/group_jobs.go b/internal/lookoutv2/gen/restapi/operations/group_jobs.go index 4225045294b..208d7856c68 100644 --- a/internal/lookoutv2/gen/restapi/operations/group_jobs.go +++ b/internal/lookoutv2/gen/restapi/operations/group_jobs.go @@ -37,10 +37,10 @@ func NewGroupJobs(ctx *middleware.Context, handler GroupJobsHandler) *GroupJobs return &GroupJobs{Context: ctx, Handler: handler} } -/* GroupJobs swagger:route POST /api/v1/jobGroups groupJobs +/* + GroupJobs swagger:route POST /api/v1/jobGroups groupJobs GroupJobs group jobs API - */ type GroupJobs struct { Context *middleware.Context diff --git a/internal/lookoutv2/gen/restapi/operations/group_jobs_responses.go b/internal/lookoutv2/gen/restapi/operations/group_jobs_responses.go index ff442c870bc..b34b787fbbf 100644 --- a/internal/lookoutv2/gen/restapi/operations/group_jobs_responses.go +++ b/internal/lookoutv2/gen/restapi/operations/group_jobs_responses.go @@ -16,7 +16,8 @@ import ( // GroupJobsOKCode is the HTTP code returned for type GroupJobsOK const GroupJobsOKCode int = 200 -/*GroupJobsOK Returns job groups from API +/* +GroupJobsOK Returns 
job groups from API swagger:response groupJobsOK */ @@ -60,7 +61,8 @@ func (o *GroupJobsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Pro // GroupJobsBadRequestCode is the HTTP code returned for type GroupJobsBadRequest const GroupJobsBadRequestCode int = 400 -/*GroupJobsBadRequest Error response +/* +GroupJobsBadRequest Error response swagger:response groupJobsBadRequest */ @@ -101,7 +103,8 @@ func (o *GroupJobsBadRequest) WriteResponse(rw http.ResponseWriter, producer run } } -/*GroupJobsDefault Error response +/* +GroupJobsDefault Error response swagger:response groupJobsDefault */ diff --git a/internal/lookoutv2/pruner/pruner.go b/internal/lookoutv2/pruner/pruner.go index 18ee81c8da1..946917fe30a 100644 --- a/internal/lookoutv2/pruner/pruner.go +++ b/internal/lookoutv2/pruner/pruner.go @@ -1,16 +1,17 @@ package pruner import ( - "context" "time" "github.com/jackc/pgx/v5" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/clock" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) -func PruneDb(ctx context.Context, db *pgx.Conn, keepAfterCompletion time.Duration, batchLimit int, clock clock.Clock) error { +func PruneDb(ctx *armadacontext.Context, db *pgx.Conn, keepAfterCompletion time.Duration, batchLimit int, clock clock.Clock) error { now := clock.Now() cutOffTime := now.Add(-keepAfterCompletion) totalJobsToDelete, err := createJobIdsToDeleteTempTable(ctx, db, cutOffTime) @@ -60,10 +61,10 @@ func PruneDb(ctx context.Context, db *pgx.Conn, keepAfterCompletion time.Duratio } // Returns total number of jobs to delete -func createJobIdsToDeleteTempTable(ctx context.Context, db *pgx.Conn, cutOffTime time.Time) (int, error) { +func createJobIdsToDeleteTempTable(ctx *armadacontext.Context, db *pgx.Conn, cutOffTime time.Time) (int, error) { _, err := db.Exec(ctx, ` CREATE TEMP TABLE job_ids_to_delete AS ( - SELECT job_id FROM job + SELECT job_id FROM job WHERE last_transition_time < $1 )`, cutOffTime) if err != nil { @@ -77,7 +78,7 @@ func createJobIdsToDeleteTempTable(ctx context.Context, db *pgx.Conn, cutOffTime return totalJobsToDelete, nil } -func deleteBatch(ctx context.Context, tx pgx.Tx, batchLimit int) (int, error) { +func deleteBatch(ctx *armadacontext.Context, tx pgx.Tx, batchLimit int) (int, error) { _, err := tx.Exec(ctx, "INSERT INTO batch (job_id) SELECT job_id FROM job_ids_to_delete LIMIT $1;", batchLimit) if err != nil { return -1, err diff --git a/internal/lookoutv2/pruner/pruner_test.go b/internal/lookoutv2/pruner/pruner_test.go index a88274c316a..3df18c0cf05 100644 --- a/internal/lookoutv2/pruner/pruner_test.go +++ b/internal/lookoutv2/pruner/pruner_test.go @@ -1,7 +1,6 @@ package pruner import ( - "context" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/util" @@ -111,7 +111,7 @@ func TestPruneDb(t *testing.T) { converter := instructions.NewInstructionConverter(metrics.Get(), "armadaproject.io/", &compress.NoOpCompressor{}, true) store := lookoutdb.NewLookoutDb(db, metrics.Get(), 3, 10) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Minute) defer cancel() for _, tj := range tc.jobs { runId := 
uuid.NewString() @@ -156,7 +156,7 @@ func TestPruneDb(t *testing.T) { func selectStringSet(t *testing.T, db *pgxpool.Pool, query string) map[string]bool { t.Helper() - rows, err := db.Query(context.TODO(), query) + rows, err := db.Query(armadacontext.TODO(), query) assert.NoError(t, err) var ss []string for rows.Next() { diff --git a/internal/lookoutv2/repository/getjobrunerror.go b/internal/lookoutv2/repository/getjobrunerror.go index 467da22ec1a..b878c9291fb 100644 --- a/internal/lookoutv2/repository/getjobrunerror.go +++ b/internal/lookoutv2/repository/getjobrunerror.go @@ -1,18 +1,17 @@ package repository import ( - "context" - "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" ) type GetJobRunErrorRepository interface { - GetJobRunError(ctx context.Context, runId string) (string, error) + GetJobRunError(ctx *armadacontext.Context, runId string) (string, error) } type SqlGetJobRunErrorRepository struct { @@ -27,7 +26,7 @@ func NewSqlGetJobRunErrorRepository(db *pgxpool.Pool, decompressor compress.Deco } } -func (r *SqlGetJobRunErrorRepository) GetJobRunError(ctx context.Context, runId string) (string, error) { +func (r *SqlGetJobRunErrorRepository) GetJobRunError(ctx *armadacontext.Context, runId string) (string, error) { var rawBytes []byte err := r.db.QueryRow(ctx, "SELECT error FROM job_run WHERE run_id = $1 AND error IS NOT NULL", runId).Scan(&rawBytes) if err != nil { diff --git a/internal/lookoutv2/repository/getjobrunerror_test.go b/internal/lookoutv2/repository/getjobrunerror_test.go index 274de5e6d40..4bf2854929d 100644 --- a/internal/lookoutv2/repository/getjobrunerror_test.go +++ b/internal/lookoutv2/repository/getjobrunerror_test.go @@ -1,12 +1,12 @@ package repository import ( - "context" "testing" "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/lookoutingesterv2/instructions" @@ -34,7 +34,7 @@ func TestGetJobRunError(t *testing.T) { ApiJob() repo := NewSqlGetJobRunErrorRepository(db, &compress.NoOpDecompressor{}) - result, err := repo.GetJobRunError(context.TODO(), runId) + result, err := repo.GetJobRunError(armadacontext.TODO(), runId) assert.NoError(t, err) assert.Equal(t, expected, result) } @@ -46,7 +46,7 @@ func TestGetJobRunError(t *testing.T) { func TestGetJobRunErrorNotFound(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { repo := NewSqlGetJobRunErrorRepository(db, &compress.NoOpDecompressor{}) - _, err := repo.GetJobRunError(context.TODO(), runId) + _, err := repo.GetJobRunError(armadacontext.TODO(), runId) assert.Error(t, err) return nil }) diff --git a/internal/lookoutv2/repository/getjobs.go b/internal/lookoutv2/repository/getjobs.go index eac6cc0aaf5..cce2550d2b1 100644 --- a/internal/lookoutv2/repository/getjobs.go +++ b/internal/lookoutv2/repository/getjobs.go @@ -1,7 +1,6 @@ package repository import ( - "context" "database/sql" "fmt" "sort" @@ -12,13 +11,14 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" 
"github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/lookoutv2/model" ) type GetJobsRepository interface { - GetJobs(ctx context.Context, filters []*model.Filter, order *model.Order, skip int, take int) (*GetJobsResult, error) + GetJobs(ctx *armadacontext.Context, filters []*model.Filter, order *model.Order, skip int, take int) (*GetJobsResult, error) } type SqlGetJobsRepository struct { @@ -77,7 +77,7 @@ func NewSqlGetJobsRepository(db *pgxpool.Pool) *SqlGetJobsRepository { } } -func (r *SqlGetJobsRepository) GetJobs(ctx context.Context, filters []*model.Filter, activeJobSets bool, order *model.Order, skip int, take int) (*GetJobsResult, error) { +func (r *SqlGetJobsRepository) GetJobs(ctx *armadacontext.Context, filters []*model.Filter, activeJobSets bool, order *model.Order, skip int, take int) (*GetJobsResult, error) { var jobRows []*jobRow var runRows []*runRow var annotationRows []*annotationRow @@ -243,7 +243,7 @@ func getJobRunTime(run *model.Run) (time.Time, error) { return time.Time{}, errors.Errorf("error when getting run time for run with id %s", run.RunId) } -func makeJobRows(ctx context.Context, tx pgx.Tx, tmpTableName string) ([]*jobRow, error) { +func makeJobRows(ctx *armadacontext.Context, tx pgx.Tx, tmpTableName string) ([]*jobRow, error) { query := fmt.Sprintf(` SELECT j.job_id, @@ -302,7 +302,7 @@ func makeJobRows(ctx context.Context, tx pgx.Tx, tmpTableName string) ([]*jobRow return rows, nil } -func makeRunRows(ctx context.Context, tx pgx.Tx, tmpTableName string) ([]*runRow, error) { +func makeRunRows(ctx *armadacontext.Context, tx pgx.Tx, tmpTableName string) ([]*runRow, error) { query := fmt.Sprintf(` SELECT jr.job_id, @@ -347,7 +347,7 @@ func makeRunRows(ctx context.Context, tx pgx.Tx, tmpTableName string) ([]*runRow return rows, nil } -func makeAnnotationRows(ctx context.Context, tx pgx.Tx, tempTableName string) ([]*annotationRow, error) { +func makeAnnotationRows(ctx *armadacontext.Context, tx pgx.Tx, tempTableName string) ([]*annotationRow, error) { query := fmt.Sprintf(` SELECT ual.job_id, diff --git a/internal/lookoutv2/repository/getjobs_test.go b/internal/lookoutv2/repository/getjobs_test.go index 3c28805c198..d5b45a5cae7 100644 --- a/internal/lookoutv2/repository/getjobs_test.go +++ b/internal/lookoutv2/repository/getjobs_test.go @@ -1,7 +1,6 @@ package repository import ( - "context" "fmt" "testing" "time" @@ -11,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/resource" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/util" @@ -77,7 +77,7 @@ func TestGetJobsSingle(t *testing.T) { Job() repo := NewSqlGetJobsRepository(db) - result, err := repo.GetJobs(context.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 1) + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 1) assert.NoError(t, err) assert.Len(t, result.Jobs, 1) assert.Equal(t, 1, result.Count) @@ -105,7 +105,7 @@ func TestGetJobsMultipleRuns(t *testing.T) { // Runs should be sorted from oldest -> newest repo := NewSqlGetJobsRepository(db) - result, err := repo.GetJobs(context.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 1) + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 1) assert.NoError(t, err) assert.Len(t, 
result.Jobs, 1) assert.Equal(t, 1, result.Count) @@ -119,7 +119,7 @@ func TestOrderByUnsupportedField(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { repo := NewSqlGetJobsRepository(db) _, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -140,7 +140,7 @@ func TestOrderByUnsupportedDirection(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { repo := NewSqlGetJobsRepository(db) _, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -192,7 +192,7 @@ func TestGetJobsOrderByJobId(t *testing.T) { t.Run("ascending order", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -212,7 +212,7 @@ func TestGetJobsOrderByJobId(t *testing.T) { t.Run("descending order", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -259,7 +259,7 @@ func TestGetJobsOrderBySubmissionTime(t *testing.T) { t.Run("ascending order", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -279,7 +279,7 @@ func TestGetJobsOrderBySubmissionTime(t *testing.T) { t.Run("descending order", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -330,7 +330,7 @@ func TestGetJobsOrderByLastTransitionTime(t *testing.T) { t.Run("ascending order", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -350,7 +350,7 @@ func TestGetJobsOrderByLastTransitionTime(t *testing.T) { t.Run("descending order", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -377,7 +377,7 @@ func TestFilterByUnsupportedField(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { repo := NewSqlGetJobsRepository(db) _, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "someField", Match: model.MatchExact, @@ -400,7 +400,7 @@ func TestFilterByUnsupportedMatch(t *testing.T) { repo := NewSqlGetJobsRepository(db) _, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "jobId", Match: model.MatchLessThan, @@ -443,7 +443,7 @@ func TestGetJobsById(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "jobId", Match: model.MatchExact, @@ -499,7 +499,7 @@ func TestGetJobsByQueue(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "queue", Match: model.MatchExact, @@ -518,7 +518,7 @@ func TestGetJobsByQueue(t *testing.T) { t.Run("startsWith", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "queue", Match: model.MatchStartsWith, @@ -542,7 +542,7 @@ func TestGetJobsByQueue(t *testing.T) { t.Run("contains", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "queue", Match: model.MatchContains, @@ -604,7 +604,7 @@ func TestGetJobsByJobSet(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - 
context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "jobSet", Match: model.MatchExact, @@ -623,7 +623,7 @@ func TestGetJobsByJobSet(t *testing.T) { t.Run("startsWith", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "jobSet", Match: model.MatchStartsWith, @@ -647,7 +647,7 @@ func TestGetJobsByJobSet(t *testing.T) { t.Run("contains", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "jobSet", Match: model.MatchContains, @@ -709,7 +709,7 @@ func TestGetJobsByOwner(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "owner", Match: model.MatchExact, @@ -728,7 +728,7 @@ func TestGetJobsByOwner(t *testing.T) { t.Run("startsWith", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "owner", Match: model.MatchStartsWith, @@ -752,7 +752,7 @@ func TestGetJobsByOwner(t *testing.T) { t.Run("contains", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "owner", Match: model.MatchContains, @@ -817,7 +817,7 @@ func TestGetJobsByState(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "state", Match: model.MatchExact, @@ -836,7 +836,7 @@ func TestGetJobsByState(t *testing.T) { t.Run("anyOf", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "state", Match: model.MatchAnyOf, @@ -923,7 +923,7 @@ func TestGetJobsByAnnotation(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "annotation-key-1", Match: model.MatchExact, @@ -943,7 +943,7 @@ func TestGetJobsByAnnotation(t *testing.T) { t.Run("exact, multiple annotations", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "annotation-key-1", @@ -971,7 +971,7 @@ func TestGetJobsByAnnotation(t *testing.T) { t.Run("startsWith, multiple annotations", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "annotation-key-1", @@ -1000,7 +1000,7 @@ func TestGetJobsByAnnotation(t *testing.T) { t.Run("contains, multiple annotations", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "annotation-key-1", @@ -1029,7 +1029,7 @@ func TestGetJobsByAnnotation(t *testing.T) { t.Run("exists", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "annotation-key-1", @@ -1093,7 +1093,7 @@ func TestGetJobsByCpu(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "cpu", Match: model.MatchExact, @@ -1112,7 +1112,7 @@ func TestGetJobsByCpu(t *testing.T) { t.Run("greaterThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "cpu", Match: model.MatchGreaterThan, @@ -1135,7 +1135,7 @@ func TestGetJobsByCpu(t *testing.T) { t.Run("lessThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), 
[]*model.Filter{{ Field: "cpu", Match: model.MatchLessThan, @@ -1158,7 +1158,7 @@ func TestGetJobsByCpu(t *testing.T) { t.Run("greaterThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "cpu", Match: model.MatchGreaterThanOrEqualTo, @@ -1182,7 +1182,7 @@ func TestGetJobsByCpu(t *testing.T) { t.Run("lessThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "cpu", Match: model.MatchLessThanOrEqualTo, @@ -1246,7 +1246,7 @@ func TestGetJobsByMemory(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "memory", Match: model.MatchExact, @@ -1265,7 +1265,7 @@ func TestGetJobsByMemory(t *testing.T) { t.Run("greaterThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "memory", Match: model.MatchGreaterThan, @@ -1288,7 +1288,7 @@ func TestGetJobsByMemory(t *testing.T) { t.Run("lessThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "memory", Match: model.MatchLessThan, @@ -1311,7 +1311,7 @@ func TestGetJobsByMemory(t *testing.T) { t.Run("greaterThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "memory", Match: model.MatchGreaterThanOrEqualTo, @@ -1335,7 +1335,7 @@ func TestGetJobsByMemory(t *testing.T) { t.Run("lessThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "memory", Match: model.MatchLessThanOrEqualTo, @@ -1399,7 +1399,7 @@ func TestGetJobsByEphemeralStorage(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "ephemeralStorage", Match: model.MatchExact, @@ -1418,7 +1418,7 @@ func TestGetJobsByEphemeralStorage(t *testing.T) { t.Run("greaterThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "ephemeralStorage", Match: model.MatchGreaterThan, @@ -1441,7 +1441,7 @@ func TestGetJobsByEphemeralStorage(t *testing.T) { t.Run("lessThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "ephemeralStorage", Match: model.MatchLessThan, @@ -1464,7 +1464,7 @@ func TestGetJobsByEphemeralStorage(t *testing.T) { t.Run("greaterThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "ephemeralStorage", Match: model.MatchGreaterThanOrEqualTo, @@ -1488,7 +1488,7 @@ func TestGetJobsByEphemeralStorage(t *testing.T) { t.Run("lessThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "ephemeralStorage", Match: model.MatchLessThanOrEqualTo, @@ -1552,7 +1552,7 @@ func TestGetJobsByGpu(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "gpu", Match: model.MatchExact, @@ -1571,7 +1571,7 @@ func TestGetJobsByGpu(t *testing.T) { t.Run("greaterThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "gpu", Match: 
model.MatchGreaterThan, @@ -1594,7 +1594,7 @@ func TestGetJobsByGpu(t *testing.T) { t.Run("lessThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "gpu", Match: model.MatchLessThan, @@ -1617,7 +1617,7 @@ func TestGetJobsByGpu(t *testing.T) { t.Run("greaterThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "gpu", Match: model.MatchGreaterThanOrEqualTo, @@ -1641,7 +1641,7 @@ func TestGetJobsByGpu(t *testing.T) { t.Run("lessThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "gpu", Match: model.MatchLessThanOrEqualTo, @@ -1705,7 +1705,7 @@ func TestGetJobsByPriority(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priority", Match: model.MatchExact, @@ -1724,7 +1724,7 @@ func TestGetJobsByPriority(t *testing.T) { t.Run("greaterThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priority", Match: model.MatchGreaterThan, @@ -1747,7 +1747,7 @@ func TestGetJobsByPriority(t *testing.T) { t.Run("lessThan", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priority", Match: model.MatchLessThan, @@ -1770,7 +1770,7 @@ func TestGetJobsByPriority(t *testing.T) { t.Run("greaterThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priority", Match: model.MatchGreaterThanOrEqualTo, @@ -1794,7 +1794,7 @@ func TestGetJobsByPriority(t *testing.T) { t.Run("lessThanOrEqualTo", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priority", Match: model.MatchLessThanOrEqualTo, @@ -1865,7 +1865,7 @@ func TestGetJobsByPriorityClass(t *testing.T) { t.Run("exact", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priorityClass", Match: model.MatchExact, @@ -1884,7 +1884,7 @@ func TestGetJobsByPriorityClass(t *testing.T) { t.Run("startsWith", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priorityClass", Match: model.MatchStartsWith, @@ -1908,7 +1908,7 @@ func TestGetJobsByPriorityClass(t *testing.T) { t.Run("contains", func(t *testing.T) { result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{{ Field: "priorityClass", Match: model.MatchContains, @@ -1957,7 +1957,7 @@ func TestGetJobsSkip(t *testing.T) { skip := 3 take := 5 result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1977,7 +1977,7 @@ func TestGetJobsSkip(t *testing.T) { skip := 7 take := 5 result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1997,7 +1997,7 @@ func TestGetJobsSkip(t *testing.T) { skip := 13 take := 5 result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -2057,7 +2057,7 @@ func TestGetJobsComplex(t *testing.T) { skip := 8 take := 5 result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "queue", @@ -2121,7 +2121,7 @@ func 
TestGetJobsActiveJobSet(t *testing.T) { repo := NewSqlGetJobsRepository(db) result, err := repo.GetJobs( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, true, &model.Order{ diff --git a/internal/lookoutv2/repository/getjobspec.go b/internal/lookoutv2/repository/getjobspec.go index 60c6ac41cd1..55799249f35 100644 --- a/internal/lookoutv2/repository/getjobspec.go +++ b/internal/lookoutv2/repository/getjobspec.go @@ -1,20 +1,19 @@ package repository import ( - "context" - "github.com/gogo/protobuf/proto" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/pkg/api" ) type GetJobSpecRepository interface { - GetJobSpec(ctx context.Context, jobId string) (*api.Job, error) + GetJobSpec(ctx *armadacontext.Context, jobId string) (*api.Job, error) } type SqlGetJobSpecRepository struct { @@ -29,7 +28,7 @@ func NewSqlGetJobSpecRepository(db *pgxpool.Pool, decompressor compress.Decompre } } -func (r *SqlGetJobSpecRepository) GetJobSpec(ctx context.Context, jobId string) (*api.Job, error) { +func (r *SqlGetJobSpecRepository) GetJobSpec(ctx *armadacontext.Context, jobId string) (*api.Job, error) { var rawBytes []byte err := r.db.QueryRow(ctx, "SELECT job_spec FROM job WHERE job_id = $1", jobId).Scan(&rawBytes) if err != nil { diff --git a/internal/lookoutv2/repository/getjobspec_test.go b/internal/lookoutv2/repository/getjobspec_test.go index d7e00d83671..b13a897e8c4 100644 --- a/internal/lookoutv2/repository/getjobspec_test.go +++ b/internal/lookoutv2/repository/getjobspec_test.go @@ -1,12 +1,12 @@ package repository import ( - "context" "testing" "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/lookoutingesterv2/instructions" @@ -42,7 +42,7 @@ func TestGetJobSpec(t *testing.T) { ApiJob() repo := NewSqlGetJobSpecRepository(db, &compress.NoOpDecompressor{}) - result, err := repo.GetJobSpec(context.TODO(), jobId) + result, err := repo.GetJobSpec(armadacontext.TODO(), jobId) assert.NoError(t, err) assertApiJobsEquivalent(t, job, result) return nil @@ -53,7 +53,7 @@ func TestGetJobSpec(t *testing.T) { func TestGetJobSpecError(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { repo := NewSqlGetJobSpecRepository(db, &compress.NoOpDecompressor{}) - _, err := repo.GetJobSpec(context.TODO(), jobId) + _, err := repo.GetJobSpec(armadacontext.TODO(), jobId) assert.Error(t, err) return nil }) diff --git a/internal/lookoutv2/repository/groupjobs.go b/internal/lookoutv2/repository/groupjobs.go index dd80976dcd6..20dcb5adb0a 100644 --- a/internal/lookoutv2/repository/groupjobs.go +++ b/internal/lookoutv2/repository/groupjobs.go @@ -1,7 +1,6 @@ package repository import ( - "context" "fmt" "strings" @@ -9,6 +8,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutv2/model" @@ -22,7 +22,7 @@ type GroupByResult struct { type GroupJobsRepository interface { GroupBy( - ctx 
context.Context, + ctx *armadacontext.Context, filters []*model.Filter, order *model.Order, groupedField string, @@ -47,7 +47,7 @@ func NewSqlGroupJobsRepository(db *pgxpool.Pool) *SqlGroupJobsRepository { } func (r *SqlGroupJobsRepository) GroupBy( - ctx context.Context, + ctx *armadacontext.Context, filters []*model.Filter, activeJobSets bool, order *model.Order, diff --git a/internal/lookoutv2/repository/groupjobs_test.go b/internal/lookoutv2/repository/groupjobs_test.go index 1f255029f8c..b2bd04d5d03 100644 --- a/internal/lookoutv2/repository/groupjobs_test.go +++ b/internal/lookoutv2/repository/groupjobs_test.go @@ -1,7 +1,6 @@ package repository import ( - "context" "fmt" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/pointer" @@ -39,7 +39,7 @@ func TestGroupByQueue(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -98,7 +98,7 @@ func TestGroupByJobSet(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -165,7 +165,7 @@ func TestGroupByState(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -330,7 +330,7 @@ func TestGroupByWithFilters(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "queue", @@ -452,7 +452,7 @@ func TestGroupJobsWithMaxSubmittedTime(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -552,7 +552,7 @@ func TestGroupJobsWithAvgLastTransitionTime(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -652,7 +652,7 @@ func TestGroupJobsWithAllStateCounts(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -774,7 +774,7 @@ func TestGroupJobsWithFilteredStateCounts(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: stateField, @@ -898,7 +898,7 @@ func TestGroupJobsComplex(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "queue", @@ -997,7 +997,7 @@ func TestGroupByAnnotation(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1112,7 +1112,7 @@ func TestGroupByAnnotationWithFiltersAndAggregates(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{ { Field: "queue", @@ -1212,7 +1212,7 @@ func TestGroupJobsSkip(t *testing.T) { skip := 3 take := 5 result, err := repo.GroupBy( - 
context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1242,7 +1242,7 @@ func TestGroupJobsSkip(t *testing.T) { skip := 7 take := 5 result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1272,7 +1272,7 @@ func TestGroupJobsSkip(t *testing.T) { skip := 13 take := 5 result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1306,7 +1306,7 @@ func TestGroupJobsValidation(t *testing.T) { t.Run("valid field", func(t *testing.T) { _, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1325,7 +1325,7 @@ func TestGroupJobsValidation(t *testing.T) { t.Run("invalid field", func(t *testing.T) { _, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1344,7 +1344,7 @@ func TestGroupJobsValidation(t *testing.T) { t.Run("valid annotation", func(t *testing.T) { _, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1364,7 +1364,7 @@ func TestGroupJobsValidation(t *testing.T) { t.Run("valid annotation with same name as column", func(t *testing.T) { _, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, false, &model.Order{ @@ -1427,7 +1427,7 @@ func TestGroupByActiveJobSets(t *testing.T) { repo := NewSqlGroupJobsRepository(db) result, err := repo.GroupBy( - context.TODO(), + armadacontext.TODO(), []*model.Filter{}, true, &model.Order{ diff --git a/internal/lookoutv2/repository/util.go b/internal/lookoutv2/repository/util.go index d250f3844dc..62143df1f37 100644 --- a/internal/lookoutv2/repository/util.go +++ b/internal/lookoutv2/repository/util.go @@ -1,7 +1,6 @@ package repository import ( - "context" "fmt" "strings" "time" @@ -13,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/ingest" @@ -586,8 +586,8 @@ func (js *JobSimulator) Build() *JobSimulator { EventSequences: []*armadaevents.EventSequence{eventSequence}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, } - instructionSet := js.converter.Convert(context.TODO(), eventSequenceWithIds) - err := js.store.Store(context.TODO(), instructionSet) + instructionSet := js.converter.Convert(armadacontext.TODO(), eventSequenceWithIds) + err := js.store.Store(armadacontext.TODO(), instructionSet) if err != nil { log.WithError(err).Error("Simulator failed to store job in database") } diff --git a/internal/pulsartest/watch.go b/internal/pulsartest/watch.go index cbe6e5834fa..210916cf7e1 100644 --- a/internal/pulsartest/watch.go +++ b/internal/pulsartest/watch.go @@ -1,13 +1,13 @@ package pulsartest import ( - "context" "fmt" "log" "os" "github.com/sanity-io/litter" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/pulsarutils" ) @@ -17,12 +17,12 @@ func (a *App) Watch() error { defer a.Reader.Close() for a.Reader.HasNext() { - msg, err := a.Reader.Next(context.Background()) + msg, err := a.Reader.Next(armadacontext.Background()) if err != nil { log.Fatal(err) } - ctx := context.Background() + ctx := armadacontext.Background() 
msgId := pulsarutils.New(msg.ID().LedgerID(), msg.ID().EntryID(), msg.ID().PartitionIdx(), msg.ID().BatchIdx()) diff --git a/internal/scheduler/api.go b/internal/scheduler/api.go index 2e6f2779504..533abc4b728 100644 --- a/internal/scheduler/api.go +++ b/internal/scheduler/api.go @@ -8,10 +8,10 @@ import ( "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" "github.com/google/uuid" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/pulsarutils" @@ -81,9 +81,7 @@ func (srv *ExecutorApi) LeaseJobRuns(stream executorapi.ExecutorApi_LeaseJobRuns return errors.WithStack(err) } - ctx := stream.Context() - log := ctxlogrus.Extract(ctx) - log = log.WithField("executor", req.ExecutorId) + ctx := armadacontext.WithLogField(armadacontext.FromGrpcCtx(stream.Context()), "executor", req.ExecutorId) executor := srv.executorFromLeaseRequest(ctx, req) if err := srv.executorRepository.StoreExecutor(ctx, executor); err != nil { @@ -105,7 +103,7 @@ func (srv *ExecutorApi) LeaseJobRuns(stream executorapi.ExecutorApi_LeaseJobRuns if err != nil { return err } - log.Infof( + ctx.Infof( "executor currently has %d job runs; sending %d cancellations and %d new runs", len(requestRuns), len(runsToCancel), len(newRuns), ) @@ -216,19 +214,19 @@ func setPriorityClassName(podSpec *armadaevents.PodSpecWithAvoidList, priorityCl } // ReportEvents publishes all events to Pulsar. The events are compacted for more efficient publishing. -func (srv *ExecutorApi) ReportEvents(ctx context.Context, list *executorapi.EventList) (*types.Empty, error) { +func (srv *ExecutorApi) ReportEvents(grpcCtx context.Context, list *executorapi.EventList) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) err := pulsarutils.CompactAndPublishSequences(ctx, list.Events, srv.producer, srv.maxPulsarMessageSizeBytes, schedulers.Pulsar) return &types.Empty{}, err } // executorFromLeaseRequest extracts a schedulerobjects.Executor from the request. 
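+// Nodes that fail conversion are skipped; a warning with a stack trace is logged for each.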
-func (srv *ExecutorApi) executorFromLeaseRequest(ctx context.Context, req *executorapi.LeaseRequest) *schedulerobjects.Executor { - log := ctxlogrus.Extract(ctx) +func (srv *ExecutorApi) executorFromLeaseRequest(ctx *armadacontext.Context, req *executorapi.LeaseRequest) *schedulerobjects.Executor { nodes := make([]*schedulerobjects.Node, 0, len(req.Nodes)) now := srv.clock.Now().UTC() for _, nodeInfo := range req.Nodes { if node, err := api.NewNodeFromNodeInfo(nodeInfo, req.ExecutorId, srv.allowedPriorities, now); err != nil { - logging.WithStacktrace(log, err).Warnf( + logging.WithStacktrace(ctx, err).Warnf( "skipping node %s from executor %s", nodeInfo.GetName(), req.GetExecutorId(), ) } else { diff --git a/internal/scheduler/api_test.go b/internal/scheduler/api_test.go index 77a3c52f7a9..f388a5129d4 100644 --- a/internal/scheduler/api_test.go +++ b/internal/scheduler/api_test.go @@ -1,6 +1,7 @@ package scheduler import ( + "context" "testing" "time" @@ -10,10 +11,10 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/net/context" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/mocks" "github.com/armadaproject/armada/internal/common/pulsarutils" @@ -165,7 +166,7 @@ func TestExecutorApi_LeaseJobRuns(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) ctrl := gomock.NewController(t) mockPulsarProducer := mocks.NewMockProducer(ctrl) mockJobRepository := schedulermocks.NewMockJobRepository(ctrl) @@ -179,11 +180,11 @@ func TestExecutorApi_LeaseJobRuns(t *testing.T) { // set up mocks mockStream.EXPECT().Context().Return(ctx).AnyTimes() mockStream.EXPECT().Recv().Return(tc.request, nil).Times(1) - mockExecutorRepository.EXPECT().StoreExecutor(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, executor *schedulerobjects.Executor) error { + mockExecutorRepository.EXPECT().StoreExecutor(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error { assert.Equal(t, tc.expectedExecutor, executor) return nil }).Times(1) - mockLegacyExecutorRepository.EXPECT().StoreExecutor(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, executor *schedulerobjects.Executor) error { + mockLegacyExecutorRepository.EXPECT().StoreExecutor(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error { assert.Equal(t, tc.expectedExecutor, executor) return nil }).Times(1) @@ -304,7 +305,7 @@ func TestExecutorApi_Publish(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) ctrl := gomock.NewController(t) mockPulsarProducer := mocks.NewMockProducer(ctrl) mockJobRepository := schedulermocks.NewMockJobRepository(ctrl) diff --git a/internal/scheduler/common.go b/internal/scheduler/common.go index 4daceef1b77..55d22c3bac1 100644 --- a/internal/scheduler/common.go +++ b/internal/scheduler/common.go @@ -18,10 +18,15 @@ import ( // SchedulerResult is returned by 
Rescheduler.Schedule(). type SchedulerResult struct { + // Whether the scheduler failed to create a result for some reason + EmptyResult bool // Running jobs that should be preempted. PreemptedJobs []interfaces.LegacySchedulerJob // Queued jobs that should be scheduled. ScheduledJobs []interfaces.LegacySchedulerJob + // Queued jobs that could not be scheduled. + // This is used to fail jobs that could not be scheduled above `minimumGangCardinality`. + FailedJobs []interfaces.LegacySchedulerJob // For each preempted job, maps the job id to the id of the node on which the job was running. // For each scheduled job, maps the job id to the id of the node on which the job should be scheduled. NodeIdByJobId map[string]string @@ -30,9 +35,10 @@ type SchedulerResult struct { SchedulingContexts []*schedulercontext.SchedulingContext } -func NewSchedulerResult[S ~[]T, T interfaces.LegacySchedulerJob]( +func NewSchedulerResultForTest[S ~[]T, T interfaces.LegacySchedulerJob]( preemptedJobs S, scheduledJobs S, + failedJobs S, nodeIdByJobId map[string]string, ) *SchedulerResult { castPreemptedJobs := make([]interfaces.LegacySchedulerJob, len(preemptedJobs)) @@ -43,10 +49,15 @@ func NewSchedulerResult[S ~[]T, T interfaces.LegacySchedulerJob]( for i, job := range scheduledJobs { castScheduledJobs[i] = job } + castFailedJobs := make([]interfaces.LegacySchedulerJob, len(failedJobs)) + for i, job := range failedJobs { + castFailedJobs[i] = job + } return &SchedulerResult{ PreemptedJobs: castPreemptedJobs, ScheduledJobs: castScheduledJobs, NodeIdByJobId: nodeIdByJobId, + FailedJobs: castFailedJobs, } } @@ -70,6 +81,16 @@ func ScheduledJobsFromSchedulerResult[T interfaces.LegacySchedulerJob](sr *Sched return rv } +// FailedJobsFromSchedulerResult returns the slice of failed jobs in the result, +// cast to type T. +func FailedJobsFromSchedulerResult[T interfaces.LegacySchedulerJob](sr *SchedulerResult) []T { + rv := make([]T, len(sr.FailedJobs)) + for i, job := range sr.FailedJobs { + rv[i] = job.(T) + } + return rv +} + // JobsSummary returns a string giving an overview of the provided jobs meant for logging. // For example: "affected queues [A, B]; resources {A: {cpu: 1}, B: {cpu: 2}}; jobs [jobAId, jobBId]". func JobsSummary(jobs []interfaces.LegacySchedulerJob) string { @@ -122,30 +143,46 @@ func targetNodeIdFromNodeSelector(nodeSelector map[string]string) (string, bool) return nodeId, ok } -// GangIdAndCardinalityFromLegacySchedulerJob returns a tuple (gangId, gangCardinality, isGangJob, error). -func GangIdAndCardinalityFromLegacySchedulerJob(job interfaces.LegacySchedulerJob) (string, int, bool, error) { +// GangIdAndCardinalityFromLegacySchedulerJob returns a tuple (gangId, gangCardinality, gangMinimumCardinality, isGangJob, error). +func GangIdAndCardinalityFromLegacySchedulerJob(job interfaces.LegacySchedulerJob) (string, int, int, bool, error) { return GangIdAndCardinalityFromAnnotations(job.GetAnnotations()) } -// GangIdAndCardinalityFromAnnotations returns a tuple (gangId, gangCardinality, isGangJob, error). -func GangIdAndCardinalityFromAnnotations(annotations map[string]string) (string, int, bool, error) { +// GangIdAndCardinalityFromAnnotations returns a tuple (gangId, gangCardinality, gangMinimumCardinality, isGangJob, error).
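+// If the minimum cardinality annotation is not set, gangMinimumCardinality defaults to gangCardinality; for non-gang jobs both values default to 1.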
+func GangIdAndCardinalityFromAnnotations(annotations map[string]string) (string, int, int, bool, error) { if annotations == nil { - return "", 0, false, nil + return "", 1, 1, false, nil } gangId, ok := annotations[configuration.GangIdAnnotation] if !ok { - return "", 0, false, nil + return "", 1, 1, false, nil } gangCardinalityString, ok := annotations[configuration.GangCardinalityAnnotation] if !ok { - return "", 0, false, errors.Errorf("missing annotation %s", configuration.GangCardinalityAnnotation) + return "", 1, 1, false, errors.Errorf("missing annotation %s", configuration.GangCardinalityAnnotation) } gangCardinality, err := strconv.Atoi(gangCardinalityString) if err != nil { - return "", 0, false, errors.WithStack(err) + return "", 1, 1, false, errors.WithStack(err) } if gangCardinality <= 0 { - return "", 0, false, errors.Errorf("gang cardinality is non-positive %d", gangCardinality) + return "", 1, 1, false, errors.Errorf("gang cardinality is non-positive %d", gangCardinality) + } + gangMinimumCardinalityString, ok := annotations[configuration.GangMinimumCardinalityAnnotation] + if !ok { + // If this is not set, default the minimum gang size to gangCardinality + return gangId, gangCardinality, gangCardinality, true, nil + } else { + gangMinimumCardinality, err := strconv.Atoi(gangMinimumCardinalityString) + if err != nil { + return "", 1, 1, false, errors.WithStack(err) + } + if gangMinimumCardinality <= 0 { + return "", 1, 1, false, errors.Errorf("gang minimum cardinality is non-positive %d", gangMinimumCardinality) + } + if gangMinimumCardinality > gangCardinality { + return "", 1, 1, false, errors.Errorf("gang minimum cardinality %d cannot be greater than gang cardinality %d", gangMinimumCardinality, gangCardinality) + } + return gangId, gangCardinality, gangMinimumCardinality, true, nil } - return gangId, gangCardinality, true, nil } diff --git a/internal/scheduler/constraints/constraints.go b/internal/scheduler/constraints/constraints.go index 477b121b2e4..b859d71bca1 100644 --- a/internal/scheduler/constraints/constraints.go +++ b/internal/scheduler/constraints/constraints.go @@ -1,6 +1,7 @@ package constraints import ( + "fmt" "math" "github.com/pkg/errors" @@ -12,32 +13,46 @@ import ( ) const ( - UnschedulableReasonMaximumResourcesScheduled = "maximum resources scheduled" - UnschedulableReasonMaximumNumberOfJobsScheduled = "maximum number of jobs scheduled" - UnschedulableReasonMaximumNumberOfGangsScheduled = "maximum number of gangs scheduled" - UnschedulableReasonMaximumResourcesPerQueueExceeded = "maximum total resources for this queue exceeded" + // Indicates that the limit on resources scheduled per round has been exceeded. + MaximumResourcesScheduledUnschedulableReason = "maximum resources scheduled" + + // Indicates that a queue has been assigned more than its allowed amount of resources. + MaximumResourcesPerQueueExceededUnschedulableReason = "maximum total resources for this queue exceeded" + + // Indicates that the scheduling rate limit has been exceeded. + GlobalRateLimitExceededUnschedulableReason = "global scheduling rate limit exceeded" + QueueRateLimitExceededUnschedulableReason = "queue scheduling rate limit exceeded" + + // Indicates that scheduling a gang would exceed the rate limit. 
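+	// That is, some rate-limiter tokens are available, but fewer than the gang's cardinality.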
+ GlobalRateLimitExceededByGangUnschedulableReason = "gang would exceed global scheduling rate limit" + QueueRateLimitExceededByGangUnschedulableReason = "gang would exceed queue scheduling rate limit" + + // Indicates that the number of jobs in a gang exceeds the burst size. + // This means the gang cannot be scheduled without first increasing the burst size. + GangExceedsGlobalBurstSizeUnschedulableReason = "gang cardinality too large: exceeds global max burst size" + GangExceedsQueueBurstSizeUnschedulableReason = "gang cardinality too large: exceeds queue max burst size" ) -// IsTerminalUnschedulableReason returns true if reason indicates it's not possible to schedule any more jobs in this round. +// IsTerminalUnschedulableReason returns true if reason indicates +// it's not possible to schedule any more jobs in this round. func IsTerminalUnschedulableReason(reason string) bool { - if reason == UnschedulableReasonMaximumResourcesScheduled { + if reason == MaximumResourcesScheduledUnschedulableReason { return true } - if reason == UnschedulableReasonMaximumNumberOfJobsScheduled { - return true - } - if reason == UnschedulableReasonMaximumNumberOfGangsScheduled { + if reason == GlobalRateLimitExceededUnschedulableReason { return true } return false } +// IsTerminalQueueUnschedulableReason returns true if reason indicates +// it's not possible to schedule any more jobs from this queue in this round. +func IsTerminalQueueUnschedulableReason(reason string) bool { + return reason == QueueRateLimitExceededUnschedulableReason +} + // SchedulingConstraints contains scheduling constraints, e.g., per-queue resource limits. type SchedulingConstraints struct { - // Max number of jobs to scheduler per lease jobs call. - MaximumJobsToSchedule uint - // Max number of jobs to scheduler per lease jobs call. - MaximumGangsToSchedule uint // Max number of jobs to consider for a queue before giving up. MaxQueueLookback uint // Jobs leased to this executor must be at least this large. @@ -82,8 +97,6 @@ func SchedulingConstraintsFromSchedulingConfig( maximumResourceFractionToSchedule = m } return SchedulingConstraints{ - MaximumJobsToSchedule: config.MaximumJobsToSchedule, - MaximumGangsToSchedule: config.MaximumGangsToSchedule, MaxQueueLookback: config.MaxQueueLookback, MinimumJobSize: minimumJobSize, MaximumResourcesToSchedule: absoluteFromRelativeLimits(totalResources, maximumResourceFractionToSchedule), @@ -99,47 +112,75 @@ func absoluteFromRelativeLimits(totalResources schedulerobjects.ResourceList, re return absoluteLimits } -func (constraints *SchedulingConstraints) CheckRoundConstraints(sctx *schedulercontext.SchedulingContext) (bool, string, error) { - // MaximumJobsToSchedule check. - if constraints.MaximumJobsToSchedule != 0 && sctx.NumScheduledJobs == int(constraints.MaximumJobsToSchedule) { - return false, UnschedulableReasonMaximumNumberOfJobsScheduled, nil - } - - // MaximumGangsToSchedule check. - if constraints.MaximumGangsToSchedule != 0 && sctx.NumScheduledGangs == int(constraints.MaximumGangsToSchedule) { - return false, UnschedulableReasonMaximumNumberOfGangsScheduled, nil - } +// ScaleQuantity scales q in-place by a factor f. +// This function overflows for quantities the milli value of which can't be expressed as an int64. +// E.g., 1Pi is ok, but not 10Pi.
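+// For example, scaling a quantity of 10 by f=0.5 yields 5, since 10000m * 0.5 = 5000m.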
+func ScaleQuantity(q resource.Quantity, f float64) resource.Quantity { + q.SetMilli(int64(math.Round(float64(q.MilliValue()) * f))) + return q +} +func (constraints *SchedulingConstraints) CheckRoundConstraints(sctx *schedulercontext.SchedulingContext, queue string) (bool, string, error) { // MaximumResourcesToSchedule check. if !sctx.ScheduledResources.IsStrictlyLessOrEqual(constraints.MaximumResourcesToSchedule) { - return false, UnschedulableReasonMaximumResourcesScheduled, nil + return false, MaximumResourcesScheduledUnschedulableReason, nil } return true, "", nil } -func (constraints *SchedulingConstraints) CheckPerQueueAndPriorityClassConstraints( +func (constraints *SchedulingConstraints) CheckConstraints( sctx *schedulercontext.SchedulingContext, - queue string, - priorityClassName string, + gctx *schedulercontext.GangSchedulingContext, ) (bool, string, error) { - qctx := sctx.QueueSchedulingContexts[queue] + qctx := sctx.QueueSchedulingContexts[gctx.Queue] if qctx == nil { - return false, "", errors.Errorf("no QueueSchedulingContext for queue %s", queue) + return false, "", errors.Errorf("no QueueSchedulingContext for queue %s", gctx.Queue) + } + + // Check that the job is large enough for this executor. + if ok, unschedulableReason := RequestsAreLargeEnough(gctx.TotalResourceRequests, constraints.MinimumJobSize); !ok { + return false, unschedulableReason, nil + } + + // Global rate limiter check. + tokens := sctx.Limiter.TokensAt(sctx.Started) + if tokens <= 0 { + return false, GlobalRateLimitExceededUnschedulableReason, nil + } + if sctx.Limiter.Burst() < gctx.Cardinality() { + return false, GangExceedsGlobalBurstSizeUnschedulableReason, nil + } + if tokens < float64(gctx.Cardinality()) { + return false, GlobalRateLimitExceededByGangUnschedulableReason, nil + } + + // Per-queue rate limiter check. + tokens = qctx.Limiter.TokensAt(sctx.Started) + if tokens <= 0 { + return false, QueueRateLimitExceededUnschedulableReason, nil + } + if qctx.Limiter.Burst() < gctx.Cardinality() { + return false, GangExceedsQueueBurstSizeUnschedulableReason, nil + } + if tokens < float64(gctx.Cardinality()) { + return false, QueueRateLimitExceededByGangUnschedulableReason, nil } // PriorityClassSchedulingConstraintsByPriorityClassName check. - if priorityClassConstraint, ok := constraints.PriorityClassSchedulingConstraintsByPriorityClassName[priorityClassName]; ok { - if !qctx.AllocatedByPriorityClass[priorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) { - return false, UnschedulableReasonMaximumResourcesPerQueueExceeded, nil + if priorityClassConstraint, ok := constraints.PriorityClassSchedulingConstraintsByPriorityClassName[gctx.PriorityClassName]; ok { + if !qctx.AllocatedByPriorityClass[gctx.PriorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) { + return false, MaximumResourcesPerQueueExceededUnschedulableReason, nil } } return true, "", nil } -// ScaleQuantity scales q in-place by a factor f. -// This functions overflows for quantities the milli value of which can't be expressed as an int64. -// E.g., 1Pi is ok, but not 10Pi. 
-func ScaleQuantity(q resource.Quantity, f float64) resource.Quantity { - q.SetMilli(int64(math.Round(float64(q.MilliValue()) * f))) - return q +func RequestsAreLargeEnough(totalResourceRequests, minRequest schedulerobjects.ResourceList) (bool, string) { + for t, minQuantity := range minRequest.Resources { + q := totalResourceRequests.Get(t) + if minQuantity.Cmp(q) == 1 { + return false, fmt.Sprintf("job requests %s %s, but the minimum is %s", q.String(), t, minQuantity.String()) + } + } + return true, "" } diff --git a/internal/scheduler/constraints/constraints_test.go b/internal/scheduler/constraints/constraints_test.go index e387bf60ea7..081058191dc 100644 --- a/internal/scheduler/constraints/constraints_test.go +++ b/internal/scheduler/constraints/constraints_test.go @@ -21,12 +21,12 @@ func TestConstraints(t *testing.T) { }{} // TODO: Add tests. for name, tc := range tests { t.Run(name, func(t *testing.T) { - ok, unschedulableReason, err := tc.constraints.CheckRoundConstraints(tc.sctx) + ok, unschedulableReason, err := tc.constraints.CheckRoundConstraints(tc.sctx, tc.queue) require.NoError(t, err) require.Equal(t, tc.globalUnschedulableReason == "", ok) require.Equal(t, tc.globalUnschedulableReason, unschedulableReason) - ok, unschedulableReason, err = tc.constraints.CheckPerQueueAndPriorityClassConstraints(tc.sctx, tc.queue, tc.priorityClassName) + ok, unschedulableReason, err = tc.constraints.CheckConstraints(tc.sctx, nil) require.NoError(t, err) require.Equal(t, tc.perQueueAndPriorityClassUnschedulableReason == "", ok) require.Equal(t, tc.perQueueAndPriorityClassUnschedulableReason, unschedulableReason) diff --git a/internal/scheduler/context/context.go b/internal/scheduler/context/context.go index 30f2c87e6ec..8a52b497735 100644 --- a/internal/scheduler/context/context.go +++ b/internal/scheduler/context/context.go @@ -10,6 +10,7 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "golang.org/x/time/rate" "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadaerrors" @@ -38,6 +39,9 @@ type SchedulingContext struct { DefaultPriorityClass string // Determines how fairness is computed. FairnessCostProvider fairness.FairnessCostProvider + // Limits job scheduling rate globally across all queues. + // Use the "Started" time to ensure limiter state remains constant within each scheduling round. + Limiter *rate.Limiter // Sum of queue weights across all queues. WeightSum float64 // Per-queue scheduling contexts. 
@@ -73,6 +77,7 @@ func NewSchedulingContext( priorityClasses map[string]types.PriorityClass, defaultPriorityClass string, fairnessCostProvider fairness.FairnessCostProvider, + limiter *rate.Limiter, totalResources schedulerobjects.ResourceList, ) *SchedulingContext { return &SchedulingContext{ @@ -82,6 +87,7 @@ func NewSchedulingContext( PriorityClasses: priorityClasses, DefaultPriorityClass: defaultPriorityClass, FairnessCostProvider: fairnessCostProvider, + Limiter: limiter, QueueSchedulingContexts: make(map[string]*QueueSchedulingContext), TotalResources: totalResources.DeepCopy(), ScheduledResources: schedulerobjects.NewResourceListWithDefaultSize(), @@ -110,7 +116,11 @@ func (sctx *SchedulingContext) ClearUnfeasibleSchedulingKeys() { sctx.UnfeasibleSchedulingKeys = make(map[schedulerobjects.SchedulingKey]*JobSchedulingContext) } -func (sctx *SchedulingContext) AddQueueSchedulingContext(queue string, weight float64, initialAllocatedByPriorityClass schedulerobjects.QuantityByTAndResourceType[string]) error { +func (sctx *SchedulingContext) AddQueueSchedulingContext( + queue string, weight float64, + initialAllocatedByPriorityClass schedulerobjects.QuantityByTAndResourceType[string], + limiter *rate.Limiter, +) error { if _, ok := sctx.QueueSchedulingContexts[queue]; ok { return errors.WithStack(&armadaerrors.ErrInvalidArgument{ Name: "queue", @@ -134,6 +144,7 @@ func (sctx *SchedulingContext) AddQueueSchedulingContext(queue string, weight fl ExecutorId: sctx.ExecutorId, Queue: queue, Weight: weight, + Limiter: limiter, Allocated: allocated, AllocatedByPriorityClass: initialAllocatedByPriorityClass, ScheduledResourcesByPriorityClass: make(schedulerobjects.QuantityByTAndResourceType[string]), @@ -215,15 +226,20 @@ func (sctx *SchedulingContext) ReportString(verbosity int32) string { func (sctx *SchedulingContext) AddGangSchedulingContext(gctx *GangSchedulingContext) (bool, error) { allJobsEvictedInThisRound := true allJobsSuccessful := true + numberOfSuccessfulJobs := 0 for _, jctx := range gctx.JobSchedulingContexts { evictedInThisRound, err := sctx.AddJobSchedulingContext(jctx) if err != nil { return false, err } allJobsEvictedInThisRound = allJobsEvictedInThisRound && evictedInThisRound - allJobsSuccessful = allJobsSuccessful && jctx.IsSuccessful() + isSuccess := jctx.IsSuccessful() + allJobsSuccessful = allJobsSuccessful && isSuccess + if isSuccess { + numberOfSuccessfulJobs++ + } } - if allJobsSuccessful && !allJobsEvictedInThisRound { + if numberOfSuccessfulJobs >= gctx.GangMinCardinality && !allJobsEvictedInThisRound { sctx.NumScheduledGangs++ } return allJobsEvictedInThisRound, nil @@ -335,6 +351,9 @@ type QueueSchedulingContext struct { Queue string // Determines the fair share of this queue relative to other queues. Weight float64 + // Limits job scheduling rate for this queue. + // Use the "Started" time to ensure limiter state remains constant within each scheduling round. + Limiter *rate.Limiter // Total resources assigned to the queue across all clusters by priority class priority. // Includes jobs scheduled during this invocation of the scheduler. 
Allocated schedulerobjects.ResourceList @@ -444,15 +463,6 @@ func (qctx *QueueSchedulingContext) ReportString(verbosity int32) string { return sb.String() } -func (qctx *QueueSchedulingContext) AddGangSchedulingContext(gctx *GangSchedulingContext) error { - for _, jctx := range gctx.JobSchedulingContexts { - if _, err := qctx.AddJobSchedulingContext(jctx); err != nil { - return err - } - } - return nil -} - // AddJobSchedulingContext adds a job scheduling context. // Automatically updates scheduled resources. func (qctx *QueueSchedulingContext) AddJobSchedulingContext(jctx *JobSchedulingContext) (bool, error) { @@ -528,6 +538,7 @@ type GangSchedulingContext struct { TotalResourceRequests schedulerobjects.ResourceList AllJobsEvicted bool NodeUniformityLabel string + GangMinCardinality int } func NewGangSchedulingContext(jctxs []*JobSchedulingContext) *GangSchedulingContext { @@ -536,12 +547,14 @@ func NewGangSchedulingContext(jctxs []*JobSchedulingContext) *GangSchedulingCont queue := "" priorityClassName := "" nodeUniformityLabel := "" + gangMinCardinality := 1 if len(jctxs) > 0 { queue = jctxs[0].Job.GetQueue() priorityClassName = jctxs[0].Job.GetPriorityClassName() if jctxs[0].PodRequirements != nil { nodeUniformityLabel = jctxs[0].PodRequirements.Annotations[configuration.GangNodeUniformityLabelAnnotation] } + gangMinCardinality = jctxs[0].GangMinCardinality } allJobsEvicted := true totalResourceRequests := schedulerobjects.NewResourceList(4) @@ -557,9 +570,15 @@ func NewGangSchedulingContext(jctxs []*JobSchedulingContext) *GangSchedulingCont TotalResourceRequests: totalResourceRequests, AllJobsEvicted: allJobsEvicted, NodeUniformityLabel: nodeUniformityLabel, + GangMinCardinality: gangMinCardinality, } } +// Cardinality returns the number of jobs in the gang. +func (gctx *GangSchedulingContext) Cardinality() int { + return len(gctx.JobSchedulingContexts) +} + func isEvictedJob(job interfaces.LegacySchedulerJob) bool { return job.GetAnnotations()[schedulerconfig.IsEvictedAnnotation] == "true" } @@ -581,6 +600,10 @@ type JobSchedulingContext struct { UnschedulableReason string // Pod scheduling contexts for the individual pods that make up the job. PodSchedulingContext *PodSchedulingContext + // The minimum size of the gang associated with this job. + GangMinCardinality int + // If set, indicates this job should be failed back to the client when the gang is scheduled. + ShouldFail bool } func (jctx *JobSchedulingContext) String() string { @@ -596,6 +619,7 @@ func (jctx *JobSchedulingContext) String() string { if jctx.PodSchedulingContext != nil { fmt.Fprint(w, jctx.PodSchedulingContext.String()) } + fmt.Fprintf(w, "GangMinCardinality:\t%d\n", jctx.GangMinCardinality) w.Flush() return sb.String() } @@ -604,15 +628,25 @@ func (jctx *JobSchedulingContext) IsSuccessful() bool { return jctx.UnschedulableReason == "" } -func JobSchedulingContextsFromJobs[J interfaces.LegacySchedulerJob](priorityClasses map[string]types.PriorityClass, jobs []J) []*JobSchedulingContext { +func JobSchedulingContextsFromJobs[J interfaces.LegacySchedulerJob](priorityClasses map[string]types.PriorityClass, jobs []J, extractGangInfo func(map[string]string) (string, int, int, bool, error)) []*JobSchedulingContext { jctxs := make([]*JobSchedulingContext, len(jobs)) timestamp := time.Now() + for i, job := range jobs { + // TODO: Move min cardinality to gang context only and remove from here. + // Requires re-phrasing nodedb in terms of gang context, as well as feeding the value extracted from the annotations downstream. 
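+		// If the gang annotations cannot be parsed, fall back to a minimum gang cardinality of 1.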
+ _, _, gangMinCardinality, _, err := extractGangInfo(job.GetAnnotations()) + if err != nil { + gangMinCardinality = 1 + } + jctxs[i] = &JobSchedulingContext{ - Created: timestamp, - JobId: job.GetId(), - Job: job, - PodRequirements: job.GetPodRequirements(priorityClasses), + Created: timestamp, + JobId: job.GetId(), + Job: job, + PodRequirements: job.GetPodRequirements(priorityClasses), + GangMinCardinality: gangMinCardinality, + ShouldFail: false, } } return jctxs diff --git a/internal/scheduler/context/context_test.go b/internal/scheduler/context/context_test.go index 3b932f30540..6fe905bfeaf 100644 --- a/internal/scheduler/context/context_test.go +++ b/internal/scheduler/context/context_test.go @@ -43,6 +43,7 @@ func TestSchedulingContextAccounting(t *testing.T) { testfixtures.TestPriorityClasses, testfixtures.TestDefaultPriorityClass, fairnessCostProvider, + nil, totalResources, ) priorityFactorByQueue := map[string]float64{"A": 1, "B": 1} @@ -52,7 +53,7 @@ func TestSchedulingContextAccounting(t *testing.T) { }, } for _, queue := range []string{"A", "B"} { - err := sctx.AddQueueSchedulingContext(queue, priorityFactorByQueue[queue], allocatedByQueueAndPriorityClass[queue]) + err := sctx.AddQueueSchedulingContext(queue, priorityFactorByQueue[queue], allocatedByQueueAndPriorityClass[queue], nil) require.NoError(t, err) } @@ -88,8 +89,9 @@ func testNSmallCpuJobSchedulingContext(queue, priorityClassName string, n int) [ func testSmallCpuJobSchedulingContext(queue, priorityClassName string) *JobSchedulingContext { job := testfixtures.Test1Cpu4GiJob(queue, priorityClassName) return &JobSchedulingContext{ - JobId: job.GetId(), - Job: job, - PodRequirements: job.GetPodRequirements(testfixtures.TestPriorityClasses), + JobId: job.GetId(), + Job: job, + PodRequirements: job.GetPodRequirements(testfixtures.TestPriorityClasses), + GangMinCardinality: 1, } } diff --git a/internal/scheduler/database/db.go b/internal/scheduler/database/db.go index 5af3de156f4..8f9fc5e6de2 100644 --- a/internal/scheduler/database/db.go +++ b/internal/scheduler/database/db.go @@ -7,8 +7,8 @@ package database import ( "context" - "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" ) type DBTX interface { diff --git a/internal/scheduler/database/db_pruner.go b/internal/scheduler/database/db_pruner.go index 728c3c9b71b..8da7dd7935d 100644 --- a/internal/scheduler/database/db_pruner.go +++ b/internal/scheduler/database/db_pruner.go @@ -1,13 +1,13 @@ package database import ( - ctx "context" "time" "github.com/jackc/pgx/v5" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/clock" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) // PruneDb removes completed jobs (and related runs and errors) from the database if their `lastUpdateTime` @@ -15,7 +15,7 @@ import ( // Jobs are deleted in batches across transactions. This means that if this job fails midway through, it still // may have deleted some jobs. // The function will run until the supplied context is cancelled. 
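+// Deletion progress is logged via the supplied context's Infof method.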
-func PruneDb(ctx ctx.Context, db *pgx.Conn, batchLimit int, keepAfterCompletion time.Duration, clock clock.Clock) error { +func PruneDb(ctx *armadacontext.Context, db *pgx.Conn, batchLimit int, keepAfterCompletion time.Duration, clock clock.Clock) error { start := time.Now() cutOffTime := clock.Now().Add(-keepAfterCompletion) @@ -28,7 +28,7 @@ func PruneDb(ctx ctx.Context, db *pgx.Conn, batchLimit int, keepAfterCompletion // Insert the ids of all jobs we want to delete into a tmp table _, err = db.Exec(ctx, `CREATE TEMP TABLE rows_to_delete AS ( - SELECT job_id FROM jobs + SELECT job_id FROM jobs WHERE last_modified < $1 AND (succeeded = TRUE OR failed = TRUE OR cancelled = TRUE))`, cutOffTime) if err != nil { @@ -40,11 +40,11 @@ func PruneDb(ctx ctx.Context, db *pgx.Conn, batchLimit int, keepAfterCompletion return errors.WithStack(err) } if totalJobsToDelete == 0 { - log.Infof("Found no jobs to be deleted. Exiting") + ctx.Infof("Found no jobs to be deleted. Exiting") return nil } - log.Infof("Found %d jobs to be deleted", totalJobsToDelete) + ctx.Infof("Found %d jobs to be deleted", totalJobsToDelete) // create temp table to hold a batch of results _, err = db.Exec(ctx, "CREATE TEMP TABLE batch (job_id TEXT);") @@ -93,9 +93,10 @@ func PruneDb(ctx ctx.Context, db *pgx.Conn, batchLimit int, keepAfterCompletion taken := time.Now().Sub(batchStart) jobsDeleted += batchSize - log.Infof("Deleted %d jobs in %s. Deleted %d jobs out of %d", batchSize, taken, jobsDeleted, totalJobsToDelete) + ctx. + Infof("Deleted %d jobs in %s. Deleted %d jobs out of %d", batchSize, taken, jobsDeleted, totalJobsToDelete) } taken := time.Now().Sub(start) - log.Infof("Deleted %d jobs in %s", jobsDeleted, taken) + ctx.Infof("Deleted %d jobs in %s", jobsDeleted, taken) return nil } diff --git a/internal/scheduler/database/db_pruner_test.go b/internal/scheduler/database/db_pruner_test.go index bd1165ed2d3..1a30c200463 100644 --- a/internal/scheduler/database/db_pruner_test.go +++ b/internal/scheduler/database/db_pruner_test.go @@ -1,7 +1,6 @@ package database import ( - "context" "fmt" "testing" "time" @@ -12,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" commonutil "github.com/armadaproject/armada/internal/common/util" ) @@ -108,7 +108,7 @@ func TestPruneDb_RemoveJobs(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := WithTestDb(func(_ *Queries, db *pgxpool.Pool) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() testClock := clock.NewFakeClock(baseTime) @@ -186,7 +186,7 @@ func TestPruneDb_RemoveMarkers(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := WithTestDb(func(_ *Queries, db *pgxpool.Pool) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() testClock := clock.NewFakeClock(baseTime) @@ -220,7 +220,7 @@ func TestPruneDb_RemoveMarkers(t *testing.T) { // Removes the triggers that auto-set serial and last_update_time as // we need to manipulate these as part of the test -func removeTriggers(ctx context.Context, db *pgxpool.Pool) error { +func removeTriggers(ctx *armadacontext.Context, db *pgxpool.Pool) error { 
triggers := map[string]string{ "jobs": "next_serial_on_insert_jobs", "runs": "next_serial_on_insert_runs", diff --git a/internal/scheduler/database/executor_repository.go b/internal/scheduler/database/executor_repository.go index c2da2442e54..ec50db20126 100644 --- a/internal/scheduler/database/executor_repository.go +++ b/internal/scheduler/database/executor_repository.go @@ -1,13 +1,13 @@ package database import ( - "context" "time" "github.com/gogo/protobuf/proto" "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -15,11 +15,11 @@ import ( // ExecutorRepository is an interface to be implemented by structs which provide executor information type ExecutorRepository interface { // GetExecutors returns all known executors, regardless of their last heartbeat time - GetExecutors(ctx context.Context) ([]*schedulerobjects.Executor, error) + GetExecutors(ctx *armadacontext.Context) ([]*schedulerobjects.Executor, error) // GetLastUpdateTimes returns a map of executor name -> last heartbeat time - GetLastUpdateTimes(ctx context.Context) (map[string]time.Time, error) + GetLastUpdateTimes(ctx *armadacontext.Context) (map[string]time.Time, error) // StoreExecutor persists the latest executor state - StoreExecutor(ctx context.Context, executor *schedulerobjects.Executor) error + StoreExecutor(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error } // PostgresExecutorRepository is an implementation of ExecutorRepository that stores its state in postgres @@ -40,7 +40,7 @@ func NewPostgresExecutorRepository(db *pgxpool.Pool) *PostgresExecutorRepository } // GetExecutors returns all known executors, regardless of their last heartbeat time -func (r *PostgresExecutorRepository) GetExecutors(ctx context.Context) ([]*schedulerobjects.Executor, error) { +func (r *PostgresExecutorRepository) GetExecutors(ctx *armadacontext.Context) ([]*schedulerobjects.Executor, error) { queries := New(r.db) requests, err := queries.SelectAllExecutors(ctx) if err != nil { @@ -59,7 +59,7 @@ func (r *PostgresExecutorRepository) GetExecutors(ctx context.Context) ([]*sched } // GetLastUpdateTimes returns a map of executor name -> last heartbeat time -func (r *PostgresExecutorRepository) GetLastUpdateTimes(ctx context.Context) (map[string]time.Time, error) { +func (r *PostgresExecutorRepository) GetLastUpdateTimes(ctx *armadacontext.Context) (map[string]time.Time, error) { queries := New(r.db) rows, err := queries.SelectExecutorUpdateTimes(ctx) if err != nil { @@ -74,7 +74,7 @@ func (r *PostgresExecutorRepository) GetLastUpdateTimes(ctx context.Context) (ma } // StoreExecutor persists the latest executor state -func (r *PostgresExecutorRepository) StoreExecutor(ctx context.Context, executor *schedulerobjects.Executor) error { +func (r *PostgresExecutorRepository) StoreExecutor(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error { queries := New(r.db) bytes, err := proto.Marshal(executor) if err != nil { diff --git a/internal/scheduler/database/executor_repository_test.go b/internal/scheduler/database/executor_repository_test.go index 2d7bd206512..76a0e14c9f9 100644 --- a/internal/scheduler/database/executor_repository_test.go +++ b/internal/scheduler/database/executor_repository_test.go @@ -1,7 +1,6 @@ package database import ( - "context" "testing" "time" @@ -10,6 +9,7 @@ import ( 
"github.com/stretchr/testify/require" "golang.org/x/exp/slices" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -53,7 +53,7 @@ func TestExecutorRepository_LoadAndSave(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withExecutorRepository(func(repo *PostgresExecutorRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() for _, executor := range tc.executors { err := repo.StoreExecutor(ctx, executor) @@ -106,7 +106,7 @@ func TestExecutorRepository_GetLastUpdateTimes(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withExecutorRepository(func(repo *PostgresExecutorRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() for _, executor := range tc.executors { err := repo.StoreExecutor(ctx, executor) diff --git a/internal/scheduler/database/job_repository.go b/internal/scheduler/database/job_repository.go index c4eaf606099..ebc08d03230 100644 --- a/internal/scheduler/database/job_repository.go +++ b/internal/scheduler/database/job_repository.go @@ -1,7 +1,6 @@ package database import ( - "context" "fmt" "github.com/google/uuid" @@ -9,6 +8,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database" protoutil "github.com/armadaproject/armada/internal/common/proto" @@ -35,24 +35,24 @@ type JobRunLease struct { type JobRepository interface { // FetchJobUpdates returns all jobs and job dbRuns that have been updated after jobSerial and jobRunSerial respectively // These updates are guaranteed to be consistent with each other - FetchJobUpdates(ctx context.Context, jobSerial int64, jobRunSerial int64) ([]Job, []Run, error) + FetchJobUpdates(ctx *armadacontext.Context, jobSerial int64, jobRunSerial int64) ([]Job, []Run, error) // FetchJobRunErrors returns all armadaevents.JobRunErrors for the provided job run ids. The returned map is // keyed by job run id. Any dbRuns which don't have errors wil be absent from the map. - FetchJobRunErrors(ctx context.Context, runIds []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) + FetchJobRunErrors(ctx *armadacontext.Context, runIds []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) // CountReceivedPartitions returns a count of the number of partition messages present in the database corresponding // to the provided groupId. This is used by the scheduler to determine if the database represents the state of // pulsar after a given point in time. 
- CountReceivedPartitions(ctx context.Context, groupId uuid.UUID) (uint32, error) + CountReceivedPartitions(ctx *armadacontext.Context, groupId uuid.UUID) (uint32, error) // FindInactiveRuns returns a slice containing all dbRuns that the scheduler does not currently consider active // Runs are inactive if they don't exist or if they have succeeded, failed or been cancelled - FindInactiveRuns(ctx context.Context, runIds []uuid.UUID) ([]uuid.UUID, error) + FindInactiveRuns(ctx *armadacontext.Context, runIds []uuid.UUID) ([]uuid.UUID, error) // FetchJobRunLeases fetches new job runs for a given executor. A maximum of maxResults rows will be returned, while run // in excludedRunIds will be excluded - FetchJobRunLeases(ctx context.Context, executor string, maxResults uint, excludedRunIds []uuid.UUID) ([]*JobRunLease, error) + FetchJobRunLeases(ctx *armadacontext.Context, executor string, maxResults uint, excludedRunIds []uuid.UUID) ([]*JobRunLease, error) } // PostgresJobRepository is an implementation of JobRepository that stores its state in postgres @@ -72,7 +72,7 @@ func NewPostgresJobRepository(db *pgxpool.Pool, batchSize int32) *PostgresJobRep // FetchJobRunErrors returns all armadaevents.JobRunErrors for the provided job run ids. The returned map is // keyed by job run id. Any dbRuns which don't have errors wil be absent from the map. -func (r *PostgresJobRepository) FetchJobRunErrors(ctx context.Context, runIds []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) { +func (r *PostgresJobRepository) FetchJobRunErrors(ctx *armadacontext.Context, runIds []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) { if len(runIds) == 0 { return map[uuid.UUID]*armadaevents.Error{}, nil } @@ -125,7 +125,7 @@ func (r *PostgresJobRepository) FetchJobRunErrors(ctx context.Context, runIds [] // FetchJobUpdates returns all jobs and job dbRuns that have been updated after jobSerial and jobRunSerial respectively // These updates are guaranteed to be consistent with each other -func (r *PostgresJobRepository) FetchJobUpdates(ctx context.Context, jobSerial int64, jobRunSerial int64) ([]Job, []Run, error) { +func (r *PostgresJobRepository) FetchJobUpdates(ctx *armadacontext.Context, jobSerial int64, jobRunSerial int64) ([]Job, []Run, error) { var updatedJobs []Job = nil var updatedRuns []Run = nil @@ -180,7 +180,7 @@ func (r *PostgresJobRepository) FetchJobUpdates(ctx context.Context, jobSerial i // FindInactiveRuns returns a slice containing all dbRuns that the scheduler does not currently consider active // Runs are inactive if they don't exist or if they have succeeded, failed or been cancelled -func (r *PostgresJobRepository) FindInactiveRuns(ctx context.Context, runIds []uuid.UUID) ([]uuid.UUID, error) { +func (r *PostgresJobRepository) FindInactiveRuns(ctx *armadacontext.Context, runIds []uuid.UUID) ([]uuid.UUID, error) { var inactiveRuns []uuid.UUID err := pgx.BeginTxFunc(ctx, r.db, pgx.TxOptions{ IsoLevel: pgx.ReadCommitted, @@ -221,7 +221,7 @@ func (r *PostgresJobRepository) FindInactiveRuns(ctx context.Context, runIds []u // FetchJobRunLeases fetches new job runs for a given executor. 
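// Editor's note: illustrative sketch, not part of this patch. It shows the
// incremental-update pattern the JobRepository interface supports: a caller
// remembers the highest job and run serials it has seen and passes them back
// on the next FetchJobUpdates call. The `Serial` field names on Job and Run,
// the batch size, and the one-second pause are assumptions for illustration.
package sketches

import (
	"time"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/armadaproject/armada/internal/common/armadacontext"
	schedulerdb "github.com/armadaproject/armada/internal/scheduler/database"
)

func pollJobUpdates(ctx *armadacontext.Context, pool *pgxpool.Pool) error {
	repo := schedulerdb.NewPostgresJobRepository(pool, 500)
	var jobSerial, runSerial int64

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		jobs, runs, err := repo.FetchJobUpdates(ctx, jobSerial, runSerial)
		if err != nil {
			return err
		}
		for _, job := range jobs {
			if job.Serial > jobSerial {
				jobSerial = job.Serial // assumed field name
			}
		}
		for _, run := range runs {
			if run.Serial > runSerial {
				runSerial = run.Serial // assumed field name
			}
		}
		ctx.Infof("received %d job updates and %d run updates", len(jobs), len(runs))

		time.Sleep(time.Second) // simple pacing for the sketch
	}
}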
A maximum of maxResults rows will be returned, while run // in excludedRunIds will be excluded -func (r *PostgresJobRepository) FetchJobRunLeases(ctx context.Context, executor string, maxResults uint, excludedRunIds []uuid.UUID) ([]*JobRunLease, error) { +func (r *PostgresJobRepository) FetchJobRunLeases(ctx *armadacontext.Context, executor string, maxResults uint, excludedRunIds []uuid.UUID) ([]*JobRunLease, error) { if maxResults == 0 { return []*JobRunLease{}, nil } @@ -272,7 +272,7 @@ func (r *PostgresJobRepository) FetchJobRunLeases(ctx context.Context, executor // CountReceivedPartitions returns a count of the number of partition messages present in the database corresponding // to the provided groupId. This is used by the scheduler to determine if the database represents the state of // pulsar after a given point in time. -func (r *PostgresJobRepository) CountReceivedPartitions(ctx context.Context, groupId uuid.UUID) (uint32, error) { +func (r *PostgresJobRepository) CountReceivedPartitions(ctx *armadacontext.Context, groupId uuid.UUID) (uint32, error) { queries := New(r.db) count, err := queries.CountGroup(ctx, groupId) if err != nil { @@ -300,7 +300,7 @@ func fetch[T hasSerial](from int64, batchSize int32, fetchBatch func(int64) ([]T } // Insert all run ids into a tmp table. The name of the table is returned -func insertRunIdsToTmpTable(ctx context.Context, tx pgx.Tx, runIds []uuid.UUID) (string, error) { +func insertRunIdsToTmpTable(ctx *armadacontext.Context, tx pgx.Tx, runIds []uuid.UUID) (string, error) { tmpTable := database.UniqueTableName("job_runs") _, err := tx.Exec(ctx, fmt.Sprintf("CREATE TEMPORARY TABLE %s (run_id uuid) ON COMMIT DROP", tmpTable)) diff --git a/internal/scheduler/database/job_repository_test.go b/internal/scheduler/database/job_repository_test.go index b236618185b..771d887b17d 100644 --- a/internal/scheduler/database/job_repository_test.go +++ b/internal/scheduler/database/job_repository_test.go @@ -1,7 +1,6 @@ package database import ( - "context" "fmt" "testing" "time" @@ -13,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/slices" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database" protoutil "github.com/armadaproject/armada/internal/common/proto" @@ -84,7 +84,7 @@ func TestFetchJobUpdates(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withJobRepository(func(repo *PostgresJobRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) // Set up db err := database.UpsertWithTransaction(ctx, repo.db, "jobs", tc.dbJobs) @@ -187,7 +187,7 @@ func TestFetchJobRunErrors(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withJobRepository(func(repo *PostgresJobRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) // Set up db err := database.UpsertWithTransaction(ctx, repo.db, "job_run_errors", tc.errorsInDb) require.NoError(t, err) @@ -222,7 +222,7 @@ func TestCountReceivedPartitions(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withJobRepository(func(repo *PostgresJobRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 
5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) markers := make([]Marker, tc.numPartitions) groupId := uuid.New() @@ -357,7 +357,7 @@ func TestFindInactiveRuns(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withJobRepository(func(repo *PostgresJobRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 500*time.Second) // Set up db err := database.UpsertWithTransaction(ctx, repo.db, "runs", tc.dbRuns) @@ -487,7 +487,7 @@ func TestFetchJobRunLeases(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { err := withJobRepository(func(repo *PostgresJobRepository) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) // Set up db err := database.UpsertWithTransaction(ctx, repo.db, "jobs", tc.dbJobs) @@ -553,7 +553,7 @@ func withJobRepository(action func(repository *PostgresJobRepository) error) err }) } -func insertMarkers(ctx context.Context, markers []Marker, db *pgxpool.Pool) error { +func insertMarkers(ctx *armadacontext.Context, markers []Marker, db *pgxpool.Pool) error { for _, marker := range markers { _, err := db.Exec(ctx, "INSERT INTO markers VALUES ($1, $2)", marker.GroupID, marker.PartitionID) if err != nil { diff --git a/internal/scheduler/database/redis_executor_repository.go b/internal/scheduler/database/redis_executor_repository.go index 989710a69da..ef775ff7f75 100644 --- a/internal/scheduler/database/redis_executor_repository.go +++ b/internal/scheduler/database/redis_executor_repository.go @@ -1,7 +1,6 @@ package database import ( - "context" "fmt" "time" @@ -9,6 +8,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pkg/errors" + "github.com/armadaproject/armada/internal/common/armadacontext" protoutil "github.com/armadaproject/armada/internal/common/proto" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -29,7 +29,7 @@ func NewRedisExecutorRepository(db redis.UniversalClient, schedulerName string) } } -func (r *RedisExecutorRepository) GetExecutors(_ context.Context) ([]*schedulerobjects.Executor, error) { +func (r *RedisExecutorRepository) GetExecutors(_ *armadacontext.Context) ([]*schedulerobjects.Executor, error) { result, err := r.db.HGetAll(r.executorsKey).Result() if err != nil { return nil, errors.Wrap(err, "Error retrieving executors from redis") @@ -47,12 +47,12 @@ func (r *RedisExecutorRepository) GetExecutors(_ context.Context) ([]*schedulero return executors, nil } -func (r *RedisExecutorRepository) GetLastUpdateTimes(_ context.Context) (map[string]time.Time, error) { +func (r *RedisExecutorRepository) GetLastUpdateTimes(_ *armadacontext.Context) (map[string]time.Time, error) { // We could implement this in a very inefficient way, but I don't believe it's needed so panic for now panic("GetLastUpdateTimes is not implemented") } -func (r *RedisExecutorRepository) StoreExecutor(_ context.Context, executor *schedulerobjects.Executor) error { +func (r *RedisExecutorRepository) StoreExecutor(_ *armadacontext.Context, executor *schedulerobjects.Executor) error { data, err := proto.Marshal(executor) if err != nil { return errors.Wrap(err, "Error marshalling executor proto") diff --git a/internal/scheduler/database/redis_executor_repository_test.go 
b/internal/scheduler/database/redis_executor_repository_test.go index 6fb48d66c49..bf5b0ea9629 100644 --- a/internal/scheduler/database/redis_executor_repository_test.go +++ b/internal/scheduler/database/redis_executor_repository_test.go @@ -1,7 +1,6 @@ package database import ( - "context" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/slices" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -53,7 +53,7 @@ func TestRedisExecutorRepository_LoadAndSave(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { withRedisExecutorRepository(func(repo *RedisExecutorRepository) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() for _, executor := range tc.executors { err := repo.StoreExecutor(ctx, executor) diff --git a/internal/scheduler/database/util.go b/internal/scheduler/database/util.go index d6539a2a743..af338ee3b42 100644 --- a/internal/scheduler/database/util.go +++ b/internal/scheduler/database/util.go @@ -1,21 +1,20 @@ package database import ( - "context" "embed" _ "embed" "time" "github.com/jackc/pgx/v5/pgxpool" - log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" ) //go:embed migrations/*.sql var fs embed.FS -func Migrate(ctx context.Context, db database.Querier) error { +func Migrate(ctx *armadacontext.Context, db database.Querier) error { start := time.Now() migrations, err := database.ReadMigrations(fs, "migrations") if err != nil { @@ -25,7 +24,7 @@ func Migrate(ctx context.Context, db database.Querier) error { if err != nil { return err } - log.Infof("Updated scheduler database in %s", time.Now().Sub(start)) + ctx.Infof("Updated scheduler database in %s", time.Now().Sub(start)) return nil } diff --git a/internal/scheduler/gang_scheduler.go b/internal/scheduler/gang_scheduler.go index f1a39e31ead..f81e6bcaff4 100644 --- a/internal/scheduler/gang_scheduler.go +++ b/internal/scheduler/gang_scheduler.go @@ -1,17 +1,16 @@ package scheduler import ( - "context" "fmt" "github.com/hashicorp/go-memdb" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/nodedb" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) // GangScheduler schedules one gang at a time. GangScheduler is not aware of queues. 
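// Editor's note: illustrative sketch, not part of this patch. Migrate now logs
// through the context it is given instead of a package-level logrus logger, so
// applying the embedded migrations/*.sql files at startup needs only an
// armadacontext.Context and a handle satisfying database.Querier (assumed here
// to be a *pgxpool.Pool).
package sketches

import (
	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/armadaproject/armada/internal/common/armadacontext"
	schedulerdb "github.com/armadaproject/armada/internal/scheduler/database"
)

func migrateAtStartup(pool *pgxpool.Pool) error {
	ctx := armadacontext.Background()
	// On success this logs "Updated scheduler database in <duration>" via ctx.Infof.
	return schedulerdb.Migrate(ctx, pool)
}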
@@ -39,10 +38,57 @@ func (sch *GangScheduler) SkipUnsuccessfulSchedulingKeyCheck() { sch.skipUnsuccessfulSchedulingKeyCheck = true } -func (sch *GangScheduler) Schedule(ctx context.Context, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { +func (sch *GangScheduler) updateGangSchedulingContextOnFailure(gctx *schedulercontext.GangSchedulingContext, gangAddedToSchedulingContext bool, unschedulableReason string) (err error) { + if gangAddedToSchedulingContext { + failedJobs := util.Map(gctx.JobSchedulingContexts, func(jctx *schedulercontext.JobSchedulingContext) interfaces.LegacySchedulerJob { return jctx.Job }) + if _, err = sch.schedulingContext.EvictGang(failedJobs); err != nil { + return + } + } + + for _, jctx := range gctx.JobSchedulingContexts { + jctx.UnschedulableReason = unschedulableReason + } + + if _, err = sch.schedulingContext.AddGangSchedulingContext(gctx); err != nil { + return + } + + // Register unfeasible scheduling keys. + // + // Only record unfeasible scheduling keys for single-job gangs. + // Since a gang may be unschedulable even if all its members are individually schedulable. + if !sch.skipUnsuccessfulSchedulingKeyCheck && gctx.Cardinality() == 1 { + jctx := gctx.JobSchedulingContexts[0] + schedulingKey := sch.schedulingContext.SchedulingKeyFromLegacySchedulerJob(jctx.Job) + if _, ok := sch.schedulingContext.UnfeasibleSchedulingKeys[schedulingKey]; !ok { + // Keep the first jctx for each unfeasible schedulingKey. + sch.schedulingContext.UnfeasibleSchedulingKeys[schedulingKey] = jctx + } + } + + return +} + +func (sch *GangScheduler) updateGangSchedulingContextOnSuccess(gctx *schedulercontext.GangSchedulingContext, gangAddedToSchedulingContext bool) (err error) { + if gangAddedToSchedulingContext { + jobs := util.Map(gctx.JobSchedulingContexts, func(jctx *schedulercontext.JobSchedulingContext) interfaces.LegacySchedulerJob { return jctx.Job }) + if _, err = sch.schedulingContext.EvictGang(jobs); err != nil { + return + } + } + + if _, err = sch.schedulingContext.AddGangSchedulingContext(gctx); err != nil { + return + } + + return +} + +func (sch *GangScheduler) Schedule(ctx *armadacontext.Context, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { // Exit immediately if this is a new gang and we've hit any round limits. if !gctx.AllJobsEvicted { - if ok, unschedulableReason, err = sch.constraints.CheckRoundConstraints(sch.schedulingContext); err != nil || !ok { + if ok, unschedulableReason, err = sch.constraints.CheckRoundConstraints(sch.schedulingContext, gctx.Queue); err != nil || !ok { return } } @@ -55,60 +101,38 @@ func (sch *GangScheduler) Schedule(ctx context.Context, gctx *schedulercontext.G if err != nil { return } - if !ok { - // Register the job as unschedulable. If the job was added to the context, remove it first. - if gangAddedToSchedulingContext { - jobs := util.Map(gctx.JobSchedulingContexts, func(jctx *schedulercontext.JobSchedulingContext) interfaces.LegacySchedulerJob { return jctx.Job }) - if _, err = sch.schedulingContext.EvictGang(jobs); err != nil { - return - } - } - for _, jctx := range gctx.JobSchedulingContexts { - jctx.UnschedulableReason = unschedulableReason - } - if _, err = sch.schedulingContext.AddGangSchedulingContext(gctx); err != nil { - return - } - // Register unfeasible scheduling keys. - // - // Only record unfeasible scheduling keys for single-job gangs. 
- // Since a gang may be unschedulable even if all its members are individually schedulable. - if !sch.skipUnsuccessfulSchedulingKeyCheck && len(gctx.JobSchedulingContexts) == 1 { - jctx := gctx.JobSchedulingContexts[0] - schedulingKey := sch.schedulingContext.SchedulingKeyFromLegacySchedulerJob(jctx.Job) - if _, ok := sch.schedulingContext.UnfeasibleSchedulingKeys[schedulingKey]; !ok { - // Keep the first jctx for each unfeasible schedulingKey. - sch.schedulingContext.UnfeasibleSchedulingKeys[schedulingKey] = jctx - } + // Update rate-limiters to account for new successfully scheduled jobs. + if ok && !gctx.AllJobsEvicted { + sch.schedulingContext.Limiter.ReserveN(sch.schedulingContext.Started, gctx.Cardinality()) + if qctx := sch.schedulingContext.QueueSchedulingContexts[gctx.Queue]; qctx != nil { + qctx.Limiter.ReserveN(sch.schedulingContext.Started, gctx.Cardinality()) } } + + if ok { + err = sch.updateGangSchedulingContextOnSuccess(gctx, gangAddedToSchedulingContext) + } else { + err = sch.updateGangSchedulingContextOnFailure(gctx, gangAddedToSchedulingContext, unschedulableReason) + } + + return }() - // Try scheduling the gang. if _, err = sch.schedulingContext.AddGangSchedulingContext(gctx); err != nil { return } gangAddedToSchedulingContext = true if !gctx.AllJobsEvicted { - // Check that the job is large enough for this executor. - // This check needs to be here, since it relates to a specific job. - // Only perform limit checks for new jobs to avoid preempting jobs if, e.g., MinimumJobSize changes. - if ok, unschedulableReason = requestsAreLargeEnough(gctx.TotalResourceRequests, sch.constraints.MinimumJobSize); !ok { - return - } - if ok, unschedulableReason, err = sch.constraints.CheckPerQueueAndPriorityClassConstraints( - sch.schedulingContext, - gctx.Queue, - gctx.PriorityClassName, - ); err != nil || !ok { + // Only perform these checks for new jobs to avoid preempting jobs if, e.g., MinimumJobSize changes. + if ok, unschedulableReason, err = sch.constraints.CheckConstraints(sch.schedulingContext, gctx); err != nil || !ok { return } } return sch.trySchedule(ctx, gctx) } -func (sch *GangScheduler) trySchedule(ctx context.Context, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { +func (sch *GangScheduler) trySchedule(ctx *armadacontext.Context, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { // If no node uniformity constraint, try scheduling across all nodes. 
if gctx.NodeUniformityLabel == "" { return sch.tryScheduleGang(ctx, gctx) @@ -175,7 +199,7 @@ func (sch *GangScheduler) trySchedule(ctx context.Context, gctx *schedulercontex return sch.tryScheduleGang(ctx, gctx) } -func (sch *GangScheduler) tryScheduleGang(ctx context.Context, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { +func (sch *GangScheduler) tryScheduleGang(ctx *armadacontext.Context, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { txn := sch.nodeDb.Txn(true) defer txn.Abort() ok, unschedulableReason, err = sch.tryScheduleGangWithTxn(ctx, txn, gctx) @@ -185,23 +209,38 @@ func (sch *GangScheduler) tryScheduleGang(ctx context.Context, gctx *schedulerco return } -func (sch *GangScheduler) tryScheduleGangWithTxn(ctx context.Context, txn *memdb.Txn, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { - if ok, err = sch.nodeDb.ScheduleManyWithTxn(txn, gctx.JobSchedulingContexts); err != nil { - return - } else if !ok { - for _, jctx := range gctx.JobSchedulingContexts { - if jctx.PodSchedulingContext != nil { - // Clear any node bindings on failure to schedule. - jctx.PodSchedulingContext.NodeId = "" +func clearNodeBindings(jctx *schedulercontext.JobSchedulingContext) { + if jctx.PodSchedulingContext != nil { + // Clear any node bindings on failure to schedule. + jctx.PodSchedulingContext.NodeId = "" + } +} + +func (sch *GangScheduler) tryScheduleGangWithTxn(_ *armadacontext.Context, txn *memdb.Txn, gctx *schedulercontext.GangSchedulingContext) (ok bool, unschedulableReason string, err error) { + if ok, err = sch.nodeDb.ScheduleManyWithTxn(txn, gctx.JobSchedulingContexts); err == nil { + if !ok { + for _, jctx := range gctx.JobSchedulingContexts { + clearNodeBindings(jctx) + } + + if gctx.Cardinality() > 1 { + unschedulableReason = "unable to schedule gang since minimum cardinality not met" + } else { + unschedulableReason = "job does not fit on any node" } - } - if len(gctx.JobSchedulingContexts) > 1 { - unschedulableReason = "at least one job in the gang does not fit on any node" } else { - unschedulableReason = "job does not fit on any node" + // When a gang schedules successfully, update state for failed jobs if they exist. 
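// Editor's note: illustrative sketch, not part of this patch. Schedule now
// charges each successfully scheduled gang against golang.org/x/time/rate token
// buckets (one global, one per queue) via ReserveN with the round's start time.
// This standalone example shows the same reservation pattern; the rate and
// burst values are placeholders.
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// On average 100 newly scheduled jobs per second, with bursts of up to 1000.
	limiter := rate.NewLimiter(rate.Limit(100), 1000)

	roundStart := time.Now() // analogous to schedulingContext.Started
	gangCardinality := 32    // analogous to gctx.Cardinality()

	// Charge the whole gang against the bucket at the round's start time.
	res := limiter.ReserveN(roundStart, gangCardinality)
	fmt.Printf("reservation ok=%v, delay until tokens are available: %s\n", res.OK(), res.Delay())

	// Tokens left in the bucket; a negative value means it is in debt and
	// later rounds will schedule fewer new jobs until it refills.
	fmt.Printf("tokens remaining: %.1f\n", limiter.Tokens())
}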
+ for _, jctx := range gctx.JobSchedulingContexts { + if jctx.ShouldFail { + clearNodeBindings(jctx) + jctx.UnschedulableReason = "job does not fit on any node" + } + } } + return } + return } @@ -222,18 +261,5 @@ func meanScheduledAtPriorityFromGctx(gctx *schedulercontext.GangSchedulingContex } sum += jctx.PodSchedulingContext.ScheduledAtPriority } - return float64(sum) / float64(len(gctx.JobSchedulingContexts)), true -} - -func requestsAreLargeEnough(totalResourceRequests, minRequest schedulerobjects.ResourceList) (bool, string) { - if len(minRequest.Resources) == 0 { - return true, "" - } - for t, minQuantity := range minRequest.Resources { - q := totalResourceRequests.Get(t) - if minQuantity.Cmp(q) == 1 { - return false, fmt.Sprintf("job requests %s %s, but the minimum is %s", q.String(), t, minQuantity.String()) - } - } - return true, "" + return float64(sum) / float64(gctx.Cardinality()), true } diff --git a/internal/scheduler/gang_scheduler_test.go b/internal/scheduler/gang_scheduler_test.go index bab895a0421..cce5b2962c3 100644 --- a/internal/scheduler/gang_scheduler_test.go +++ b/internal/scheduler/gang_scheduler_test.go @@ -1,14 +1,15 @@ package scheduler import ( - "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/time/rate" "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" @@ -33,39 +34,74 @@ func TestGangScheduler(t *testing.T) { Gangs [][]*jobdb.Job // Indices of gangs expected to be scheduled. ExpectedScheduledIndices []int + // Cumulative number of jobs we expect to schedule successfully. + // Each index `i` is the expected value when processing gang `i`. 
+ ExpectedScheduledJobs []int }{ "simple success": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32)), }, ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedScheduledJobs: []int{32}, }, "simple failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33)), }, ExpectedScheduledIndices: nil, + ExpectedScheduledJobs: []int{0}, + }, + "simple success where min cardinality is met": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Gangs: [][]*jobdb.Job{ + testfixtures.WithGangAnnotationsAndMinCardinalityJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), 32), + }, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedScheduledJobs: []int{32}, + }, + "simple failure where min cardinality is not met": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Gangs: [][]*jobdb.Job{ + testfixtures.WithGangAnnotationsAndMinCardinalityJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), 33), + }, + ExpectedScheduledIndices: nil, + ExpectedScheduledJobs: []int{0}, }, "one success and one failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedScheduledJobs: []int{32, 32}, + }, + "one success and one failure using min cardinality": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Gangs: [][]*jobdb.Job{ + testfixtures.WithGangAnnotationsAndMinCardinalityJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), 32), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + }, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedScheduledJobs: []int{32, 32}, }, "multiple nodes": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 64), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 64)), }, ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedScheduledJobs: []int{64}, }, "MaximumResourceFractionToSchedule": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -74,11 +110,12 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - 
testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 8), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 16), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 8), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 8)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 16)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 8)), }, ExpectedScheduledIndices: []int{0, 1}, + ExpectedScheduledJobs: []int{8, 24, 24}, }, "MaximumResourceFractionToScheduleByPool": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -90,13 +127,14 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, ExpectedScheduledIndices: []int{0, 1, 2}, + ExpectedScheduledJobs: []int{1, 2, 3, 3, 3}, }, "MaximumResourceFractionToScheduleByPool non-existing pool": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -108,13 +146,14 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, ExpectedScheduledIndices: []int{0, 1, 2, 3}, + ExpectedScheduledJobs: []int{1, 2, 3, 4, 4}, }, "MaximumResourceFractionPerQueue": { SchedulingConfig: testfixtures.WithPerPriorityLimitsConfig( @@ -128,16 +167,17 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass1, 2), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass1, 3), - 
testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass2, 3), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass2, 4), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass3, 4), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass3, 5), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass1, 2)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass1, 3)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass2, 3)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass2, 4)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass3, 4)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass3, 5)), }, ExpectedScheduledIndices: []int{0, 2, 4, 6}, + ExpectedScheduledJobs: []int{1, 1, 3, 3, 6, 6, 10, 10}, }, "resolution has no impact on jobs of size a multiple of the resolution": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( @@ -149,14 +189,15 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), }, ExpectedScheduledIndices: testfixtures.IntRange(0, 5), + ExpectedScheduledJobs: testfixtures.IntRange(1, 6), }, "jobs of size not a multiple of the resolution blocks scheduling new jobs": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( @@ -168,12 +209,13 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), + 
testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), }, ExpectedScheduledIndices: testfixtures.IntRange(0, 2), + ExpectedScheduledJobs: []int{1, 2, 3, 3}, }, "consider all nodes in the bucket": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( @@ -207,9 +249,10 @@ func TestGangScheduler(t *testing.T) { ), ), Gangs: [][]*jobdb.Job{ - testfixtures.N1GpuJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.WithGangAnnotationsJobs(testfixtures.N1GpuJobs("A", testfixtures.PriorityClass0, 1)), }, ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedScheduledJobs: []int{1}, }, "NodeUniformityLabel set but not indexed": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -218,12 +261,14 @@ func TestGangScheduler(t *testing.T) { testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), ), Gangs: [][]*jobdb.Job{ - testfixtures.WithNodeUniformityLabelAnnotationJobs( - "foo", - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - ), + testfixtures.WithGangAnnotationsJobs( + testfixtures.WithNodeUniformityLabelAnnotationJobs( + "foo", + testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), + )), }, ExpectedScheduledIndices: nil, + ExpectedScheduledJobs: []int{0}, }, "NodeUniformityLabel not set": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -232,12 +277,14 @@ func TestGangScheduler(t *testing.T) { ), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Gangs: [][]*jobdb.Job{ - testfixtures.WithNodeUniformityLabelAnnotationJobs( - "foo", - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), - ), + testfixtures.WithGangAnnotationsJobs( + testfixtures.WithNodeUniformityLabelAnnotationJobs( + "foo", + testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), + )), }, ExpectedScheduledIndices: nil, + ExpectedScheduledJobs: []int{0}, }, "NodeUniformityLabel insufficient capacity": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -255,12 +302,12 @@ func TestGangScheduler(t *testing.T) { ), ), Gangs: [][]*jobdb.Job{ - testfixtures.WithNodeUniformityLabelAnnotationJobs( - "foo", - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 3), + testfixtures.WithGangAnnotationsJobs( + testfixtures.WithNodeUniformityLabelAnnotationJobs("foo", testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 3)), ), }, ExpectedScheduledIndices: nil, + ExpectedScheduledJobs: []int{0}, }, "NodeUniformityLabel": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -290,12 +337,14 @@ func TestGangScheduler(t *testing.T) { ), ), Gangs: [][]*jobdb.Job{ - testfixtures.WithNodeUniformityLabelAnnotationJobs( - "foo", - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 4), - ), + testfixtures.WithGangAnnotationsJobs( + testfixtures.WithNodeUniformityLabelAnnotationJobs( + "foo", + testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 4), + )), }, ExpectedScheduledIndices: []int{0}, + ExpectedScheduledJobs: []int{4}, }, } for name, tc := range tests { @@ -340,10 +389,22 @@ func TestGangScheduler(t *testing.T) { tc.SchedulingConfig.Preemption.PriorityClasses, tc.SchedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumSchedulingRate), + tc.SchedulingConfig.MaximumSchedulingBurst, + ), tc.TotalResources, ) for queue, priorityFactor := range priorityFactorByQueue { - err := sctx.AddQueueSchedulingContext(queue, priorityFactor, nil) + 
err := sctx.AddQueueSchedulingContext( + queue, + priorityFactor, + nil, + rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), + tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, + ), + ) require.NoError(t, err) } constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( @@ -356,10 +417,11 @@ func TestGangScheduler(t *testing.T) { require.NoError(t, err) var actualScheduledIndices []int + scheduledGangs := 0 for i, gang := range tc.Gangs { - jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, gang) + jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, gang, GangIdAndCardinalityFromAnnotations) gctx := schedulercontext.NewGangSchedulingContext(jctxs) - ok, reason, err := sch.Schedule(context.Background(), gctx) + ok, reason, err := sch.Schedule(armadacontext.Background(), gctx) require.NoError(t, err) if ok { require.Empty(t, reason) @@ -381,8 +443,36 @@ func TestGangScheduler(t *testing.T) { "node uniformity constraint not met: %s", nodeUniformityLabelValues, ) } + + // Verify any excess jobs that failed have the correct state set + for _, jctx := range jctxs { + if jctx.ShouldFail { + if jctx.PodSchedulingContext != nil { + require.Equal(t, "", jctx.PodSchedulingContext.NodeId) + } + require.Equal(t, "job does not fit on any node", jctx.UnschedulableReason) + } + } + + // Verify accounting + scheduledGangs++ + require.Equal(t, scheduledGangs, sch.schedulingContext.NumScheduledGangs) + require.Equal(t, tc.ExpectedScheduledJobs[i], sch.schedulingContext.NumScheduledJobs) + require.Equal(t, 0, sch.schedulingContext.NumEvictedJobs) } else { require.NotEmpty(t, reason) + + // Verify all jobs have been correctly unbound from nodes + for _, jctx := range jctxs { + if jctx.PodSchedulingContext != nil { + require.Equal(t, "", jctx.PodSchedulingContext.NodeId) + } + } + + // Verify accounting + require.Equal(t, scheduledGangs, sch.schedulingContext.NumScheduledGangs) + require.Equal(t, tc.ExpectedScheduledJobs[i], sch.schedulingContext.NumScheduledJobs) + require.Equal(t, 0, sch.schedulingContext.NumEvictedJobs) } } assert.Equal(t, tc.ExpectedScheduledIndices, actualScheduledIndices) diff --git a/internal/scheduler/interfaces/interfaces.go b/internal/scheduler/interfaces/interfaces.go index 2070f3191e7..121fae18fa4 100644 --- a/internal/scheduler/interfaces/interfaces.go +++ b/internal/scheduler/interfaces/interfaces.go @@ -23,4 +23,5 @@ type LegacySchedulerJob interface { GetAffinity() *v1.Affinity GetTolerations() []v1.Toleration GetResourceRequirements() v1.ResourceRequirements + GetQueueTtlSeconds() int64 } diff --git a/internal/scheduler/jobdb/job.go b/internal/scheduler/jobdb/job.go index f75d0dab3a7..8980b55cd4c 100644 --- a/internal/scheduler/jobdb/job.go +++ b/internal/scheduler/jobdb/job.go @@ -229,6 +229,11 @@ func (job *Job) GetResourceRequirements() v1.ResourceRequirements { return v1.ResourceRequirements{} } +// Needed for compatibility with interfaces.LegacySchedulerJob +func (job *Job) GetQueueTtlSeconds() int64 { + return job.jobSchedulingInfo.QueueTtlSeconds +} + func (job *Job) PodRequirements() *schedulerobjects.PodRequirements { return job.jobSchedulingInfo.GetPodRequirements() } @@ -403,6 +408,28 @@ func (job *Job) RunById(id uuid.UUID) *JobRun { return job.runsById[id] } +// HasQueueTtlExpired returns true if the given job has reached its queueTtl expiry. 
+// Invariants: +// - job.created < `t` +func (job *Job) HasQueueTtlExpired() bool { + ttlSeconds := job.GetQueueTtlSeconds() + if ttlSeconds > 0 { + timeSeconds := time.Now().UTC().Unix() + + // job.Created is populated from the `Submitted` field in postgres, which is a UnixNano time hence the conversion. + createdSeconds := job.created / 1_000_000_000 + duration := timeSeconds - createdSeconds + return duration > ttlSeconds + } else { + return false + } +} + +// HasQueueTtlSet returns true if the given job has a queueTtl set. +func (job *Job) HasQueueTtlSet() bool { + return job.GetQueueTtlSeconds() > 0 +} + // WithJobset returns a copy of the job with the jobset updated. func (job *Job) WithJobset(jobset string) *Job { j := copyJob(*job) @@ -487,3 +514,46 @@ func (j JobPriorityComparer) Compare(a, b *Job) int { // Jobs are equal; return 0. return 0 } + +type JobQueueTtlComparer struct{} + +func max(x, y int64) int64 { + if x < y { + return y + } + return x +} + +// Compare jobs by their remaining queue time before expiry +// Invariants: +// - Job.queueTtl must be > 0 +// - Job.created must be < `t` +func (j JobQueueTtlComparer) Compare(a, b *Job) int { + timeSeconds := time.Now().UTC().Unix() + aDuration := timeSeconds - (a.created / 1_000_000_000) + bDuration := timeSeconds - (b.created / 1_000_000_000) + + aRemaining := max(0, a.GetQueueTtlSeconds()-aDuration) + bRemaining := max(0, b.GetQueueTtlSeconds()-bDuration) + + // If jobs have different ttl remaining, they are ordered by remaining queue ttl - the smallest ttl first + if aRemaining != bRemaining { + if aRemaining < bRemaining { + return -1 + } else { + return 1 + } + } + + // If the jobs have the same remaining time, order based on id. + if a.id != b.id { + if a.id < b.id { + return -1 + } else { + return 1 + } + } + + // Jobs are equal if they have the same remaining ttl and id + return 0 +} diff --git a/internal/scheduler/jobdb/jobdb.go b/internal/scheduler/jobdb/jobdb.go index 700627c67a6..45d02b5b237 100644 --- a/internal/scheduler/jobdb/jobdb.go +++ b/internal/scheduler/jobdb/jobdb.go @@ -9,22 +9,27 @@ import ( "golang.org/x/exp/maps" ) -var emptyList = immutable.NewSortedSet[*Job](JobPriorityComparer{}) +var ( + emptyList = immutable.NewSortedSet[*Job](JobPriorityComparer{}) + emptyQueuedJobsByTtl = immutable.NewSortedSet[*Job](JobQueueTtlComparer{}) +) type JobDb struct { - jobsById *immutable.Map[string, *Job] - jobsByRunId *immutable.Map[uuid.UUID, string] - jobsByQueue map[string]immutable.SortedSet[*Job] - copyMutex sync.Mutex - writerMutex sync.Mutex + jobsById *immutable.Map[string, *Job] + jobsByRunId *immutable.Map[uuid.UUID, string] + jobsByQueue map[string]immutable.SortedSet[*Job] + queuedJobsByTtl *immutable.SortedSet[*Job] + copyMutex sync.Mutex + writerMutex sync.Mutex } func NewJobDb() *JobDb { return &JobDb{ - jobsById: immutable.NewMap[string, *Job](nil), - jobsByRunId: immutable.NewMap[uuid.UUID, string](&UUIDHasher{}), - jobsByQueue: map[string]immutable.SortedSet[*Job]{}, - copyMutex: sync.Mutex{}, + jobsById: immutable.NewMap[string, *Job](nil), + jobsByRunId: immutable.NewMap[uuid.UUID, string](&UUIDHasher{}), + jobsByQueue: map[string]immutable.SortedSet[*Job]{}, + queuedJobsByTtl: &emptyQueuedJobsByTtl, + copyMutex: sync.Mutex{}, } } @@ -45,6 +50,9 @@ func (jobDb *JobDb) Upsert(txn *Txn, jobs []*Job) error { if ok { txn.jobsByQueue[existingJob.queue] = existingQueue.Delete(existingJob) } + + newQueuedJobsByTtl := txn.queuedJobsByTtl.Delete(existingJob) + txn.queuedJobsByTtl = &newQueuedJobsByTtl 
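// Editor's note: illustrative sketch, not part of this patch. It restates the
// queue-TTL arithmetic used by HasQueueTtlExpired and JobQueueTtlComparer in
// standalone form: job.created holds a Unix-nanosecond submission timestamp,
// so it is divided by 1e9 before being compared against a TTL in seconds.
package main

import (
	"fmt"
	"time"
)

// queueTtlExpired mirrors Job.HasQueueTtlExpired for a job submitted at
// createdNanos with a TTL of ttlSeconds (a TTL of 0 means "never expire").
func queueTtlExpired(createdNanos int64, ttlSeconds int64, now time.Time) bool {
	if ttlSeconds <= 0 {
		return false
	}
	elapsedSeconds := now.UTC().Unix() - createdNanos/1_000_000_000
	return elapsedSeconds > ttlSeconds
}

func main() {
	now := time.Now()
	submitted := now.Add(-90 * time.Second).UnixNano()

	fmt.Println(queueTtlExpired(submitted, 60, now))  // true: 90s queued > 60s TTL
	fmt.Println(queueTtlExpired(submitted, 300, now)) // false: 210s of TTL remain
}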
} } } @@ -101,6 +109,11 @@ func (jobDb *JobDb) Upsert(txn *Txn, jobs []*Job) error { } newQueue = newQueue.Add(job) txn.jobsByQueue[job.queue] = newQueue + + if job.HasQueueTtlSet() { + queuedJobsByTtl := txn.queuedJobsByTtl.Add(job) + txn.queuedJobsByTtl = &queuedJobsByTtl + } } } }() @@ -141,6 +154,11 @@ func (jobDb *JobDb) QueuedJobs(txn *Txn, queue string) *immutable.SortedSetItera } } +// QueuedJobsByTtl returns an iterator for jobs ordered by queue ttl time - the closest to expiry first +func (jobDb *JobDb) QueuedJobsByTtl(txn *Txn) *immutable.SortedSetIterator[*Job] { + return txn.queuedJobsByTtl.Iterator() +} + // GetAll returns all jobs in the database. // The Jobs returned by this function *must not* be subsequently modified func (jobDb *JobDb) GetAll(txn *Txn) []*Job { @@ -171,6 +189,12 @@ func (jobDb *JobDb) BatchDelete(txn *Txn, ids []string) error { newQueue := queue.Delete(job) txn.jobsByQueue[job.queue] = newQueue } + + // We only add these jobs into the collection if it has a queueTtl set, hence only remove if this is set. + if job.HasQueueTtlSet() { + newQueuedJobsByExpiry := txn.queuedJobsByTtl.Delete(job) + txn.queuedJobsByTtl = &newQueuedJobsByExpiry + } } } return nil @@ -192,12 +216,13 @@ func (jobDb *JobDb) ReadTxn() *Txn { jobDb.copyMutex.Lock() defer jobDb.copyMutex.Unlock() return &Txn{ - readOnly: true, - jobsById: jobDb.jobsById, - jobsByRunId: jobDb.jobsByRunId, - jobsByQueue: jobDb.jobsByQueue, - active: true, - jobDb: jobDb, + readOnly: true, + jobsById: jobDb.jobsById, + jobsByRunId: jobDb.jobsByRunId, + jobsByQueue: jobDb.jobsByQueue, + queuedJobsByTtl: jobDb.queuedJobsByTtl, + active: true, + jobDb: jobDb, } } @@ -209,12 +234,13 @@ func (jobDb *JobDb) WriteTxn() *Txn { jobDb.copyMutex.Lock() defer jobDb.copyMutex.Unlock() return &Txn{ - readOnly: false, - jobsById: jobDb.jobsById, - jobsByRunId: jobDb.jobsByRunId, - jobsByQueue: maps.Clone(jobDb.jobsByQueue), - active: true, - jobDb: jobDb, + readOnly: false, + jobsById: jobDb.jobsById, + jobsByRunId: jobDb.jobsByRunId, + jobsByQueue: maps.Clone(jobDb.jobsByQueue), + queuedJobsByTtl: jobDb.queuedJobsByTtl, + active: true, + jobDb: jobDb, } } @@ -223,12 +249,13 @@ func (jobDb *JobDb) WriteTxn() *Txn { // Write transactions also allow callers to perform write operations that will not be visible to other users // until the transaction is committed. 
type Txn struct { - readOnly bool - jobsById *immutable.Map[string, *Job] - jobsByRunId *immutable.Map[uuid.UUID, string] - jobsByQueue map[string]immutable.SortedSet[*Job] - jobDb *JobDb - active bool + readOnly bool + jobsById *immutable.Map[string, *Job] + jobsByRunId *immutable.Map[uuid.UUID, string] + jobsByQueue map[string]immutable.SortedSet[*Job] + queuedJobsByTtl *immutable.SortedSet[*Job] + jobDb *JobDb + active bool } func (txn *Txn) Commit() { @@ -241,6 +268,7 @@ func (txn *Txn) Commit() { txn.jobDb.jobsById = txn.jobsById txn.jobDb.jobsByRunId = txn.jobsByRunId txn.jobDb.jobsByQueue = txn.jobsByQueue + txn.jobDb.queuedJobsByTtl = txn.queuedJobsByTtl txn.active = false } diff --git a/internal/scheduler/jobdb/jobdb_test.go b/internal/scheduler/jobdb/jobdb_test.go index 3d73d0bdd1f..1a1a63717eb 100644 --- a/internal/scheduler/jobdb/jobdb_test.go +++ b/internal/scheduler/jobdb/jobdb_test.go @@ -200,11 +200,12 @@ func TestJobDb_TestBatchDelete(t *testing.T) { func newJob() *Job { return &Job{ - id: util.NewULID(), - queue: "test-queue", - priority: 0, - created: 0, - queued: false, - runsById: map[uuid.UUID]*JobRun{}, + id: util.NewULID(), + queue: "test-queue", + priority: 0, + created: 0, + queued: false, + runsById: map[uuid.UUID]*JobRun{}, + jobSchedulingInfo: schedulingInfo, } } diff --git a/internal/scheduler/jobiteration.go b/internal/scheduler/jobiteration.go index 7b232edc141..04dd63a6490 100644 --- a/internal/scheduler/jobiteration.go +++ b/internal/scheduler/jobiteration.go @@ -1,13 +1,12 @@ package scheduler import ( - "context" "sync" "golang.org/x/exp/maps" "golang.org/x/exp/slices" - "golang.org/x/sync/errgroup" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/scheduler/interfaces" ) @@ -136,7 +135,7 @@ func (repo *InMemoryJobRepository) GetExistingJobsByIds(jobIds []string) ([]inte return rv, nil } -func (repo *InMemoryJobRepository) GetJobIterator(ctx context.Context, queue string) (JobIterator, error) { +func (repo *InMemoryJobRepository) GetJobIterator(ctx *armadacontext.Context, queue string) (JobIterator, error) { repo.mu.Lock() defer repo.mu.Unlock() return NewInMemoryJobIterator(slices.Clone(repo.jobsByQueue[queue])), nil @@ -145,14 +144,14 @@ func (repo *InMemoryJobRepository) GetJobIterator(ctx context.Context, queue str // QueuedJobsIterator is an iterator over all jobs in a queue. // It lazily loads jobs in batches from Redis asynch. type QueuedJobsIterator struct { - ctx context.Context + ctx *armadacontext.Context err error c chan interfaces.LegacySchedulerJob } -func NewQueuedJobsIterator(ctx context.Context, queue string, repo JobRepository) (*QueuedJobsIterator, error) { +func NewQueuedJobsIterator(ctx *armadacontext.Context, queue string, repo JobRepository) (*QueuedJobsIterator, error) { batchSize := 16 - g, ctx := errgroup.WithContext(ctx) + g, ctx := armadacontext.ErrGroup(ctx) it := &QueuedJobsIterator{ ctx: ctx, c: make(chan interfaces.LegacySchedulerJob, 2*batchSize), // 2x batchSize to load one batch async. @@ -190,7 +189,7 @@ func (it *QueuedJobsIterator) Next() (interfaces.LegacySchedulerJob, error) { // queuedJobsIteratorLoader loads jobs from Redis lazily. // Used with QueuedJobsIterator. 
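// Editor's note: illustrative sketch, not part of this patch. It shows how a
// caller could use the new QueuedJobsByTtl iterator to collect jobs whose queue
// TTL has expired: the backing sorted set orders jobs closest-to-expiry first,
// so the scan can stop at the first job that has not yet expired. The iterator
// is assumed to follow the (value, ok) Next convention of the immutable
// package's iterators.
package sketches

import (
	"github.com/armadaproject/armada/internal/scheduler/jobdb"
)

func expiredQueuedJobs(db *jobdb.JobDb) []*jobdb.Job {
	txn := db.ReadTxn()
	it := db.QueuedJobsByTtl(txn)

	var expired []*jobdb.Job
	for job, ok := it.Next(); ok; job, ok = it.Next() {
		if !job.HasQueueTtlExpired() {
			// Ordered by remaining TTL, so no later job can be expired either.
			break
		}
		expired = append(expired, job)
	}
	return expired
}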
-func queuedJobsIteratorLoader(ctx context.Context, jobIds []string, ch chan interfaces.LegacySchedulerJob, batchSize int, repo JobRepository) error { +func queuedJobsIteratorLoader(ctx *armadacontext.Context, jobIds []string, ch chan interfaces.LegacySchedulerJob, batchSize int, repo JobRepository) error { defer close(ch) batch := make([]string, batchSize) for i, jobId := range jobIds { diff --git a/internal/scheduler/jobiteration_test.go b/internal/scheduler/jobiteration_test.go index 42133f0ba05..a5990fa3fc4 100644 --- a/internal/scheduler/jobiteration_test.go +++ b/internal/scheduler/jobiteration_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" @@ -87,7 +88,7 @@ func TestMultiJobsIterator_TwoQueues(t *testing.T) { expected = append(expected, job.Id) } - ctx := context.Background() + ctx := armadacontext.Background() its := make([]JobIterator, 3) for i, queue := range []string{"A", "B", "C"} { it, err := NewQueuedJobsIterator(ctx, queue, repo) @@ -121,7 +122,7 @@ func TestQueuedJobsIterator_OneQueue(t *testing.T) { expected = append(expected, job.Id) } - ctx := context.Background() + ctx := armadacontext.Background() it, err := NewQueuedJobsIterator(ctx, "A", repo) if !assert.NoError(t, err) { return @@ -146,7 +147,7 @@ func TestQueuedJobsIterator_ExceedsBufferSize(t *testing.T) { expected = append(expected, job.Id) } - ctx := context.Background() + ctx := armadacontext.Background() it, err := NewQueuedJobsIterator(ctx, "A", repo) if !assert.NoError(t, err) { return @@ -171,7 +172,7 @@ func TestQueuedJobsIterator_ManyJobs(t *testing.T) { expected = append(expected, job.Id) } - ctx := context.Background() + ctx := armadacontext.Background() it, err := NewQueuedJobsIterator(ctx, "A", repo) if !assert.NoError(t, err) { return @@ -200,7 +201,7 @@ func TestCreateQueuedJobsIterator_TwoQueues(t *testing.T) { repo.Enqueue(job) } - ctx := context.Background() + ctx := armadacontext.Background() it, err := NewQueuedJobsIterator(ctx, "A", repo) if !assert.NoError(t, err) { return @@ -223,7 +224,7 @@ func TestCreateQueuedJobsIterator_RespectsTimeout(t *testing.T) { repo.Enqueue(job) } - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Millisecond) time.Sleep(20 * time.Millisecond) defer cancel() it, err := NewQueuedJobsIterator(ctx, "A", repo) @@ -248,7 +249,7 @@ func TestCreateQueuedJobsIterator_NilOnEmpty(t *testing.T) { repo.Enqueue(job) } - ctx := context.Background() + ctx := armadacontext.Background() it, err := NewQueuedJobsIterator(ctx, "A", repo) if !assert.NoError(t, err) { return @@ -291,7 +292,7 @@ func (repo *mockJobRepository) Enqueue(job *api.Job) { repo.jobsById[job.Id] = job } -func (repo *mockJobRepository) GetJobIterator(ctx context.Context, queue string) (JobIterator, error) { +func (repo *mockJobRepository) GetJobIterator(ctx *armadacontext.Context, queue string) (JobIterator, error) { return NewQueuedJobsIterator(ctx, queue, repo) } diff --git a/internal/scheduler/leader.go b/internal/scheduler/leader.go index a0c8b8a85f6..714cf243f52 100644 --- a/internal/scheduler/leader.go +++ b/internal/scheduler/leader.go @@ -6,12 +6,12 @@ import ( "sync/atomic" "github.com/google/uuid" - 
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" + "github.com/armadaproject/armada/internal/common/armadacontext" schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" ) @@ -23,7 +23,7 @@ type LeaderController interface { // Returns true if the token is a leader and false otherwise ValidateToken(tok LeaderToken) bool // Run starts the controller. This is a blocking call which will return when the provided context is cancelled - Run(ctx context.Context) error + Run(ctx *armadacontext.Context) error // GetLeaderReport returns a report about the current leader GetLeaderReport() LeaderReport } @@ -85,14 +85,14 @@ func (lc *StandaloneLeaderController) ValidateToken(tok LeaderToken) bool { return false } -func (lc *StandaloneLeaderController) Run(ctx context.Context) error { +func (lc *StandaloneLeaderController) Run(ctx *armadacontext.Context) error { return nil } // LeaseListener allows clients to listen for lease events. type LeaseListener interface { // Called when the client has started leading. - onStartedLeading(context.Context) + onStartedLeading(*armadacontext.Context) // Called when the client has stopped leading, onStoppedLeading() } @@ -138,16 +138,14 @@ func (lc *KubernetesLeaderController) ValidateToken(tok LeaderToken) bool { // Run starts the controller. // This is a blocking call that returns when the provided context is cancelled. -func (lc *KubernetesLeaderController) Run(ctx context.Context) error { - log := ctxlogrus.Extract(ctx) - log = log.WithField("service", "KubernetesLeaderController") +func (lc *KubernetesLeaderController) Run(ctx *armadacontext.Context) error { for { select { case <-ctx.Done(): return ctx.Err() default: lock := lc.getNewLock() - log.Infof("attempting to become leader") + ctx.Infof("attempting to become leader") leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ Lock: lock, ReleaseOnCancel: true, @@ -156,14 +154,14 @@ func (lc *KubernetesLeaderController) Run(ctx context.Context) error { RetryPeriod: lc.config.RetryPeriod, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(c context.Context) { - log.Infof("I am now leader") + ctx.Infof("I am now leader") lc.token.Store(NewLeaderToken()) for _, listener := range lc.listeners { listener.onStartedLeading(ctx) } }, OnStoppedLeading: func() { - log.Infof("I am no longer leader") + ctx.Infof("I am no longer leader") lc.token.Store(InvalidLeaderToken()) for _, listener := range lc.listeners { listener.onStoppedLeading() @@ -176,7 +174,7 @@ func (lc *KubernetesLeaderController) Run(ctx context.Context) error { }, }, }) - log.Infof("leader election round finished") + ctx.Infof("leader election round finished") } } } diff --git a/internal/scheduler/leader_client_test.go b/internal/scheduler/leader_client_test.go index e8909356402..31ba46a8913 100644 --- a/internal/scheduler/leader_client_test.go +++ b/internal/scheduler/leader_client_test.go @@ -1,11 +1,11 @@ package scheduler import ( - "context" "testing" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/pkg/client" ) @@ -91,7 +91,7 @@ func (f *FakeLeaderController) ValidateToken(tok LeaderToken) bool { return 
f.IsCurrentlyLeader } -func (f *FakeLeaderController) Run(ctx context.Context) error { +func (f *FakeLeaderController) Run(_ *armadacontext.Context) error { return nil } diff --git a/internal/scheduler/leader_metrics.go b/internal/scheduler/leader_metrics.go index cc02157504e..d5d4e62f535 100644 --- a/internal/scheduler/leader_metrics.go +++ b/internal/scheduler/leader_metrics.go @@ -1,11 +1,11 @@ package scheduler import ( - "context" "sync" "github.com/prometheus/client_golang/prometheus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/metrics" ) @@ -29,7 +29,7 @@ func NewLeaderStatusMetricsCollector(currentInstanceName string) *LeaderStatusMe } } -func (l *LeaderStatusMetricsCollector) onStartedLeading(context.Context) { +func (l *LeaderStatusMetricsCollector) onStartedLeading(*armadacontext.Context) { l.lock.Lock() defer l.lock.Unlock() diff --git a/internal/scheduler/leader_metrics_test.go b/internal/scheduler/leader_metrics_test.go index fec5d4e5d08..8132179afbd 100644 --- a/internal/scheduler/leader_metrics_test.go +++ b/internal/scheduler/leader_metrics_test.go @@ -1,11 +1,12 @@ package scheduler import ( - "context" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" ) const testInstanceName = "instance-1" @@ -31,7 +32,7 @@ func TestLeaderStatusMetrics_HandlesLeaderChanges(t *testing.T) { assert.Equal(t, actual[0], isNotLeaderMetric) // start leading - collector.onStartedLeading(context.Background()) + collector.onStartedLeading(armadacontext.Background()) actual = getCurrentMetrics(collector) assert.Len(t, actual, 1) assert.Equal(t, actual[0], isLeaderMetric) diff --git a/internal/scheduler/leader_proxying_reports_server_test.go b/internal/scheduler/leader_proxying_reports_server_test.go index 5fc1874d210..2b83a02da28 100644 --- a/internal/scheduler/leader_proxying_reports_server_test.go +++ b/internal/scheduler/leader_proxying_reports_server_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/grpc" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -48,7 +49,7 @@ func TestLeaderProxyingSchedulingReportsServer_GetJobReports(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() sut, clientProvider, jobReportsServer, jobReportsClient := setupLeaderProxyingSchedulerReportsServerTest(t) @@ -113,7 +114,7 @@ func TestLeaderProxyingSchedulingReportsServer_GetSchedulingReport(t *testing.T) } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() sut, clientProvider, jobReportsServer, jobReportsClient := setupLeaderProxyingSchedulerReportsServerTest(t) @@ -178,7 +179,7 @@ func TestLeaderProxyingSchedulingReportsServer_GetQueueReport(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() sut, clientProvider, 
jobReportsServer, jobReportsClient := setupLeaderProxyingSchedulerReportsServerTest(t) diff --git a/internal/scheduler/leader_test.go b/internal/scheduler/leader_test.go index 1790c9518b5..17fb468b0cf 100644 --- a/internal/scheduler/leader_test.go +++ b/internal/scheduler/leader_test.go @@ -12,6 +12,7 @@ import ( v1 "k8s.io/api/coordination/v1" "k8s.io/utils/pointer" + "github.com/armadaproject/armada/internal/common/armadacontext" schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" ) @@ -108,7 +109,7 @@ func TestK8sLeaderController_BecomingLeader(t *testing.T) { controller := NewKubernetesLeaderController(testLeaderConfig(), client) testListener := NewTestLeaseListener(controller) controller.RegisterListener(testListener) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) go func() { err := controller.Run(ctx) assert.ErrorIs(t, err, context.Canceled) @@ -184,7 +185,7 @@ func (t *TestLeaseListener) GetMessages() []LeaderToken { return append([]LeaderToken(nil), t.tokens...) } -func (t *TestLeaseListener) onStartedLeading(_ context.Context) { +func (t *TestLeaseListener) onStartedLeading(_ *armadacontext.Context) { t.handleNewToken() } diff --git a/internal/scheduler/metrics.go b/internal/scheduler/metrics.go index 15da0d6c478..168295ff91f 100644 --- a/internal/scheduler/metrics.go +++ b/internal/scheduler/metrics.go @@ -1,16 +1,16 @@ package scheduler import ( - "context" "strings" "sync/atomic" "time" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/logging" commonmetrics "github.com/armadaproject/armada/internal/common/metrics" "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/internal/scheduler/database" @@ -76,18 +76,20 @@ func NewMetricsCollector( } // Run enters s a loop which updates the metrics every refreshPeriod until the supplied context is cancelled -func (c *MetricsCollector) Run(ctx context.Context) error { +func (c *MetricsCollector) Run(ctx *armadacontext.Context) error { ticker := c.clock.NewTicker(c.refreshPeriod) - log.Infof("Will update metrics every %s", c.refreshPeriod) + ctx.Infof("Will update metrics every %s", c.refreshPeriod) for { select { case <-ctx.Done(): - log.Debugf("Context cancelled, returning..") + ctx.Debugf("Context cancelled, returning..") return nil case <-ticker.C(): err := c.refresh(ctx) if err != nil { - log.WithError(err).Warnf("error refreshing metrics state") + logging. + WithStacktrace(ctx, err). + Warnf("error refreshing metrics state") } } } @@ -108,8 +110,8 @@ func (c *MetricsCollector) Collect(metrics chan<- prometheus.Metric) { } } -func (c *MetricsCollector) refresh(ctx context.Context) error { - log.Debugf("Refreshing prometheus metrics") +func (c *MetricsCollector) refresh(ctx *armadacontext.Context) error { + ctx.Debugf("Refreshing prometheus metrics") start := time.Now() queueMetrics, err := c.updateQueueMetrics(ctx) if err != nil { @@ -121,11 +123,11 @@ func (c *MetricsCollector) refresh(ctx context.Context) error { } allMetrics := append(queueMetrics, clusterMetrics...) 
c.state.Store(allMetrics) - log.Debugf("Refreshed prometheus metrics in %s", time.Since(start)) + ctx.Debugf("Refreshed prometheus metrics in %s", time.Since(start)) return nil } -func (c *MetricsCollector) updateQueueMetrics(ctx context.Context) ([]prometheus.Metric, error) { +func (c *MetricsCollector) updateQueueMetrics(ctx *armadacontext.Context) ([]prometheus.Metric, error) { queues, err := c.queueRepository.GetAllQueues() if err != nil { return nil, err @@ -154,7 +156,7 @@ func (c *MetricsCollector) updateQueueMetrics(ctx context.Context) ([]prometheus } qs, ok := provider.queueStates[job.Queue()] if !ok { - log.Warnf("job %s is in queue %s, but this queue does not exist; skipping", job.Id(), job.Queue()) + ctx.Warnf("job %s is in queue %s, but this queue does not exist; skipping", job.Id(), job.Queue()) continue } @@ -181,7 +183,7 @@ func (c *MetricsCollector) updateQueueMetrics(ctx context.Context) ([]prometheus timeInState = currentTime.Sub(time.Unix(0, run.Created())) recorder = qs.runningJobRecorder } else { - log.Warnf("Job %s is marked as leased but has no runs", job.Id()) + ctx.Warnf("Job %s is marked as leased but has no runs", job.Id()) } recorder.RecordJobRuntime(pool, priorityClass, timeInState) recorder.RecordResources(pool, priorityClass, jobResources) @@ -212,7 +214,7 @@ type clusterMetricKey struct { nodeType string } -func (c *MetricsCollector) updateClusterMetrics(ctx context.Context) ([]prometheus.Metric, error) { +func (c *MetricsCollector) updateClusterMetrics(ctx *armadacontext.Context) ([]prometheus.Metric, error) { executors, err := c.executorRepository.GetExecutors(ctx) if err != nil { return nil, err diff --git a/internal/scheduler/metrics_test.go b/internal/scheduler/metrics_test.go index 52c89eb6641..0bbcd9090c7 100644 --- a/internal/scheduler/metrics_test.go +++ b/internal/scheduler/metrics_test.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "testing" "time" @@ -12,6 +11,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/clock" + "github.com/armadaproject/armada/internal/common/armadacontext" commonmetrics "github.com/armadaproject/armada/internal/common/metrics" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/jobdb" @@ -86,7 +86,7 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) testClock := clock.NewFakeClock(testfixtures.BaseTime) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() // set up job db with initial jobs @@ -236,7 +236,7 @@ func TestMetricsCollector_TestCollect_ClusterMetrics(t *testing.T) { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) testClock := clock.NewFakeClock(testfixtures.BaseTime) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() // set up job db with initial jobs @@ -303,7 +303,7 @@ type MockPoolAssigner struct { poolsById map[string]string } -func (m MockPoolAssigner) Refresh(_ context.Context) error { +func (m MockPoolAssigner) Refresh(_ *armadacontext.Context) error { return nil } diff --git a/internal/scheduler/mocks/mock_repositories.go b/internal/scheduler/mocks/mock_repositories.go index 9a8f6efee1a..c2924402b9b 100644 --- a/internal/scheduler/mocks/mock_repositories.go 
+++ b/internal/scheduler/mocks/mock_repositories.go @@ -5,10 +5,10 @@ package schedulermocks import ( - context "context" reflect "reflect" time "time" + armadacontext "github.com/armadaproject/armada/internal/common/armadacontext" database "github.com/armadaproject/armada/internal/scheduler/database" schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" armadaevents "github.com/armadaproject/armada/pkg/armadaevents" @@ -40,7 +40,7 @@ func (m *MockExecutorRepository) EXPECT() *MockExecutorRepositoryMockRecorder { } // GetExecutors mocks base method. -func (m *MockExecutorRepository) GetExecutors(arg0 context.Context) ([]*schedulerobjects.Executor, error) { +func (m *MockExecutorRepository) GetExecutors(arg0 *armadacontext.Context) ([]*schedulerobjects.Executor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetExecutors", arg0) ret0, _ := ret[0].([]*schedulerobjects.Executor) @@ -55,7 +55,7 @@ func (mr *MockExecutorRepositoryMockRecorder) GetExecutors(arg0 interface{}) *go } // GetLastUpdateTimes mocks base method. -func (m *MockExecutorRepository) GetLastUpdateTimes(arg0 context.Context) (map[string]time.Time, error) { +func (m *MockExecutorRepository) GetLastUpdateTimes(arg0 *armadacontext.Context) (map[string]time.Time, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLastUpdateTimes", arg0) ret0, _ := ret[0].(map[string]time.Time) @@ -70,7 +70,7 @@ func (mr *MockExecutorRepositoryMockRecorder) GetLastUpdateTimes(arg0 interface{ } // StoreExecutor mocks base method. -func (m *MockExecutorRepository) StoreExecutor(arg0 context.Context, arg1 *schedulerobjects.Executor) error { +func (m *MockExecutorRepository) StoreExecutor(arg0 *armadacontext.Context, arg1 *schedulerobjects.Executor) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StoreExecutor", arg0, arg1) ret0, _ := ret[0].(error) @@ -145,7 +145,7 @@ func (m *MockJobRepository) EXPECT() *MockJobRepositoryMockRecorder { } // CountReceivedPartitions mocks base method. -func (m *MockJobRepository) CountReceivedPartitions(arg0 context.Context, arg1 uuid.UUID) (uint32, error) { +func (m *MockJobRepository) CountReceivedPartitions(arg0 *armadacontext.Context, arg1 uuid.UUID) (uint32, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CountReceivedPartitions", arg0, arg1) ret0, _ := ret[0].(uint32) @@ -160,7 +160,7 @@ func (mr *MockJobRepositoryMockRecorder) CountReceivedPartitions(arg0, arg1 inte } // FetchJobRunErrors mocks base method. -func (m *MockJobRepository) FetchJobRunErrors(arg0 context.Context, arg1 []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) { +func (m *MockJobRepository) FetchJobRunErrors(arg0 *armadacontext.Context, arg1 []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchJobRunErrors", arg0, arg1) ret0, _ := ret[0].(map[uuid.UUID]*armadaevents.Error) @@ -175,7 +175,7 @@ func (mr *MockJobRepositoryMockRecorder) FetchJobRunErrors(arg0, arg1 interface{ } // FetchJobRunLeases mocks base method. 
-func (m *MockJobRepository) FetchJobRunLeases(arg0 context.Context, arg1 string, arg2 uint, arg3 []uuid.UUID) ([]*database.JobRunLease, error) { +func (m *MockJobRepository) FetchJobRunLeases(arg0 *armadacontext.Context, arg1 string, arg2 uint, arg3 []uuid.UUID) ([]*database.JobRunLease, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchJobRunLeases", arg0, arg1, arg2, arg3) ret0, _ := ret[0].([]*database.JobRunLease) @@ -190,7 +190,7 @@ func (mr *MockJobRepositoryMockRecorder) FetchJobRunLeases(arg0, arg1, arg2, arg } // FetchJobUpdates mocks base method. -func (m *MockJobRepository) FetchJobUpdates(arg0 context.Context, arg1, arg2 int64) ([]database.Job, []database.Run, error) { +func (m *MockJobRepository) FetchJobUpdates(arg0 *armadacontext.Context, arg1, arg2 int64) ([]database.Job, []database.Run, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchJobUpdates", arg0, arg1, arg2) ret0, _ := ret[0].([]database.Job) @@ -206,7 +206,7 @@ func (mr *MockJobRepositoryMockRecorder) FetchJobUpdates(arg0, arg1, arg2 interf } // FindInactiveRuns mocks base method. -func (m *MockJobRepository) FindInactiveRuns(arg0 context.Context, arg1 []uuid.UUID) ([]uuid.UUID, error) { +func (m *MockJobRepository) FindInactiveRuns(arg0 *armadacontext.Context, arg1 []uuid.UUID) ([]uuid.UUID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FindInactiveRuns", arg0, arg1) ret0, _ := ret[0].([]uuid.UUID) diff --git a/internal/scheduler/nodedb/nodedb.go b/internal/scheduler/nodedb/nodedb.go index a2351d1e75f..e529d870ff6 100644 --- a/internal/scheduler/nodedb/nodedb.go +++ b/internal/scheduler/nodedb/nodedb.go @@ -488,13 +488,9 @@ func NodeJobDiff(txnA, txnB *memdb.Txn) (map[string]*Node, map[string]*Node, err return preempted, scheduled, nil } -// ScheduleMany assigns a set of jobs to nodes. The assignment is atomic, i.e., either all jobs are -// successfully assigned to nodes or none are. The returned bool indicates whether assignment -// succeeded (true) or not (false). -// -// This method sets the PodSchedulingContext field on each JobSchedulingContext that it attempts to -// schedule; if it returns early (e.g., because it finds an unschedulable JobSchedulingContext), -// then this field will not be set on the remaining items. +// ScheduleMany assigns a set of jobs to nodes. +// If N jobs can be scheduled, where N >= `GangMinCardinality`, it will return true, nil and set ShouldFail on any excess jobs. +// Otherwise, it will return false, nil. // TODO: Pass through contexts to support timeouts. func (nodeDb *NodeDb) ScheduleMany(jctxs []*schedulercontext.JobSchedulingContext) (bool, error) { txn := nodeDb.db.Txn(true) @@ -507,25 +503,42 @@ func (nodeDb *NodeDb) ScheduleMany(jctxs []*schedulercontext.JobSchedulingContex return ok, err } +// TODO: Remove me once we re-phrase nodedb in terms of gang context (and therefore can just take this value from the gang scheduling context provided) +func gangMinCardinality(jctxs []*schedulercontext.JobSchedulingContext) int { + if len(jctxs) > 0 { + return jctxs[0].GangMinCardinality + } else { + return 1 + } +} + func (nodeDb *NodeDb) ScheduleManyWithTxn(txn *memdb.Txn, jctxs []*schedulercontext.JobSchedulingContext) (bool, error) { // Attempt to schedule pods one by one in a transaction. 
+ cumulativeScheduled := 0 + gangMinCardinality := gangMinCardinality(jctxs) + for _, jctx := range jctxs { + // Defensively reset `ShouldFail` (this should always be false as the state is re-constructed per cycle but just in case) + jctx.ShouldFail = false + node, err := nodeDb.SelectNodeForJobWithTxn(txn, jctx) if err != nil { return false, err } + if node == nil { + // Indicates that when the min cardinality is met, we should fail this job back to the client. + jctx.ShouldFail = true + continue + } + // If we found a node for this pod, bind it and continue to the next pod. - if node != nil { - if node, err := bindJobToNode(nodeDb.priorityClasses, jctx.Job, node); err != nil { + if node, err := bindJobToNode(nodeDb.priorityClasses, jctx.Job, node); err != nil { + return false, err + } else { + if err := nodeDb.UpsertWithTxn(txn, node); err != nil { return false, err - } else { - if err := nodeDb.UpsertWithTxn(txn, node); err != nil { - return false, err - } } - } else { - return false, nil } // Once a job is scheduled, it should no longer be considered for preemption. @@ -534,7 +547,14 @@ func (nodeDb *NodeDb) ScheduleManyWithTxn(txn *memdb.Txn, jctxs []*schedulercont return false, err } } + + cumulativeScheduled++ } + + if cumulativeScheduled < gangMinCardinality { + return false, nil + } + return true, nil } diff --git a/internal/scheduler/nodedb/nodedb_test.go b/internal/scheduler/nodedb/nodedb_test.go index 50f7f9c5a9c..afeeadecb0e 100644 --- a/internal/scheduler/nodedb/nodedb_test.go +++ b/internal/scheduler/nodedb/nodedb_test.go @@ -2,14 +2,17 @@ package nodedb import ( "fmt" + "strconv" "testing" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/armadaproject/armada/internal/armada/configuration" armadamaps "github.com/armadaproject/armada/internal/common/maps" schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" @@ -71,7 +74,7 @@ func TestSelectNodeForPod_NodeIdLabel_Success(t *testing.T) { map[string]string{schedulerconfig.NodeIdLabel: nodeId}, testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), ) - jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs) + jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs, func(_ map[string]string) (string, int, int, bool, error) { return "", 1, 1, true, nil }) for _, jctx := range jctxs { txn := db.Txn(false) node, err := db.SelectNodeForJobWithTxn(txn, jctx) @@ -100,7 +103,7 @@ func TestSelectNodeForPod_NodeIdLabel_Failure(t *testing.T) { map[string]string{schedulerconfig.NodeIdLabel: "this node does not exist"}, testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), ) - jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs) + jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs, func(_ map[string]string) (string, int, int, bool, error) { return "", 1, 1, true, nil }) for _, jctx := range jctxs { txn := db.Txn(false) node, err := db.SelectNodeForJobWithTxn(txn, jctx) @@ -435,7 +438,7 @@ func TestScheduleIndividually(t *testing.T) { nodeDb, err := newNodeDbWithNodes(tc.Nodes) require.NoError(t, err) - jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, tc.Jobs) + 
jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, tc.Jobs, func(_ map[string]string) (string, int, int, bool, error) { return "", 1, 1, true, nil }) for i, jctx := range jctxs { ok, err := nodeDb.ScheduleMany([]*schedulercontext.JobSchedulingContext{jctx}) @@ -474,6 +477,9 @@ func TestScheduleIndividually(t *testing.T) { } func TestScheduleMany(t *testing.T) { + gangSuccess := testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32)) + gangFailure := testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33)) + tests := map[string]struct { // Nodes to schedule across. Nodes []*schedulerobjects.Node @@ -483,22 +489,30 @@ func TestScheduleMany(t *testing.T) { // For each group, whether we expect scheduling to succeed. ExpectSuccess []bool }{ + // Attempts to schedule 32 jobs with a minimum gang cardinality of 1 job. All jobs get scheduled. "simple success": { Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Jobs: [][]*jobdb.Job{testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32)}, + Jobs: [][]*jobdb.Job{gangSuccess}, ExpectSuccess: []bool{true}, }, - "simple failure": { + // Attempts to schedule 33 jobs with a minimum gang cardinality of 32 jobs. One fails, but the overall result is a success. + "simple success with min cardinality": { Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Jobs: [][]*jobdb.Job{testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33)}, + Jobs: [][]*jobdb.Job{testfixtures.WithGangAnnotationsAndMinCardinalityJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), 32)}, + ExpectSuccess: []bool{true}, + }, + // Attempts to schedule 33 jobs with a minimum gang cardinality of 33. The overall result fails. 
+ "simple failure with min cardinality": { + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Jobs: [][]*jobdb.Job{gangFailure}, ExpectSuccess: []bool{false}, }, "correct rollback": { Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: [][]*jobdb.Job{ - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), + gangSuccess, + gangFailure, + gangSuccess, }, ExpectSuccess: []bool{true, false, true}, }, @@ -519,14 +533,27 @@ func TestScheduleMany(t *testing.T) { nodeDb, err := newNodeDbWithNodes(tc.Nodes) require.NoError(t, err) for i, jobs := range tc.Jobs { - jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs) - ok, err := nodeDb.ScheduleMany(jctxs) + minCardinalityStr, ok := jobs[0].GetAnnotations()[configuration.GangMinimumCardinalityAnnotation] + if !ok { + minCardinalityStr = "1" + } + minCardinality, err := strconv.Atoi(minCardinalityStr) + if err != nil { + minCardinality = 1 + } + extractGangInfo := func(_ map[string]string) (string, int, int, bool, error) { + id, _ := uuid.NewUUID() + return id.String(), 1, minCardinality, true, nil + } + + jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs, extractGangInfo) + ok, err = nodeDb.ScheduleMany(jctxs) require.NoError(t, err) assert.Equal(t, tc.ExpectSuccess[i], ok) for _, jctx := range jctxs { pctx := jctx.PodSchedulingContext require.NotNil(t, pctx) - if tc.ExpectSuccess[i] { + if tc.ExpectSuccess[i] && !jctx.ShouldFail { assert.NotEqual(t, "", pctx.NodeId) } } @@ -591,7 +618,7 @@ func benchmarkScheduleMany(b *testing.B, nodes []*schedulerobjects.Node, jobs [] b.ResetTimer() for n := 0; n < b.N; n++ { - jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs) + jctxs := schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, jobs, func(_ map[string]string) (string, int, int, bool, error) { return "", 1, 1, true, nil }) txn := nodeDb.Txn(true) _, err := nodeDb.ScheduleManyWithTxn(txn, jctxs) txn.Abort() diff --git a/internal/scheduler/pool_assigner.go b/internal/scheduler/pool_assigner.go index 66e2ff999f0..9d636570a61 100644 --- a/internal/scheduler/pool_assigner.go +++ b/internal/scheduler/pool_assigner.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "time" "github.com/gogo/protobuf/proto" @@ -10,7 +9,9 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/types" + "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/jobdb" @@ -21,7 +22,7 @@ import ( // PoolAssigner allows jobs to be assigned to a pool // Note that this is intended only for use with metrics calculation type PoolAssigner interface { - Refresh(ctx context.Context) error + Refresh(ctx *armadacontext.Context) error AssignPool(j *jobdb.Job) (string, error) } @@ -70,7 +71,7 @@ func NewPoolAssigner(executorTimeout time.Duration, } // Refresh updates executor state -func (p *DefaultPoolAssigner) Refresh(ctx context.Context) error { +func (p *DefaultPoolAssigner) 
Refresh(ctx *armadacontext.Context) error { executors, err := p.executorRepository.GetExecutors(ctx) executorsByPool := map[string][]*executor{} poolByExecutorId := map[string]string{} @@ -123,20 +124,22 @@ func (p *DefaultPoolAssigner) AssignPool(j *jobdb.Job) (string, error) { req := j.PodRequirements() req = p.clearAnnotations(req) - // Otherwise iterate through each pool and detect the first one the job is potentially schedulable on + // Otherwise iterate through each pool and detect the first one the job is potentially schedulable on. + // TODO: We should use the real scheduler instead since this check may go out of sync with the scheduler. for pool, executors := range p.executorsByPool { for _, e := range executors { requests := req.GetResourceRequirements().Requests - if ok, _ := requestsAreLargeEnough(schedulerobjects.ResourceListFromV1ResourceList(requests), e.minimumJobSize); !ok { + if ok, _ := constraints.RequestsAreLargeEnough(schedulerobjects.ResourceListFromV1ResourceList(requests), e.minimumJobSize); !ok { continue } nodeDb := e.nodeDb txn := nodeDb.Txn(true) jctx := &schedulercontext.JobSchedulingContext{ - Created: time.Now(), - JobId: j.GetId(), - Job: j, - PodRequirements: j.GetPodRequirements(p.priorityClasses), + Created: time.Now(), + JobId: j.GetId(), + Job: j, + PodRequirements: j.GetPodRequirements(p.priorityClasses), + GangMinCardinality: 1, } node, err := nodeDb.SelectNodeForJobWithTxn(txn, jctx) txn.Abort() diff --git a/internal/scheduler/pool_assigner_test.go b/internal/scheduler/pool_assigner_test.go index f2508295e65..7734b6195be 100644 --- a/internal/scheduler/pool_assigner_test.go +++ b/internal/scheduler/pool_assigner_test.go @@ -1,17 +1,16 @@ package scheduler import ( - "context" "testing" "time" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/clock" - "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/jobdb" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" @@ -48,7 +47,7 @@ func TestPoolAssigner_AssignPool(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() ctrl := gomock.NewController(t) diff --git a/internal/scheduler/preempting_queue_scheduler.go b/internal/scheduler/preempting_queue_scheduler.go index ba753af0368..7be10464132 100644 --- a/internal/scheduler/preempting_queue_scheduler.go +++ b/internal/scheduler/preempting_queue_scheduler.go @@ -1,12 +1,10 @@ package scheduler import ( - "context" "fmt" "math/rand" "time" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/hashicorp/go-memdb" "github.com/pkg/errors" "golang.org/x/exp/maps" @@ -14,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" armadamaps "github.com/armadaproject/armada/internal/common/maps" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/types" @@ -108,9 +107,7 @@ func (sch *PreemptingQueueScheduler) 
EnableNewPreemptionStrategy() { // Schedule // - preempts jobs belonging to queues with total allocation above their fair share and // - schedules new jobs belonging to queues with total allocation less than their fair share. -func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerResult, error) { - log := ctxlogrus.Extract(ctx) - log = log.WithField("service", "PreemptingQueueScheduler") +func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResult, error) { defer func() { sch.schedulingContext.Finished = time.Now() }() @@ -125,23 +122,18 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerRe // Evict preemptible jobs. totalCost := sch.schedulingContext.TotalCost() evictorResult, inMemoryJobRepo, err := sch.evict( - ctxlogrus.ToContext( - ctx, - log.WithField("stage", "evict for resource balancing"), - ), + armadacontext.WithLogField(ctx, "stage", "evict for resource balancing"), NewNodeEvictor( sch.jobRepo, sch.schedulingContext.PriorityClasses, sch.nodeEvictionProbability, - func(ctx context.Context, job interfaces.LegacySchedulerJob) bool { + func(ctx *armadacontext.Context, job interfaces.LegacySchedulerJob) bool { if job.GetAnnotations() == nil { - log := ctxlogrus.Extract(ctx) - log.Errorf("can't evict job %s: annotations not initialised", job.GetId()) + ctx.Errorf("can't evict job %s: annotations not initialised", job.GetId()) return false } if job.GetNodeSelector() == nil { - log := ctxlogrus.Extract(ctx) - log.Errorf("can't evict job %s: nodeSelector not initialised", job.GetId()) + ctx.Errorf("can't evict job %s: nodeSelector not initialised", job.GetId()) return false } if qctx, ok := sch.schedulingContext.QueueSchedulingContexts[job.GetQueue()]; ok { @@ -168,10 +160,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerRe // Re-schedule evicted jobs/schedule new jobs. schedulerResult, err := sch.schedule( - ctxlogrus.ToContext( - ctx, - log.WithField("stage", "re-schedule after balancing eviction"), - ), + armadacontext.WithLogField(ctx, "stage", "re-schedule after balancing eviction"), inMemoryJobRepo, sch.jobRepo, ) @@ -189,10 +178,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerRe // Evict jobs on oversubscribed nodes. evictorResult, inMemoryJobRepo, err = sch.evict( - ctxlogrus.ToContext( - ctx, - log.WithField("stage", "evict oversubscribed"), - ), + armadacontext.WithLogField(ctx, "stage", "evict oversubscribed"), NewOversubscribedEvictor( sch.jobRepo, sch.schedulingContext.PriorityClasses, @@ -226,10 +212,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerRe // Since no new jobs are considered in this round, the scheduling key check brings no benefit. sch.SkipUnsuccessfulSchedulingKeyCheck() schedulerResult, err = sch.schedule( - ctxlogrus.ToContext( - ctx, - log.WithField("stage", "schedule after oversubscribed eviction"), - ), + armadacontext.WithLogField(ctx, "stage", "schedule after oversubscribed eviction"), inMemoryJobRepo, // Only evicted jobs should be scheduled in this round, // so we provide an empty repo for queued jobs. 
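The hunks above replace context.Context plus ctxlogrus with the project's *armadacontext.Context, whose logger travels with the context and is tagged per stage via WithLogField. Below is a minimal sketch of that pattern using only calls that appear in this diff (armadacontext.WithLogField, the Infof/Errorf methods, Done/Err); the runStage helper and the error path are illustrative, not part of the change.

package example

import (
	"github.com/armadaproject/armada/internal/common/armadacontext"
)

// runStage is a hypothetical helper showing the pattern adopted above:
// the logger is carried by the context, so there is no ctxlogrus.Extract step,
// and WithLogField tags every log line emitted within a stage.
func runStage(ctx *armadacontext.Context) error {
	stageCtx := armadacontext.WithLogField(ctx, "stage", "evict for resource balancing")
	stageCtx.Infof("starting stage")
	select {
	case <-stageCtx.Done():
		// Cancellation behaves exactly as with a plain context.Context.
		return stageCtx.Err()
	default:
		stageCtx.Infof("stage finished")
		return nil
	}
}

A caller would build the root context once, for example ctx := armadacontext.Background(), and hand it down, as the test changes elsewhere in this diff do.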
@@ -258,10 +241,10 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerRe return nil, err } if s := JobsSummary(preemptedJobs); s != "" { - log.Infof("preempting running jobs; %s", s) + ctx.Infof("preempting running jobs; %s", s) } if s := JobsSummary(scheduledJobs); s != "" { - log.Infof("scheduling new jobs; %s", s) + ctx.Infof("scheduling new jobs; %s", s) } if sch.enableAssertions { err := sch.assertions( @@ -277,12 +260,13 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx context.Context) (*SchedulerRe return &SchedulerResult{ PreemptedJobs: preemptedJobs, ScheduledJobs: scheduledJobs, + FailedJobs: schedulerResult.FailedJobs, NodeIdByJobId: sch.nodeIdByJobId, SchedulingContexts: []*schedulercontext.SchedulingContext{sch.schedulingContext}, }, nil } -func (sch *PreemptingQueueScheduler) evict(ctx context.Context, evictor *Evictor) (*EvictorResult, *InMemoryJobRepository, error) { +func (sch *PreemptingQueueScheduler) evict(ctx *armadacontext.Context, evictor *Evictor) (*EvictorResult, *InMemoryJobRepository, error) { if evictor == nil { return &EvictorResult{}, NewInMemoryJobRepository(sch.schedulingContext.PriorityClasses), nil } @@ -348,7 +332,7 @@ func (sch *PreemptingQueueScheduler) evict(ctx context.Context, evictor *Evictor // When evicting jobs, gangs may have been partially evicted. // Here, we evict all jobs in any gang for which at least one job was already evicted. -func (sch *PreemptingQueueScheduler) evictGangs(ctx context.Context, txn *memdb.Txn, previousEvictorResult *EvictorResult) (*EvictorResult, error) { +func (sch *PreemptingQueueScheduler) evictGangs(ctx *armadacontext.Context, txn *memdb.Txn, previousEvictorResult *EvictorResult) (*EvictorResult, error) { gangJobIds, gangNodeIds, err := sch.collectIdsForGangEviction(previousEvictorResult.EvictedJobsById) if err != nil { return nil, err @@ -512,7 +496,7 @@ func (q MinimalQueue) GetWeight() float64 { // addEvictedJobsToNodeDb adds evicted jobs to the NodeDb. // Needed to enable the nodeDb accounting for these when preempting. 
-func addEvictedJobsToNodeDb(ctx context.Context, sctx *schedulercontext.SchedulingContext, nodeDb *nodedb.NodeDb, inMemoryJobRepo *InMemoryJobRepository) error { +func addEvictedJobsToNodeDb(ctx *armadacontext.Context, sctx *schedulercontext.SchedulingContext, nodeDb *nodedb.NodeDb, inMemoryJobRepo *InMemoryJobRepository) error { gangItByQueue := make(map[string]*QueuedGangIterator) for _, qctx := range sctx.QueueSchedulingContexts { jobIt, err := inMemoryJobRepo.GetJobIterator(ctx, qctx.Queue) @@ -552,7 +536,7 @@ func addEvictedJobsToNodeDb(ctx context.Context, sctx *schedulercontext.Scheduli return nil } -func (sch *PreemptingQueueScheduler) schedule(ctx context.Context, inMemoryJobRepo *InMemoryJobRepository, jobRepo JobRepository) (*SchedulerResult, error) { +func (sch *PreemptingQueueScheduler) schedule(ctx *armadacontext.Context, inMemoryJobRepo *InMemoryJobRepository, jobRepo JobRepository) (*SchedulerResult, error) { jobIteratorByQueue := make(map[string]JobIterator) for _, qctx := range sch.schedulingContext.QueueSchedulingContexts { evictedIt, err := inMemoryJobRepo.GetJobIterator(ctx, qctx.Queue) @@ -626,7 +610,7 @@ func (sch *PreemptingQueueScheduler) updateGangAccounting(preemptedJobs, schedul } } for _, job := range scheduledJobs { - gangId, _, isGangJob, err := GangIdAndCardinalityFromLegacySchedulerJob(job) + gangId, _, _, isGangJob, err := GangIdAndCardinalityFromLegacySchedulerJob(job) if err != nil { return err } @@ -717,9 +701,9 @@ func (sch *PreemptingQueueScheduler) assertions( type Evictor struct { jobRepo JobRepository priorityClasses map[string]types.PriorityClass - nodeFilter func(context.Context, *nodedb.Node) bool - jobFilter func(context.Context, interfaces.LegacySchedulerJob) bool - postEvictFunc func(context.Context, interfaces.LegacySchedulerJob, *nodedb.Node) + nodeFilter func(*armadacontext.Context, *nodedb.Node) bool + jobFilter func(*armadacontext.Context, interfaces.LegacySchedulerJob) bool + postEvictFunc func(*armadacontext.Context, interfaces.LegacySchedulerJob, *nodedb.Node) } type EvictorResult struct { @@ -735,7 +719,7 @@ func NewNodeEvictor( jobRepo JobRepository, priorityClasses map[string]types.PriorityClass, perNodeEvictionProbability float64, - jobFilter func(context.Context, interfaces.LegacySchedulerJob) bool, + jobFilter func(*armadacontext.Context, interfaces.LegacySchedulerJob) bool, random *rand.Rand, ) *Evictor { if perNodeEvictionProbability <= 0 { @@ -747,7 +731,7 @@ func NewNodeEvictor( return &Evictor{ jobRepo: jobRepo, priorityClasses: priorityClasses, - nodeFilter: func(_ context.Context, node *nodedb.Node) bool { + nodeFilter: func(_ *armadacontext.Context, node *nodedb.Node) bool { return len(node.AllocatedByJobId) > 0 && random.Float64() < perNodeEvictionProbability }, jobFilter: jobFilter, @@ -769,11 +753,11 @@ func NewFilteredEvictor( return &Evictor{ jobRepo: jobRepo, priorityClasses: priorityClasses, - nodeFilter: func(_ context.Context, node *nodedb.Node) bool { + nodeFilter: func(_ *armadacontext.Context, node *nodedb.Node) bool { shouldEvict := nodeIdsToEvict[node.Id] return shouldEvict }, - jobFilter: func(_ context.Context, job interfaces.LegacySchedulerJob) bool { + jobFilter: func(_ *armadacontext.Context, job interfaces.LegacySchedulerJob) bool { shouldEvict := jobIdsToEvict[job.GetId()] return shouldEvict }, @@ -804,7 +788,7 @@ func NewOversubscribedEvictor( return &Evictor{ jobRepo: jobRepo, priorityClasses: priorityClasses, - nodeFilter: func(_ context.Context, node *nodedb.Node) bool { + nodeFilter: func(_ 
*armadacontext.Context, node *nodedb.Node) bool { overSubscribedPriorities = make(map[int32]bool) for p, rl := range node.AllocatableByPriority { if p < 0 { @@ -820,10 +804,9 @@ func NewOversubscribedEvictor( } return len(overSubscribedPriorities) > 0 && random.Float64() < perNodeEvictionProbability }, - jobFilter: func(ctx context.Context, job interfaces.LegacySchedulerJob) bool { + jobFilter: func(ctx *armadacontext.Context, job interfaces.LegacySchedulerJob) bool { if job.GetAnnotations() == nil { - log := ctxlogrus.Extract(ctx) - log.Warnf("can't evict job %s: annotations not initialised", job.GetId()) + ctx.Warnf("can't evict job %s: annotations not initialised", job.GetId()) return false } priorityClassName := job.GetPriorityClassName() @@ -844,7 +827,7 @@ func NewOversubscribedEvictor( // Any node for which nodeFilter returns false is skipped. // Any job for which jobFilter returns true is evicted (if the node was not skipped). // If a job was evicted from a node, postEvictFunc is called with the corresponding job and node. -func (evi *Evictor) Evict(ctx context.Context, it nodedb.NodeIterator) (*EvictorResult, error) { +func (evi *Evictor) Evict(ctx *armadacontext.Context, it nodedb.NodeIterator) (*EvictorResult, error) { var jobFilter func(job interfaces.LegacySchedulerJob) bool if evi.jobFilter != nil { jobFilter = func(job interfaces.LegacySchedulerJob) bool { return evi.jobFilter(ctx, job) } @@ -898,12 +881,11 @@ func (evi *Evictor) Evict(ctx context.Context, it nodedb.NodeIterator) (*Evictor // TODO: This is only necessary for jobs not scheduled in this cycle. // Since jobs scheduled in this cycle can be re-scheduled onto another node without triggering a preemption. -func defaultPostEvictFunc(ctx context.Context, job interfaces.LegacySchedulerJob, node *nodedb.Node) { +func defaultPostEvictFunc(ctx *armadacontext.Context, job interfaces.LegacySchedulerJob, node *nodedb.Node) { // Add annotation indicating to the scheduler this this job was evicted. annotations := job.GetAnnotations() if annotations == nil { - log := ctxlogrus.Extract(ctx) - log.Errorf("error evicting job %s: annotations not initialised", job.GetId()) + ctx.Errorf("error evicting job %s: annotations not initialised", job.GetId()) } else { annotations[schedulerconfig.IsEvictedAnnotation] = "true" } @@ -911,8 +893,7 @@ func defaultPostEvictFunc(ctx context.Context, job interfaces.LegacySchedulerJob // Add node selector ensuring this job is only re-scheduled onto the node it was evicted from. 
nodeSelector := job.GetNodeSelector() if nodeSelector == nil { - log := ctxlogrus.Extract(ctx) - log.Errorf("error evicting job %s: nodeSelector not initialised", job.GetId()) + ctx.Errorf("error evicting job %s: nodeSelector not initialised", job.GetId()) } else { nodeSelector[schedulerconfig.NodeIdLabel] = node.Id } diff --git a/internal/scheduler/preempting_queue_scheduler_test.go b/internal/scheduler/preempting_queue_scheduler_test.go index 923d77296bc..86077955363 100644 --- a/internal/scheduler/preempting_queue_scheduler_test.go +++ b/internal/scheduler/preempting_queue_scheduler_test.go @@ -1,20 +1,21 @@ package scheduler import ( - "context" "fmt" "math/rand" "testing" + "time" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "golang.org/x/time/rate" "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" armadamaps "github.com/armadaproject/armada/internal/common/maps" armadaslices "github.com/armadaproject/armada/internal/common/slices" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" @@ -53,7 +54,7 @@ func TestEvictOversubscribed(t *testing.T) { nil, ) it := NewInMemoryNodeIterator([]*nodedb.Node{entry}) - result, err := evictor.Evict(context.Background(), it) + result, err := evictor.Evict(armadacontext.Background(), it) require.NoError(t, err) prioritiesByName := configuration.PriorityByPriorityClassName(testfixtures.TestPriorityClasses) @@ -515,7 +516,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { { // Schedule a gang across two nodes. 
JobsByQueue: map[string][]*jobdb.Job{ - "A": testfixtures.WithGangAnnotationsJobs(testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2)), + "A": testfixtures.WithGangAnnotationsAndMinCardinalityJobs(testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2), 1), }, ExpectedScheduledIndices: map[string][]int{ "A": testfixtures.IntRange(0, 1), @@ -625,8 +626,8 @@ func TestPreemptingQueueScheduler(t *testing.T) { "B": 1, }, }, - "rescheduled jobs don't count towards maxJobsToSchedule": { - SchedulingConfig: testfixtures.WithMaxJobsToScheduleConfig(5, testfixtures.TestSchedulingConfig()), + "rescheduled jobs don't count towards global scheduling rate limit": { + SchedulingConfig: testfixtures.WithGlobalSchedulingRateLimiterConfig(2, 5, testfixtures.TestSchedulingConfig()), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Rounds: []SchedulingRound{ { @@ -642,7 +643,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { "A": testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 10), }, ExpectedScheduledIndices: map[string][]int{ - "A": testfixtures.IntRange(0, 4), + "A": testfixtures.IntRange(0, 1), }, }, }, @@ -650,6 +651,42 @@ func TestPreemptingQueueScheduler(t *testing.T) { "A": 1, }, }, + "MaximumSchedulingRate": { + SchedulingConfig: testfixtures.WithGlobalSchedulingRateLimiterConfig(2, 4, testfixtures.TestSchedulingConfig()), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 6)), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 10), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 3), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 10), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 1), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 10), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 1), + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1}, + }, "rescheduled jobs don't count towards maxQueueLookback": { SchedulingConfig: testfixtures.WithMaxLookbackPerQueueConfig(5, testfixtures.TestSchedulingConfig()), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), @@ -1300,6 +1337,22 @@ func TestPreemptingQueueScheduler(t *testing.T) { var jobIdsByGangId map[string]map[string]bool var gangIdByJobId map[string]string + // Scheduling rate-limiters persist between rounds. + // We control the rate at which time passes between scheduling rounds. + schedulingStarted := time.Now() + schedulingInterval := time.Second + limiter := rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumSchedulingRate), + tc.SchedulingConfig.MaximumSchedulingBurst, + ) + limiterByQueue := make(map[string]*rate.Limiter) + for queue := range tc.PriorityFactorByQueue { + limiterByQueue[queue] = rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), + tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, + ) + } + // Run the scheduler. 
log := logrus.NewEntry(logrus.New()) for i, round := range tc.Rounds { @@ -1367,12 +1420,21 @@ func TestPreemptingQueueScheduler(t *testing.T) { tc.SchedulingConfig.Preemption.PriorityClasses, tc.SchedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + limiter, tc.TotalResources, ) + sctx.Started = schedulingStarted.Add(time.Duration(i) * schedulingInterval) + for queue, priorityFactor := range tc.PriorityFactorByQueue { weight := 1 / priorityFactor - err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClass[queue]) + err := sctx.AddQueueSchedulingContext( + queue, + weight, + allocatedByQueueAndPriorityClass[queue], + limiterByQueue[queue], + ) require.NoError(t, err) + } constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( "pool", @@ -1396,7 +1458,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { if tc.SchedulingConfig.EnableNewPreemptionStrategy { sch.EnableNewPreemptionStrategy() } - result, err := sch.Schedule(ctxlogrus.ToContext(context.Background(), log)) + result, err := sch.Schedule(armadacontext.Background()) require.NoError(t, err) jobIdsByGangId = sch.jobIdsByGangId gangIdByJobId = sch.gangIdByJobId @@ -1645,11 +1707,12 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { tc.SchedulingConfig.Preemption.PriorityClasses, tc.SchedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + nil, nodeDb.TotalResources(), ) for queue, priorityFactor := range priorityFactorByQueue { weight := 1 / priorityFactor - err := sctx.AddQueueSchedulingContext(queue, weight, make(schedulerobjects.QuantityByTAndResourceType[string])) + err := sctx.AddQueueSchedulingContext(queue, weight, make(schedulerobjects.QuantityByTAndResourceType[string]), nil) require.NoError(b, err) } constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( @@ -1670,7 +1733,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { nil, nil, ) - result, err := sch.Schedule(context.Background()) + result, err := sch.Schedule(armadacontext.Background()) require.NoError(b, err) require.Equal(b, 0, len(result.PreemptedJobs)) @@ -1706,11 +1769,12 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { tc.SchedulingConfig.Preemption.PriorityClasses, tc.SchedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + nil, nodeDb.TotalResources(), ) for queue, priorityFactor := range priorityFactorByQueue { weight := 1 / priorityFactor - err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClass[queue]) + err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClass[queue], nil) require.NoError(b, err) } sch := NewPreemptingQueueScheduler( @@ -1725,7 +1789,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { nil, nil, ) - result, err := sch.Schedule(context.Background()) + result, err := sch.Schedule(armadacontext.Background()) require.NoError(b, err) // We expect the system to be in steady-state, i.e., no preempted/scheduled jobs. 
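The MaximumSchedulingRate cases above wire golang.org/x/time/rate token buckets into the scheduling context, one global limiter plus one per queue. The following self-contained sketch shows the token-bucket behaviour those cases depend on; the refill rate, burst, and one-second round interval are illustrative values, not the project's defaults.

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Token bucket: refills at 2 tokens per second, holds at most 4.
	limiter := rate.NewLimiter(rate.Limit(2), 4)

	start := time.Now()
	for round := 0; round < 3; round++ {
		// Pretend one second of wall-clock time passes between scheduling rounds,
		// mirroring the schedulingInterval controlled by the test above.
		now := start.Add(time.Duration(round) * time.Second)

		scheduled := 0
		for limiter.AllowN(now, 1) {
			scheduled++
		}
		fmt.Printf("round %d: scheduled %d jobs\n", round, scheduled)
	}
}

The first round drains the burst and each later round gains roughly rate times the interval in new tokens, which is the shape the expected scheduled indices in the MaximumSchedulingRate case follow.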
diff --git a/internal/scheduler/proxying_reports_server_test.go b/internal/scheduler/proxying_reports_server_test.go index 0dc81b54bf9..98f7c11fa97 100644 --- a/internal/scheduler/proxying_reports_server_test.go +++ b/internal/scheduler/proxying_reports_server_test.go @@ -1,13 +1,13 @@ package scheduler import ( - "context" "fmt" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -24,7 +24,7 @@ func TestProxyingSchedulingReportsServer_GetJobReports(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() sut, jobReportsClient := setupProxyingSchedulerReportsServerTest(t) @@ -62,7 +62,7 @@ func TestProxyingSchedulingReportsServer_GetSchedulingReport(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() sut, jobReportsClient := setupProxyingSchedulerReportsServerTest(t) @@ -100,7 +100,7 @@ func TestProxyingSchedulingReportsServer_GetQueueReport(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() sut, jobReportsClient := setupProxyingSchedulerReportsServerTest(t) diff --git a/internal/scheduler/publisher.go b/internal/scheduler/publisher.go index 0ae0595303b..598a00fc755 100644 --- a/internal/scheduler/publisher.go +++ b/internal/scheduler/publisher.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "strconv" "sync" @@ -11,9 +10,10 @@ import ( "github.com/gogo/protobuf/proto" "github.com/google/uuid" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" + "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -28,12 +28,12 @@ const ( type Publisher interface { // PublishMessages will publish the supplied messages. A LeaderToken is provided and the // implementor may decide whether to publish based on the status of this token - PublishMessages(ctx context.Context, events []*armadaevents.EventSequence, shouldPublish func() bool) error + PublishMessages(ctx *armadacontext.Context, events []*armadaevents.EventSequence, shouldPublish func() bool) error // PublishMarkers publishes a single marker message for each Pulsar partition. Each marker // massage contains the supplied group id, which allows all marker messages for a given call // to be identified. The uint32 returned is the number of messages published - PublishMarkers(ctx context.Context, groupId uuid.UUID) (uint32, error) + PublishMarkers(ctx *armadacontext.Context, groupId uuid.UUID) (uint32, error) } // PulsarPublisher is the default implementation of Publisher @@ -77,7 +77,7 @@ func NewPulsarPublisher( // PublishMessages publishes all event sequences to pulsar. 
Event sequences for a given jobset will be combined into // single event sequences up to maxMessageBatchSize. -func (p *PulsarPublisher) PublishMessages(ctx context.Context, events []*armadaevents.EventSequence, shouldPublish func() bool) error { +func (p *PulsarPublisher) PublishMessages(ctx *armadacontext.Context, events []*armadaevents.EventSequence, shouldPublish func() bool) error { sequences := eventutil.CompactEventSequences(events) sequences, err := eventutil.LimitSequencesByteSize(sequences, p.maxMessageBatchSize, true) if err != nil { @@ -103,13 +103,15 @@ func (p *PulsarPublisher) PublishMessages(ctx context.Context, events []*armadae // Send messages if shouldPublish() { - log.Debugf("Am leader so will publish") - sendCtx, cancel := context.WithTimeout(ctx, p.pulsarSendTimeout) + ctx.Debugf("Am leader so will publish") + sendCtx, cancel := armadacontext.WithTimeout(ctx, p.pulsarSendTimeout) errored := false for _, msg := range msgs { p.producer.SendAsync(sendCtx, msg, func(_ pulsar.MessageID, _ *pulsar.ProducerMessage, err error) { if err != nil { - log.WithError(err).Error("error sending message to Pulsar") + logging. + WithStacktrace(ctx, err). + Error("error sending message to Pulsar") errored = true } wg.Done() @@ -121,14 +123,14 @@ func (p *PulsarPublisher) PublishMessages(ctx context.Context, events []*armadae return errors.New("One or more messages failed to send to Pulsar") } } else { - log.Debugf("No longer leader so not publishing") + ctx.Debugf("No longer leader so not publishing") } return nil } // PublishMarkers sends one pulsar message (containing an armadaevents.PartitionMarker) to each partition // of the producer's Pulsar topic. -func (p *PulsarPublisher) PublishMarkers(ctx context.Context, groupId uuid.UUID) (uint32, error) { +func (p *PulsarPublisher) PublishMarkers(ctx *armadacontext.Context, groupId uuid.UUID) (uint32, error) { for i := 0; i < p.numPartitions; i++ { pm := &armadaevents.PartitionMarker{ GroupId: armadaevents.ProtoUuidFromUuid(groupId), diff --git a/internal/scheduler/publisher_test.go b/internal/scheduler/publisher_test.go index a524f9e26b9..6ecb200d416 100644 --- a/internal/scheduler/publisher_test.go +++ b/internal/scheduler/publisher_test.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "math" "testing" @@ -15,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/mocks" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/pkg/armadaevents" @@ -89,7 +89,7 @@ func TestPulsarPublisher_TestPublish(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() ctrl := gomock.NewController(t) mockPulsarClient := mocks.NewMockClient(ctrl) @@ -106,7 +106,7 @@ func TestPulsarPublisher_TestPublish(t *testing.T) { mockPulsarProducer. EXPECT(). SendAsync(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(_ context.Context, msg *pulsar.ProducerMessage, callback func(pulsar.MessageID, *pulsar.ProducerMessage, error)) { + DoAndReturn(func(_ *armadacontext.Context, msg *pulsar.ProducerMessage, callback func(pulsar.MessageID, *pulsar.ProducerMessage, error)) { es := &armadaevents.EventSequence{} err := proto.Unmarshal(msg.Payload, es) require.NoError(t, err) @@ -177,7 +177,7 @@ func TestPulsarPublisher_TestPublishMarkers(t *testing.T) { mockPulsarProducer. EXPECT(). Send(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, msg *pulsar.ProducerMessage) (pulsar.MessageID, error) { + DoAndReturn(func(_ *armadacontext.Context, msg *pulsar.ProducerMessage) (pulsar.MessageID, error) { numPublished++ key, ok := msg.Properties[explicitPartitionKey] if ok { @@ -190,7 +190,7 @@ func TestPulsarPublisher_TestPublishMarkers(t *testing.T) { }).AnyTimes() options := pulsar.ProducerOptions{Topic: topic} - ctx := context.TODO() + ctx := armadacontext.TODO() publisher, err := NewPulsarPublisher(mockPulsarClient, options, 5*time.Second) require.NoError(t, err) diff --git a/internal/scheduler/queue_scheduler.go b/internal/scheduler/queue_scheduler.go index ba6c223f49a..95683904eec 100644 --- a/internal/scheduler/queue_scheduler.go +++ b/internal/scheduler/queue_scheduler.go @@ -2,13 +2,13 @@ package scheduler import ( "container/heap" - "context" "reflect" "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" @@ -60,9 +60,10 @@ func (sch *QueueScheduler) SkipUnsuccessfulSchedulingKeyCheck() { sch.gangScheduler.SkipUnsuccessfulSchedulingKeyCheck() } -func (sch *QueueScheduler) Schedule(ctx context.Context) (*SchedulerResult, error) { +func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResult, error) { nodeIdByJobId := make(map[string]string) scheduledJobs := make([]interfaces.LegacySchedulerJob, 0) + failedJobs := make([]interfaces.LegacySchedulerJob, 0) for { // Peek() returns the next gang to try to schedule. Call Clear() before calling Peek() again. // Calling Clear() after (failing to) schedule ensures we get the next gang in order of smallest fair share. @@ -74,7 +75,7 @@ func (sch *QueueScheduler) Schedule(ctx context.Context) (*SchedulerResult, erro if gctx == nil { break } - if len(gctx.JobSchedulingContexts) == 0 { + if gctx.Cardinality() == 0 { if err := sch.candidateGangIterator.Clear(); err != nil { return nil, err } @@ -91,18 +92,31 @@ func (sch *QueueScheduler) Schedule(ctx context.Context) (*SchedulerResult, erro if ok, unschedulableReason, err := sch.gangScheduler.Schedule(ctx, gctx); err != nil { return nil, err } else if ok { + // We scheduled the minimum number of gang jobs required. 
for _, jctx := range gctx.JobSchedulingContexts { - scheduledJobs = append(scheduledJobs, jctx.Job) pctx := jctx.PodSchedulingContext if pctx != nil && pctx.NodeId != "" { + scheduledJobs = append(scheduledJobs, jctx.Job) nodeIdByJobId[jctx.JobId] = pctx.NodeId } } + + // Report any excess gang jobs that failed + for _, jctx := range gctx.JobSchedulingContexts { + if jctx.ShouldFail { + failedJobs = append(failedJobs, jctx.Job) + } + } } else if schedulerconstraints.IsTerminalUnschedulableReason(unschedulableReason) { // If unschedulableReason indicates no more new jobs can be scheduled, // instruct the underlying iterator to only yield evicted jobs from now on. sch.candidateGangIterator.OnlyYieldEvicted() + } else if schedulerconstraints.IsTerminalQueueUnschedulableReason(unschedulableReason) { + // If unschedulableReason indicates no more new jobs can be scheduled for this queue, + // instruct the underlying iterator to only yield evicted jobs for this queue from now on. + sch.candidateGangIterator.OnlyYieldEvictedForQueue(gctx.Queue) } + // Clear() to get the next gang in order of smallest fair share. // Calling clear here ensures the gang scheduled in this iteration is accounted for. if err := sch.candidateGangIterator.Clear(); err != nil { @@ -118,6 +132,7 @@ func (sch *QueueScheduler) Schedule(ctx context.Context) (*SchedulerResult, erro return &SchedulerResult{ PreemptedJobs: nil, ScheduledJobs: scheduledJobs, + FailedJobs: failedJobs, NodeIdByJobId: nodeIdByJobId, SchedulingContexts: []*schedulercontext.SchedulingContext{sch.schedulingContext}, }, nil @@ -197,12 +212,15 @@ func (it *QueuedGangIterator) Peek() (*schedulercontext.GangSchedulingContext, e if len(it.schedulingContext.UnfeasibleSchedulingKeys) > 0 { schedulingKey := it.schedulingContext.SchedulingKeyFromLegacySchedulerJob(job) if unsuccessfulJctx, ok := it.schedulingContext.UnfeasibleSchedulingKeys[schedulingKey]; ok { + // TODO: For performance, we should avoid creating new objects and instead reference the existing one. jctx := &schedulercontext.JobSchedulingContext{ Created: time.Now(), JobId: job.GetId(), Job: job, UnschedulableReason: unsuccessfulJctx.UnschedulableReason, PodSchedulingContext: unsuccessfulJctx.PodSchedulingContext, + // TODO: Move this into gang scheduling context + GangMinCardinality: 1, } if _, err := it.schedulingContext.AddJobSchedulingContext(jctx); err != nil { return nil, err @@ -211,7 +229,7 @@ func (it *QueuedGangIterator) Peek() (*schedulercontext.GangSchedulingContext, e } } - gangId, gangCardinality, isGangJob, err := GangIdAndCardinalityFromAnnotations(job.GetAnnotations()) + gangId, gangCardinality, _, isGangJob, err := GangIdAndCardinalityFromAnnotations(job.GetAnnotations()) if err != nil { // TODO: Get from context passed in. 
log := logrus.NewEntry(logrus.New()) @@ -227,6 +245,7 @@ func (it *QueuedGangIterator) Peek() (*schedulercontext.GangSchedulingContext, e schedulercontext.JobSchedulingContextsFromJobs( it.schedulingContext.PriorityClasses, gang, + GangIdAndCardinalityFromAnnotations, ), ) return it.next, nil @@ -236,6 +255,7 @@ func (it *QueuedGangIterator) Peek() (*schedulercontext.GangSchedulingContext, e schedulercontext.JobSchedulingContextsFromJobs( it.schedulingContext.PriorityClasses, []interfaces.LegacySchedulerJob{job}, + GangIdAndCardinalityFromAnnotations, ), ) return it.next, nil @@ -254,10 +274,13 @@ func (it *QueuedGangIterator) hitLookbackLimit() bool { // Specifically, it yields the next gang in the queue with smallest fraction of its fair share, // where the fraction of fair share computation includes the yielded gang. type CandidateGangIterator struct { - queueProvier fairness.QueueRepository + queueRepository fairness.QueueRepository fairnessCostProvider fairness.FairnessCostProvider // If true, this iterator only yields gangs where all jobs are evicted. onlyYieldEvicted bool + // If, e.g., onlyYieldEvictedByQueue["A"] is true, + // this iterator only yields gangs where all jobs are evicted for queue A. + onlyYieldEvictedByQueue map[string]bool // Reusable buffer to avoid allocations. buffer schedulerobjects.ResourceList // Priority queue containing per-queue iterators. @@ -266,15 +289,16 @@ type CandidateGangIterator struct { } func NewCandidateGangIterator( - queueProvier fairness.QueueRepository, + queueRepository fairness.QueueRepository, fairnessCostProvider fairness.FairnessCostProvider, iteratorsByQueue map[string]*QueuedGangIterator, ) (*CandidateGangIterator, error) { it := &CandidateGangIterator{ - queueProvier: queueProvier, - fairnessCostProvider: fairnessCostProvider, - buffer: schedulerobjects.NewResourceListWithDefaultSize(), - pq: make(QueueCandidateGangIteratorPQ, 0, len(iteratorsByQueue)), + queueRepository: queueRepository, + fairnessCostProvider: fairnessCostProvider, + onlyYieldEvictedByQueue: make(map[string]bool), + buffer: schedulerobjects.NewResourceListWithDefaultSize(), + pq: make(QueueCandidateGangIteratorPQ, 0, len(iteratorsByQueue)), } for queue, queueIt := range iteratorsByQueue { if _, err := it.updateAndPushPQItem(it.newPQItem(queue, queueIt)); err != nil { @@ -288,6 +312,48 @@ func (it *CandidateGangIterator) OnlyYieldEvicted() { it.onlyYieldEvicted = true } +func (it *CandidateGangIterator) OnlyYieldEvictedForQueue(queue string) { + it.onlyYieldEvictedByQueue[queue] = true +} + +// Clear removes the first item in the iterator. +// If it.onlyYieldEvicted is true, any consecutive non-evicted jobs are also removed. +func (it *CandidateGangIterator) Clear() error { + if len(it.pq) == 0 { + return nil + } + item := heap.Pop(&it.pq).(*QueueCandidateGangIteratorItem) + if err := item.it.Clear(); err != nil { + return err + } + if _, err := it.updateAndPushPQItem(item); err != nil { + return err + } + + // If set to only yield evicted gangs, drop any queues for which the next gang is non-evicted here. + // We assume here that all evicted jobs appear before non-evicted jobs in the queue. + // Hence, it's safe to drop a queue if the first job is non-evicted. + if it.onlyYieldEvicted { + for len(it.pq) > 0 && !it.pq[0].gctx.AllJobsEvicted { + heap.Pop(&it.pq) + } + } else { + // Same check as above on a per-queue basis. 
+ for len(it.pq) > 0 && it.onlyYieldEvictedByQueue[it.pq[0].gctx.Queue] && !it.pq[0].gctx.AllJobsEvicted { + heap.Pop(&it.pq) + } + } + return nil +} + +func (it *CandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext, error) { + if len(it.pq) == 0 { + // No queued jobs left. + return nil, nil + } + return it.pq[0].gctx, nil +} + func (it *CandidateGangIterator) newPQItem(queue string, queueIt *QueuedGangIterator) *QueueCandidateGangIteratorItem { return &QueueCandidateGangIteratorItem{ queue: queue, @@ -303,8 +369,9 @@ func (it *CandidateGangIterator) updateAndPushPQItem(item *QueueCandidateGangIte return false, nil } if it.onlyYieldEvicted && !item.gctx.AllJobsEvicted { - // We assume here that all evicted jobs appear before non-evicted jobs in the queue. - // Hence, it's safe to drop a queue once a non-evicted job has been seen. + return false, nil + } + if it.onlyYieldEvictedByQueue[item.gctx.Queue] && !item.gctx.AllJobsEvicted { return false, nil } heap.Push(&it.pq, item) @@ -335,7 +402,7 @@ func (it *CandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorIt // queueCostWithGctx returns the cost associated with a queue if gctx were to be scheduled. func (it *CandidateGangIterator) queueCostWithGctx(gctx *schedulercontext.GangSchedulingContext) (float64, error) { - queue, ok := it.queueProvier.GetQueue(gctx.Queue) + queue, ok := it.queueRepository.GetQueue(gctx.Queue) if !ok { return 0, errors.Errorf("unknown queue %s", gctx.Queue) } @@ -345,33 +412,6 @@ func (it *CandidateGangIterator) queueCostWithGctx(gctx *schedulercontext.GangSc return it.fairnessCostProvider.CostFromAllocationAndWeight(it.buffer, queue.GetWeight()), nil } -// Clear removes the first item in the iterator. -// If it.onlyYieldEvicted is true, any consecutive non-evicted jobs are also removed. -func (it *CandidateGangIterator) Clear() error { - if len(it.pq) == 0 { - return nil - } - item := heap.Pop(&it.pq).(*QueueCandidateGangIteratorItem) - if err := item.it.Clear(); err != nil { - return err - } - if _, err := it.updateAndPushPQItem(item); err != nil { - return err - } - for len(it.pq) > 0 && it.onlyYieldEvicted && !it.pq[0].gctx.AllJobsEvicted { - heap.Pop(&it.pq) - } - return nil -} - -func (it *CandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext, error) { - if len(it.pq) == 0 { - // No queued jobs left. - return nil, nil - } - return it.pq[0].gctx, nil -} - // Priority queue used by CandidateGangIterator to determine from which queue to schedule the next job. 
type QueueCandidateGangIteratorPQ []*QueueCandidateGangIteratorItem diff --git a/internal/scheduler/queue_scheduler_test.go b/internal/scheduler/queue_scheduler_test.go index 02ea3929aaa..d5b720aa35a 100644 --- a/internal/scheduler/queue_scheduler_test.go +++ b/internal/scheduler/queue_scheduler_test.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "testing" @@ -9,9 +8,11 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "golang.org/x/time/rate" "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/util" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" @@ -87,44 +88,56 @@ func TestQueueScheduler(t *testing.T) { PriorityFactorByQueue: map[string]float64{"A": 1}, ExpectedScheduledIndices: []int{0, 11}, }, - "MaximumJobsToSchedule": { - SchedulingConfig: testfixtures.WithMaxJobsToScheduleConfig(2, testfixtures.TestSchedulingConfig()), + "MaximumSchedulingBurst": { + SchedulingConfig: testfixtures.WithGlobalSchedulingRateLimiterConfig(10, 2, testfixtures.TestSchedulingConfig()), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Jobs: armadaslices.Concatenate( + testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 10), + testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 4), + ), + PriorityFactorByQueue: map[string]float64{"A": 1}, + ExpectedScheduledIndices: []int{0, 11}, + }, + "MaximumPerQueueSchedulingBurst": { + SchedulingConfig: testfixtures.WithPerQueueSchedulingLimiterConfig(10, 2, testfixtures.TestSchedulingConfig()), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 10), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 3), + testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 1), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, - ExpectedScheduledIndices: []int{0, 11}, - ExpectedNeverAttemptedIndices: []int{12, 13}, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + ExpectedScheduledIndices: []int{0, 11, 14}, }, - "MaximumGangsToSchedule": { - SchedulingConfig: testfixtures.WithMaxGangsToScheduleConfig(2, testfixtures.TestSchedulingConfig()), + "MaximumSchedulingBurst is not exceeded by gangs": { + SchedulingConfig: testfixtures.WithGlobalSchedulingRateLimiterConfig(10, 2, testfixtures.TestSchedulingConfig()), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( + testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.WithGangAnnotationsJobs( testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), ), - testfixtures.WithGangAnnotationsJobs( - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2), - ), - testfixtures.WithGangAnnotationsJobs( - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2), - ), - testfixtures.WithGangAnnotationsJobs( - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), - ), - testfixtures.WithGangAnnotationsJobs( - 
testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), - ), + testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), + ), + PriorityFactorByQueue: map[string]float64{"A": 1}, + ExpectedScheduledIndices: []int{0, 4}, + }, + "MaximumPerQueueSchedulingBurst is not exceeded by gangs": { + SchedulingConfig: testfixtures.WithPerQueueSchedulingLimiterConfig(10, 2, testfixtures.TestSchedulingConfig()), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Jobs: armadaslices.Concatenate( + testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.WithGangAnnotationsJobs( testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), ), + testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, - ExpectedScheduledIndices: []int{0, 1, 6, 7}, - ExpectedNeverAttemptedIndices: []int{8, 9, 10, 11}, + PriorityFactorByQueue: map[string]float64{"A": 1}, + ExpectedScheduledIndices: []int{0, 4}, }, "MaximumResourceFractionToSchedule": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -397,9 +410,19 @@ func TestQueueScheduler(t *testing.T) { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( - testfixtures.WithAnnotationsJobs(map[string]string{configuration.GangIdAnnotation: "my-gang", configuration.GangCardinalityAnnotation: "2"}, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithAnnotationsJobs(map[string]string{ + configuration.GangIdAnnotation: "my-gang", + configuration.GangCardinalityAnnotation: "2", + configuration.GangMinimumCardinalityAnnotation: "1", + }, + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.WithAnnotationsJobs(map[string]string{configuration.GangIdAnnotation: "my-gang", configuration.GangCardinalityAnnotation: "2"}, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithAnnotationsJobs(map[string]string{ + configuration.GangIdAnnotation: "my-gang", + configuration.GangCardinalityAnnotation: "2", + configuration.GangMinimumCardinalityAnnotation: "1", + }, + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), PriorityFactorByQueue: map[string]float64{"A": 1}, ExpectedScheduledIndices: []int{0, 1, 2}, @@ -415,9 +438,19 @@ func TestQueueScheduler(t *testing.T) { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( - testfixtures.WithAnnotationsJobs(map[string]string{configuration.GangIdAnnotation: "my-gang", configuration.GangCardinalityAnnotation: "2"}, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithAnnotationsJobs(map[string]string{ + configuration.GangIdAnnotation: "my-gang", + configuration.GangCardinalityAnnotation: "2", + configuration.GangMinimumCardinalityAnnotation: "2", + }, + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - testfixtures.WithAnnotationsJobs(map[string]string{configuration.GangIdAnnotation: "my-gang", configuration.GangCardinalityAnnotation: "2"}, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), + testfixtures.WithAnnotationsJobs(map[string]string{ + 
configuration.GangIdAnnotation: "my-gang", + configuration.GangCardinalityAnnotation: "2", + configuration.GangMinimumCardinalityAnnotation: "2", + }, + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), PriorityFactorByQueue: map[string]float64{"A": 1}, ExpectedScheduledIndices: []int{1}, @@ -473,11 +506,22 @@ func TestQueueScheduler(t *testing.T) { tc.SchedulingConfig.Preemption.PriorityClasses, tc.SchedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumSchedulingRate), + tc.SchedulingConfig.MaximumSchedulingBurst, + ), tc.TotalResources, ) for queue, priorityFactor := range tc.PriorityFactorByQueue { weight := 1 / priorityFactor - err := sctx.AddQueueSchedulingContext(queue, weight, tc.InitialAllocatedByQueueAndPriorityClass[queue]) + err := sctx.AddQueueSchedulingContext( + queue, weight, + tc.InitialAllocatedByQueueAndPriorityClass[queue], + rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), + tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, + ), + ) require.NoError(t, err) } constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( @@ -488,14 +532,14 @@ func TestQueueScheduler(t *testing.T) { ) jobIteratorByQueue := make(map[string]JobIterator) for queue := range tc.PriorityFactorByQueue { - it, err := jobRepo.GetJobIterator(context.Background(), queue) + it, err := jobRepo.GetJobIterator(armadacontext.Background(), queue) require.NoError(t, err) jobIteratorByQueue[queue] = it } sch, err := NewQueueScheduler(sctx, constraints, nodeDb, jobIteratorByQueue) require.NoError(t, err) - result, err := sch.Schedule(context.Background()) + result, err := sch.Schedule(armadacontext.Background()) require.NoError(t, err) // Check that the right jobs got scheduled. 
@@ -600,7 +644,7 @@ func TestQueueScheduler(t *testing.T) { continue } assert.Equal(t, nodeDb.NumNodes(), pctx.NumNodes) - _, _, isGangJob, err := GangIdAndCardinalityFromLegacySchedulerJob(jctx.Job) + _, _, _, isGangJob, err := GangIdAndCardinalityFromLegacySchedulerJob(jctx.Job) require.NoError(t, err) if !isGangJob { numExcludedNodes := 0 diff --git a/internal/scheduler/reports_test.go b/internal/scheduler/reports_test.go index fc96cc1b25e..d989e498fad 100644 --- a/internal/scheduler/reports_test.go +++ b/internal/scheduler/reports_test.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/resource" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" @@ -159,7 +159,7 @@ func TestAddGetSchedulingContext(t *testing.T) { func TestTestAddGetSchedulingContextConcurrency(t *testing.T) { repo, err := NewSchedulingContextRepository(10) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Second) defer cancel() for _, executorId := range []string{"foo", "bar"} { go func(executorId string) { @@ -202,7 +202,7 @@ func TestReportDoesNotExist(t *testing.T) { require.NoError(t, err) err = repo.AddSchedulingContext(testSchedulingContext("executor-01")) require.NoError(t, err) - ctx := context.Background() + ctx := armadacontext.Background() queue := "queue-does-not-exist" jobId := util.NewULID() @@ -246,14 +246,14 @@ func withSuccessfulJobSchedulingContext(sctx *schedulercontext.SchedulingContext } qctx := sctx.QueueSchedulingContexts[queue] if qctx == nil { - if err := sctx.AddQueueSchedulingContext(queue, 1.0, make(schedulerobjects.QuantityByTAndResourceType[string])); err != nil { + if err := sctx.AddQueueSchedulingContext(queue, 1.0, make(schedulerobjects.QuantityByTAndResourceType[string]), nil); err != nil { panic(err) } qctx = sctx.QueueSchedulingContexts[queue] qctx.SchedulingContext = nil qctx.Created = time.Time{} } - qctx.SuccessfulJobSchedulingContexts[jobId] = &schedulercontext.JobSchedulingContext{JobId: jobId} + qctx.SuccessfulJobSchedulingContexts[jobId] = &schedulercontext.JobSchedulingContext{JobId: jobId, GangMinCardinality: 1} rl := schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")}} qctx.ScheduledResourcesByPriorityClass.AddResourceList("foo", rl) sctx.ScheduledResourcesByPriorityClass.AddResourceList("foo", rl) @@ -266,7 +266,7 @@ func withPreemptingJobSchedulingContext(sctx *schedulercontext.SchedulingContext } qctx := sctx.QueueSchedulingContexts[queue] if qctx == nil { - if err := sctx.AddQueueSchedulingContext(queue, 1.0, make(schedulerobjects.QuantityByTAndResourceType[string])); err != nil { + if err := sctx.AddQueueSchedulingContext(queue, 1.0, make(schedulerobjects.QuantityByTAndResourceType[string]), nil); err != nil { panic(err) } qctx = sctx.QueueSchedulingContexts[queue] @@ -286,14 +286,14 @@ func withUnsuccessfulJobSchedulingContext(sctx *schedulercontext.SchedulingConte } qctx := sctx.QueueSchedulingContexts[queue] if qctx == nil { - if err := sctx.AddQueueSchedulingContext(queue, 1.0, make(schedulerobjects.QuantityByTAndResourceType[string])); err != nil { + if err := 
sctx.AddQueueSchedulingContext(queue, 1.0, make(schedulerobjects.QuantityByTAndResourceType[string]), nil); err != nil { panic(err) } qctx = sctx.QueueSchedulingContexts[queue] qctx.SchedulingContext = nil qctx.Created = time.Time{} } - qctx.UnsuccessfulJobSchedulingContexts[jobId] = &schedulercontext.JobSchedulingContext{JobId: jobId, UnschedulableReason: "unknown"} + qctx.UnsuccessfulJobSchedulingContexts[jobId] = &schedulercontext.JobSchedulingContext{JobId: jobId, UnschedulableReason: "unknown", GangMinCardinality: 1} return sctx } @@ -304,6 +304,7 @@ func testSchedulingContext(executorId string) *schedulercontext.SchedulingContex nil, "", nil, + nil, schedulerobjects.ResourceList{}, ) sctx.Started = time.Time{} diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index 34c74e353c1..58ba50b6f1b 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -1,22 +1,23 @@ package scheduler import ( - "context" "fmt" "time" "github.com/gogo/protobuf/proto" "github.com/google/uuid" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/pkg/errors" + "github.com/renstrom/shortuuid" "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/scheduler/database" + "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/kubernetesobjects/affinity" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" @@ -116,40 +117,38 @@ func NewScheduler( } // Run enters the scheduling loop, which will continue until ctx is cancelled. -func (s *Scheduler) Run(ctx context.Context) error { - log := ctxlogrus.Extract(ctx) - log = log.WithField("service", "scheduler") - ctx = ctxlogrus.ToContext(ctx, log) - log.Infof("starting scheduler with cycle time %s", s.cyclePeriod) - defer log.Info("scheduler stopped") +func (s *Scheduler) Run(ctx *armadacontext.Context) error { + ctx.Infof("starting scheduler with cycle time %s", s.cyclePeriod) + defer ctx.Info("scheduler stopped") // JobDb initialisation. 
start := s.clock.Now() if err := s.initialise(ctx); err != nil { return err } - log.Infof("JobDb initialised in %s", s.clock.Since(start)) + ctx.Infof("JobDb initialised in %s", s.clock.Since(start)) ticker := s.clock.NewTicker(s.cyclePeriod) prevLeaderToken := InvalidLeaderToken() for { select { case <-ctx.Done(): - log.Infof("context cancelled; returning.") + ctx.Infof("context cancelled; returning.") return ctx.Err() case <-ticker.C(): start := s.clock.Now() + ctx := armadacontext.WithLogField(ctx, "cycleId", shortuuid.New()) leaderToken := s.leaderController.GetToken() fullUpdate := false - log.Infof("received leaderToken; leader status is %t", leaderToken.leader) + ctx.Infof("received leaderToken; leader status is %t", leaderToken.leader) // If we are becoming leader then we must ensure we have caught up to all Pulsar messages if leaderToken.leader && leaderToken != prevLeaderToken { - log.Infof("becoming leader") - syncContext, cancel := context.WithTimeout(ctx, 5*time.Minute) + ctx.Infof("becoming leader") + syncContext, cancel := armadacontext.WithTimeout(ctx, 5*time.Minute) err := s.ensureDbUpToDate(syncContext, 1*time.Second) if err != nil { - log.WithError(err).Error("could not become leader") + logging.WithStacktrace(ctx, err).Error("could not become leader") leaderToken = InvalidLeaderToken() } else { fullUpdate = true @@ -167,20 +166,24 @@ func (s *Scheduler) Run(ctx context.Context) error { shouldSchedule := s.clock.Now().Sub(s.previousSchedulingRoundEnd) > s.schedulePeriod - if err := s.cycle(ctx, fullUpdate, leaderToken, shouldSchedule); err != nil { - logging.WithStacktrace(log, err).Error("scheduling cycle failure") + result, err := s.cycle(ctx, fullUpdate, leaderToken, shouldSchedule) + if err != nil { + logging.WithStacktrace(ctx, err).Error("scheduling cycle failure") leaderToken = InvalidLeaderToken() } cycleTime := s.clock.Since(start) + s.metrics.ResetGaugeMetrics() + if shouldSchedule && leaderToken.leader { - // Only the leader token does real scheduling rounds. + // Only the leader does real scheduling rounds. s.metrics.ReportScheduleCycleTime(cycleTime) - log.Infof("scheduling cycle completed in %s", cycleTime) + s.metrics.ReportSchedulerResult(ctx, result) + ctx.Infof("scheduling cycle completed in %s", cycleTime) } else { s.metrics.ReportReconcileCycleTime(cycleTime) - log.Infof("reconciliation cycle completed in %s", cycleTime) + ctx.Infof("reconciliation cycle completed in %s", cycleTime) } prevLeaderToken = leaderToken @@ -194,18 +197,18 @@ func (s *Scheduler) Run(ctx context.Context) error { // cycle is a single iteration of the main scheduling loop. // If updateAll is true, we generate events from all jobs in the jobDb. // Otherwise, we only generate events from jobs updated since the last cycle. -func (s *Scheduler) cycle(ctx context.Context, updateAll bool, leaderToken LeaderToken, shouldSchedule bool) error { - log := ctxlogrus.Extract(ctx) - log = log.WithField("function", "cycle") +func (s *Scheduler) cycle(ctx *armadacontext.Context, updateAll bool, leaderToken LeaderToken, shouldSchedule bool) (overallSchedulerResult SchedulerResult, err error) { + overallSchedulerResult = SchedulerResult{EmptyResult: true} + // Update job state. updatedJobs, err := s.syncState(ctx) if err != nil { - return err + return } // Only the leader may make decisions; exit if not leader. if !s.leaderController.ValidateToken(leaderToken) { - return nil + return } // If we've been asked to generate messages for all jobs, do so. 
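The Run loop above now derives a fresh context per tick and attaches a "cycleId" field to it via armadacontext.WithLogField(ctx, "cycleId", shortuuid.New()), replacing the ctxlogrus.Extract boilerplate, so every log line from one cycle carries the same correlation id. Here is a rough sketch of that context-scoped-logger idea using logrus directly; ctxLogger and withLogField are invented names for the illustration and are not the armadacontext API.

package main

import (
	"context"

	"github.com/sirupsen/logrus"
)

// ctxLogger bundles a context with a structured logger so that fields added
// once (for example a per-cycle id) appear on every line logged through it.
type ctxLogger struct {
	context.Context
	*logrus.Entry
}

// withLogField derives a child ctxLogger whose logger carries one extra field.
func withLogField(parent *ctxLogger, key string, value interface{}) *ctxLogger {
	return &ctxLogger{Context: parent.Context, Entry: parent.Entry.WithField(key, value)}
}

func main() {
	root := &ctxLogger{Context: context.Background(), Entry: logrus.NewEntry(logrus.New())}
	for cycle := 0; cycle < 3; cycle++ {
		// Each iteration gets its own derived context; "cycleId" is attached
		// once and shows up on both lines logged below.
		ctx := withLogField(root, "cycleId", cycle)
		ctx.Infof("scheduling cycle starting")
		ctx.Infof("scheduling cycle completed")
	}
}

Carrying the logger inside the context keeps the *Scheduler methods down to a single ctx argument, which is why the per-function log := ctxlogrus.Extract(ctx) / WithField("function", ...) blocks could be deleted throughout this file.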
@@ -219,38 +222,40 @@ func (s *Scheduler) cycle(ctx context.Context, updateAll bool, leaderToken Leade // Generate any events that came out of synchronising the db state. events, err := s.generateUpdateMessages(ctx, updatedJobs, txn) if err != nil { - return err + return } // Expire any jobs running on clusters that haven't heartbeated within the configured deadline. expirationEvents, err := s.expireJobsIfNecessary(ctx, txn) if err != nil { - return err + return } events = append(events, expirationEvents...) + // Request cancel for any jobs that exceed queueTtl + queueTtlCancelEvents, err := s.cancelQueuedJobsIfExpired(txn) + if err != nil { + return + } + events = append(events, queueTtlCancelEvents...) + // Schedule jobs. if shouldSchedule { - overallSchedulerResult, err := s.schedulingAlgo.Schedule(ctx, txn, s.jobDb) + var result *SchedulerResult + result, err = s.schedulingAlgo.Schedule(ctx, txn, s.jobDb) if err != nil { - return err + return } - // This check feels redundant. It feels like we shouldn't have got here without - // a leader token. - if leaderToken.leader { - // Report various metrics computed from the scheduling cycle. - // TODO: preemptible jobs, possibly other metrics - // TODO: Return this information and deal with metrics after the cycle? - s.metrics.ReportSchedulerResult(overallSchedulerResult) - } - - resultEvents, err := s.eventsFromSchedulerResult(txn, overallSchedulerResult) + var resultEvents []*armadaevents.EventSequence + resultEvents, err = s.eventsFromSchedulerResult(result) if err != nil { - return err + return } events = append(events, resultEvents...) s.previousSchedulingRoundEnd = s.clock.Now() + + overallSchedulerResult = *result } // Publish to Pulsar. @@ -258,25 +263,22 @@ func (s *Scheduler) cycle(ctx context.Context, updateAll bool, leaderToken Leade return s.leaderController.ValidateToken(leaderToken) } start := s.clock.Now() - if err := s.publisher.PublishMessages(ctx, events, isLeader); err != nil { - return err + if err = s.publisher.PublishMessages(ctx, events, isLeader); err != nil { + return } - log.Infof("published %d events to pulsar in %s", len(events), s.clock.Since(start)) + ctx.Infof("published %d events to pulsar in %s", len(events), s.clock.Since(start)) txn.Commit() - return nil + return } // syncState updates jobs in jobDb to match state in postgres and returns all updated jobs. -func (s *Scheduler) syncState(ctx context.Context) ([]*jobdb.Job, error) { - log := ctxlogrus.Extract(ctx) - log = log.WithField("function", "syncState") - +func (s *Scheduler) syncState(ctx *armadacontext.Context) ([]*jobdb.Job, error) { start := s.clock.Now() updatedJobs, updatedRuns, err := s.jobRepository.FetchJobUpdates(ctx, s.jobsSerial, s.runsSerial) if err != nil { return nil, err } - log.Infof("received %d updated jobs and %d updated job runs in %s", len(updatedJobs), len(updatedRuns), s.clock.Since(start)) + ctx.Infof("received %d updated jobs and %d updated job runs in %s", len(updatedJobs), len(updatedRuns), s.clock.Since(start)) txn := s.jobDb.WriteTxn() defer txn.Abort() @@ -320,7 +322,7 @@ func (s *Scheduler) syncState(ctx context.Context) ([]*jobdb.Job, error) { // If the job is nil or terminal at this point then it cannot be active. // In this case we can ignore the run. 
if job == nil || job.InTerminalState() { - log.Debugf("job %s is not active; ignoring update for run %s", jobId, dbRun.RunID) + ctx.Debugf("job %s is not active; ignoring update for run %s", jobId, dbRun.RunID) continue } } @@ -390,13 +392,13 @@ func (s *Scheduler) addNodeAntiAffinitiesForAttemptedRunsIfSchedulable(job *jobd } // eventsFromSchedulerResult generates necessary EventSequences from the provided SchedulerResult. -func (s *Scheduler) eventsFromSchedulerResult(txn *jobdb.Txn, result *SchedulerResult) ([]*armadaevents.EventSequence, error) { +func (s *Scheduler) eventsFromSchedulerResult(result *SchedulerResult) ([]*armadaevents.EventSequence, error) { return EventsFromSchedulerResult(result, s.clock.Now()) } // EventsFromSchedulerResult generates necessary EventSequences from the provided SchedulerResult. func EventsFromSchedulerResult(result *SchedulerResult, time time.Time) ([]*armadaevents.EventSequence, error) { - eventSequences := make([]*armadaevents.EventSequence, 0, len(result.PreemptedJobs)+len(result.ScheduledJobs)) + eventSequences := make([]*armadaevents.EventSequence, 0, len(result.PreemptedJobs)+len(result.ScheduledJobs)+len(result.FailedJobs)) eventSequences, err := AppendEventSequencesFromPreemptedJobs(eventSequences, PreemptedJobsFromSchedulerResult[*jobdb.Job](result), time) if err != nil { return nil, err @@ -405,6 +407,10 @@ func EventsFromSchedulerResult(result *SchedulerResult, time time.Time) ([]*arma if err != nil { return nil, err } + eventSequences, err = AppendEventSequencesFromUnschedulableJobs(eventSequences, result.FailedJobs, time) + if err != nil { + return nil, err + } return eventSequences, nil } @@ -504,9 +510,35 @@ func AppendEventSequencesFromScheduledJobs(eventSequences []*armadaevents.EventS return eventSequences, nil } +func AppendEventSequencesFromUnschedulableJobs(eventSequences []*armadaevents.EventSequence, jobs []interfaces.LegacySchedulerJob, time time.Time) ([]*armadaevents.EventSequence, error) { + for _, job := range jobs { + jobId, err := armadaevents.ProtoUuidFromUlidString(job.GetId()) + if err != nil { + return nil, err + } + gangJobUnschedulableError := &armadaevents.Error{ + Terminal: true, + Reason: &armadaevents.Error_GangJobUnschedulable{GangJobUnschedulable: &armadaevents.GangJobUnschedulable{Message: "Job did not meet the minimum gang cardinality"}}, + } + eventSequences = append(eventSequences, &armadaevents.EventSequence{ + Queue: job.GetQueue(), + JobSetName: job.GetJobSet(), + Events: []*armadaevents.EventSequence_Event{ + { + Created: &time, + Event: &armadaevents.EventSequence_Event_JobErrors{ + JobErrors: &armadaevents.JobErrors{JobId: jobId, Errors: []*armadaevents.Error{gangJobUnschedulableError}}, + }, + }, + }, + }) + } + return eventSequences, nil +} + // generateUpdateMessages generates EventSequences representing the state changes on updated jobs // If there are no state changes then an empty slice will be returned -func (s *Scheduler) generateUpdateMessages(ctx context.Context, updatedJobs []*jobdb.Job, txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { +func (s *Scheduler) generateUpdateMessages(ctx *armadacontext.Context, updatedJobs []*jobdb.Job, txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { failedRunIds := make([]uuid.UUID, 0, len(updatedJobs)) for _, job := range updatedJobs { run := job.LatestRun() @@ -707,10 +739,7 @@ func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors m // expireJobsIfNecessary removes any jobs from the JobDb which are running on stale 
executors. // It also generates an EventSequence for each job, indicating that both the run and the job has failed // Note that this is different behaviour from the old scheduler which would allow expired jobs to be rerun -func (s *Scheduler) expireJobsIfNecessary(ctx context.Context, txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { - log := ctxlogrus.Extract(ctx) - log = log.WithField("function", "expireJobsIfNecessary") - +func (s *Scheduler) expireJobsIfNecessary(ctx *armadacontext.Context, txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { heartbeatTimes, err := s.executorRepository.GetLastUpdateTimes(ctx) if err != nil { return nil, err @@ -725,14 +754,14 @@ func (s *Scheduler) expireJobsIfNecessary(ctx context.Context, txn *jobdb.Txn) ( // has been completely removed for executor, heartbeat := range heartbeatTimes { if heartbeat.Before(cutOff) { - log.Warnf("Executor %s has not reported a hearbeart since %v. Will expire all jobs running on this executor", executor, heartbeat) + ctx.Warnf("Executor %s has not reported a hearbeart since %v. Will expire all jobs running on this executor", executor, heartbeat) staleExecutors[executor] = true } } // All clusters have had a heartbeat recently. No need to expire any jobs if len(staleExecutors) == 0 { - log.Infof("No stale executors found. No jobs need to be expired") + ctx.Infof("No stale executors found. No jobs need to be expired") return nil, nil } @@ -749,7 +778,7 @@ func (s *Scheduler) expireJobsIfNecessary(ctx context.Context, txn *jobdb.Txn) ( run := job.LatestRun() if run != nil && !job.Queued() && staleExecutors[run.Executor()] { - log.Warnf("Cancelling job %s as it is running on lost executor %s", job.Id(), run.Executor()) + ctx.Warnf("Cancelling job %s as it is running on lost executor %s", job.Id(), run.Executor()) jobsToUpdate = append(jobsToUpdate, job.WithQueued(false).WithFailed(true).WithUpdatedRun(run.WithFailed(true))) jobId, err := armadaevents.ProtoUuidFromUlidString(job.Id()) @@ -797,6 +826,51 @@ func (s *Scheduler) expireJobsIfNecessary(ctx context.Context, txn *jobdb.Txn) ( return events, nil } +// cancelQueuedJobsIfExpired generates cancel request messages for any queued jobs that exceed their queueTtl. +func (s *Scheduler) cancelQueuedJobsIfExpired(txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { + jobsToCancel := make([]*jobdb.Job, 0) + events := make([]*armadaevents.EventSequence, 0) + it := s.jobDb.QueuedJobsByTtl(txn) + + // `it` is ordered such that the jobs with the least ttl remaining come first, hence we exit early if we find a job that is not expired. 
+ for job, _ := it.Next(); job != nil && job.HasQueueTtlExpired(); job, _ = it.Next() { + if job.InTerminalState() { + continue + } + + job = job.WithCancelRequested(true).WithQueued(false).WithCancelled(true) + jobId, err := armadaevents.ProtoUuidFromUlidString(job.Id()) + if err != nil { + return nil, err + } + + reason := "Expired queue ttl" + cancel := &armadaevents.EventSequence{ + Queue: job.Queue(), + JobSetName: job.Jobset(), + Events: []*armadaevents.EventSequence_Event{ + { + Created: s.now(), + Event: &armadaevents.EventSequence_Event_CancelJob{CancelJob: &armadaevents.CancelJob{JobId: jobId, Reason: reason}}, + }, + { + Created: s.now(), + Event: &armadaevents.EventSequence_Event_CancelledJob{CancelledJob: &armadaevents.CancelledJob{JobId: jobId, Reason: reason}}, + }, + }, + } + + jobsToCancel = append(jobsToCancel, job) + events = append(events, cancel) + } + + if err := s.jobDb.Upsert(txn, jobsToCancel); err != nil { + return nil, err + } + + return events, nil +} + // now is a convenience function for generating a pointer to a time.Time (as required by armadaevents). // It exists because Go won't let you do &s.clock.Now(). func (s *Scheduler) now() *time.Time { @@ -807,16 +881,14 @@ func (s *Scheduler) now() *time.Time { // initialise builds the initial job db based on the current database state // right now this is quite dim and loads the entire database but in the future // we should be able to make it load active jobs/runs only -func (s *Scheduler) initialise(ctx context.Context) error { - log := ctxlogrus.Extract(ctx) - log = log.WithField("function", "initialise") +func (s *Scheduler) initialise(ctx *armadacontext.Context) error { for { select { case <-ctx.Done(): return nil default: if _, err := s.syncState(ctx); err != nil { - log.WithError(err).Error("failed to initialise; trying again in 1 second") + logging.WithStacktrace(ctx, err).Error("failed to initialise; trying again in 1 second") time.Sleep(1 * time.Second) } else { // Initialisation succeeded. @@ -829,10 +901,7 @@ func (s *Scheduler) initialise(ctx context.Context) error { // ensureDbUpToDate blocks until that the database state contains all Pulsar messages sent *before* this // function was called. This is achieved firstly by publishing messages to Pulsar and then polling the // database until all messages have been written. -func (s *Scheduler) ensureDbUpToDate(ctx context.Context, pollInterval time.Duration) error { - log := ctxlogrus.Extract(ctx) - log = log.WithField("function", "ensureDbUpToDate") - +func (s *Scheduler) ensureDbUpToDate(ctx *armadacontext.Context, pollInterval time.Duration) error { groupId := uuid.New() var numSent uint32 var err error @@ -846,7 +915,7 @@ func (s *Scheduler) ensureDbUpToDate(ctx context.Context, pollInterval time.Dura default: numSent, err = s.publisher.PublishMarkers(ctx, groupId) if err != nil { - log.WithError(err).Error("Error sending marker messages to pulsar") + logging.WithStacktrace(ctx, err).Error("Error sending marker messages to pulsar") s.clock.Sleep(pollInterval) } else { messagesSent = true @@ -862,13 +931,15 @@ func (s *Scheduler) ensureDbUpToDate(ctx context.Context, pollInterval time.Dura default: numReceived, err := s.jobRepository.CountReceivedPartitions(ctx, groupId) if err != nil { - log.WithError(err).Error("Error querying the database or marker messages") + logging. + WithStacktrace(ctx, err). 
+ Error("Error querying the database or marker messages") } if numSent == numReceived { - log.Infof("Successfully ensured that database state is up to date") + ctx.Infof("Successfully ensured that database state is up to date") return nil } - log.Infof("Recevied %d partitions, still waiting on %d", numReceived, numSent-numReceived) + ctx.Infof("Recevied %d partitions, still waiting on %d", numReceived, numSent-numReceived) s.clock.Sleep(pollInterval) } } diff --git a/internal/scheduler/scheduler_metrics.go b/internal/scheduler/scheduler_metrics.go index b6f79e66612..3ba197ebeba 100644 --- a/internal/scheduler/scheduler_metrics.go +++ b/internal/scheduler/scheduler_metrics.go @@ -4,9 +4,10 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" + schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/interfaces" ) @@ -21,9 +22,15 @@ type SchedulerMetrics struct { // Cycle time when reconciling, as leader or follower. reconcileCycleTime prometheus.Histogram // Number of jobs scheduled per queue. - scheduledJobsPerQueue prometheus.GaugeVec + scheduledJobsPerQueue prometheus.CounterVec // Number of jobs preempted per queue. - preemptedJobsPerQueue prometheus.GaugeVec + preemptedJobsPerQueue prometheus.CounterVec + // Number of jobs considered per queue/pool. + consideredJobs prometheus.CounterVec + // Fair share of each queue. + fairSharePerQueue prometheus.GaugeVec + // Actual share of each queue. + actualSharePerQueue prometheus.GaugeVec } func NewSchedulerMetrics(config configuration.SchedulerMetricsConfig) *SchedulerMetrics { @@ -53,8 +60,8 @@ func NewSchedulerMetrics(config configuration.SchedulerMetricsConfig) *Scheduler }, ) - scheduledJobs := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + scheduledJobs := prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: NAMESPACE, Subsystem: SUBSYSTEM, Name: "scheduled_jobs", @@ -66,8 +73,8 @@ func NewSchedulerMetrics(config configuration.SchedulerMetricsConfig) *Scheduler }, ) - preemptedJobs := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ + preemptedJobs := prometheus.NewCounterVec( + prometheus.CounterOpts{ Namespace: NAMESPACE, Subsystem: SUBSYSTEM, Name: "preempted_jobs", @@ -79,19 +86,69 @@ func NewSchedulerMetrics(config configuration.SchedulerMetricsConfig) *Scheduler }, ) + consideredJobs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: NAMESPACE, + Subsystem: SUBSYSTEM, + Name: "considered_jobs", + Help: "Number of jobs considered each round per queue and pool.", + }, + []string{ + "queue", + "pool", + }, + ) + + fairSharePerQueue := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: NAMESPACE, + Subsystem: SUBSYSTEM, + Name: "fair_share", + Help: "Fair share of each queue and pool.", + }, + []string{ + "queue", + "pool", + }, + ) + + actualSharePerQueue := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: NAMESPACE, + Subsystem: SUBSYSTEM, + Name: "actual_share", + Help: "Actual share of each queue and pool.", + }, + []string{ + "queue", + "pool", + }, + ) + prometheus.MustRegister(scheduleCycleTime) prometheus.MustRegister(reconcileCycleTime) prometheus.MustRegister(scheduledJobs) prometheus.MustRegister(preemptedJobs) + prometheus.MustRegister(consideredJobs) + prometheus.MustRegister(fairSharePerQueue) + 
prometheus.MustRegister(actualSharePerQueue) return &SchedulerMetrics{ scheduleCycleTime: scheduleCycleTime, reconcileCycleTime: reconcileCycleTime, scheduledJobsPerQueue: *scheduledJobs, preemptedJobsPerQueue: *preemptedJobs, + consideredJobs: *consideredJobs, + fairSharePerQueue: *fairSharePerQueue, + actualSharePerQueue: *actualSharePerQueue, } } +func (metrics *SchedulerMetrics) ResetGaugeMetrics() { + metrics.fairSharePerQueue.Reset() + metrics.actualSharePerQueue.Reset() +} + func (metrics *SchedulerMetrics) ReportScheduleCycleTime(cycleTime time.Duration) { metrics.scheduleCycleTime.Observe(float64(cycleTime.Milliseconds())) } @@ -100,19 +157,29 @@ func (metrics *SchedulerMetrics) ReportReconcileCycleTime(cycleTime time.Duratio metrics.reconcileCycleTime.Observe(float64(cycleTime.Milliseconds())) } -func (metrics *SchedulerMetrics) ReportSchedulerResult(result *SchedulerResult) { - metrics.reportScheduledJobs(result.ScheduledJobs) - metrics.reportPreemptedJobs(result.PreemptedJobs) +func (metrics *SchedulerMetrics) ReportSchedulerResult(ctx *armadacontext.Context, result SchedulerResult) { + if result.EmptyResult { + return // TODO: Add logging or maybe place to add failure metric? + } + + // Report the total scheduled jobs (possibly we can get these out of contexts?) + metrics.reportScheduledJobs(ctx, result.ScheduledJobs) + metrics.reportPreemptedJobs(ctx, result.PreemptedJobs) + + // TODO: When more metrics are added, consider consolidating into a single loop over the data. + // Report the number of considered jobs. + metrics.reportNumberOfJobsConsidered(ctx, result.SchedulingContexts) + metrics.reportQueueShares(ctx, result.SchedulingContexts) } -func (metrics *SchedulerMetrics) reportScheduledJobs(scheduledJobs []interfaces.LegacySchedulerJob) { +func (metrics *SchedulerMetrics) reportScheduledJobs(ctx *armadacontext.Context, scheduledJobs []interfaces.LegacySchedulerJob) { jobAggregates := aggregateJobs(scheduledJobs) - observeJobAggregates(metrics.scheduledJobsPerQueue, jobAggregates) + observeJobAggregates(ctx, metrics.scheduledJobsPerQueue, jobAggregates) } -func (metrics *SchedulerMetrics) reportPreemptedJobs(preemptedJobs []interfaces.LegacySchedulerJob) { +func (metrics *SchedulerMetrics) reportPreemptedJobs(ctx *armadacontext.Context, preemptedJobs []interfaces.LegacySchedulerJob) { jobAggregates := aggregateJobs(preemptedJobs) - observeJobAggregates(metrics.preemptedJobsPerQueue, jobAggregates) + observeJobAggregates(ctx, metrics.preemptedJobsPerQueue, jobAggregates) } type collectionKey struct { @@ -132,8 +199,8 @@ func aggregateJobs[S ~[]E, E interfaces.LegacySchedulerJob](scheduledJobs S) map return groups } -// observeJobAggregates reports a set of job aggregates to a given HistogramVec by queue and priorityClass. -func observeJobAggregates(metric prometheus.GaugeVec, jobAggregates map[collectionKey]int) { +// observeJobAggregates reports a set of job aggregates to a given CounterVec by queue and priorityClass. +func observeJobAggregates(ctx *armadacontext.Context, metric prometheus.CounterVec, jobAggregates map[collectionKey]int) { for key, count := range jobAggregates { queue := key.queue priorityClassName := key.priorityClass @@ -142,9 +209,53 @@ func observeJobAggregates(metric prometheus.GaugeVec, jobAggregates map[collecti if err != nil { // A metric failure isn't reason to kill the programme. 
- log.Error(err) + ctx.Errorf("error reteriving considered jobs observer for queue %s, priorityClass %s", queue, priorityClassName) } else { observer.Add(float64(count)) } } } + +func (metrics *SchedulerMetrics) reportNumberOfJobsConsidered(ctx *armadacontext.Context, schedulingContexts []*schedulercontext.SchedulingContext) { + for _, schedContext := range schedulingContexts { + pool := schedContext.Pool + for queue, queueContext := range schedContext.QueueSchedulingContexts { + count := len(queueContext.UnsuccessfulJobSchedulingContexts) + len(queueContext.SuccessfulJobSchedulingContexts) + + observer, err := metrics.consideredJobs.GetMetricWithLabelValues(queue, pool) + if err != nil { + ctx.Errorf("error reteriving considered jobs observer for queue %s, pool %s", queue, pool) + } else { + observer.Add(float64(count)) + } + } + } +} + +func (metrics *SchedulerMetrics) reportQueueShares(ctx *armadacontext.Context, schedulingContexts []*schedulercontext.SchedulingContext) { + for _, schedContext := range schedulingContexts { + totalCost := schedContext.TotalCost() + totalWeight := schedContext.WeightSum + pool := schedContext.Pool + + for queue, queueContext := range schedContext.QueueSchedulingContexts { + fairShare := queueContext.Weight / totalWeight + + observer, err := metrics.fairSharePerQueue.GetMetricWithLabelValues(queue, pool) + if err != nil { + ctx.Errorf("error retrieving considered jobs observer for queue %s, pool %s", queue, pool) + } else { + observer.Set(fairShare) + } + + actualShare := schedContext.FairnessCostProvider.CostFromQueue(queueContext) / totalCost + + observer, err = metrics.actualSharePerQueue.GetMetricWithLabelValues(queue, pool) + if err != nil { + ctx.Errorf("error reteriving considered jobs observer for queue %s, pool %s", queue, pool) + } else { + observer.Set(actualShare) + } + } + } +} diff --git a/internal/scheduler/scheduler_test.go b/internal/scheduler/scheduler_test.go index df889db79f6..f513d79a8f5 100644 --- a/internal/scheduler/scheduler_test.go +++ b/internal/scheduler/scheduler_test.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "sync" "testing" @@ -15,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" protoutil "github.com/armadaproject/armada/internal/common/proto" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/util" @@ -76,7 +76,22 @@ var ( Version: 2, } updatedSchedulingInfoBytes = protoutil.MustMarshall(updatedSchedulingInfo) - schedulerMetrics = NewSchedulerMetrics(configuration.SchedulerMetricsConfig{ + schedulingInfoWithQueueTtl = &schedulerobjects.JobSchedulingInfo{ + AtMostOnce: true, + ObjectRequirements: []*schedulerobjects.ObjectRequirements{ + { + Requirements: &schedulerobjects.ObjectRequirements_PodRequirements{ + PodRequirements: &schedulerobjects.PodRequirements{ + Priority: int32(10), + }, + }, + }, + }, + QueueTtlSeconds: 2, + Version: 1, + } + schedulingInfoWithQueueTtlBytes = protoutil.MustMarshall(schedulingInfoWithQueueTtl) + schedulerMetrics = NewSchedulerMetrics(configuration.SchedulerMetricsConfig{ ScheduleCycleTimeHistogramSettings: configuration.HistogramConfig{ Start: 1, Factor: 1.1, @@ -103,6 +118,19 @@ var queuedJob = jobdb.NewJob( false, 1) +var queuedJobWithExpiredTtl = jobdb.NewJob( + util.NewULID(), + "testJobset", + "testQueue", + 0, + schedulingInfoWithQueueTtl, + true, + 1, + false, + 
false, + false, + 1) + var leasedJob = jobdb.NewJob( util.NewULID(), "testJobset", @@ -184,8 +212,10 @@ func TestScheduler_TestCycle(t *testing.T) { expectedJobRunLeased []string // ids of jobs we expect to have produced leased messages expectedJobRunErrors []string // ids of jobs we expect to have produced jobRunErrors messages expectedJobErrors []string // ids of jobs we expect to have produced jobErrors messages + expectedJobsToFail []string // ids of jobs we expect to fail without having failed the overall scheduling cycle expectedJobRunPreempted []string // ids of jobs we expect to have produced jobRunPreempted messages expectedJobCancelled []string // ids of jobs we expect to have produced cancelled messages + expectedJobRequestCancel []string // ids of jobs we expect to have produced request cancel expectedJobReprioritised []string // ids of jobs we expect to have produced reprioritised messages expectedQueued []string // ids of jobs we expect to have produced requeued messages expectedJobSucceeded []string // ids of jobs we expect to have produced succeeeded messages @@ -225,6 +255,12 @@ func TestScheduler_TestCycle(t *testing.T) { expectedQueued: []string{queuedJob.Id()}, expectedQueuedVersion: queuedJob.QueuedVersion(), }, + "FailedJobs in scheduler result will publish appropriate messages": { + initialJobs: []*jobdb.Job{queuedJob}, + expectedJobErrors: []string{queuedJob.Id()}, + expectedJobsToFail: []string{queuedJob.Id()}, + expectedTerminal: []string{queuedJob.Id()}, + }, "No updates to an already leased job": { initialJobs: []*jobdb.Job{leasedJob}, expectedLeased: []string{leasedJob.Id()}, @@ -386,6 +422,82 @@ func TestScheduler_TestCycle(t *testing.T) { expectedTerminal: []string{leasedJob.Id()}, expectedQueuedVersion: leasedJob.QueuedVersion(), }, + "New job from postgres with expired queue ttl is cancel requested": { + jobUpdates: []database.Job{ + { + JobID: queuedJobWithExpiredTtl.Id(), + JobSet: queuedJobWithExpiredTtl.Jobset(), + Queue: queuedJobWithExpiredTtl.Queue(), + Queued: queuedJobWithExpiredTtl.Queued(), + QueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + Serial: 1, + Submitted: queuedJobWithExpiredTtl.Created(), + SchedulingInfo: schedulingInfoWithQueueTtlBytes, + }, + }, + + // We expect to publish request cancel and cancelled message this cycle. + // The job should also be removed from the queue and set to a terminal state. + expectedJobRequestCancel: []string{queuedJobWithExpiredTtl.Id()}, + expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, + expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, + }, + "Existing jobDb job with expired queue ttl is cancel requested": { + initialJobs: []*jobdb.Job{queuedJobWithExpiredTtl}, + + // We expect to publish request cancel and cancelled message this cycle. + // The job should also be removed from the queue and set to a terminal state. 
+ expectedJobRequestCancel: []string{queuedJobWithExpiredTtl.Id()}, + expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, + expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, + }, + "New postgres job with cancel requested results in cancel messages": { + jobUpdates: []database.Job{ + { + JobID: queuedJobWithExpiredTtl.Id(), + JobSet: queuedJobWithExpiredTtl.Jobset(), + Queue: queuedJobWithExpiredTtl.Queue(), + Queued: queuedJobWithExpiredTtl.Queued(), + QueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + Serial: 1, + Submitted: queuedJobWithExpiredTtl.Created(), + CancelRequested: true, + Cancelled: false, + SchedulingInfo: schedulingInfoWithQueueTtlBytes, + }, + }, + + // We have already got a request cancel from the DB, so only publish a cancelled message. + // The job should also be removed from the queue and set to a terminal state.# + expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, + expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, + }, + "Postgres job with cancel requested results in cancel messages": { + initialJobs: []*jobdb.Job{queuedJobWithExpiredTtl.WithCancelRequested(true)}, + jobUpdates: []database.Job{ + { + JobID: queuedJobWithExpiredTtl.Id(), + JobSet: queuedJobWithExpiredTtl.Jobset(), + Queue: queuedJobWithExpiredTtl.Queue(), + Queued: queuedJobWithExpiredTtl.Queued(), + QueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + Serial: 1, + Submitted: queuedJobWithExpiredTtl.Created(), + CancelRequested: true, + Cancelled: false, + SchedulingInfo: schedulingInfoWithQueueTtlBytes, + }, + }, + + // We have already got a request cancel from the DB/existing job state, so only publish a cancelled message. + // The job should also be removed from the queue and set to a terminal state. 
+ expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, + expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, + }, "Job reprioritised": { initialJobs: []*jobdb.Job{queuedJob}, jobUpdates: []database.Job{ @@ -487,6 +599,7 @@ func TestScheduler_TestCycle(t *testing.T) { schedulingAlgo := &testSchedulingAlgo{ jobsToSchedule: tc.expectedJobRunLeased, jobsToPreempt: tc.expectedJobRunPreempted, + jobsToFail: tc.expectedJobsToFail, shouldError: tc.scheduleError, } publisher := &testPublisher{shouldError: tc.publishError} @@ -527,8 +640,8 @@ func TestScheduler_TestCycle(t *testing.T) { txn.Commit() // run a scheduler cycle - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - err = sched.cycle(ctx, false, sched.leaderController.GetToken(), true) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + _, err = sched.cycle(ctx, false, sched.leaderController.GetToken(), true) if tc.fetchError || tc.publishError || tc.scheduleError { assert.Error(t, err) } else { @@ -545,6 +658,7 @@ func TestScheduler_TestCycle(t *testing.T) { fmt.Sprintf("%T", &armadaevents.EventSequence_Event_ReprioritisedJob{}): stringSet(tc.expectedJobReprioritised), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobSucceeded{}): stringSet(tc.expectedJobSucceeded), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobRequeued{}): stringSet(tc.expectedRequeued), + fmt.Sprintf("%T", &armadaevents.EventSequence_Event_CancelJob{}): stringSet(tc.expectedJobRequestCancel), } err = subtractEventsFromOutstandingEventsByType(publisher.events, outstandingEventsByType) require.NoError(t, err) @@ -684,7 +798,7 @@ func TestRun(t *testing.T) { sched.clock = testClock - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := armadacontext.WithCancel(armadacontext.Background()) //nolint:errcheck go sched.Run(ctx) @@ -861,7 +975,7 @@ func TestScheduler_TestSyncState(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() // Test objects @@ -943,31 +1057,31 @@ type testJobRepository struct { numReceivedPartitions uint32 } -func (t *testJobRepository) FindInactiveRuns(ctx context.Context, runIds []uuid.UUID) ([]uuid.UUID, error) { +func (t *testJobRepository) FindInactiveRuns(ctx *armadacontext.Context, runIds []uuid.UUID) ([]uuid.UUID, error) { // TODO implement me panic("implement me") } -func (t *testJobRepository) FetchJobRunLeases(ctx context.Context, executor string, maxResults uint, excludedRunIds []uuid.UUID) ([]*database.JobRunLease, error) { +func (t *testJobRepository) FetchJobRunLeases(ctx *armadacontext.Context, executor string, maxResults uint, excludedRunIds []uuid.UUID) ([]*database.JobRunLease, error) { // TODO implement me panic("implement me") } -func (t *testJobRepository) FetchJobUpdates(ctx context.Context, jobSerial int64, jobRunSerial int64) ([]database.Job, []database.Run, error) { +func (t *testJobRepository) FetchJobUpdates(ctx *armadacontext.Context, jobSerial int64, jobRunSerial int64) ([]database.Job, []database.Run, error) { if t.shouldError { return nil, nil, errors.New("error fetchiung job updates") } return t.updatedJobs, t.updatedRuns, nil } -func (t *testJobRepository) FetchJobRunErrors(ctx context.Context, runIds []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, 
error) { +func (t *testJobRepository) FetchJobRunErrors(ctx *armadacontext.Context, runIds []uuid.UUID) (map[uuid.UUID]*armadaevents.Error, error) { if t.shouldError { return nil, errors.New("error fetching job run errors") } return t.errors, nil } -func (t *testJobRepository) CountReceivedPartitions(ctx context.Context, groupId uuid.UUID) (uint32, error) { +func (t *testJobRepository) CountReceivedPartitions(ctx *armadacontext.Context, groupId uuid.UUID) (uint32, error) { if t.shouldError { return 0, errors.New("error counting received partitions") } @@ -979,18 +1093,18 @@ type testExecutorRepository struct { shouldError bool } -func (t testExecutorRepository) GetExecutors(ctx context.Context) ([]*schedulerobjects.Executor, error) { +func (t testExecutorRepository) GetExecutors(ctx *armadacontext.Context) ([]*schedulerobjects.Executor, error) { panic("not implemented") } -func (t testExecutorRepository) GetLastUpdateTimes(ctx context.Context) (map[string]time.Time, error) { +func (t testExecutorRepository) GetLastUpdateTimes(ctx *armadacontext.Context) (map[string]time.Time, error) { if t.shouldError { return nil, errors.New("error getting last update time") } return t.updateTimes, nil } -func (t testExecutorRepository) StoreExecutor(ctx context.Context, executor *schedulerobjects.Executor) error { +func (t testExecutorRepository) StoreExecutor(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error { panic("not implemented") } @@ -998,16 +1112,18 @@ type testSchedulingAlgo struct { numberOfScheduleCalls int jobsToPreempt []string jobsToSchedule []string + jobsToFail []string shouldError bool } -func (t *testSchedulingAlgo) Schedule(ctx context.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb) (*SchedulerResult, error) { +func (t *testSchedulingAlgo) Schedule(ctx *armadacontext.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb) (*SchedulerResult, error) { t.numberOfScheduleCalls++ if t.shouldError { return nil, errors.New("error scheduling jobs") } preemptedJobs := make([]*jobdb.Job, 0, len(t.jobsToPreempt)) scheduledJobs := make([]*jobdb.Job, 0, len(t.jobsToSchedule)) + failedJobs := make([]*jobdb.Job, 0, len(t.jobsToFail)) for _, id := range t.jobsToPreempt { job := jobDb.GetById(txn, id) if job == nil { @@ -1035,13 +1151,27 @@ func (t *testSchedulingAlgo) Schedule(ctx context.Context, txn *jobdb.Txn, jobDb job = job.WithQueuedVersion(job.QueuedVersion()+1).WithQueued(false).WithNewRun("test-executor", "test-node", "node") scheduledJobs = append(scheduledJobs, job) } + for _, id := range t.jobsToFail { + job := jobDb.GetById(txn, id) + if job == nil { + return nil, errors.Errorf("was asked to lease %s but job does not exist", id) + } + if !job.Queued() { + return nil, errors.Errorf("was asked to lease %s but job was already leased", job.Id()) + } + job = job.WithQueued(false).WithFailed(true) + failedJobs = append(failedJobs, job) + } if err := jobDb.Upsert(txn, preemptedJobs); err != nil { return nil, err } if err := jobDb.Upsert(txn, scheduledJobs); err != nil { return nil, err } - return NewSchedulerResult(preemptedJobs, scheduledJobs, nil), nil + if err := jobDb.Upsert(txn, failedJobs); err != nil { + return nil, err + } + return NewSchedulerResultForTest(preemptedJobs, scheduledJobs, failedJobs, nil), nil } type testPublisher struct { @@ -1049,7 +1179,7 @@ type testPublisher struct { shouldError bool } -func (t *testPublisher) PublishMessages(ctx context.Context, events []*armadaevents.EventSequence, _ func() bool) error { +func (t *testPublisher) PublishMessages(ctx 
*armadacontext.Context, events []*armadaevents.EventSequence, _ func() bool) error { t.events = events if t.shouldError { return errors.New("Error when publishing") @@ -1061,7 +1191,7 @@ func (t *testPublisher) Reset() { t.events = nil } -func (t *testPublisher) PublishMarkers(ctx context.Context, groupId uuid.UUID) (uint32, error) { +func (t *testPublisher) PublishMarkers(ctx *armadacontext.Context, groupId uuid.UUID) (uint32, error) { return 100, nil } diff --git a/internal/scheduler/schedulerapp.go b/internal/scheduler/schedulerapp.go index ef742c3dc24..c045591b175 100644 --- a/internal/scheduler/schedulerapp.go +++ b/internal/scheduler/schedulerapp.go @@ -10,21 +10,20 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/go-redis/redis" "github.com/google/uuid" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "github.com/armadaproject/armada/internal/common" "github.com/armadaproject/armada/internal/common/app" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth" dbcommon "github.com/armadaproject/armada/internal/common/database" grpcCommon "github.com/armadaproject/armada/internal/common/grpc" "github.com/armadaproject/armada/internal/common/health" + "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/internal/common/stringinterner" schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" @@ -35,9 +34,7 @@ import ( // Run sets up a Scheduler application and runs it until a SIGTERM is received func Run(config schedulerconfig.Configuration) error { - g, ctx := errgroup.WithContext(app.CreateContextWithShutdown()) - logrusLogger := log.NewEntry(log.StandardLogger()) - ctx = ctxlogrus.ToContext(ctx, logrusLogger) + g, ctx := armadacontext.ErrGroup(app.CreateContextWithShutdown()) ////////////////////////////////////////////////////////////////////////// // Health Checks @@ -58,7 +55,7 @@ func Run(config schedulerconfig.Configuration) error { ////////////////////////////////////////////////////////////////////////// // Database setup (postgres and redis) ////////////////////////////////////////////////////////////////////////// - log.Infof("Setting up database connections") + ctx.Infof("Setting up database connections") db, err := dbcommon.OpenPgxPool(config.Postgres) if err != nil { return errors.WithMessage(err, "Error opening connection to postgres") @@ -71,7 +68,9 @@ func Run(config schedulerconfig.Configuration) error { defer func() { err := redisClient.Close() if err != nil { - log.WithError(errors.WithStack(err)).Warnf("Redis client didn't close down cleanly") + logging. + WithStacktrace(ctx, err). 
+ Warnf("Redis client didn't close down cleanly") } }() queueRepository := database.NewLegacyQueueRepository(redisClient) @@ -80,7 +79,7 @@ func Run(config schedulerconfig.Configuration) error { ////////////////////////////////////////////////////////////////////////// // Pulsar ////////////////////////////////////////////////////////////////////////// - log.Infof("Setting up Pulsar connectivity") + ctx.Infof("Setting up Pulsar connectivity") pulsarClient, err := pulsarutils.NewPulsarClient(&config.Pulsar) if err != nil { return errors.WithMessage(err, "Error creating pulsar client") @@ -100,7 +99,7 @@ func Run(config schedulerconfig.Configuration) error { ////////////////////////////////////////////////////////////////////////// // Leader Election ////////////////////////////////////////////////////////////////////////// - leaderController, err := createLeaderController(config.Leader) + leaderController, err := createLeaderController(ctx, config.Leader) if err != nil { return errors.WithMessage(err, "error creating leader controller") } @@ -109,7 +108,7 @@ func Run(config schedulerconfig.Configuration) error { ////////////////////////////////////////////////////////////////////////// // Executor Api ////////////////////////////////////////////////////////////////////////// - log.Infof("Setting up executor api") + ctx.Infof("Setting up executor api") apiProducer, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{ Name: fmt.Sprintf("armada-executor-api-%s", uuid.NewString()), CompressionType: config.Pulsar.CompressionType, @@ -147,7 +146,7 @@ func Run(config schedulerconfig.Configuration) error { } executorapi.RegisterExecutorApiServer(grpcServer, executorServer) services = append(services, func() error { - log.Infof("Executor api listening on %s", lis.Addr()) + ctx.Infof("Executor api listening on %s", lis.Addr()) return grpcServer.Serve(lis) }) services = append(services, grpcCommon.CreateShutdownHandler(ctx, 5*time.Second, grpcServer)) @@ -155,7 +154,7 @@ func Run(config schedulerconfig.Configuration) error { ////////////////////////////////////////////////////////////////////////// // Scheduling ////////////////////////////////////////////////////////////////////////// - log.Infof("setting up scheduling loop") + ctx.Infof("setting up scheduling loop") stringInterner, err := stringinterner.New(config.InternedStringsCacheSize) if err != nil { return errors.WithMessage(err, "error creating string interner") @@ -241,14 +240,14 @@ func Run(config schedulerconfig.Configuration) error { return g.Wait() } -func createLeaderController(config schedulerconfig.LeaderConfig) (LeaderController, error) { +func createLeaderController(ctx *armadacontext.Context, config schedulerconfig.LeaderConfig) (LeaderController, error) { switch mode := strings.ToLower(config.Mode); mode { case "standalone": - log.Infof("Scheduler will run in standalone mode") + ctx.Infof("Scheduler will run in standalone mode") return NewStandaloneLeaderController(), nil case "kubernetes": - log.Infof("Scheduler will run kubernetes mode") - clusterConfig, err := loadClusterConfig() + ctx.Infof("Scheduler will run kubernetes mode") + clusterConfig, err := loadClusterConfig(ctx) if err != nil { return nil, errors.Wrapf(err, "Error creating kubernetes client") } @@ -266,14 +265,14 @@ func createLeaderController(config schedulerconfig.LeaderConfig) (LeaderControll } } -func loadClusterConfig() (*rest.Config, error) { +func loadClusterConfig(ctx *armadacontext.Context) (*rest.Config, error) { config, err := 
rest.InClusterConfig() if err == rest.ErrNotInCluster { - log.Info("Running with default client configuration") + ctx.Info("Running with default client configuration") rules := clientcmd.NewDefaultClientConfigLoadingRules() overrides := &clientcmd.ConfigOverrides{} return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig() } - log.Info("Running with in cluster client configuration") + ctx.Info("Running with in cluster client configuration") return config, err } diff --git a/internal/scheduler/schedulerobjects/schedulerobjects.pb.go b/internal/scheduler/schedulerobjects/schedulerobjects.pb.go index 402a1e4fd0c..5a0dfc244a8 100644 --- a/internal/scheduler/schedulerobjects/schedulerobjects.pb.go +++ b/internal/scheduler/schedulerobjects/schedulerobjects.pb.go @@ -649,6 +649,8 @@ type JobSchedulingInfo struct { // Kubernetes objects that make up this job and their respective scheduling requirements. ObjectRequirements []*ObjectRequirements `protobuf:"bytes,5,rep,name=object_requirements,json=objectRequirements,proto3" json:"objectRequirements,omitempty"` Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + QueueTtlSeconds int64 `protobuf:"varint,10,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` } func (m *JobSchedulingInfo) Reset() { *m = JobSchedulingInfo{} } @@ -743,6 +745,13 @@ func (m *JobSchedulingInfo) GetVersion() uint32 { return 0 } +func (m *JobSchedulingInfo) GetQueueTtlSeconds() int64 { + if m != nil { + return m.QueueTtlSeconds + } + return 0 +} + // Message capturing the scheduling requirements of a particular Kubernetes object. 
type ObjectRequirements struct { // Types that are valid to be assigned to Requirements: @@ -1025,144 +1034,146 @@ func init() { } var fileDescriptor_97dadc5fbd620721 = []byte{ - // 2186 bytes of a gzipped FileDescriptorProto + // 2217 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x19, 0x4d, 0x73, 0xdb, 0xc6, - 0x55, 0x10, 0x29, 0x89, 0x5c, 0xea, 0x83, 0x5a, 0x7d, 0x18, 0xa2, 0x6d, 0x82, 0x56, 0xdc, 0x8c, - 0xd2, 0x38, 0x64, 0xa3, 0x74, 0xa6, 0x1e, 0xb5, 0x17, 0x51, 0x52, 0x6b, 0xca, 0x36, 0x25, 0x83, - 0x52, 0x3b, 0xed, 0x4c, 0x83, 0x01, 0x89, 0x15, 0x8d, 0x08, 0xc4, 0xd2, 0xc0, 0x42, 0x0d, 0x73, - 0x6e, 0x0f, 0x9d, 0xcc, 0xa4, 0x99, 0x4e, 0xda, 0x66, 0xa6, 0x33, 0xed, 0xe4, 0x96, 0x5f, 0xd0, - 0x1e, 0x7a, 0xeb, 0xc9, 0xc7, 0x1c, 0x7b, 0x62, 0x3b, 0xf6, 0x8d, 0xbf, 0xa2, 0xb3, 0xbb, 0x00, - 0xb1, 0x04, 0x40, 0x51, 0x8e, 0xeb, 0xea, 0x44, 0xee, 0xfb, 0xde, 0xf7, 0xde, 0xbe, 0x7d, 0x6f, - 0x01, 0x76, 0x4c, 0x9b, 0x20, 0xc7, 0xd6, 0xad, 0x8a, 0xdb, 0x7a, 0x8a, 0x0c, 0xcf, 0x42, 0x4e, - 0xf8, 0x0f, 0x37, 0x3f, 0x42, 0x2d, 0xe2, 0xc6, 0x00, 0xe5, 0xae, 0x83, 0x09, 0x86, 0xf9, 0x28, - 0xbc, 0xa0, 0xb4, 0x31, 0x6e, 0x5b, 0xa8, 0xc2, 0xf0, 0x4d, 0xef, 0xac, 0x42, 0xcc, 0x0e, 0x72, - 0x89, 0xde, 0xe9, 0x72, 0x96, 0xc2, 0xe6, 0xf9, 0x7d, 0xb7, 0x6c, 0xe2, 0x8a, 0xde, 0x35, 0x2b, - 0x2d, 0xec, 0xa0, 0xca, 0xc5, 0xfb, 0x95, 0x36, 0xb2, 0x91, 0xa3, 0x13, 0x64, 0xf8, 0x34, 0xdf, - 0x0f, 0x69, 0x3a, 0x7a, 0xeb, 0xa9, 0x69, 0x23, 0xa7, 0x57, 0xe9, 0x9e, 0xb7, 0x19, 0x93, 0x83, - 0x5c, 0xec, 0x39, 0x2d, 0x14, 0xe3, 0x7a, 0xaf, 0x6d, 0x92, 0xa7, 0x5e, 0xb3, 0xdc, 0xc2, 0x9d, - 0x4a, 0x1b, 0xb7, 0x71, 0x68, 0x03, 0x5d, 0xb1, 0x05, 0xfb, 0xc7, 0xc9, 0x37, 0xbf, 0x4e, 0x81, - 0xcc, 0xc1, 0xc7, 0xa8, 0xe5, 0x11, 0xec, 0xc0, 0x12, 0x98, 0x36, 0x0d, 0x59, 0x2a, 0x49, 0x5b, - 0xd9, 0x6a, 0x7e, 0xd0, 0x57, 0xe6, 0x4d, 0xe3, 0x1e, 0xee, 0x98, 0x04, 0x75, 0xba, 0xa4, 0xa7, - 0x4e, 0x9b, 0x06, 0x7c, 0x1b, 0xa4, 0xbb, 0x18, 0x5b, 0xf2, 0x34, 0xa3, 0x81, 0x83, 0xbe, 0xb2, - 0x48, 0xd7, 0x02, 0x15, 0xc3, 0xc3, 0x5d, 0x30, 0x63, 0x63, 0x03, 0xb9, 0x72, 0xaa, 0x94, 0xda, - 0xca, 0x6d, 0xaf, 0x97, 0x63, 0xae, 0xab, 0x63, 0x03, 0x55, 0x57, 0x06, 0x7d, 0x65, 0x89, 0x11, - 0x0a, 0x12, 0x38, 0x27, 0xfc, 0x10, 0x2c, 0x76, 0x4c, 0xdb, 0xec, 0x78, 0x9d, 0x43, 0xdc, 0x6c, - 0x98, 0x9f, 0x20, 0x39, 0x5d, 0x92, 0xb6, 0x72, 0xdb, 0xc5, 0xb8, 0x2c, 0xd5, 0x77, 0xc6, 0x23, - 0xd3, 0x25, 0xd5, 0xf5, 0xe7, 0x7d, 0x65, 0x8a, 0x1a, 0x36, 0xca, 0xad, 0x46, 0xd6, 0x54, 0xbe, - 0xa5, 0xbb, 0xe4, 0xb4, 0x6b, 0xe8, 0x04, 0x9d, 0x98, 0x1d, 0x24, 0xcf, 0x30, 0xf9, 0x85, 0x32, - 0x0f, 0x5e, 0x39, 0x70, 0x5c, 0xf9, 0x24, 0x08, 0x5e, 0xb5, 0x10, 0xc8, 0x1e, 0xe5, 0xfc, 0xfc, - 0xdf, 0x8a, 0xa4, 0x46, 0x60, 0xf0, 0x08, 0xac, 0x78, 0xb6, 0xee, 0xba, 0x66, 0xdb, 0x46, 0x86, - 0xf6, 0x11, 0x6e, 0x6a, 0x8e, 0x67, 0xbb, 0x72, 0xb6, 0x94, 0xda, 0xca, 0x56, 0x95, 0x41, 0x5f, - 0xb9, 0x19, 0xa2, 0x0f, 0x71, 0x53, 0xf5, 0x6c, 0xd1, 0x09, 0xcb, 0x31, 0xe4, 0xe6, 0xd7, 0xeb, - 0x20, 0x4d, 0xbd, 0x76, 0xb5, 0x30, 0xd9, 0x7a, 0x07, 0xc9, 0xf3, 0x61, 0x98, 0xe8, 0x5a, 0x0c, - 0x13, 0x5d, 0xc3, 0x6d, 0x90, 0x41, 0x7e, 0xf0, 0xe5, 0x15, 0x46, 0xbb, 0x3e, 0xe8, 0x2b, 0x30, - 0x80, 0x09, 0xf4, 0x43, 0x3a, 0x78, 0x1f, 0x00, 0x1a, 0xa0, 0xfd, 0xe6, 0x43, 0xd4, 0x73, 0x65, - 0x58, 0x4a, 0x6d, 0xcd, 0x57, 0xe5, 0x41, 0x5f, 0x59, 0x0d, 0xa1, 0x02, 0x9f, 0x40, 0x0b, 0x1f, - 0x83, 0x2c, 0xf5, 0x91, 0xe6, 0x22, 0x64, 0xb3, 0x0c, 0xba, 0xdc, 0xd9, 0xab, 0xbe, 0xb3, 0x33, - 0x94, 0xa9, 0x81, 0x90, 0xcd, 0xdc, 0x3c, 
0x5c, 0xc1, 0x23, 0x90, 0xa5, 0xc2, 0x35, 0xd2, 0xeb, - 0x22, 0x39, 0xe5, 0x8b, 0x4b, 0xcc, 0xb3, 0x93, 0x5e, 0x17, 0xf1, 0x9d, 0xd9, 0xfe, 0x4a, 0xdc, - 0x59, 0x00, 0x83, 0x3b, 0x60, 0x7e, 0x28, 0x50, 0x33, 0x0d, 0x96, 0x6f, 0xe9, 0x70, 0x6f, 0x94, - 0xa6, 0x66, 0x44, 0xf7, 0xc6, 0xa1, 0x70, 0x17, 0xcc, 0x12, 0xdd, 0xb4, 0x89, 0x2b, 0xcf, 0xb0, - 0x8c, 0xdf, 0x28, 0xf3, 0xd3, 0x5b, 0xd6, 0xbb, 0x66, 0x99, 0x9e, 0xf0, 0xf2, 0xc5, 0xfb, 0xe5, - 0x13, 0x4a, 0x51, 0x5d, 0xf4, 0xf7, 0xe5, 0x33, 0xa8, 0xfe, 0x2f, 0x3c, 0x06, 0xb3, 0x96, 0xde, - 0x44, 0x96, 0x2b, 0xcf, 0x32, 0x11, 0x9b, 0xc9, 0x9b, 0x29, 0x3f, 0x62, 0x44, 0x07, 0x36, 0x71, - 0x7a, 0xd5, 0xd5, 0x41, 0x5f, 0xc9, 0x73, 0x2e, 0xc1, 0x30, 0x5f, 0x0e, 0xd4, 0xc0, 0x12, 0xc1, - 0x44, 0xb7, 0xb4, 0xa0, 0x5a, 0xb8, 0xf2, 0xdc, 0xab, 0x9d, 0x21, 0xc6, 0x1e, 0xa0, 0x5c, 0x35, - 0xb2, 0x86, 0x7f, 0x93, 0xc0, 0x5d, 0xdd, 0xb2, 0x70, 0x4b, 0x27, 0x7a, 0xd3, 0x42, 0x5a, 0xb3, - 0xa7, 0x75, 0x1d, 0x13, 0x3b, 0x26, 0xe9, 0x69, 0xba, 0x6d, 0x0c, 0xf5, 0xca, 0x19, 0xb6, 0xa3, - 0x1f, 0x8d, 0xd9, 0xd1, 0x6e, 0x28, 0xa2, 0xda, 0x3b, 0xf6, 0x05, 0xec, 0xda, 0x46, 0xa0, 0x88, - 0xef, 0x75, 0xcb, 0x37, 0xaa, 0xa4, 0x4f, 0x20, 0x57, 0x27, 0x52, 0x40, 0x07, 0xac, 0xb8, 0x44, - 0x27, 0xcc, 0x62, 0xff, 0x68, 0xd2, 0x88, 0x67, 0x99, 0x99, 0xef, 0x8e, 0x31, 0xb3, 0x41, 0x39, - 0xaa, 0x3d, 0x7e, 0x1e, 0x6b, 0x06, 0xb7, 0xea, 0x86, 0x6f, 0xd5, 0x92, 0x3b, 0x8a, 0x55, 0xa3, - 0x00, 0xe8, 0x81, 0x15, 0xdf, 0x2e, 0x64, 0x04, 0x7a, 0x4d, 0x43, 0x06, 0x4c, 0xe7, 0xbd, 0xcb, - 0x5d, 0x83, 0x0c, 0x26, 0x28, 0x50, 0x2a, 0xfb, 0x4a, 0xf3, 0x7a, 0x04, 0xad, 0xc6, 0x20, 0x90, - 0x00, 0x38, 0xa2, 0xf6, 0x99, 0x87, 0x3c, 0x24, 0xe7, 0xae, 0xaa, 0xf5, 0x09, 0x25, 0x1f, 0xaf, - 0x95, 0xa1, 0xd5, 0x18, 0x84, 0x6e, 0x16, 0x5d, 0x98, 0x2d, 0x12, 0x96, 0x3e, 0xcd, 0x34, 0x5c, - 0x79, 0xf1, 0x52, 0xb5, 0x07, 0x9c, 0x23, 0xf0, 0x98, 0x1b, 0x51, 0x8b, 0x22, 0x68, 0x35, 0x06, - 0x81, 0x5f, 0x49, 0xa0, 0x68, 0x63, 0x5b, 0xd3, 0x9d, 0x8e, 0x6e, 0xe8, 0x5a, 0xb8, 0xf1, 0xf0, - 0x04, 0x2c, 0x30, 0x13, 0x7e, 0x30, 0xc6, 0x84, 0x3a, 0xb6, 0x77, 0x19, 0xef, 0xd0, 0x05, 0xc3, - 0x6c, 0xe7, 0xd6, 0xbc, 0xe5, 0x5b, 0x73, 0xd3, 0x1e, 0x4f, 0xa9, 0x5e, 0x86, 0x84, 0xbb, 0x60, - 0xc1, 0xb3, 0x7d, 0xed, 0x34, 0x43, 0xe5, 0xa5, 0x92, 0xb4, 0x95, 0xa9, 0xde, 0x1c, 0xf4, 0x95, - 0x1b, 0x23, 0x08, 0xe1, 0x44, 0x8f, 0x72, 0xc0, 0x4f, 0x25, 0x70, 0x23, 0xd8, 0x91, 0xe6, 0xb9, - 0x7a, 0x1b, 0x85, 0x91, 0xcd, 0xb3, 0xfd, 0x7d, 0x6f, 0xcc, 0xfe, 0x02, 0x33, 0x4e, 0x29, 0xd3, - 0x48, 0x74, 0x37, 0x07, 0x7d, 0xa5, 0xe8, 0x24, 0xa0, 0x05, 0x33, 0x56, 0x93, 0xf0, 0xf4, 0xa6, - 0x73, 0x50, 0x17, 0x3b, 0xc4, 0xb4, 0xdb, 0x5a, 0x58, 0x92, 0x97, 0xd9, 0x85, 0xc2, 0x6e, 0xba, - 0x21, 0xba, 0x1e, 0xaf, 0xbf, 0xcb, 0x31, 0x64, 0x41, 0x07, 0x39, 0xa1, 0xc8, 0xc1, 0xb7, 0x40, - 0xea, 0x1c, 0xf5, 0xfc, 0x0b, 0x6f, 0x79, 0xd0, 0x57, 0x16, 0xce, 0x51, 0x4f, 0x90, 0x40, 0xb1, - 0xf0, 0x1d, 0x30, 0x73, 0xa1, 0x5b, 0x1e, 0xf2, 0x5b, 0x13, 0xd6, 0x59, 0x30, 0x80, 0xd8, 0x59, - 0x30, 0xc0, 0xce, 0xf4, 0x7d, 0xa9, 0xf0, 0x67, 0x09, 0x7c, 0xe7, 0x4a, 0x65, 0x47, 0xd4, 0x3e, - 0x33, 0x56, 0x7b, 0x4d, 0xd4, 0x3e, 0xb9, 0xbe, 0x4e, 0xb2, 0xee, 0xb7, 0x12, 0x58, 0x4d, 0xaa, - 0x36, 0x57, 0x73, 0xc5, 0x03, 0xd1, 0x98, 0xc5, 0xed, 0xdb, 0x71, 0x63, 0xb8, 0x50, 0xae, 0x61, - 0x92, 0x2d, 0x9f, 0x4a, 0x60, 0x2d, 0xb1, 0x0a, 0x5d, 0xcd, 0x98, 0xff, 0xb1, 0x67, 0x22, 0xd6, - 0x84, 0xf9, 0x7b, 0x2d, 0xd6, 0x9c, 0x83, 0xb5, 0xc4, 0x9a, 0xf5, 0x2d, 0x52, 0x36, 0x33, 0x51, - 0xd9, 0x1f, 0x25, 0x50, 0x9a, 0x54, 0x9e, 0xae, 0x25, 0x5b, 0x7f, 
0x27, 0x81, 0x8d, 0xb1, 0x75, - 0xe5, 0x3a, 0xe2, 0xb2, 0xf9, 0x97, 0x34, 0xc8, 0x04, 0xd5, 0x84, 0xb6, 0xcb, 0x35, 0xde, 0x2e, - 0xa7, 0x79, 0xbb, 0x3c, 0xd2, 0xc4, 0x4d, 0x8f, 0x34, 0x6f, 0xd3, 0xdf, 0xb6, 0x79, 0x3b, 0x19, - 0x36, 0x6f, 0x7c, 0xe2, 0x79, 0x7b, 0x7c, 0x27, 0xfa, 0x0a, 0x0d, 0xdc, 0xaf, 0x25, 0x00, 0x3d, - 0xdb, 0x45, 0xa4, 0x66, 0x1b, 0xe8, 0x63, 0x64, 0x70, 0x4e, 0x39, 0xcd, 0x54, 0x6c, 0x5f, 0xa2, - 0xe2, 0x34, 0xc6, 0xc4, 0xd5, 0x95, 0x06, 0x7d, 0xe5, 0x56, 0x5c, 0xa2, 0xa0, 0x3a, 0x41, 0xdf, - 0xff, 0xa3, 0x1e, 0x77, 0xc0, 0x8d, 0x31, 0x36, 0xbf, 0x09, 0x75, 0x9b, 0xcf, 0x67, 0xc1, 0x06, - 0xcb, 0xd1, 0x3d, 0xcb, 0x73, 0x09, 0x72, 0x46, 0xd2, 0x17, 0xd6, 0xc0, 0x5c, 0xcb, 0x41, 0xf4, - 0x74, 0x31, 0xad, 0x97, 0x8f, 0x29, 0x2b, 0x7e, 0x46, 0x04, 0x2c, 0x6c, 0x4a, 0x09, 0x16, 0xd4, - 0x2e, 0x7e, 0x2d, 0x0b, 0x76, 0x3d, 0x8b, 0xdc, 0xaa, 0x9c, 0x82, 0x0e, 0x56, 0xc1, 0x90, 0x55, - 0x33, 0xd8, 0x40, 0x93, 0xe5, 0xc3, 0x47, 0x08, 0x15, 0x87, 0x8f, 0x10, 0x0a, 0xff, 0x20, 0xd1, - 0x1b, 0xd8, 0xaf, 0x03, 0xe1, 0x55, 0xe6, 0xe7, 0xc9, 0x7e, 0x3c, 0x4f, 0xc6, 0x6e, 0x7d, 0x78, - 0xcc, 0x04, 0x31, 0x3c, 0x73, 0x6e, 0xfb, 0xdb, 0x4c, 0x54, 0x24, 0xa9, 0x49, 0x60, 0xf8, 0x77, - 0x09, 0xdc, 0x4a, 0x80, 0xef, 0x59, 0xba, 0xeb, 0xd6, 0x75, 0x36, 0x71, 0x53, 0x03, 0x1f, 0xbf, - 0xa6, 0x81, 0x43, 0x79, 0xdc, 0xd2, 0xbb, 0xbe, 0xa5, 0x97, 0xaa, 0x56, 0x2f, 0xc5, 0x16, 0x3e, - 0x93, 0x80, 0x3c, 0xce, 0x15, 0xd7, 0x52, 0x63, 0xff, 0x24, 0x81, 0x3b, 0x13, 0xb7, 0x7e, 0x2d, - 0xb5, 0xf6, 0x1f, 0x29, 0x50, 0x48, 0x8a, 0x94, 0xca, 0xda, 0xba, 0xe1, 0x8b, 0x91, 0x34, 0xe1, - 0xc5, 0x48, 0x38, 0x73, 0xd3, 0xaf, 0x79, 0xe6, 0x3e, 0x93, 0x40, 0x5e, 0x88, 0x2e, 0xcb, 0x25, - 0xbf, 0x2c, 0x57, 0xe3, 0x9b, 0x1d, 0x6f, 0xbb, 0x98, 0x6b, 0x42, 0xa3, 0x5c, 0x1c, 0xf4, 0x95, - 0x42, 0x54, 0xbe, 0xb0, 0x9f, 0x98, 0xee, 0xc2, 0x97, 0x12, 0x58, 0x4b, 0x94, 0x75, 0xb5, 0x80, - 0xfd, 0x74, 0x34, 0x60, 0xef, 0xbe, 0xc2, 0x71, 0x99, 0x18, 0xbd, 0xdf, 0x4c, 0x83, 0x79, 0x31, - 0xdc, 0xf0, 0x43, 0x90, 0x0d, 0x67, 0x25, 0x89, 0x39, 0xed, 0xbd, 0xcb, 0x33, 0xa4, 0x1c, 0x99, - 0x90, 0x96, 0xfd, 0xe0, 0x84, 0x72, 0xd4, 0xf0, 0x6f, 0xe1, 0x0b, 0x09, 0x2c, 0x8e, 0xef, 0x59, - 0xc6, 0x3b, 0xe1, 0xe7, 0xa3, 0x4e, 0x28, 0x0b, 0x57, 0xf4, 0xf0, 0x75, 0xb4, 0xdc, 0x3d, 0x6f, - 0xb3, 0x3b, 0x3b, 0x50, 0x57, 0x7e, 0xe2, 0xe9, 0x36, 0x31, 0x49, 0x6f, 0xa2, 0x1f, 0xbe, 0x98, - 0x01, 0xcb, 0x87, 0xb8, 0xd9, 0xe0, 0x1b, 0x35, 0xed, 0x76, 0xcd, 0x3e, 0xc3, 0x70, 0x1b, 0x64, - 0x2c, 0xf3, 0x0c, 0x11, 0xb3, 0x83, 0x98, 0x79, 0x0b, 0xfc, 0x15, 0x29, 0x80, 0x89, 0xaf, 0x48, - 0x01, 0x0c, 0xee, 0x80, 0x79, 0x9d, 0x68, 0x1d, 0xec, 0x12, 0x0d, 0xdb, 0xad, 0xa0, 0xb9, 0x63, - 0x85, 0x5c, 0x27, 0x8f, 0xb1, 0x4b, 0x8e, 0xec, 0x96, 0xc8, 0x09, 0x42, 0x28, 0xfc, 0x21, 0xc8, - 0x75, 0x1d, 0x44, 0xe1, 0x26, 0x1d, 0x0c, 0x53, 0x8c, 0x75, 0x63, 0xd0, 0x57, 0xd6, 0x04, 0xb0, - 0xc0, 0x2b, 0x52, 0xc3, 0x07, 0x20, 0xdf, 0xc2, 0x76, 0xcb, 0x73, 0x1c, 0x64, 0xb7, 0x7a, 0x9a, - 0xab, 0x9f, 0xf1, 0x27, 0xd3, 0x4c, 0xf5, 0xf6, 0xa0, 0xaf, 0x6c, 0x08, 0xb8, 0x86, 0x7e, 0x26, - 0x4a, 0x59, 0x8a, 0xa0, 0xe8, 0x40, 0x37, 0x7c, 0xc6, 0x69, 0xd1, 0x0a, 0xa3, 0xb1, 0xd7, 0xc4, - 0xd9, 0x70, 0xa0, 0xeb, 0x46, 0xeb, 0x8f, 0x38, 0xd0, 0xc5, 0x90, 0xb0, 0x01, 0x72, 0xae, 0xd7, - 0xec, 0x98, 0x44, 0x63, 0xae, 0x9c, 0x9b, 0x78, 0xc0, 0x83, 0x07, 0x28, 0xc0, 0xd9, 0x86, 0x8f, - 0xac, 0xc2, 0x9a, 0x06, 0x27, 0xd0, 0x24, 0x67, 0xc2, 0xe0, 0x04, 0x30, 0x31, 0x38, 0x01, 0x0c, - 0xfe, 0x0a, 0xac, 0xf0, 0x14, 0xd6, 0x1c, 0xf4, 0xcc, 0x33, 0x1d, 0xd4, 0x41, 0xe1, 0x9b, 
0xdd, - 0xdd, 0x78, 0x9e, 0x1f, 0xb1, 0x5f, 0x55, 0xa0, 0xe5, 0x2d, 0x14, 0x8e, 0xc1, 0xc5, 0x16, 0x2a, - 0x8e, 0x85, 0x15, 0x30, 0x77, 0x81, 0x1c, 0xd7, 0xc4, 0xb6, 0x9c, 0x65, 0xb6, 0xae, 0x0d, 0xfa, - 0xca, 0xb2, 0x0f, 0x12, 0x78, 0x03, 0xaa, 0x9d, 0xf4, 0x97, 0x5f, 0x29, 0xd2, 0xe6, 0xef, 0x25, - 0x00, 0xe3, 0x36, 0x40, 0x0b, 0x2c, 0x75, 0xb1, 0x21, 0x82, 0xfc, 0x46, 0xe5, 0x4e, 0x7c, 0x0b, - 0xc7, 0xa3, 0x84, 0x3c, 0x19, 0x22, 0xdc, 0xa1, 0x01, 0x0f, 0xa6, 0xd4, 0xa8, 0xe8, 0xea, 0x22, - 0x98, 0x17, 0xbd, 0xb5, 0xf9, 0xcf, 0x39, 0xb0, 0x14, 0x91, 0x0a, 0x5d, 0xfe, 0x76, 0xda, 0x40, - 0x16, 0x6a, 0x11, 0xec, 0xf8, 0x95, 0xe3, 0x83, 0x89, 0xe6, 0xb0, 0x96, 0x35, 0xe0, 0xe2, 0xf5, - 0xa3, 0x30, 0xe8, 0x2b, 0xeb, 0xa2, 0x30, 0xc1, 0x3d, 0x23, 0x4a, 0xe0, 0x31, 0xc8, 0xe8, 0x67, - 0x67, 0xa6, 0x4d, 0x33, 0x80, 0x97, 0x85, 0x5b, 0x49, 0x9d, 0xfb, 0xae, 0x4f, 0xc3, 0xf3, 0x23, - 0xe0, 0x10, 0xf3, 0x23, 0x80, 0xc1, 0x53, 0x90, 0x23, 0xd8, 0x42, 0x8e, 0x4e, 0x4c, 0x6c, 0x07, - 0xbd, 0x7c, 0x31, 0x71, 0x1c, 0x18, 0x92, 0x0d, 0x6f, 0x23, 0x91, 0x55, 0x15, 0x17, 0x10, 0x83, - 0x9c, 0x6e, 0xdb, 0x98, 0xf8, 0x62, 0xe7, 0xc6, 0xf5, 0xef, 0x51, 0xe7, 0xec, 0x86, 0x4c, 0xdc, - 0x37, 0xac, 0x16, 0x08, 0xa2, 0xc4, 0x5a, 0x20, 0x80, 0x47, 0xce, 0x46, 0x9a, 0xf5, 0x29, 0x93, - 0xcf, 0xc6, 0x21, 0xc8, 0x07, 0xe5, 0x04, 0xdb, 0xc7, 0xd8, 0x32, 0x5b, 0x3d, 0xf6, 0x49, 0x24, - 0xcb, 0x6f, 0xbc, 0x28, 0x4e, 0xbc, 0xf1, 0xa2, 0x38, 0xf8, 0x09, 0x18, 0x3e, 0x15, 0x8d, 0x64, - 0xe9, 0x2c, 0x8b, 0xd2, 0x56, 0x92, 0x43, 0xd5, 0x04, 0xfa, 0xea, 0x2d, 0xdf, 0xb5, 0x89, 0xd2, - 0xd4, 0x44, 0x28, 0x6c, 0x80, 0x95, 0x96, 0x4e, 0x3d, 0x1b, 0x16, 0xf3, 0x87, 0x88, 0x97, 0x88, - 0xf9, 0xea, 0x9d, 0x41, 0x5f, 0xb9, 0x9d, 0x80, 0x16, 0x76, 0x93, 0xc4, 0x5d, 0x68, 0x83, 0xe5, - 0x58, 0xa6, 0xbe, 0x91, 0x41, 0xe8, 0x0c, 0xe4, 0xa3, 0x51, 0x7f, 0x23, 0x13, 0xd0, 0x5f, 0x25, - 0xb0, 0x71, 0xec, 0x59, 0xae, 0xee, 0x34, 0x82, 0x2c, 0x3c, 0xc4, 0xcd, 0x7d, 0x44, 0x74, 0xd3, - 0x72, 0xa9, 0x30, 0xf6, 0xd0, 0xe3, 0xeb, 0x64, 0xc2, 0x18, 0x40, 0x14, 0xc6, 0xdf, 0x97, 0xdf, - 0x01, 0x33, 0x4f, 0xa2, 0x13, 0x4e, 0xb4, 0x25, 0xe2, 0x14, 0xf0, 0x1e, 0x98, 0xa5, 0x77, 0x2c, - 0x22, 0xfe, 0x74, 0xc3, 0x86, 0x5f, 0x0e, 0x11, 0x87, 0x5f, 0x0e, 0xf9, 0xee, 0x11, 0xc8, 0x09, - 0xef, 0x54, 0x30, 0x07, 0xe6, 0x4e, 0xeb, 0x0f, 0xeb, 0x47, 0x3f, 0xab, 0xe7, 0xa7, 0xe8, 0xe2, - 0xf8, 0xa0, 0xbe, 0x5f, 0xab, 0xff, 0x24, 0x2f, 0xd1, 0x85, 0x7a, 0x5a, 0xaf, 0xd3, 0xc5, 0x34, - 0x5c, 0x00, 0xd9, 0xc6, 0xe9, 0xde, 0xde, 0xc1, 0xc1, 0xfe, 0xc1, 0x7e, 0x3e, 0x05, 0x01, 0x98, - 0xfd, 0xf1, 0x6e, 0xed, 0xd1, 0xc1, 0x7e, 0x3e, 0x5d, 0xfd, 0xe5, 0xf3, 0x17, 0x45, 0xe9, 0x9b, - 0x17, 0x45, 0xe9, 0x3f, 0x2f, 0x8a, 0xd2, 0xe7, 0x2f, 0x8b, 0x53, 0xdf, 0xbc, 0x2c, 0x4e, 0xfd, - 0xeb, 0x65, 0x71, 0xea, 0x17, 0x7b, 0xc2, 0x47, 0x53, 0xfe, 0x74, 0xdc, 0x75, 0x30, 0x3d, 0x92, - 0xfe, 0xaa, 0x72, 0x85, 0xaf, 0xc3, 0xcd, 0x59, 0x76, 0x8f, 0x7d, 0xf0, 0xdf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x3a, 0x96, 0x2c, 0x42, 0x4b, 0x1e, 0x00, 0x00, + 0x55, 0x10, 0xf5, 0x41, 0x2e, 0x65, 0x89, 0x5a, 0xf9, 0x03, 0x62, 0x6c, 0x82, 0x56, 0xdc, 0x8c, + 0xd2, 0x38, 0x64, 0xa3, 0x74, 0xa6, 0x1e, 0xb7, 0x17, 0xd1, 0x52, 0x6b, 0xfa, 0x83, 0x92, 0x41, + 0xa9, 0x9d, 0x76, 0xa6, 0xc1, 0x80, 0xc0, 0x8a, 0x46, 0x04, 0xee, 0xd2, 0xc0, 0xc2, 0x0d, 0x73, + 0x6e, 0x0f, 0x9d, 0xcc, 0xa4, 0x99, 0x4e, 0x3f, 0x32, 0xd3, 0x99, 0x76, 0x72, 0xcb, 0x2f, 0x68, + 0x0f, 0xbd, 0xf5, 0xe4, 0x63, 0x8e, 0x3d, 0x31, 0x1d, 0xfb, 0xc6, 0x5f, 0xd1, 0xd9, 0x5d, 0x80, + 0x58, 0x02, 0xa0, 0x28, 0x27, 0x75, 0x7d, 0x22, 0xf7, 
0x7d, 0xef, 0x7b, 0x6f, 0xdf, 0xbe, 0xb7, + 0x00, 0xb7, 0x1d, 0x4c, 0x91, 0x87, 0x4d, 0xb7, 0xee, 0x5b, 0x8f, 0x91, 0x1d, 0xb8, 0xc8, 0x8b, + 0xff, 0x91, 0xce, 0x87, 0xc8, 0xa2, 0x7e, 0x0a, 0x50, 0xeb, 0x7b, 0x84, 0x12, 0x58, 0x4a, 0xc2, + 0xcb, 0x5a, 0x97, 0x90, 0xae, 0x8b, 0xea, 0x1c, 0xdf, 0x09, 0x4e, 0xea, 0xd4, 0xe9, 0x21, 0x9f, + 0x9a, 0xbd, 0xbe, 0x60, 0x29, 0x6f, 0x9d, 0xde, 0xf2, 0x6b, 0x0e, 0xa9, 0x9b, 0x7d, 0xa7, 0x6e, + 0x11, 0x0f, 0xd5, 0x9f, 0xbe, 0x57, 0xef, 0x22, 0x8c, 0x3c, 0x93, 0x22, 0x3b, 0xa4, 0xf9, 0x7e, + 0x4c, 0xd3, 0x33, 0xad, 0xc7, 0x0e, 0x46, 0xde, 0xa0, 0xde, 0x3f, 0xed, 0x72, 0x26, 0x0f, 0xf9, + 0x24, 0xf0, 0x2c, 0x94, 0xe2, 0x7a, 0xb7, 0xeb, 0xd0, 0xc7, 0x41, 0xa7, 0x66, 0x91, 0x5e, 0xbd, + 0x4b, 0xba, 0x24, 0xb6, 0x81, 0xad, 0xf8, 0x82, 0xff, 0x13, 0xe4, 0x5b, 0x5f, 0xe6, 0x40, 0x7e, + 0xff, 0x23, 0x64, 0x05, 0x94, 0x78, 0xb0, 0x0a, 0xe6, 0x1d, 0x5b, 0x55, 0xaa, 0xca, 0x76, 0xa1, + 0x51, 0x1a, 0x0d, 0xb5, 0x15, 0xc7, 0xbe, 0x49, 0x7a, 0x0e, 0x45, 0xbd, 0x3e, 0x1d, 0xe8, 0xf3, + 0x8e, 0x0d, 0xdf, 0x02, 0x0b, 0x7d, 0x42, 0x5c, 0x75, 0x9e, 0xd3, 0xc0, 0xd1, 0x50, 0x5b, 0x65, + 0x6b, 0x89, 0x8a, 0xe3, 0xe1, 0x2e, 0x58, 0xc4, 0xc4, 0x46, 0xbe, 0x9a, 0xab, 0xe6, 0xb6, 0x8b, + 0x3b, 0x97, 0x6b, 0x29, 0xd7, 0xb5, 0x88, 0x8d, 0x1a, 0x1b, 0xa3, 0xa1, 0xb6, 0xc6, 0x09, 0x25, + 0x09, 0x82, 0x13, 0x7e, 0x00, 0x56, 0x7b, 0x0e, 0x76, 0x7a, 0x41, 0xef, 0x1e, 0xe9, 0xb4, 0x9d, + 0x8f, 0x91, 0xba, 0x50, 0x55, 0xb6, 0x8b, 0x3b, 0x95, 0xb4, 0x2c, 0x3d, 0x74, 0xc6, 0x03, 0xc7, + 0xa7, 0x8d, 0xcb, 0xcf, 0x86, 0xda, 0x1c, 0x33, 0x6c, 0x92, 0x5b, 0x4f, 0xac, 0x99, 0x7c, 0xd7, + 0xf4, 0xe9, 0x71, 0xdf, 0x36, 0x29, 0x3a, 0x72, 0x7a, 0x48, 0x5d, 0xe4, 0xf2, 0xcb, 0x35, 0x11, + 0xbc, 0x5a, 0xe4, 0xb8, 0xda, 0x51, 0x14, 0xbc, 0x46, 0x39, 0x92, 0x3d, 0xc9, 0xf9, 0xd9, 0xd7, + 0x9a, 0xa2, 0x27, 0x60, 0xf0, 0x00, 0x6c, 0x04, 0xd8, 0xf4, 0x7d, 0xa7, 0x8b, 0x91, 0x6d, 0x7c, + 0x48, 0x3a, 0x86, 0x17, 0x60, 0x5f, 0x2d, 0x54, 0x73, 0xdb, 0x85, 0x86, 0x36, 0x1a, 0x6a, 0x6f, + 0xc4, 0xe8, 0x7b, 0xa4, 0xa3, 0x07, 0x58, 0x76, 0xc2, 0x7a, 0x0a, 0xb9, 0xf5, 0xe5, 0x65, 0xb0, + 0xc0, 0xbc, 0x76, 0xbe, 0x30, 0x61, 0xb3, 0x87, 0xd4, 0x95, 0x38, 0x4c, 0x6c, 0x2d, 0x87, 0x89, + 0xad, 0xe1, 0x0e, 0xc8, 0xa3, 0x30, 0xf8, 0xea, 0x06, 0xa7, 0xbd, 0x3c, 0x1a, 0x6a, 0x30, 0x82, + 0x49, 0xf4, 0x63, 0x3a, 0x78, 0x0b, 0x00, 0x16, 0xa0, 0xbd, 0xce, 0x7d, 0x34, 0xf0, 0x55, 0x58, + 0xcd, 0x6d, 0xaf, 0x34, 0xd4, 0xd1, 0x50, 0xbb, 0x18, 0x43, 0x25, 0x3e, 0x89, 0x16, 0x3e, 0x04, + 0x05, 0xe6, 0x23, 0xc3, 0x47, 0x08, 0xf3, 0x0c, 0x3a, 0xdb, 0xd9, 0x17, 0x43, 0x67, 0xe7, 0x19, + 0x53, 0x1b, 0x21, 0xcc, 0xdd, 0x3c, 0x5e, 0xc1, 0x03, 0x50, 0x60, 0xc2, 0x0d, 0x3a, 0xe8, 0x23, + 0x35, 0x17, 0x8a, 0xcb, 0xcc, 0xb3, 0xa3, 0x41, 0x1f, 0x89, 0x9d, 0xe1, 0x70, 0x25, 0xef, 0x2c, + 0x82, 0xc1, 0xdb, 0x60, 0x65, 0x2c, 0xd0, 0x70, 0x6c, 0x9e, 0x6f, 0x0b, 0xf1, 0xde, 0x18, 0x4d, + 0xd3, 0x4e, 0xee, 0x4d, 0x40, 0xe1, 0x2e, 0x58, 0xa2, 0xa6, 0x83, 0xa9, 0xaf, 0x2e, 0xf2, 0x8c, + 0xdf, 0xac, 0x89, 0xd3, 0x5b, 0x33, 0xfb, 0x4e, 0x8d, 0x9d, 0xf0, 0xda, 0xd3, 0xf7, 0x6a, 0x47, + 0x8c, 0xa2, 0xb1, 0x1a, 0xee, 0x2b, 0x64, 0xd0, 0xc3, 0x5f, 0x78, 0x08, 0x96, 0x5c, 0xb3, 0x83, + 0x5c, 0x5f, 0x5d, 0xe2, 0x22, 0xb6, 0xb2, 0x37, 0x53, 0x7b, 0xc0, 0x89, 0xf6, 0x31, 0xf5, 0x06, + 0x8d, 0x8b, 0xa3, 0xa1, 0x56, 0x12, 0x5c, 0x92, 0x61, 0xa1, 0x1c, 0x68, 0x80, 0x35, 0x4a, 0xa8, + 0xe9, 0x1a, 0x51, 0xb5, 0xf0, 0xd5, 0xe5, 0x97, 0x3b, 0x43, 0x9c, 0x3d, 0x42, 0xf9, 0x7a, 0x62, + 0x0d, 0xff, 0xae, 0x80, 0x1b, 0xa6, 0xeb, 0x12, 0xcb, 0xa4, 0x66, 0xc7, 0x45, 
0x46, 0x67, 0x60, + 0xf4, 0x3d, 0x87, 0x78, 0x0e, 0x1d, 0x18, 0x26, 0xb6, 0xc7, 0x7a, 0xd5, 0x3c, 0xdf, 0xd1, 0x8f, + 0xa6, 0xec, 0x68, 0x37, 0x16, 0xd1, 0x18, 0x1c, 0x86, 0x02, 0x76, 0xb1, 0x1d, 0x29, 0x12, 0x7b, + 0xdd, 0x0e, 0x8d, 0xaa, 0x9a, 0x33, 0xc8, 0xf5, 0x99, 0x14, 0xd0, 0x03, 0x1b, 0x3e, 0x35, 0x29, + 0xb7, 0x38, 0x3c, 0x9a, 0x2c, 0xe2, 0x05, 0x6e, 0xe6, 0x3b, 0x53, 0xcc, 0x6c, 0x33, 0x8e, 0xc6, + 0x40, 0x9c, 0xc7, 0xa6, 0x2d, 0xac, 0xba, 0x12, 0x5a, 0xb5, 0xe6, 0x4f, 0x62, 0xf5, 0x24, 0x00, + 0x06, 0x60, 0x23, 0xb4, 0x0b, 0xd9, 0x91, 0x5e, 0xc7, 0x56, 0x01, 0xd7, 0x79, 0xf3, 0x6c, 0xd7, + 0x20, 0x9b, 0x0b, 0x8a, 0x94, 0xaa, 0xa1, 0xd2, 0x92, 0x99, 0x40, 0xeb, 0x29, 0x08, 0xa4, 0x00, + 0x4e, 0xa8, 0x7d, 0x12, 0xa0, 0x00, 0xa9, 0xc5, 0xf3, 0x6a, 0x7d, 0xc4, 0xc8, 0xa7, 0x6b, 0xe5, + 0x68, 0x3d, 0x05, 0x61, 0x9b, 0x45, 0x4f, 0x1d, 0x8b, 0xc6, 0xa5, 0xcf, 0x70, 0x6c, 0x5f, 0x5d, + 0x3d, 0x53, 0xed, 0xbe, 0xe0, 0x88, 0x3c, 0xe6, 0x27, 0xd4, 0xa2, 0x04, 0x5a, 0x4f, 0x41, 0xe0, + 0x17, 0x0a, 0xa8, 0x60, 0x82, 0x0d, 0xd3, 0xeb, 0x99, 0xb6, 0x69, 0xc4, 0x1b, 0x8f, 0x4f, 0xc0, + 0x05, 0x6e, 0xc2, 0x0f, 0xa6, 0x98, 0xd0, 0x22, 0x78, 0x97, 0xf3, 0x8e, 0x5d, 0x30, 0xce, 0x76, + 0x61, 0xcd, 0x9b, 0xa1, 0x35, 0x6f, 0xe0, 0xe9, 0x94, 0xfa, 0x59, 0x48, 0xb8, 0x0b, 0x2e, 0x04, + 0x38, 0xd4, 0xce, 0x32, 0x54, 0x5d, 0xab, 0x2a, 0xdb, 0xf9, 0xc6, 0x1b, 0xa3, 0xa1, 0x76, 0x65, + 0x02, 0x21, 0x9d, 0xe8, 0x49, 0x0e, 0xf8, 0x89, 0x02, 0xae, 0x44, 0x3b, 0x32, 0x02, 0xdf, 0xec, + 0xa2, 0x38, 0xb2, 0x25, 0xbe, 0xbf, 0xef, 0x4d, 0xd9, 0x5f, 0x64, 0xc6, 0x31, 0x63, 0x9a, 0x88, + 0xee, 0xd6, 0x68, 0xa8, 0x55, 0xbc, 0x0c, 0xb4, 0x64, 0xc6, 0xc5, 0x2c, 0x3c, 0xbb, 0xe9, 0x3c, + 0xd4, 0x27, 0x1e, 0x75, 0x70, 0xd7, 0x88, 0x4b, 0xf2, 0x3a, 0xbf, 0x50, 0xf8, 0x4d, 0x37, 0x46, + 0xb7, 0xd2, 0xf5, 0x77, 0x3d, 0x85, 0x2c, 0x9b, 0xa0, 0x28, 0x15, 0x39, 0xf8, 0x26, 0xc8, 0x9d, + 0xa2, 0x41, 0x78, 0xe1, 0xad, 0x8f, 0x86, 0xda, 0x85, 0x53, 0x34, 0x90, 0x24, 0x30, 0x2c, 0x7c, + 0x1b, 0x2c, 0x3e, 0x35, 0xdd, 0x00, 0x85, 0xad, 0x09, 0xef, 0x2c, 0x38, 0x40, 0xee, 0x2c, 0x38, + 0xe0, 0xf6, 0xfc, 0x2d, 0xa5, 0xfc, 0x17, 0x05, 0x7c, 0xe7, 0x5c, 0x65, 0x47, 0xd6, 0xbe, 0x38, + 0x55, 0x7b, 0x53, 0xd6, 0x3e, 0xbb, 0xbe, 0xce, 0xb2, 0xee, 0xb7, 0x0a, 0xb8, 0x98, 0x55, 0x6d, + 0xce, 0xe7, 0x8a, 0xbb, 0xb2, 0x31, 0xab, 0x3b, 0xd7, 0xd2, 0xc6, 0x08, 0xa1, 0x42, 0xc3, 0x2c, + 0x5b, 0x3e, 0x51, 0xc0, 0xa5, 0xcc, 0x2a, 0x74, 0x3e, 0x63, 0xfe, 0xc7, 0x9e, 0x49, 0x58, 0x13, + 0xe7, 0xef, 0x6b, 0xb1, 0xe6, 0x14, 0x5c, 0xca, 0xac, 0x59, 0xdf, 0x20, 0x65, 0xf3, 0x33, 0x95, + 0xfd, 0x49, 0x01, 0xd5, 0x59, 0xe5, 0xe9, 0xb5, 0x64, 0xeb, 0xef, 0x14, 0xb0, 0x39, 0xb5, 0xae, + 0xbc, 0x8e, 0xb8, 0x6c, 0xfd, 0x75, 0x01, 0xe4, 0xa3, 0x6a, 0xc2, 0xda, 0xe5, 0xa6, 0x68, 0x97, + 0x17, 0x44, 0xbb, 0x3c, 0xd1, 0xc4, 0xcd, 0x4f, 0x34, 0x6f, 0xf3, 0xdf, 0xb4, 0x79, 0x3b, 0x1a, + 0x37, 0x6f, 0x62, 0xe2, 0x79, 0x6b, 0x7a, 0x27, 0xfa, 0x12, 0x0d, 0xdc, 0xaf, 0x15, 0x00, 0x03, + 0xec, 0x23, 0xda, 0xc4, 0x36, 0xfa, 0x08, 0xd9, 0x82, 0x53, 0x5d, 0xe0, 0x2a, 0x76, 0xce, 0x50, + 0x71, 0x9c, 0x62, 0x12, 0xea, 0xaa, 0xa3, 0xa1, 0x76, 0x35, 0x2d, 0x51, 0x52, 0x9d, 0xa1, 0xef, + 0xff, 0x51, 0x8f, 0x7b, 0xe0, 0xca, 0x14, 0x9b, 0x5f, 0x85, 0xba, 0xad, 0x67, 0x4b, 0x60, 0x93, + 0xe7, 0xe8, 0x1d, 0x37, 0xf0, 0x29, 0xf2, 0x26, 0xd2, 0x17, 0x36, 0xc1, 0xb2, 0xe5, 0x21, 0x76, + 0xba, 0xb8, 0xd6, 0xb3, 0xc7, 0x94, 0x8d, 0x30, 0x23, 0x22, 0x16, 0x3e, 0xa5, 0x44, 0x0b, 0x66, + 0x97, 0xb8, 0x96, 0x25, 0xbb, 0x9e, 0x24, 0x6e, 0x55, 0x41, 0xc1, 0x06, 0xab, 0x68, 0xc8, 0x6a, + 0xda, 
0x7c, 0xa0, 0x29, 0x88, 0xe1, 0x23, 0x86, 0xca, 0xc3, 0x47, 0x0c, 0x85, 0x7f, 0x54, 0xd8, + 0x0d, 0x1c, 0xd6, 0x81, 0xf8, 0x2a, 0x0b, 0xf3, 0x64, 0x2f, 0x9d, 0x27, 0x53, 0xb7, 0x3e, 0x3e, + 0x66, 0x92, 0x18, 0x91, 0x39, 0xd7, 0xc2, 0x6d, 0x66, 0x2a, 0x52, 0xf4, 0x2c, 0x30, 0xfc, 0x87, + 0x02, 0xae, 0x66, 0xc0, 0xef, 0xb8, 0xa6, 0xef, 0xb7, 0x4c, 0x3e, 0x71, 0x33, 0x03, 0x1f, 0x7e, + 0x4b, 0x03, 0xc7, 0xf2, 0x84, 0xa5, 0x37, 0x42, 0x4b, 0xcf, 0x54, 0xad, 0x9f, 0x89, 0x2d, 0x7f, + 0xaa, 0x00, 0x75, 0x9a, 0x2b, 0x5e, 0x4b, 0x8d, 0xfd, 0xb3, 0x02, 0xae, 0xcf, 0xdc, 0xfa, 0x6b, + 0xa9, 0xb5, 0xff, 0xcc, 0x81, 0x72, 0x56, 0xa4, 0x74, 0xde, 0xd6, 0x8d, 0x5f, 0x8c, 0x94, 0x19, + 0x2f, 0x46, 0xd2, 0x99, 0x9b, 0xff, 0x96, 0x67, 0xee, 0x53, 0x05, 0x94, 0xa4, 0xe8, 0xf2, 0x5c, + 0x0a, 0xcb, 0x72, 0x23, 0xbd, 0xd9, 0xe9, 0xb6, 0xcb, 0xb9, 0x26, 0x35, 0xca, 0x95, 0xd1, 0x50, + 0x2b, 0x27, 0xe5, 0x4b, 0xfb, 0x49, 0xe9, 0x2e, 0x7f, 0xae, 0x80, 0x4b, 0x99, 0xb2, 0xce, 0x17, + 0xb0, 0x9f, 0x4e, 0x06, 0xec, 0x9d, 0x97, 0x38, 0x2e, 0x33, 0xa3, 0xf7, 0x9b, 0x79, 0xb0, 0x22, + 0x87, 0x1b, 0x7e, 0x00, 0x0a, 0xf1, 0xac, 0xa4, 0x70, 0xa7, 0xbd, 0x7b, 0x76, 0x86, 0xd4, 0x12, + 0x13, 0xd2, 0x7a, 0x18, 0x9c, 0x58, 0x8e, 0x1e, 0xff, 0x2d, 0xff, 0x41, 0x01, 0xab, 0xd3, 0x7b, + 0x96, 0xe9, 0x4e, 0xf8, 0xf9, 0xa4, 0x13, 0x6a, 0xd2, 0x15, 0x3d, 0x7e, 0x1d, 0xad, 0xf5, 0x4f, + 0xbb, 0xfc, 0xce, 0x8e, 0xd4, 0xd5, 0x1e, 0x05, 0x26, 0xa6, 0x0e, 0x1d, 0xcc, 0xf4, 0xc3, 0xd7, + 0x8b, 0x60, 0xfd, 0x1e, 0xe9, 0xb4, 0xc5, 0x46, 0x1d, 0xdc, 0x6d, 0xe2, 0x13, 0x02, 0x77, 0x40, + 0xde, 0x75, 0x4e, 0x10, 0x75, 0x7a, 0x88, 0x9b, 0x77, 0x41, 0xbc, 0x22, 0x45, 0x30, 0xf9, 0x15, + 0x29, 0x82, 0xc1, 0xdb, 0x60, 0xc5, 0xa4, 0x46, 0x8f, 0xf8, 0xd4, 0x20, 0xd8, 0x8a, 0x9a, 0x3b, + 0x5e, 0xc8, 0x4d, 0xfa, 0x90, 0xf8, 0xf4, 0x00, 0x5b, 0x32, 0x27, 0x88, 0xa1, 0xf0, 0x87, 0xa0, + 0xd8, 0xf7, 0x10, 0x83, 0x3b, 0x6c, 0x30, 0xcc, 0x71, 0xd6, 0xcd, 0xd1, 0x50, 0xbb, 0x24, 0x81, + 0x25, 0x5e, 0x99, 0x1a, 0xde, 0x05, 0x25, 0x8b, 0x60, 0x2b, 0xf0, 0x3c, 0x84, 0xad, 0x81, 0xe1, + 0x9b, 0x27, 0xe2, 0xc9, 0x34, 0xdf, 0xb8, 0x36, 0x1a, 0x6a, 0x9b, 0x12, 0xae, 0x6d, 0x9e, 0xc8, + 0x52, 0xd6, 0x12, 0x28, 0x36, 0xd0, 0x8d, 0x9f, 0x71, 0x2c, 0x56, 0x61, 0x0c, 0xfe, 0x9a, 0xb8, + 0x14, 0x0f, 0x74, 0xfd, 0x64, 0xfd, 0x91, 0x07, 0xba, 0x14, 0x12, 0xb6, 0x41, 0xd1, 0x0f, 0x3a, + 0x3d, 0x87, 0x1a, 0xdc, 0x95, 0xcb, 0x33, 0x0f, 0x78, 0xf4, 0x00, 0x05, 0x04, 0xdb, 0xf8, 0x91, + 0x55, 0x5a, 0xb3, 0xe0, 0x44, 0x9a, 0xd4, 0x7c, 0x1c, 0x9c, 0x08, 0x26, 0x07, 0x27, 0x82, 0xc1, + 0x5f, 0x81, 0x0d, 0x91, 0xc2, 0x86, 0x87, 0x9e, 0x04, 0x8e, 0x87, 0x7a, 0x28, 0x7e, 0xb3, 0xbb, + 0x91, 0xce, 0xf3, 0x03, 0xfe, 0xab, 0x4b, 0xb4, 0xa2, 0x85, 0x22, 0x29, 0xb8, 0xdc, 0x42, 0xa5, + 0xb1, 0xb0, 0x0e, 0x96, 0x9f, 0x22, 0xcf, 0x77, 0x08, 0x56, 0x0b, 0xdc, 0xd6, 0x4b, 0xa3, 0xa1, + 0xb6, 0x1e, 0x82, 0x24, 0xde, 0x88, 0x0a, 0x36, 0xc1, 0x3a, 0x6f, 0x0b, 0x0c, 0x4a, 0x5d, 0xc3, + 0x47, 0x16, 0xc1, 0xb6, 0xaf, 0x82, 0xaa, 0xb2, 0x9d, 0x13, 0xe1, 0xe4, 0xc8, 0x23, 0xea, 0xb6, + 0x05, 0x4a, 0x0e, 0x67, 0x02, 0x75, 0x7b, 0xe1, 0xf3, 0x2f, 0x34, 0x65, 0xeb, 0xf7, 0x0a, 0x80, + 0xe9, 0xed, 0x40, 0x17, 0xac, 0xf5, 0x89, 0x2d, 0x83, 0xc2, 0x9e, 0xe7, 0x7a, 0xda, 0x1b, 0x87, + 0x93, 0x84, 0xc2, 0x90, 0x04, 0x77, 0x6c, 0xc8, 0xdd, 0x39, 0x3d, 0x29, 0xba, 0xb1, 0x0a, 0x56, + 0x64, 0xc7, 0x6f, 0xfd, 0x6b, 0x19, 0xac, 0x25, 0xa4, 0x42, 0x5f, 0x3c, 0xc3, 0xb6, 0x91, 0x8b, + 0x2c, 0x4a, 0xbc, 0xb0, 0x08, 0xbd, 0x3f, 0xd3, 0x1c, 0xde, 0xfd, 0x46, 0x5c, 0xa2, 0x14, 0x95, + 0x47, 0x43, 0xed, 0xb2, 0x2c, 
0x4c, 0x72, 0xd3, 0x84, 0x12, 0x78, 0x08, 0xf2, 0xe6, 0xc9, 0x89, + 0x83, 0x59, 0x32, 0x89, 0x0a, 0x73, 0x35, 0x6b, 0x08, 0xd8, 0x0d, 0x69, 0x44, 0xaa, 0x45, 0x1c, + 0x72, 0xaa, 0x45, 0x30, 0x78, 0x0c, 0x8a, 0x94, 0xb8, 0xc8, 0x33, 0xa9, 0x43, 0x70, 0x34, 0x16, + 0x54, 0x32, 0x27, 0x8b, 0x31, 0xd9, 0xf8, 0x62, 0x93, 0x59, 0x75, 0x79, 0x01, 0x09, 0x28, 0x9a, + 0x18, 0x13, 0x1a, 0x8a, 0x5d, 0x9e, 0x36, 0x0a, 0x24, 0x9d, 0xb3, 0x1b, 0x33, 0x09, 0xdf, 0xf0, + 0xb2, 0x22, 0x89, 0x92, 0xcb, 0x8a, 0x04, 0x9e, 0x38, 0x66, 0x0b, 0xbc, 0xe5, 0x99, 0x7d, 0xcc, + 0xee, 0x81, 0x52, 0x54, 0x99, 0x08, 0x3e, 0x24, 0xae, 0x63, 0x0d, 0xf8, 0xd7, 0x95, 0x82, 0xb8, + 0x3c, 0x93, 0x38, 0xf9, 0xf2, 0x4c, 0xe2, 0xe0, 0xc7, 0x60, 0xfc, 0xea, 0x34, 0x91, 0xa5, 0x4b, + 0x3c, 0x4a, 0xdb, 0x59, 0x0e, 0xd5, 0x33, 0xe8, 0x1b, 0x57, 0x43, 0xd7, 0x66, 0x4a, 0xd3, 0x33, + 0xa1, 0xb0, 0x0d, 0x36, 0x2c, 0x93, 0x79, 0x36, 0xbe, 0x17, 0xee, 0x23, 0x51, 0x6d, 0x56, 0x1a, + 0xd7, 0x47, 0x43, 0xed, 0x5a, 0x06, 0x5a, 0xda, 0x4d, 0x16, 0x77, 0xb9, 0x0b, 0xd6, 0x53, 0x99, + 0xfa, 0x4a, 0x66, 0xaa, 0x13, 0x50, 0x4a, 0x46, 0xfd, 0x95, 0x0c, 0x53, 0x7f, 0x53, 0xc0, 0xe6, + 0x61, 0xe0, 0xfa, 0xa6, 0xd7, 0x8e, 0xb2, 0xf0, 0x1e, 0xe9, 0xec, 0x21, 0x6a, 0x3a, 0xae, 0xcf, + 0x84, 0xf1, 0x37, 0xa3, 0x50, 0x27, 0x17, 0xc6, 0x01, 0xb2, 0x30, 0xf1, 0x54, 0xfd, 0x36, 0x58, + 0x7c, 0x94, 0x1c, 0x96, 0x92, 0xdd, 0x95, 0xa0, 0x80, 0x37, 0xc1, 0x12, 0xbb, 0xae, 0x11, 0x0d, + 0x07, 0x25, 0x3e, 0x47, 0x0b, 0x88, 0x3c, 0x47, 0x0b, 0xc8, 0x77, 0x0f, 0x40, 0x51, 0x7a, 0xf2, + 0x82, 0x45, 0xb0, 0x7c, 0xdc, 0xba, 0xdf, 0x3a, 0xf8, 0x59, 0xab, 0x34, 0xc7, 0x16, 0x87, 0xfb, + 0xad, 0xbd, 0x66, 0xeb, 0x27, 0x25, 0x85, 0x2d, 0xf4, 0xe3, 0x56, 0x8b, 0x2d, 0xe6, 0xe1, 0x05, + 0x50, 0x68, 0x1f, 0xdf, 0xb9, 0xb3, 0xbf, 0xbf, 0xb7, 0xbf, 0x57, 0xca, 0x41, 0x00, 0x96, 0x7e, + 0xbc, 0xdb, 0x7c, 0xb0, 0xbf, 0x57, 0x5a, 0x68, 0xfc, 0xf2, 0xd9, 0xf3, 0x8a, 0xf2, 0xd5, 0xf3, + 0x8a, 0xf2, 0x9f, 0xe7, 0x15, 0xe5, 0xb3, 0x17, 0x95, 0xb9, 0xaf, 0x5e, 0x54, 0xe6, 0xfe, 0xfd, + 0xa2, 0x32, 0xf7, 0x8b, 0x3b, 0xd2, 0xf7, 0x57, 0xf1, 0x0a, 0xdd, 0xf7, 0x08, 0x3b, 0x92, 0xe1, + 0xaa, 0x7e, 0x8e, 0x0f, 0xcd, 0x9d, 0x25, 0x7e, 0x25, 0xbe, 0xff, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xdd, 0x00, 0x57, 0x24, 0x96, 0x1e, 0x00, 0x00, } func (m *Executor) Marshal() (dAtA []byte, err error) { @@ -1848,6 +1859,11 @@ func (m *JobSchedulingInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.QueueTtlSeconds != 0 { + i = encodeVarintSchedulerobjects(dAtA, i, uint64(m.QueueTtlSeconds)) + i-- + dAtA[i] = 0x50 + } if m.Version != 0 { i = encodeVarintSchedulerobjects(dAtA, i, uint64(m.Version)) i-- @@ -2459,6 +2475,9 @@ func (m *JobSchedulingInfo) Size() (n int) { if m.Version != 0 { n += 1 + sovSchedulerobjects(uint64(m.Version)) } + if m.QueueTtlSeconds != 0 { + n += 1 + sovSchedulerobjects(uint64(m.QueueTtlSeconds)) + } return n } @@ -5580,6 +5599,25 @@ func (m *JobSchedulingInfo) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) + } + m.QueueTtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerobjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueTtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipSchedulerobjects(dAtA[iNdEx:]) diff --git 
a/internal/scheduler/schedulerobjects/schedulerobjects.proto b/internal/scheduler/schedulerobjects/schedulerobjects.proto index 3bcff305487..91a904d1efd 100644 --- a/internal/scheduler/schedulerobjects/schedulerobjects.proto +++ b/internal/scheduler/schedulerobjects/schedulerobjects.proto @@ -136,6 +136,8 @@ message JobSchedulingInfo { // Kubernetes objects that make up this job and their respective scheduling requirements. repeated ObjectRequirements object_requirements = 5; uint32 version = 9; + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + int64 queue_ttl_seconds = 10; } // Message capturing the scheduling requirements of a particular Kubernetes object. @@ -174,4 +176,4 @@ message PulsarSchedulerJobDetails { string JobId = 1; string Queue = 2; string JobSet = 3; -} \ No newline at end of file +} diff --git a/internal/scheduler/scheduling_algo.go b/internal/scheduler/scheduling_algo.go index 2c925d465a5..58be9e48936 100644 --- a/internal/scheduler/scheduling_algo.go +++ b/internal/scheduler/scheduling_algo.go @@ -7,14 +7,15 @@ import ( "github.com/benbjohnson/immutable" "github.com/google/uuid" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "golang.org/x/time/rate" "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/util" @@ -33,7 +34,7 @@ import ( type SchedulingAlgo interface { // Schedule should assign jobs to nodes. // Any jobs that are scheduled should be marked as such in the JobDb using the transaction provided. - Schedule(ctx context.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb) (*SchedulerResult, error) + Schedule(ctx *armadacontext.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb) (*SchedulerResult, error) } // FairSchedulingAlgo is a SchedulingAlgo based on PreemptingQueueScheduler. @@ -42,7 +43,12 @@ type FairSchedulingAlgo struct { executorRepository database.ExecutorRepository queueRepository database.QueueRepository schedulingContextRepository *SchedulingContextRepository - maxSchedulingDuration time.Duration + // Global job scheduling rate-limiter. + limiter *rate.Limiter + // Per-queue job scheduling rate-limiters. + limiterByQueue map[string]*rate.Limiter + // Max amount of time each scheduling round is allowed to take. + maxSchedulingDuration time.Duration // Order in which to schedule executor groups. // Executors are grouped by either id (i.e., individually) or by pool. executorGroupsToSchedule []string @@ -68,6 +74,8 @@ func NewFairSchedulingAlgo( executorRepository: executorRepository, queueRepository: queueRepository, schedulingContextRepository: schedulingContextRepository, + limiter: rate.NewLimiter(rate.Limit(config.MaximumSchedulingRate), config.MaximumSchedulingBurst), + limiterByQueue: make(map[string]*rate.Limiter), maxSchedulingDuration: maxSchedulingDuration, rand: util.NewThreadsafeRand(time.Now().UnixNano()), clock: clock.RealClock{}, @@ -80,24 +88,23 @@ func NewFairSchedulingAlgo( // It maintains state of which executors it has considered already and may take multiple Schedule() calls to consider all executors if scheduling is slow. 
// Newly leased jobs are updated as such in the jobDb using the transaction provided and are also returned to the caller. func (l *FairSchedulingAlgo) Schedule( - ctx context.Context, + ctx *armadacontext.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb, ) (*SchedulerResult, error) { - log := ctxlogrus.Extract(ctx) - overallSchedulerResult := &SchedulerResult{ NodeIdByJobId: make(map[string]string), SchedulingContexts: make([]*schedulercontext.SchedulingContext, 0, 0), + FailedJobs: make([]interfaces.LegacySchedulerJob, 0), } // Exit immediately if scheduling is disabled. if l.schedulingConfig.DisableScheduling { - log.Info("skipping scheduling - scheduling disabled") + ctx.Info("skipping scheduling - scheduling disabled") return overallSchedulerResult, nil } - ctxWithTimeout, cancel := context.WithTimeout(ctx, l.maxSchedulingDuration) + ctxWithTimeout, cancel := armadacontext.WithTimeout(ctx, l.maxSchedulingDuration) defer cancel() fsctx, err := l.newFairSchedulingAlgoContext(ctx, txn, jobDb) @@ -115,7 +122,7 @@ func (l *FairSchedulingAlgo) Schedule( select { case <-ctxWithTimeout.Done(): // We've reached the scheduling time limit; exit gracefully. - log.Info("ending scheduling round early as we have hit the maximum scheduling duration") + ctx.Info("ending scheduling round early as we have hit the maximum scheduling duration") return overallSchedulerResult, nil default: } @@ -134,7 +141,7 @@ func (l *FairSchedulingAlgo) Schedule( // Assume pool and minimumJobSize are consistent within the group. pool := executorGroup[0].Pool minimumJobSize := executorGroup[0].MinimumJobSize - log.Infof( + ctx.Infof( "scheduling on executor group %s with capacity %s", executorGroupLabel, fsctx.totalCapacityByPool[pool].CompactString(), ) @@ -150,30 +157,34 @@ func (l *FairSchedulingAlgo) Schedule( // add the executorGroupLabel back to l.executorGroupsToSchedule such that we try it again next time, // and exit gracefully. l.executorGroupsToSchedule = append(l.executorGroupsToSchedule, executorGroupLabel) - log.Info("stopped scheduling early as we have hit the maximum scheduling duration") + ctx.Info("stopped scheduling early as we have hit the maximum scheduling duration") break } else if err != nil { return nil, err } if l.schedulingContextRepository != nil { if err := l.schedulingContextRepository.AddSchedulingContext(sctx); err != nil { - logging.WithStacktrace(log, err).Error("failed to add scheduling context") + logging.WithStacktrace(ctx, err).Error("failed to add scheduling context") } } - // Update jobDb. preemptedJobs := PreemptedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) scheduledJobs := ScheduledJobsFromSchedulerResult[*jobdb.Job](schedulerResult) + failedJobs := FailedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) if err := jobDb.Upsert(txn, preemptedJobs); err != nil { return nil, err } if err := jobDb.Upsert(txn, scheduledJobs); err != nil { return nil, err } + if err := jobDb.Upsert(txn, failedJobs); err != nil { + return nil, err + } // Aggregate changes across executors. overallSchedulerResult.PreemptedJobs = append(overallSchedulerResult.PreemptedJobs, schedulerResult.PreemptedJobs...) overallSchedulerResult.ScheduledJobs = append(overallSchedulerResult.ScheduledJobs, schedulerResult.ScheduledJobs...) + overallSchedulerResult.FailedJobs = append(overallSchedulerResult.FailedJobs, schedulerResult.FailedJobs...) overallSchedulerResult.SchedulingContexts = append(overallSchedulerResult.SchedulingContexts, schedulerResult.SchedulingContexts...) 
maps.Copy(overallSchedulerResult.NodeIdByJobId, schedulerResult.NodeIdByJobId) @@ -231,7 +242,7 @@ type fairSchedulingAlgoContext struct { jobDb *jobdb.JobDb } -func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx context.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb) (*fairSchedulingAlgoContext, error) { +func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Context, txn *jobdb.Txn, jobDb *jobdb.JobDb) (*fairSchedulingAlgoContext, error) { executors, err := l.executorRepository.GetExecutors(ctx) if err != nil { return nil, err @@ -283,7 +294,7 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx context.Context, t } jobsByExecutorId[executorId] = append(jobsByExecutorId[executorId], job) nodeIdByJobId[job.Id()] = nodeId - gangId, _, isGangJob, err := GangIdAndCardinalityFromLegacySchedulerJob(job) + gangId, _, _, isGangJob, err := GangIdAndCardinalityFromLegacySchedulerJob(job) if err != nil { return nil, err } @@ -322,7 +333,7 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx context.Context, t // scheduleOnExecutors schedules jobs on a specified set of executors. func (l *FairSchedulingAlgo) scheduleOnExecutors( - ctx context.Context, + ctx *armadacontext.Context, fsctx *fairSchedulingAlgoContext, pool string, minimumJobSize schedulerobjects.ResourceList, @@ -372,6 +383,7 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( l.schedulingConfig.Preemption.PriorityClasses, l.schedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + l.limiter, totalResources, ) for queue, priorityFactor := range fsctx.priorityFactorByQueue { @@ -387,7 +399,16 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( if priorityFactor > 0 { weight = 1 / priorityFactor } - if err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByPriorityClass); err != nil { + queueLimiter, ok := l.limiterByQueue[queue] + if !ok { + // Create per-queue limiters lazily. + queueLimiter = rate.NewLimiter( + rate.Limit(l.schedulingConfig.MaximumPerQueueSchedulingRate), + l.schedulingConfig.MaximumPerQueueSchedulingBurst, + ) + l.limiterByQueue[queue] = queueLimiter + } + if err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByPriorityClass, queueLimiter); err != nil { return nil, nil, err } } @@ -443,6 +464,10 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( result.ScheduledJobs[i] = jobDbJob.WithQueuedVersion(jobDbJob.QueuedVersion()+1).WithQueued(false).WithNewRun(node.Executor, node.Id, node.Name) } } + for i, job := range result.FailedJobs { + jobDbJob := job.(*jobdb.Job) + result.FailedJobs[i] = jobDbJob.WithQueued(false).WithFailed(true) + } return result, sctx, nil } @@ -538,17 +563,18 @@ func (l *FairSchedulingAlgo) filterStaleExecutors(executors []*schedulerobjects. // // TODO: Let's also check that jobs are on the right nodes. func (l *FairSchedulingAlgo) filterLaggingExecutors( - ctx context.Context, + ctx *armadacontext.Context, executors []*schedulerobjects.Executor, leasedJobsByExecutor map[string][]*jobdb.Job, ) []*schedulerobjects.Executor { - log := ctxlogrus.Extract(ctx) activeExecutors := make([]*schedulerobjects.Executor, 0, len(executors)) for _, executor := range executors { leasedJobs := leasedJobsByExecutor[executor.Id] executorRuns, err := executor.AllRuns() if err != nil { - logging.WithStacktrace(log, err).Errorf("failed to retrieve runs for executor %s; will not be considered for scheduling", executor.Id) + logging. + WithStacktrace(ctx, err). 
+ Errorf("failed to retrieve runs for executor %s; will not be considered for scheduling", executor.Id) continue } executorRunIds := make(map[uuid.UUID]bool, len(executorRuns)) @@ -567,7 +593,7 @@ func (l *FairSchedulingAlgo) filterLaggingExecutors( if numUnacknowledgedJobs <= l.schedulingConfig.MaxUnacknowledgedJobsPerExecutor { activeExecutors = append(activeExecutors, executor) } else { - log.Warnf( + ctx.Warnf( "%d unacknowledged jobs on executor %s exceeds limit of %d; executor will not be considered for scheduling", numUnacknowledgedJobs, executor.Id, l.schedulingConfig.MaxUnacknowledgedJobsPerExecutor, ) diff --git a/internal/scheduler/scheduling_algo_test.go b/internal/scheduler/scheduling_algo_test.go index 6cb6a276f6a..9ebb813fae8 100644 --- a/internal/scheduler/scheduling_algo_test.go +++ b/internal/scheduler/scheduling_algo_test.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "math" "testing" @@ -14,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/jobdb" @@ -46,6 +46,9 @@ func TestSchedule(t *testing.T) { // Indices of queued jobs expected to be scheduled. expectedScheduledIndices []int + + // Count of jobs expected to fail + expectedFailedJobCount int }{ "scheduling": { schedulingConfig: testfixtures.TestSchedulingConfig(), @@ -249,13 +252,21 @@ func TestSchedule(t *testing.T) { }, expectedScheduledIndices: []int{0}, }, - "gang scheduling": { + "gang scheduling successful": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, queues: []*database.Queue{{Name: "A", Weight: 100}}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 2)), expectedScheduledIndices: []int{0, 1}, }, + "gang scheduling successful with some jobs failing to schedule above min cardinality": { + schedulingConfig: testfixtures.TestSchedulingConfig(), + executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, + queues: []*database.Queue{{Name: "A", Weight: 100}}, + queuedJobs: testfixtures.WithGangAnnotationsAndMinCardinalityJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 10), 2), + expectedScheduledIndices: []int{0, 1}, + expectedFailedJobCount: 8, + }, "not scheduling a gang that does not fit on any executor": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{ @@ -330,9 +341,8 @@ func TestSchedule(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx := testfixtures.ContextWithDefaultLogger(context.Background()) timeout := 5 * time.Second - ctx, cancel := context.WithTimeout(ctx, timeout) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), timeout) defer cancel() ctrl := gomock.NewController(t) @@ -434,6 +444,10 @@ func TestSchedule(t *testing.T) { assert.Equal(t, tc.expectedScheduledIndices, actualScheduledIndices) } + // Check that we failed the correct number of excess jobs when a gang schedules >= minimum cardinality + failedJobs := FailedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) + assert.Equal(t, tc.expectedFailedJobCount, len(failedJobs)) + // Check that 
preempted jobs are marked as such consistently. for _, job := range preemptedJobs { dbJob := jobDb.GetById(txn, job.Id()) @@ -452,6 +466,13 @@ func TestSchedule(t *testing.T) { assert.NotEmpty(t, dbRun.NodeName()) } + // Check that failed jobs are marked as such consistently. + for _, job := range failedJobs { + dbJob := jobDb.GetById(txn, job.Id()) + assert.True(t, dbJob.Failed()) + assert.False(t, dbJob.Queued()) + } + // Check that jobDb was updated correctly. // TODO: Check that there are no unexpected jobs in the jobDb. for _, job := range preemptedJobs { @@ -462,6 +483,10 @@ func TestSchedule(t *testing.T) { dbJob := jobDb.GetById(txn, job.Id()) assert.Equal(t, job, dbJob) } + for _, job := range failedJobs { + dbJob := jobDb.GetById(txn, job.Id()) + assert.Equal(t, job, dbJob) + } }) } } diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go index 97666a7a9fc..1c282e8c303 100644 --- a/internal/scheduler/simulator/simulator.go +++ b/internal/scheduler/simulator/simulator.go @@ -3,27 +3,26 @@ package simulator import ( "bytes" "container/heap" - "context" - fmt "fmt" + "fmt" "os" "path/filepath" "strings" "time" "github.com/caarlos0/log" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/mattn/go-zglob" "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/renstrom/shortuuid" - "github.com/sirupsen/logrus" "github.com/spf13/viper" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "golang.org/x/time/rate" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/yaml" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" commonconfig "github.com/armadaproject/armada/internal/common/config" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/util" @@ -33,7 +32,7 @@ import ( "github.com/armadaproject/armada/internal/scheduler/fairness" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" - schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduleringester" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -69,6 +68,11 @@ type Simulator struct { eventLog EventLog // Simulated events are emitted on this channel in order. c chan *armadaevents.EventSequence + + // Global job scheduling rate-limiter. + limiter *rate.Limiter + // Per-queue job scheduling rate-limiters. + limiterByQueue map[string]*rate.Limiter } func NewSimulator(testCase *TestCase, schedulingConfig configuration.SchedulingConfig) (*Simulator, error) { @@ -143,6 +147,11 @@ func NewSimulator(testCase *TestCase, schedulingConfig configuration.SchedulingC allocationByPoolAndQueueAndPriorityClass: make(map[string]map[string]schedulerobjects.QuantityByTAndResourceType[string]), totalResourcesByPool: totalResourcesByPool, c: make(chan *armadaevents.EventSequence), + limiter: rate.NewLimiter( + rate.Limit(schedulingConfig.MaximumSchedulingRate), + schedulingConfig.MaximumSchedulingBurst, + ), + limiterByQueue: make(map[string]*rate.Limiter), } // Mark all jobTemplates as active. 
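The simulator hunks around this point, like the scheduling_algo.go changes above, wire in one global token-bucket limiter plus per-queue limiters that are created lazily the first time a queue is encountered. Below is a minimal, self-contained Go sketch of that lazy per-queue pattern using golang.org/x/time/rate; the queueLimiters type, its field names, and the example rates are illustrative assumptions rather than Armada code (the real limiters are driven by the MaximumSchedulingRate/Burst and MaximumPerQueueSchedulingRate/Burst config values shown in the diff).

// limiter_sketch.go: illustrative only, not part of the Armada codebase.
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

// queueLimiters holds a global limiter plus lazily created per-queue limiters,
// mirroring the limiter/limiterByQueue fields added in this diff.
type queueLimiters struct {
	global        *rate.Limiter
	perQueueRate  rate.Limit
	perQueueBurst int
	byQueue       map[string]*rate.Limiter
}

func newQueueLimiters(globalRate rate.Limit, globalBurst int, perQueueRate rate.Limit, perQueueBurst int) *queueLimiters {
	return &queueLimiters{
		global:        rate.NewLimiter(globalRate, globalBurst),
		perQueueRate:  perQueueRate,
		perQueueBurst: perQueueBurst,
		byQueue:       make(map[string]*rate.Limiter),
	}
}

// forQueue returns the limiter for queue, creating it on first use so that
// queues only need to be known once they show up in a scheduling round.
func (q *queueLimiters) forQueue(queue string) *rate.Limiter {
	limiter, ok := q.byQueue[queue]
	if !ok {
		limiter = rate.NewLimiter(q.perQueueRate, q.perQueueBurst)
		q.byQueue[queue] = limiter
	}
	return limiter
}

func main() {
	q := newQueueLimiters(100, 1000, 10, 100)
	// A scheduling decision passes only if both the global and the per-queue
	// bucket still hold tokens; Allow consumes one token when it returns true.
	ok := q.global.Allow() && q.forQueue("queue-a").Allow()
	fmt.Println(ok)
}

Creating the per-queue limiters lazily avoids having to enumerate every queue up front, at the cost of a single map lookup per decision, which is the trade-off both scheduling_algo.go and the simulator appear to make here.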
@@ -415,13 +424,24 @@ func (s *Simulator) handleScheduleEvent() error { s.schedulingConfig.Preemption.PriorityClasses, s.schedulingConfig.Preemption.DefaultPriorityClass, fairnessCostProvider, + s.limiter, totalResources, ) + sctx.Started = s.time for _, queue := range s.testCase.Queues { + limiter, ok := s.limiterByQueue[queue.Name] + if !ok { + limiter = rate.NewLimiter( + rate.Limit(s.schedulingConfig.MaximumPerQueueSchedulingRate), + s.schedulingConfig.MaximumPerQueueSchedulingBurst, + ) + s.limiterByQueue[queue.Name] = limiter + } err := sctx.AddQueueSchedulingContext( queue.Name, queue.Weight, s.allocationByPoolAndQueueAndPriorityClass[pool.Name][queue.Name], + limiter, ) if err != nil { return err @@ -450,7 +470,7 @@ func (s *Simulator) handleScheduleEvent() error { if s.schedulingConfig.EnableNewPreemptionStrategy { sch.EnableNewPreemptionStrategy() } - ctx := ctxlogrus.ToContext(context.Background(), logrus.NewEntry(logrus.New())) + ctx := armadacontext.Background() result, err := sch.Schedule(ctx) if err != nil { return err @@ -514,6 +534,10 @@ func (s *Simulator) handleScheduleEvent() error { if err != nil { return err } + eventSequences, err = scheduler.AppendEventSequencesFromUnschedulableJobs(eventSequences, result.FailedJobs, s.time) + if err != nil { + return err + } } } txn.Commit() @@ -753,7 +777,7 @@ func (s *Simulator) handleJobRunPreempted(txn *jobdb.Txn, e *armadaevents.JobRun return true, nil } -// func (a *App) TestPattern(ctx context.Context, pattern string) (*TestSuiteReport, error) { +// func (a *App) TestPattern(ctx *context.Context, pattern string) (*TestSuiteReport, error) { // testSpecs, err := TestSpecsFromPattern(pattern) // if err != nil { // return nil, err diff --git a/internal/scheduler/submitcheck.go b/internal/scheduler/submitcheck.go index 6221e2611e9..4fe71c9c59f 100644 --- a/internal/scheduler/submitcheck.go +++ b/internal/scheduler/submitcheck.go @@ -1,7 +1,6 @@ package scheduler import ( - "context" "fmt" "strings" "sync" @@ -9,11 +8,12 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/types" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" @@ -84,7 +84,7 @@ func NewSubmitChecker( } } -func (srv *SubmitChecker) Run(ctx context.Context) error { +func (srv *SubmitChecker) Run(ctx *armadacontext.Context) error { srv.updateExecutors(ctx) ticker := time.NewTicker(srv.ExecutorUpdateFrequency) @@ -98,10 +98,12 @@ func (srv *SubmitChecker) Run(ctx context.Context) error { } } -func (srv *SubmitChecker) updateExecutors(ctx context.Context) { +func (srv *SubmitChecker) updateExecutors(ctx *armadacontext.Context) { executors, err := srv.executorRepository.GetExecutors(ctx) if err != nil { - log.WithError(err).Error("Error fetching executors") + logging. + WithStacktrace(ctx, err). + Error("Error fetching executors") return } for _, executor := range executors { @@ -114,10 +116,14 @@ func (srv *SubmitChecker) updateExecutors(ctx context.Context) { } srv.mu.Unlock() if err != nil { - log.WithError(err).Errorf("Error constructing node db for executor %s", executor.Id) + logging. + WithStacktrace(ctx, err). 
+ Errorf("Error constructing node db for executor %s", executor.Id) } } else { - log.WithError(err).Warnf("Error clearing nodedb for executor %s", executor.Id) + logging. + WithStacktrace(ctx, err). + Warnf("Error clearing nodedb for executor %s", executor.Id) } } @@ -128,17 +134,21 @@ func (srv *SubmitChecker) updateExecutors(ctx context.Context) { } func (srv *SubmitChecker) CheckApiJobs(jobs []*api.Job) (bool, string) { - return srv.check(schedulercontext.JobSchedulingContextsFromJobs(srv.priorityClasses, jobs)) + return srv.check(schedulercontext.JobSchedulingContextsFromJobs(srv.priorityClasses, jobs, GangIdAndCardinalityFromAnnotations)) } func (srv *SubmitChecker) CheckJobDbJobs(jobs []*jobdb.Job) (bool, string) { - return srv.check(schedulercontext.JobSchedulingContextsFromJobs(srv.priorityClasses, jobs)) + return srv.check(schedulercontext.JobSchedulingContextsFromJobs(srv.priorityClasses, jobs, GangIdAndCardinalityFromAnnotations)) } func (srv *SubmitChecker) check(jctxs []*schedulercontext.JobSchedulingContext) (bool, string) { // First, check if all jobs can be scheduled individually. for i, jctx := range jctxs { + // Override min cardinality to enable individual job scheduling checks, but reset after + originalGangMinCardinality := jctx.GangMinCardinality + jctx.GangMinCardinality = 1 schedulingResult := srv.getIndividualSchedulingResult(jctx) + jctx.GangMinCardinality = originalGangMinCardinality if !schedulingResult.isSchedulable { return schedulingResult.isSchedulable, fmt.Sprintf("%d-th job unschedulable:\n%s", i, schedulingResult.reason) } @@ -241,7 +251,7 @@ func (srv *SubmitChecker) getSchedulingResult(jctxs []*schedulercontext.JobSched sb.WriteString("\n") } else { sb.WriteString(":") - sb.WriteString(fmt.Sprintf(" %d out of %d pods schedulable\n", numSuccessfullyScheduled, len(jctxs))) + sb.WriteString(fmt.Sprintf(" %d out of %d pods schedulable (minCardinality %d)\n", numSuccessfullyScheduled, len(jctxs), jctxs[0].GangMinCardinality)) } } return schedulingResult{isSchedulable: isSchedulable, reason: sb.String()} diff --git a/internal/scheduler/submitcheck_test.go b/internal/scheduler/submitcheck_test.go index a95f3d9abbf..8e418db09a2 100644 --- a/internal/scheduler/submitcheck_test.go +++ b/internal/scheduler/submitcheck_test.go @@ -1,7 +1,7 @@ package scheduler import ( - "context" + "fmt" "testing" "time" @@ -14,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/jobdb" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" @@ -72,7 +73,7 @@ func TestSubmitChecker_CheckJobDbJobs(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() ctrl := gomock.NewController(t) @@ -170,7 +171,7 @@ func TestSubmitChecker_TestCheckApiJobs(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() ctrl := gomock.NewController(t) @@ -218,7 +219,11 @@ func testNJobGang(n int) []*api.Job { gang := make([]*api.Job, n) for i := 0; i < n; 
i++ { job := test1CoreCpuJob() - job.Annotations = map[string]string{configuration.GangIdAnnotation: gangId} + job.Annotations = map[string]string{ + configuration.GangIdAnnotation: gangId, + configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n), + configuration.GangMinimumCardinalityAnnotation: fmt.Sprintf("%d", n), + } gang[i] = job } return gang diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go index 7c6e01f39c4..a4e45d8a14a 100644 --- a/internal/scheduler/testfixtures/testfixtures.go +++ b/internal/scheduler/testfixtures/testfixtures.go @@ -2,16 +2,13 @@ package testfixtures // This file contains test fixtures to be used throughout the tests for this package. import ( - "context" "fmt" "math" "sync/atomic" "time" "github.com/google/uuid" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "github.com/oklog/ulid" - "github.com/sirupsen/logrus" "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -82,10 +79,6 @@ func Repeat[T any](v T, n int) []T { return rv } -func ContextWithDefaultLogger(ctx context.Context) context.Context { - return ctxlogrus.ToContext(ctx, logrus.NewEntry(logrus.New())) -} - func TestSchedulingConfig() configuration.SchedulingConfig { return configuration.SchedulingConfig{ ResourceScarcity: map[string]float64{"cpu": 1}, @@ -95,12 +88,16 @@ func TestSchedulingConfig() configuration.SchedulingConfig { NodeEvictionProbability: 1.0, NodeOversubscriptionEvictionProbability: 1.0, }, - IndexedResources: TestResources, - IndexedNodeLabels: TestIndexedNodeLabels, + MaximumSchedulingRate: math.Inf(1), + MaximumSchedulingBurst: math.MaxInt, + MaximumPerQueueSchedulingRate: math.Inf(1), + MaximumPerQueueSchedulingBurst: math.MaxInt, + IndexedResources: TestResources, + IndexedNodeLabels: TestIndexedNodeLabels, DominantResourceFairnessResourcesToConsider: TestResourceNames, - ExecutorTimeout: 15 * time.Minute, - MaxUnacknowledgedJobsPerExecutor: math.MaxInt, - EnableNewPreemptionStrategy: true, + ExecutorTimeout: 15 * time.Minute, + MaxUnacknowledgedJobsPerExecutor: math.MaxInt, + EnableNewPreemptionStrategy: true, } } @@ -166,18 +163,19 @@ func WithIndexedResourcesConfig(indexResources []configuration.IndexedResource, return config } -func WithMaxJobsToScheduleConfig(n uint, config configuration.SchedulingConfig) configuration.SchedulingConfig { - config.MaximumJobsToSchedule = n +func WithGlobalSchedulingRateLimiterConfig(maximumSchedulingRate float64, maximumSchedulingBurst int, config configuration.SchedulingConfig) configuration.SchedulingConfig { + config.MaximumSchedulingRate = maximumSchedulingRate + config.MaximumSchedulingBurst = maximumSchedulingBurst return config } -func WithMaxGangsToScheduleConfig(n uint, config configuration.SchedulingConfig) configuration.SchedulingConfig { - config.MaximumGangsToSchedule = n +func WithPerQueueSchedulingLimiterConfig(maximumPerQueueSchedulingRate float64, maximumPerQueueSchedulingBurst int, config configuration.SchedulingConfig) configuration.SchedulingConfig { + config.MaximumPerQueueSchedulingRate = maximumPerQueueSchedulingRate + config.MaximumPerQueueSchedulingBurst = maximumPerQueueSchedulingBurst return config } func WithMaxLookbackPerQueueConfig(n uint, config configuration.SchedulingConfig) configuration.SchedulingConfig { - // For legacy reasons, it's called QueueLeaseBatchSize in config. 
config.MaxQueueLookback = n return config } @@ -323,7 +321,17 @@ func WithGangAnnotationsJobs(jobs []*jobdb.Job) []*jobdb.Job { gangId := uuid.NewString() gangCardinality := fmt.Sprintf("%d", len(jobs)) return WithAnnotationsJobs( - map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality}, + map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality, configuration.GangMinimumCardinalityAnnotation: gangCardinality}, + jobs, + ) +} + +func WithGangAnnotationsAndMinCardinalityJobs(jobs []*jobdb.Job, minimumCardinality int) []*jobdb.Job { + gangId := uuid.NewString() + gangCardinality := fmt.Sprintf("%d", len(jobs)) + gangMinCardinality := fmt.Sprintf("%d", minimumCardinality) + return WithAnnotationsJobs( + map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality, configuration.GangMinimumCardinalityAnnotation: gangMinCardinality}, jobs, ) } diff --git a/internal/scheduleringester/instructions.go b/internal/scheduleringester/instructions.go index 429ab2d9112..3a9d6d780e2 100644 --- a/internal/scheduleringester/instructions.go +++ b/internal/scheduleringester/instructions.go @@ -1,7 +1,6 @@ package scheduleringester import ( - "context" "time" "github.com/gogo/protobuf/proto" @@ -10,6 +9,7 @@ import ( "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/ingest" "github.com/armadaproject/armada/internal/common/ingest/metrics" @@ -46,7 +46,7 @@ func NewInstructionConverter( } } -func (c *InstructionConverter) Convert(_ context.Context, sequencesWithIds *ingest.EventSequencesWithIds) *DbOperationsWithMessageIds { +func (c *InstructionConverter) Convert(_ *armadacontext.Context, sequencesWithIds *ingest.EventSequencesWithIds) *DbOperationsWithMessageIds { operations := make([]DbOperation, 0) for _, es := range sequencesWithIds.EventSequences { for _, op := range c.dbOperationsFromEventSequence(es) { @@ -371,6 +371,7 @@ func SchedulingInfoFromSubmitJob(submitJob *armadaevents.SubmitJob, submitTime t SubmitTime: submitTime, Priority: submitJob.Priority, Version: 0, + QueueTtlSeconds: submitJob.QueueTtlSeconds, } // Scheduling requirements specific to the objects that make up this job. diff --git a/internal/scheduleringester/schedulerdb.go b/internal/scheduleringester/schedulerdb.go index e1ce855504b..058f0f4778b 100644 --- a/internal/scheduleringester/schedulerdb.go +++ b/internal/scheduleringester/schedulerdb.go @@ -1,7 +1,6 @@ package scheduleringester import ( - "context" "time" "github.com/google/uuid" @@ -10,6 +9,7 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/maps" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/ingest" "github.com/armadaproject/armada/internal/common/ingest/metrics" @@ -45,14 +45,14 @@ func NewSchedulerDb( // Store persists all operations in the database. // This function retires until it either succeeds or encounters a terminal error. // This function locks the postgres table to avoid write conflicts; see acquireLock() for details. 
-func (s *SchedulerDb) Store(ctx context.Context, instructions *DbOperationsWithMessageIds) error { +func (s *SchedulerDb) Store(ctx *armadacontext.Context, instructions *DbOperationsWithMessageIds) error { return ingest.WithRetry(func() (bool, error) { err := pgx.BeginTxFunc(ctx, s.db, pgx.TxOptions{ IsoLevel: pgx.ReadCommitted, AccessMode: pgx.ReadWrite, DeferrableMode: pgx.Deferrable, }, func(tx pgx.Tx) error { - lockCtx, cancel := context.WithTimeout(ctx, s.lockTimeout) + lockCtx, cancel := armadacontext.WithTimeout(ctx, s.lockTimeout) defer cancel() // The lock is released automatically on transaction rollback/commit. if err := s.acquireLock(lockCtx, tx); err != nil { @@ -78,7 +78,7 @@ func (s *SchedulerDb) Store(ctx context.Context, instructions *DbOperationsWithM // rows with sequence numbers smaller than those already written. // // The scheduler relies on these sequence numbers to only fetch new or updated rows in each update cycle. -func (s *SchedulerDb) acquireLock(ctx context.Context, tx pgx.Tx) error { +func (s *SchedulerDb) acquireLock(ctx *armadacontext.Context, tx pgx.Tx) error { const lockId = 8741339439634283896 if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", lockId); err != nil { return errors.Wrapf(err, "could not obtain lock") @@ -86,7 +86,7 @@ func (s *SchedulerDb) acquireLock(ctx context.Context, tx pgx.Tx) error { return nil } -func (s *SchedulerDb) WriteDbOp(ctx context.Context, tx pgx.Tx, op DbOperation) error { +func (s *SchedulerDb) WriteDbOp(ctx *armadacontext.Context, tx pgx.Tx, op DbOperation) error { queries := schedulerdb.New(tx) switch o := op.(type) { case InsertJobs: @@ -274,7 +274,7 @@ func (s *SchedulerDb) WriteDbOp(ctx context.Context, tx pgx.Tx, op DbOperation) return nil } -func execBatch(ctx context.Context, tx pgx.Tx, batch *pgx.Batch) error { +func execBatch(ctx *armadacontext.Context, tx pgx.Tx, batch *pgx.Batch) error { result := tx.SendBatch(ctx, batch) for i := 0; i < batch.Len(); i++ { _, err := result.Exec() diff --git a/internal/scheduleringester/schedulerdb_test.go b/internal/scheduleringester/schedulerdb_test.go index 8317e421aff..873885c369e 100644 --- a/internal/scheduleringester/schedulerdb_test.go +++ b/internal/scheduleringester/schedulerdb_test.go @@ -1,7 +1,6 @@ package scheduleringester import ( - "context" "testing" "time" @@ -14,6 +13,7 @@ import ( "golang.org/x/exp/constraints" "golang.org/x/exp/maps" + "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/ingest/metrics" "github.com/armadaproject/armada/internal/common/util" schedulerdb "github.com/armadaproject/armada/internal/scheduler/database" @@ -312,7 +312,7 @@ func addDefaultValues(op DbOperation) DbOperation { } func assertOpSuccess(t *testing.T, schedulerDb *SchedulerDb, serials map[string]int64, op DbOperation) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) defer cancel() // Apply the op to the database. @@ -329,7 +329,7 @@ func assertOpSuccess(t *testing.T, schedulerDb *SchedulerDb, serials map[string] // Read back the state from the db to compare. 
queries := schedulerdb.New(schedulerDb.db) - selectNewJobs := func(ctx context.Context, serial int64) ([]schedulerdb.Job, error) { + selectNewJobs := func(ctx *armadacontext.Context, serial int64) ([]schedulerdb.Job, error) { return queries.SelectNewJobs(ctx, schedulerdb.SelectNewJobsParams{Serial: serial, Limit: 1000}) } switch expected := op.(type) { @@ -645,7 +645,7 @@ func TestStore(t *testing.T) { runId: &JobRunDetails{queue: testQueueName, dbRun: &schedulerdb.Run{JobID: jobId, RunID: runId}}, }, } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() err := schedulerdb.WithTestDb(func(q *schedulerdb.Queries, db *pgxpool.Pool) error { schedulerDb := NewSchedulerDb(db, metrics.NewMetrics("test"), time.Second, time.Second, 10*time.Second) diff --git a/internal/testsuite/app.go b/internal/testsuite/app.go index 218c7269288..0eb31036370 100644 --- a/internal/testsuite/app.go +++ b/internal/testsuite/app.go @@ -17,6 +17,8 @@ import ( "github.com/hashicorp/go-multierror" "github.com/mattn/go-zglob" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" "github.com/renstrom/shortuuid" "golang.org/x/sync/errgroup" apimachineryYaml "k8s.io/apimachinery/pkg/util/yaml" @@ -29,6 +31,8 @@ import ( "github.com/armadaproject/armada/pkg/client" ) +const metricsPrefix = "armada_testsuite_" + type App struct { // Parameters passed to the CLI by the user. Params *Params @@ -45,7 +49,13 @@ type App struct { // and that they can be provided either dynamically on a command line, or // statically in a config file that's reused between command runs. type Params struct { + // Armada connection details. ApiConnectionDetails *client.ApiConnectionDetails + // If non-empty, push metrics containing test results to a Prometheus push gateway with this url. + PrometheusPushGatewayUrl string + // Exported metrics are annotated with job=PrometheusPushGatewayJobName. + // Must be non-empty. + PrometheusPushGatewayJobName string } // New instantiates an App with default parameters, including standard output @@ -107,7 +117,7 @@ func TestSpecFromFilePath(filePath string) (*api.TestSpec, error) { return nil, err } - // Randomise jobSetName for each test to ensure we're only getting events for this run. + // Randomise job set for each test to ensure we're only getting events for this run. fileName := filepath.Base(filePath) fileName = strings.TrimSuffix(fileName, filepath.Ext(fileName)) testSpec.JobSetId = fileName + "-" + shortuuid.New() @@ -126,6 +136,18 @@ type TestSuiteReport struct { TestCaseReports []*TestCaseReport } +func (tsr *TestSuiteReport) Describe(c chan<- *prometheus.Desc) { + for _, tcr := range tsr.TestCaseReports { + tcr.Describe(c) + } +} + +func (tsr *TestSuiteReport) Collect(c chan<- prometheus.Metric) { + for _, tcr := range tsr.TestCaseReports { + tcr.Collect(c) + } +} + type TestCaseReport struct { Out *bytes.Buffer Start time.Time @@ -133,6 +155,108 @@ type TestCaseReport struct { FailureReason string BenchmarkReport *eventbenchmark.TestCaseBenchmarkReport TestSpec *api.TestSpec + + // Prometheus metric descriptions. + // Test start time in seconds since the epoch. + startTimePrometheusDesc *prometheus.Desc + // Test finish time in seconds since the epoch. + finishTimePrometheusDesc *prometheus.Desc + // Outputs 1 on test timeout. 
+ testTimeoutPrometheusDesc *prometheus.Desc + // Outputs 1 on test failure, not including timeouts. + testFailurePrometheusDesc *prometheus.Desc +} + +func NewTestCaseReport(testSpec *api.TestSpec) *TestCaseReport { + rv := &TestCaseReport{ + Start: time.Now(), + TestSpec: testSpec, + } + rv.initialiseMetrics() + return rv +} + +func (r *TestCaseReport) initialiseMetrics() { + r.startTimePrometheusDesc = prometheus.NewDesc( + metricsPrefix+"test_start_time", + "The time at which a test started.", + []string{"testcase", "environment", "target"}, + nil, + ) + r.finishTimePrometheusDesc = prometheus.NewDesc( + metricsPrefix+"test_finish_time", + "The time at which a test finished.", + []string{"testcase", "environment", "target"}, + nil, + ) + r.testTimeoutPrometheusDesc = prometheus.NewDesc( + metricsPrefix+"test_timeout", + "Outputs 1 on test timeout and 0 otherwise.", + []string{"testcase", "environment", "target"}, + nil, + ) + r.testFailurePrometheusDesc = prometheus.NewDesc( + metricsPrefix+"test_failure", + "Outputs 1 on test failure, not including timeout, and 0 otherwise.", + []string{"testcase", "environment", "target"}, + nil, + ) +} + +func (r *TestCaseReport) Describe(c chan<- *prometheus.Desc) { + c <- r.startTimePrometheusDesc + c <- r.finishTimePrometheusDesc + c <- r.testTimeoutPrometheusDesc + c <- r.testFailurePrometheusDesc +} + +func (r *TestCaseReport) Collect(c chan<- prometheus.Metric) { + c <- prometheus.MustNewConstMetric( + r.startTimePrometheusDesc, + prometheus.CounterValue, + float64(r.Start.Unix()), + r.TestSpec.Name, + r.TestSpec.Environment, + r.TestSpec.Target, + ) + c <- prometheus.MustNewConstMetric( + r.finishTimePrometheusDesc, + prometheus.CounterValue, + float64(r.Finish.Unix()), + r.TestSpec.Name, + r.TestSpec.Environment, + r.TestSpec.Target, + ) + + // Test failures always contain either "unexpected event for job" or "error asserting failure reason". + // TODO(albin): Improve this. + testFailure := 0.0 + if strings.Contains(r.FailureReason, "unexpected event for job") || strings.Contains(r.FailureReason, "error asserting failure reason") { + testFailure = 1.0 + } + c <- prometheus.MustNewConstMetric( + r.testFailurePrometheusDesc, + prometheus.GaugeValue, + testFailure, + r.TestSpec.Name, + r.TestSpec.Environment, + r.TestSpec.Target, + ) + + // We assume that any other failures are due to timeout. + // TODO(albin): Improve this. + testTimeout := 0.0 + if r.FailureReason != "" && testFailure == 0 { + testTimeout = 1.0 + } + c <- prometheus.MustNewConstMetric( + r.testTimeoutPrometheusDesc, + prometheus.GaugeValue, + testTimeout, + r.TestSpec.Name, + r.TestSpec.Environment, + r.TestSpec.Target, + ) } func (report *TestSuiteReport) NumSuccesses() int { @@ -203,9 +327,26 @@ func (a *App) RunTests(ctx context.Context, testSpecs []*api.TestSpec) (*TestSui return nil, err } + // Optionally push metrics. + if a.Params.PrometheusPushGatewayUrl != "" { + if err := pushTestSuiteReportMetrics(rv, a.Params.PrometheusPushGatewayUrl, a.Params.PrometheusPushGatewayJobName); err != nil { + return nil, err + } + } return rv, nil } +func pushTestSuiteReportMetrics(tsr *TestSuiteReport, url, job string) error { + pusher := push.New(url, job) + pusher.Collector(tsr) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := pusher.PushContext(ctx); err != nil { + return errors.WithStack(err) + } + return nil +} + // UnmarshalTestCase unmarshalls bytes into a TestSpec. 
func UnmarshalTestCase(yamlBytes []byte, testSpec *api.TestSpec) error { var result *multierror.Error diff --git a/internal/testsuite/testrunner.go b/internal/testsuite/testrunner.go index 2feed1285c0..3108cc9888d 100644 --- a/internal/testsuite/testrunner.go +++ b/internal/testsuite/testrunner.go @@ -49,11 +49,8 @@ func (report *TestCaseReport) JunitTestCase() junit.Testcase { } func (srv *TestRunner) Run(ctx context.Context) (err error) { - report := &TestCaseReport{ - Out: &bytes.Buffer{}, - Start: time.Now(), - TestSpec: srv.testSpec, - } + report := NewTestCaseReport(srv.testSpec) + report.Out = &bytes.Buffer{} out := io.MultiWriter(srv.Out, report.Out) fmt.Fprintf(out, "test case started %s\n", srv.testSpec.ShortString()) diff --git a/magefiles/ci.go b/magefiles/ci.go index d77529fde7f..c18ec0d47f7 100644 --- a/magefiles/ci.go +++ b/magefiles/ci.go @@ -25,7 +25,6 @@ func createQueue() error { // Build images, spin up a test environment, and run the integration tests against it. func TestSuite() error { - mg.Deps(createQueue) mg.Deps(CheckForArmadaRunning) // Only set these if they have not already been set @@ -52,6 +51,7 @@ func TestSuite() error { // Checks if Armada is ready to accept jobs. func CheckForArmadaRunning() error { + time.Sleep(30 * time.Second) mg.Deps(createQueue) // Set high to take compile time into account diff --git a/magefiles/linting.go b/magefiles/linting.go index bc7094cebff..e301850e93c 100644 --- a/magefiles/linting.go +++ b/magefiles/linting.go @@ -63,7 +63,7 @@ func LintFix() error { } // Linting Check -func CheckLint() error { +func LintCheck() error { mg.Deps(golangciLintCheck) cmd, err := go_TEST_CMD() if err != nil { diff --git a/magefiles/sqlc.go b/magefiles/sqlc.go index 6dffb85252b..806747d2cba 100644 --- a/magefiles/sqlc.go +++ b/magefiles/sqlc.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -const SQLC_VERSION_CONSTRAINT = ">= 1.16.0" +const SQLC_VERSION_CONSTRAINT = ">= v1.22.0" func sqlcBinary() string { return binaryWithExt("sqlc") diff --git a/makefile b/makefile deleted file mode 100644 index f66c2653baf..00000000000 --- a/makefile +++ /dev/null @@ -1,640 +0,0 @@ -# Determine which platform we're on based on the kernel name -platform := $(shell uname -s || echo unknown) -host_arch := $(shell uname -m) -PWD := $(shell pwd) -# Check that all necessary executables are present -# Using 'where' on Windows and 'which' on Unix-like systems, respectively -# We do not check for 'date', since it's a cmdlet on Windows, which do not show up with where -# (:= assignment is necessary to force immediate evaluation of expression) -EXECUTABLES = git docker kubectl -ifeq ($(platform),windows32) - K := $(foreach exec,$(EXECUTABLES),$(if $(shell where $(exec)),some string,$(error "No $(exec) in PATH"))) -else - K := $(foreach exec,$(EXECUTABLES),$(if $(shell which $(exec)),some string,$(error "No $(exec) in PATH"))) -endif - -# Docker buildkit builds work in parallel, lowering build times. Outputs are compatable. -# Ignored on docker <18.09. This may lead to slower builds and different logs in STDOUT, -# but the image output is the same. -# See https://docs.docker.com/develop/develop-images/build_enhancements/ for more info. 
-export DOCKER_BUILDKIT = 1 - -# Get the current date and time (to insert into go build) -# On Windows, we need to use the powershell date command (alias of Get-Date) to get the full date-time string -ifeq ($(platform),unknown) - date := $(unknown) -else ifeq ($(platform),windows32) - date := $(shell powershell -c date || unknown) -else - date := $(shell date || unknown) -endif -BUILD_TIME = $(strip $(date)) # Strip leading/trailing whitespace (added by powershell) - -# GOPATH used for tests, which is mounted into the docker containers running the tests. -# If there's a GOPATH environment variable set, use that. -# Otherwise, if go is available on the host system, get the GOPATH via that. -# Otherwise, use ".go". -DOCKER_GOPATH = ${GOPATH} -ifeq ($(DOCKER_GOPATH),) - DOCKER_GOPATH = $(shell go env GOPATH || echo "") -endif -ifeq ($(DOCKER_GOPATH),) - DOCKER_GOPATH = .go -endif - -ifeq ($(platform),Darwin) - DOCKER_NET = -else - DOCKER_NET = --network=host -endif - -ifeq ($(host_arch),arm64) - PROTO_DOCKERFILE = ./build/proto/Dockerfile.arm64 -else - PROTO_DOCKERFILE = ./build/proto/Dockerfile -endif - -ifeq ($(DOCKER_RUN_AS_USER),) - DOCKER_RUN_AS_USER = -u $(shell id -u):$(shell id -g) -endif -ifeq ($(platform),windows32) - DOCKER_RUN_AS_USER = -endif - -# For reproducibility, run build commands in docker containers with known toolchain versions. -# INTEGRATION_ENABLED=true is needed for the e2e tests. -# -# For NuGet configuration, place a NuGet.Config in the project root directory. -# This file will get mounted into the container and used to configure NuGet. -# -# For npm, set the npm_config_disturl and npm_config_registry environment variables. -# Alternatively, place a .npmrc file in internal/lookout/ui - -# Deal with the fact that GOPATH might refer to multiple entries multiple directories -# For now just take the first one -DOCKER_GOPATH_TOKS := $(subst :, ,$(DOCKER_GOPATH:v%=%)) -DOCKER_GOPATH_DIR = $(word 1,$(DOCKER_GOPATH_TOKS)) - -# This is used to generate published artifacts; to raise the golang version we publish and run -# tests-in-docker against, you have to update the tag of the image used here. -GO_CMD = docker run --rm $(DOCKER_RUN_AS_USER) -v ${PWD}:/go/src/armada -w /go/src/armada $(DOCKER_NET) \ - -e GOPROXY -e GOPRIVATE -e GOCACHE=/go/cache -e INTEGRATION_ENABLED=true -e CGO_ENABLED=0 -e GOOS=linux -e GARCH=amd64 \ - -v $(DOCKER_GOPATH_DIR):/go \ - golang:1.20.2-buster - -# Versions of third party API -# Bump if you are updating -GRPC_GATEWAY_VERSION:=@v1.16.0 -GOGO_PROTOBUF_VERSION=@v1.3.2 -K8_APIM_VERSION = @v0.22.4 -K8_API_VERSION = @v0.22.4 - -# Optionally (if the TESTS_IN_DOCKER environment variable is set to true) run tests in docker containers. -# If using WSL, running tests in docker may result in network problems. 
-ifeq ($(TESTS_IN_DOCKER),true) - GO_TEST_CMD = $(GO_CMD) -else - GO_TEST_CMD = -endif - -# Get go version from the local install -# (using subst to change, e.g., 'go version go1.17.2 windows/amd64' to 'go1.17.2 windows/amd64') -GO_VERSION_STRING = $(strip $(subst go version,,$(shell $(GO_CMD) go version))) - -# Get most recent git commit (to insert into go build) -GIT_COMMIT := $(shell git rev-list --abbrev-commit -1 HEAD) - -# The RELEASE_VERSION environment variable is set by circleci (to insert into go build and output filenames) -ifndef RELEASE_VERSION -override RELEASE_VERSION = UNKNOWN_VERSION -endif - -# The RELEASE_TAG environment variable is set by circleci (to insert into go build and output filenames) -ifndef RELEASE_TAG -override RELEASE_TAG = UNKNOWN_TAG -endif - -# The NUGET_API_KEY environment variable is set by circleci (to insert into dotnet nuget push commands) -ifndef NUGET_API_KEY -override NUGET_API_KEY = UNKNOWN_NUGET_API_KEY -endif - -# use bash for running: -export SHELL:=/bin/bash -export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit - -gobuildlinux = go build -ldflags="-s -w" -gobuild = go build - -NODE_DOCKER_IMG := node:16.14-buster -DOTNET_DOCKER_IMG := mcr.microsoft.com/dotnet/sdk:3.1.417-buster - -# By default, the install process trusts standard SSL root CAs only. -# To use your host system's SSL certs on Debian/Ubuntu, AmazonLinux, or MacOS, uncomment the line below -# -# USE_SYSTEM_CERTS := true - -ifdef USE_SYSTEM_CERTS - -DOTNET_CMD = docker run -v ${PWD}:/go/src/armada -w /go/src/armada \ - -v ${PWD}/build/ssl/certs/:/etc/ssl/certs \ - -e SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt \ - ${DOTNET_DOCKER_IMG} - -NODE_CMD = docker run --rm -v ${PWD}:/go/src/armada -w /go/src/armada/internal/lookout/ui \ - -e npm_config_disturl \ - -e npm_config_registry \ - -v build/ssl/certs/:/etc/ssl/certs \ - -e SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt \ - -e npm_config_cafile=/etc/ssl/certs/ca-certificates.crt \ - ${NODE_DOCKER_IMG} - -UNAME_S := $(shell uname -s) -ssl-certs: - mkdir -p build/ssl/certs/ - rm -f build/ssl/certs/ca-certificates.crt - ifneq ("$(wildcard /etc/ssl/certs/ca-certificates.crt)", "") - # Debian-based distros - cp /etc/ssl/certs/ca-certificates.crt build/ssl/certs/ca-certificates.crt - else ifneq ("$(wildcard /etc/ssl/certs/ca-bundle.crt)","") - # AmazonLinux - cp /etc/ssl/certs/ca-bundle.crt build/ssl/certs/ca-certificates.crt - else ifeq ("$(UNAME_S)", "Darwin") - # MacOS - security find-certificate -a -p /System/Library/Keychains/SystemRootCertificates.keychain >> build/ssl/certs/ca-certificates.crt - security find-certificate -a -p /Library/Keychains/System.keychain >> build/ssl/certs/ca-certificates.crt - security find-certificate -a -p ~/Library/Keychains/login.keychain-db >> build/ssl/certs/ca-certificates.crt - else - echo "Don't know where to find root CA certs" - exit 1 - endif - -node-setup: ssl-certs -dotnet-setup: ssl-certs - -else - -DOTNET_CMD = docker run -v ${PWD}:/go/src/armada -w /go/src/armada \ - ${DOTNET_DOCKER_IMG} - -NODE_CMD = docker run --rm -v ${PWD}:/go/src/armada -w /go/src/armada/internal/lookout/ui \ - -e npm_config_disturl \ - -e npm_config_registry \ - ${NODE_DOCKER_IMG} - -# no setup necessary for node or dotnet if using default SSL certs -node-setup: -dotnet-setup: -endif - -build-server: - $(GO_CMD) $(gobuild) -o ./bin/server cmd/armada/main.go - -build-executor: - $(GO_CMD) $(gobuild) -o ./bin/executor cmd/executor/main.go - -build-fakeexecutor: - $(GO_CMD) $(gobuild) -o 
./bin/executor cmd/fakeexecutor/main.go - -ARMADACTL_BUILD_PACKAGE := github.com/armadaproject/armada/internal/armadactl/build -define ARMADACTL_LDFLAGS --X '$(ARMADACTL_BUILD_PACKAGE).BuildTime=$(BUILD_TIME)' \ --X '$(ARMADACTL_BUILD_PACKAGE).ReleaseVersion=$(RELEASE_VERSION)' \ --X '$(ARMADACTL_BUILD_PACKAGE).GitCommit=$(GIT_COMMIT)' \ --X '$(ARMADACTL_BUILD_PACKAGE).GoVersion=$(GO_VERSION_STRING)' -endef -build-armadactl: - $(GO_CMD) $(gobuild) -ldflags="$(ARMADACTL_LDFLAGS)" -o ./bin/armadactl cmd/armadactl/main.go - -build-armadactl-multiplatform: - $(GO_CMD) gox -ldflags="$(ARMADACTL_LDFLAGS)" -output="./bin/{{.OS}}-{{.Arch}}/armadactl" -arch="amd64" -os="windows linux darwin" ./cmd/armadactl/ - -build-armadactl-release: build-armadactl-multiplatform - mkdir ./dist || true - tar -czvf ./dist/armadactl-$(RELEASE_VERSION)-linux-amd64.tar.gz -C ./bin/linux-amd64/ armadactl - tar -czvf ./dist/armadactl-$(RELEASE_VERSION)-darwin-amd64.tar.gz -C ./bin/darwin-amd64/ armadactl - zip -j ./dist/armadactl-$(RELEASE_VERSION)-windows-amd64.zip ./bin/windows-amd64/armadactl.exe - -PULSARTEST_BUILD_PACKAGE := github.com/armadaproject/armada/internal/pulsartest/build -define PULSARTEST_LDFLAGS --X '$(PULSARTEST_BUILD_PACKAGE).BuildTime=$(BUILD_TIME)' \ --X '$(PULSARTEST_BUILD_PACKAGE).ReleaseVersion=$(RELEASE_VERSION)' \ --X '$(PULSARTEST_BUILD_PACKAGE).GitCommit=$(GIT_COMMIT)' \ --X '$(PULSARTEST_BUILD_PACKAGE).GoVersion=$(GO_VERSION_STRING)' -endef -build-pulsartest: - $(GO_CMD) $(gobuild) -ldflags="$(PULSARTEST_LDFLAGS)" -o ./bin/pulsartest cmd/pulsartest/main.go - -TESTSUITE_BUILD_PACKAGE := github.com/armadaproject/armada/internal/testsuite/build -define TESTSUITE_LDFLAGS --X '$(TESTSUITE_BUILD_PACKAGE).BuildTime=$(BUILD_TIME)' \ --X '$(TESTSUITE_BUILD_PACKAGE).ReleaseVersion=$(RELEASE_VERSION)' \ --X '$(TESTSUITE_BUILD_PACKAGE).GitCommit=$(GIT_COMMIT)' \ --X '$(TESTSUITE_BUILD_PACKAGE).GoVersion=$(GO_VERSION_STRING)' -endef -build-testsuite: - $(GO_CMD) $(gobuild) -ldflags="$(TESTSUITE_LDFLAGS)" -o ./bin/testsuite cmd/testsuite/main.go - -build-binoculars: - $(GO_CMD) $(gobuild) -o ./bin/binoculars cmd/binoculars/main.go - -build-load-tester: - $(GO_CMD) $(gobuild) -o ./bin/armada-load-tester cmd/armada-load-tester/main.go - -build-lookout-ingester: - $(GO_CMD) $(gobuild) -o ./bin/lookoutingester cmd/lookoutingester/main.go - -build-event-ingester: - $(GO_CMD) $(gobuild) -o ./bin/eventingester cmd/eventingester/main.go - -build-jobservice: - $(GO_CMD) $(gobuild) -o ./bin/jobservice cmd/jobservice/main.go - -build-lookout: - $(GO_CMD) $(gobuild) -o ./bin/lookout cmd/lookout/main.go - -build-lookoutv2: - $(GO_CMD) $(gobuild) -o ./bin/lookoutv2 cmd/lookoutv2/main.go - -build-lookoutingesterv2: - $(GO_CMD) $(gobuild) -o ./bin/lookoutingesterv2 cmd/lookoutingesterv2/main.go - -build-scheduler: - $(GO_CMD) $(gobuild) -o ./bin/scheduler cmd/scheduler/main.go - -build-scheduler-ingester: - $(GO_CMD) $(gobuild) -o ./bin/scheduleringester cmd/scheduleringester/main.go - -build: build-lookoutingesterv2 build-lookoutv2 build-lookout build-jobservice build-server build-executor build-fakeexecutor build-armadactl build-load-tester build-testsuite build-binoculars build-lookout-ingester build-event-ingester build-scheduler build-scheduler-ingester - -build-docker-server: - mkdir -p .build/server - $(GO_CMD) $(gobuildlinux) -o ./.build/server/server cmd/armada/main.go - cp -a ./config/armada ./.build/server/config - docker buildx build -o type=docker $(dockerFlags) -t armada -f ./build/armada/Dockerfile 
./.build/server/ - -build-docker-executor: - mkdir -p .build/executor - $(GO_CMD) $(gobuildlinux) -o ./.build/executor/executor cmd/executor/main.go - cp -a ./config/executor ./.build/executor/config - docker buildx build -o type=docker $(dockerFlags) -t armada-executor -f ./build/executor/Dockerfile ./.build/executor - -build-docker-armada-load-tester: - mkdir -p .build/armada-load-tester - $(GO_CMD) $(gobuildlinux) -o ./.build/armada-load-tester/armada-load-tester cmd/armada-load-tester/main.go - docker buildx build -o type=docker $(dockerFlags) -t armada-load-tester -f ./build/armada-load-tester/Dockerfile ./.build/armada-load-tester - -build-docker-testsuite: - mkdir -p .build/testsuite - $(GO_CMD) $(gobuildlinux) -ldflags="$(TESTSUITE_LDFLAGS)" -o ./.build/testsuite/testsuite cmd/testsuite/main.go - docker buildx build -o type=docker $(dockerFlags) -t testsuite -f ./build/testsuite/Dockerfile ./.build/testsuite - -build-docker-armadactl: - mkdir -p .build/armadactl - $(GO_CMD) $(gobuildlinux) -ldflags="$(ARMADACTL_LDFLAGS)" -o ./.build/armadactl/armadactl cmd/armadactl/main.go - docker buildx build -o type=docker $(dockerFlags) -t armadactl -f ./build/armadactl/Dockerfile ./.build/armadactl - -build-docker-fakeexecutor: - mkdir -p .build/fakeexecutor - $(GO_CMD) $(gobuildlinux) -o ./.build/fakeexecutor/fakeexecutor cmd/fakeexecutor/main.go - cp -a ./config/executor ./.build/fakeexecutor/config - docker buildx build -o type=docker $(dockerFlags) -t armada-fakeexecutor -f ./build/fakeexecutor/Dockerfile ./.build/fakeexecutor - -build-docker-lookout-ingester: - mkdir -p .build/lookoutingester - $(GO_CMD) $(gobuildlinux) -o ./.build/lookoutingester/lookoutingester cmd/lookoutingester/main.go - cp -a ./config/lookoutingester ./.build/lookoutingester/config - docker buildx build -o type=docker $(dockerFlags) -t armada-lookout-ingester -f ./build/lookoutingester/Dockerfile ./.build/lookoutingester - -build-docker-lookout-ingester-v2: - mkdir -p .build/lookoutingesterv2 - $(GO_CMD) $(gobuildlinux) -o ./.build/lookoutingesterv2/lookoutingesterv2 cmd/lookoutingesterv2/main.go - cp -a ./config/lookoutingesterv2 ./.build/lookoutingesterv2/config - docker buildx build -o type=docker $(dockerFlags) -t armada-lookout-ingester-v2 -f ./build/lookoutingesterv2/Dockerfile ./.build/lookoutingesterv2 - -build-docker-event-ingester: - mkdir -p .build/eventingester - $(GO_CMD) $(gobuildlinux) -o ./.build/eventingester/eventingester cmd/eventingester/main.go - cp -a ./config/eventingester ./.build/eventingester/config - docker buildx build -o type=docker $(dockerFlags) -t armada-event-ingester -f ./build/eventingester/Dockerfile ./.build/eventingester - -build-docker-lookout: node-setup - $(NODE_CMD) yarn install --immutable - # The following line is equivalent to running "yarn run openapi". - # We use this instead of "yarn run openapi" since if NODE_CMD is set to run npm in docker, - # "yarn run openapi" would result in running a docker container in docker. - docker run --rm $(DOCKER_RUN_AS_USER) -v ${PWD}:/project openapitools/openapi-generator-cli:v5.4.0 /project/internal/lookout/ui/openapi.sh - $(NODE_CMD) yarn run build - $(GO_CMD) $(gobuildlinux) -o ./bin/linux/lookout cmd/lookout/main.go - docker buildx build -o type=docker $(dockerFlags) -t armada-lookout -f ./build/lookout/Dockerfile . 
- -build-docker-lookout-v2: - mkdir -p .build/lookoutv2 - $(GO_CMD) $(gobuildlinux) -o ./.build/lookoutv2/lookoutv2 cmd/lookoutv2/main.go - cp -a ./config/lookoutv2 ./.build/lookoutv2/config - docker buildx build -o type=docker $(dockerFlags) -t armada-lookout-v2 -f ./build/lookoutv2/Dockerfile ./.build/lookoutv2 - -build-docker-binoculars: - mkdir -p .build/binoculars - $(GO_CMD) $(gobuildlinux) -o ./.build/binoculars/binoculars cmd/binoculars/main.go - cp -a ./config/binoculars ./.build/binoculars/config - docker buildx build -o type=docker $(dockerFlags) -t armada-binoculars -f ./build/binoculars/Dockerfile ./.build/binoculars - -build-docker-jobservice: - mkdir -p .build/jobservice - $(GO_CMD) $(gobuildlinux) -o ./.build/jobservice/jobservice cmd/jobservice/main.go - cp -a ./config/jobservice ./.build/jobservice/config - docker buildx build -o type=docker $(dockerFlags) -t armada-jobservice -f ./build/jobservice/Dockerfile ./.build/jobservice - -build-docker-scheduler: - mkdir -p .build/scheduler - $(GO_CMD) $(gobuildlinux) -o ./.build/scheduler/scheduler cmd/scheduler/main.go - cp -a ./config/scheduler ./.build/scheduler/config - docker buildx build -o type=docker $(dockerFlags) -t armada-scheduler -f ./build/scheduler/Dockerfile ./.build/scheduler - -build-docker-scheduler-ingester: - mkdir -p .build/scheduleringester - $(GO_CMD) $(gobuildlinux) -o ./.build/scheduleringester/scheduleringester cmd/scheduleringester/main.go - cp -a ./config/scheduleringester ./.build/scheduleringester/config - docker buildx build -o type=docker $(dockerFlags) -t armada-scheduler-ingester -f ./build/scheduleringester/Dockerfile ./.build/scheduleringester - -build-docker-full-bundle: build - cp -a ./bin/server ./server - cp -a ./bin/executor ./executor - cp -a ./bin/lookoutingester ./lookoutingester - cp -a ./bin/lookoutingesterv2 ./lookoutingesterv2 - cp -a ./bin/scheduler ./scheduler - cp -a ./bin/scheduleringester ./scheduleringester - cp -a ./bin/eventingester ./eventingester - cp -a ./bin/binoculars ./binoculars - cp -a ./bin/jobservice ./jobservice - cp -a ./bin/lookout ./lookout - cp -a ./bin/lookoutv2 ./lookoutv2 - cp -a ./bin/armadactl ./armadactl - - docker buildx build -o type=docker $(dockerFlags) -t armada-full-bundle -f ./build_goreleaser/bundles/full/Dockerfile . - -build-docker: build-docker-no-lookout build-docker-lookout build-docker-lookout-v2 - -# Build target without lookout (to avoid needing to load npm packages from the Internet). -# We still build lookout-ingester since that go code that is isolated from lookout itself. -build-docker-no-lookout: build-docker-server build-docker-executor build-docker-armadactl build-docker-testsuite build-docker-armada-load-tester build-docker-fakeexecutor build-docker-binoculars build-docker-lookout-ingester build-docker-lookout-ingester-v2 build-docker-event-ingester build-docker-jobservice build-docker-scheduler build-docker-scheduler-ingester - -build-ci: gobuild=$(gobuildlinux) -build-ci: build-docker build-armadactl build-armadactl-multiplatform build-load-tester build-testsuite - -.ONESHELL: -tests-teardown: - docker rm -f redis postgres || true - -.ONESHELL: -tests-no-setup: gotestsum - $(GOTESTSUM) -- -v ./internal... 2>&1 | tee test_reports/internal.txt - $(GOTESTSUM) -- -v ./pkg... 2>&1 | tee test_reports/pkg.txt - $(GOTESTSUM) -- -v ./cmd... 
2>&1 | tee test_reports/cmd.txt - - -# Note that we do separate Job Service repository test runs for both sqlite and postgres database types -.ONESHELL: -tests: gotestsum - mkdir -p test_reports - docker run -d --name=redis $(DOCKER_NET) -p=6379:6379 redis:6.2.6 - docker run -d --name=postgres $(DOCKER_NET) -p 5432:5432 -e POSTGRES_PASSWORD=psw postgres:14.2 - sleep 3 - function tearDown { docker rm -f redis postgres; }; trap tearDown EXIT - $(GOTESTSUM) --format short-verbose --junitfile test-reports/unit-tests.xml --jsonfile test-reports/unit-tests.json -- -coverprofile=test-reports/coverage.out -covermode=atomic ./cmd/... ./pkg/... $(go list ./internal/... | grep -v 'jobservice/repository') - -.ONESHELL: -lint-fix: - $(GO_TEST_CMD) golangci-lint run --fix --timeout 10m - -.ONESHELL: -lint: - $(GO_TEST_CMD) golangci-lint run --timeout 10m - -.ONESHELL: -code-checks: lint - -# Rebuild and restart the server. -.ONESHELL: -rebuild-server: build-docker-server - docker rm -f server || true - docker run -d --name server --network=kind -p=50051:50051 -p 8080:8080 -v ${PWD}/e2e:/e2e \ - armada ./server --config /e2e/setup/insecure-armada-auth-config.yaml --config /e2e/setup/redis/armada-config.yaml --config /e2e/setup/pulsar/armada-config.yaml --config /e2e/setup/server/armada-config.yaml - -# Rebuild and restart the executor. -.ONESHELL: -rebuild-executor: build-docker-executor - docker rm -f executor || true - docker run -d --name executor --network=kind -v ${PWD}/.kube:/.kube -v ${PWD}/e2e:/e2e \ - -e KUBECONFIG=/.kube/config \ - -e ARMADA_KUBERNETES_IMPERSONATEUSERS=true \ - -e ARMADA_KUBERNETES_STUCKPODEXPIRY=15s \ - -e ARMADA_APICONNECTION_ARMADAURL="server:50051" \ - -e ARMADA_APICONNECTION_FORCENOTLS=true \ - armada-executor --config /e2e/setup/insecure-executor-config.yaml - -.ONESHELL: -tests-e2e-teardown: - docker rm -f redis pulsar server executor postgres lookout-ingester-migrate lookout-ingester jobservice event-ingester || true - kind delete cluster --name armada-test || true - rm .kube/config || true - rmdir .kube || true - -.ONESHELL: -setup-cluster: - kind create cluster --config e2e/setup/kind.yaml - - # Load images necessary for tests. - docker pull "alpine:3.18.3" # used for e2e tests - docker pull "nginx:1.21.6" # used for e2e tests (ingress) - docker pull "registry.k8s.io/ingress-nginx/controller:v1.4.0" - docker pull "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343" - kind load docker-image "alpine:3.18.3" --name armada-test - kind load docker-image "nginx:1.21.6" --name armada-test - kind load docker-image "registry.k8s.io/ingress-nginx/controller:v1.4.0" --name armada-test - kind load docker-image "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343" --name armada-test - - # Ingress controller needed for cluster ingress. - kubectl apply -f e2e/setup/ingress-nginx.yaml --context kind-armada-test - - # Priority classes. 
- kubectl apply -f e2e/setup/priorityclasses.yaml --context kind-armada-test - - # Wait until the ingress controller is ready - echo "Waiting for ingress controller to become ready" - sleep 10 # calling wait immediately can result in "no matching resources found" - kubectl wait --namespace ingress-nginx \ - --for=condition=ready pod \ - --selector=app.kubernetes.io/component=controller \ - --timeout=90s \ - --context kind-armada-test - - mkdir -p .kube - kind get kubeconfig --internal --name armada-test > .kube/config - -tests-e2e-setup: setup-cluster - docker run --rm -v ${PWD}:/go/src/armada -w /go/src/armada -e KUBECONFIG=/go/src/armada/.kube/config --network kind bitnami/kubectl:1.24.8 apply -f ./e2e/setup/namespace-with-anonymous-user.yaml - - # Armada dependencies. - docker run -d --name pulsar -p 0.0.0.0:6650:6650 --network=kind apachepulsar/pulsar:2.9.2 bin/pulsar standalone - docker run -d --name redis -p=6379:6379 --network=kind redis:6.2.6 - docker run -d --name postgres --network=kind -p 5432:5432 -e POSTGRES_PASSWORD=psw postgres:14.2 - - sleep 60 # give dependencies time to start up - docker run -d --name server --network=kind -p=50051:50051 -p 8080:8080 -v ${PWD}/e2e:/e2e \ - armada ./server --config /e2e/setup/insecure-armada-auth-config.yaml --config /e2e/setup/redis/armada-config.yaml --config /e2e/setup/pulsar/armada-config.yaml --config /e2e/setup/server/armada-config.yaml - docker run -d --name executor --network=kind -v ${PWD}/.kube:/.kube -v ${PWD}/e2e:/e2e \ - -e KUBECONFIG=/.kube/config \ - -e ARMADA_KUBERNETES_IMPERSONATEUSERS=true \ - -e ARMADA_KUBERNETES_STUCKPODEXPIRY=15s \ - -e ARMADA_APICONNECTION_ARMADAURL="server:50051" \ - -e ARMADA_APICONNECTION_FORCENOTLS=true \ - armada-executor --config /e2e/setup/insecure-executor-config.yaml - docker run -d --name lookout-ingester-migrate --network=kind -v ${PWD}/e2e:/e2e \ - armada-lookout-ingester --config /e2e/setup/lookout-ingester-config.yaml --migrateDatabase - docker run -d --name lookout-ingester --network=kind -v ${PWD}/e2e:/e2e \ - armada-lookout-ingester --config /e2e/setup/lookout-ingester-config.yaml - docker run -d --name event-ingester --network=kind -v ${PWD}/e2e:/e2e armada-event-ingester - docker run -d --name jobservice --network=kind -v ${PWD}/e2e:/e2e \ - armada-jobservice run --config /e2e/setup/jobservice.yaml - - # Create test queue if it doesn't already exist - $(GO_CMD) go run cmd/armadactl/main.go create queue e2e-test-queue || true - $(GO_CMD) go run cmd/armadactl/main.go create queue queue-a || true - $(GO_CMD) go run cmd/armadactl/main.go create queue queue-b || true - - # Logs to diagonose problems - docker logs executor - docker logs server -.ONESHELL: -tests-e2e-no-setup: gotestsum - function printApplicationLogs { - echo -e "\nexecutor logs:" - docker logs executor - echo -e "\nserver logs:" - docker logs server - } - trap printApplicationLogs exit - mkdir -p test_reports - $(GOTESTSUM) -- -v ./e2e/armadactl_test/... -count=1 2>&1 | tee test_reports/e2e_armadactl.txt - $(GOTESTSUM) -- -v ./e2e/pulsar_test/... -count=1 2>&1 | tee test_reports/e2e_pulsar.txt - $(GOTESTSUM) -- -v ./e2e/pulsartest_client/... -count=1 2>&1 | tee test_reports/e2e_pulsartest_client.txt - $(GOTESTSUM) -- -v ./e2e/lookout_ingester_test/... 
-count=1 2>&1 | tee test_reports/e2e_lookout_ingester.txt - # $(DOTNET_CMD) dotnet test client/DotNet/Armada.Client.Test/Armada.Client.Test.csproj - -.ONESHELL: -tests-e2e: build-armadactl build-docker-no-lookout tests-e2e-setup gotestsum - function teardown { - echo -e "\nexecutor logs:" - docker logs executor - echo -e "\nserver logs:" - docker logs server - docker rm -f redis pulsar server executor postgres lookout-ingester-migrate lookout-ingester event-ingester jobservice - kind delete cluster --name armada-test - rm .kube/config - rmdir .kube - } - mkdir -p test_reports - trap teardown exit - sleep 10 - echo -e "\nrunning tests:" - $(GOTESTSUM) -- -v ./e2e/armadactl_test/... -count=1 2>&1 | tee test_reports/e2e_armadactl.txt - $(GOTESTSUM) -- -v ./e2e/basic_test/... -count=1 2>&1 | tee test_reports/e2e_basic.txt - $(GOTESTSUM) -- -v ./e2e/pulsar_test/... -count=1 2>&1 | tee test_reports/e2e_pulsar.txt - $(GOTESTSUM) -- -v ./e2e/pulsartest_client/... -count=1 2>&1 | tee test_reports/e2e_pulsartest_client.txt - $(GOTESTSUM) -- -v ./e2e/lookout_ingester_test/... -count=1 2>&1 | tee test_reports/e2e_lookout_ingester.txt - - # $(DOTNET_CMD) dotnet test client/DotNet/Armada.Client.Test/Armada.Client.Test.csproj -.ONESHELL: -tests-e2e-python: python - docker run -v${PWD}/client/python:/code --workdir /code -e ARMADA_SERVER=server -e ARMADA_PORT=50051 --entrypoint python3 --network=kind armada-python-client-builder:latest -m pytest -v -s /code/tests/integration/test_no_auth.py - -# To run integration tests with jobservice and such, we can run this command -# For now, let's just have it in rare cases that people need to test. -# You must have an existing cluster working to run this command. -.ONESHELL: -tests-e2e-airflow: airflow-operator build-docker-jobservice - $(GO_CMD) go run cmd/armadactl/main.go create queue queue-a || true - docker rm -f jobservice || true - docker run -d --name jobservice --network=kind --mount 'type=bind,src=${PWD}/e2e,dst=/e2e' armada-jobservice run --config /e2e/setup/jobservice.yaml - docker run -v ${PWD}/e2e:/e2e -v ${PWD}/third_party/airflow:/code --workdir /code -e ARMADA_SERVER=server -e ARMADA_PORT=50051 -e JOB_SERVICE_HOST=jobservice -e JOB_SERVICE_PORT=60003 --entrypoint python3 --network=kind armada-airflow-operator-builder:latest -m pytest -v -s /code/tests/integration/test_airflow_operator_logic.py - docker rm -f jobservice - -# Output test results in Junit format, e.g., to display in Jenkins. -# Relies on go-junit-report -# https://github.com/jstemmer/go-junit-report -junit-report: - mkdir -p test_reports - sync # make sure everything has been synced to disc - rm -f test_reports/junit.xml - $(GO_TEST_CMD) bash -c "cat test_reports/*.txt | go-junit-report > test_reports/junit.xml" - -python: proto-setup - docker buildx build -o type=docker $(dockerFlags) -t armada-python-client-builder -f ./build/python-client/Dockerfile . - docker run --rm -v ${PWD}/proto:/proto -v ${PWD}:/go/src/armada -w /go/src/armada armada-python-client-builder ./scripts/build-python-client.sh - -airflow-operator: - rm -rf proto-airflow - mkdir -p proto-airflow - - docker buildx build -o type=docker $(dockerFlags) -t armada-airflow-operator-builder -f ./build/airflow-operator/Dockerfile . 
- docker run --rm -v ${PWD}/proto-airflow:/proto-airflow -v ${PWD}:/go/src/armada -w /go/src/armada armada-airflow-operator-builder ./scripts/build-airflow-operator.sh - -proto-setup: - go run github.com/magefile/mage@v1.14.0 BootstrapProto - -proto: - go run github.com/magefile/mage@v1.14.0 proto - -sql: - $(GO_TEST_CMD) sqlc generate -f internal/scheduler/sql/sql.yaml - $(GO_TEST_CMD) templify -e -p=sql internal/scheduler/sql/schema.sql - -# Target for compiling the dotnet Armada REST client -dotnet: dotnet-setup proto-setup - $(DOTNET_CMD) dotnet build ./client/DotNet/Armada.Client /t:NSwag - $(DOTNET_CMD) dotnet build ./client/DotNet/ArmadaProject.Io.Client - -# Pack and push dotnet clients to nuget. Requires RELEASE_TAG and NUGET_API_KEY env vars to be set -push-nuget: dotnet-setup proto - $(DOTNET_CMD) dotnet pack client/DotNet/Armada.Client/Armada.Client.csproj -c Release -p:PackageVersion=${RELEASE_TAG} -o ./bin/client/DotNet - $(DOTNET_CMD) dotnet nuget push ./bin/client/DotNet/G-Research.Armada.Client.${RELEASE_TAG}.nupkg -k ${NUGET_API_KEY} -s https://api.nuget.org/v3/index.json - $(DOTNET_CMD) dotnet pack client/DotNet/ArmadaProject.Io.Client/ArmadaProject.Io.Client.csproj -c Release -p:PackageVersion=${RELEASE_TAG} -o ./bin/client/DotNet - $(DOTNET_CMD) dotnet nuget push ./bin/client/DotNet/ArmadaProject.Io.Client.${RELEASE_TAG}.nupkg -k ${NUGET_API_KEY} -s https://api.nuget.org/v3/index.json - -# Download all dependencies and install tools listed in tools.yaml -download: - go run github.com/magefile/mage@v1.14.0 BootstrapTools - $(GO_TEST_CMD) go mod download - $(GO_TEST_CMD) go mod tidy - -generate: - $(GO_CMD) go run github.com/rakyll/statik \ - -dest=internal/lookout/repository/schema/ -src=internal/lookout/repository/schema/ -include=\*.sql -ns=lookout/sql -Z -f -m && \ - go run golang.org/x/tools/cmd/goimports -w -local "github.com/armadaproject/armada" internal/lookout/repository/schema/statik - - go generate ./... - -helm-docs: - ./scripts/helm-docs.sh - -LOCALBIN ?= $(PWD)/bin -$(LOCALBIN): - mkdir -p $(LOCALBIN) - -GOTESTSUM ?= $(LOCALBIN)/gotestsum - -.PHONY: gotestsum -gotestsum: $(GOTESTSUM)## Download gotestsum locally if necessary. -$(GOTESTSUM): $(LOCALBIN) - test -s $(LOCALBIN)/gotestsum || GOBIN=$(LOCALBIN) go install gotest.tools/gotestsum@v1.8.2 - -populate-lookout-test: - if [ "$$(docker ps -q -f name=postgres)" ]; then \ - docker stop postgres; \ - docker rm postgres; \ - fi - docker run -d --name=postgres $(DOCKER_NET) -p 5432:5432 -e POSTGRES_PASSWORD=psw postgres:14.2 - sleep 5 - go test -v ${PWD}/internal/lookout/db-gen/ diff --git a/pkg/api/api.swagger.go b/pkg/api/api.swagger.go index 8c05fc4ee76..c7f7574eadc 100644 --- a/pkg/api/api.swagger.go +++ b/pkg/api/api.swagger.go @@ -710,6 +710,11 @@ func SwaggerJsonTemplate() string { " \"type\": \"string\"\n" + " }\n" + " },\n" + + " \"queueTtlSeconds\": {\n" + + " \"description\": \"Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime.\",\n" + + " \"type\": \"string\",\n" + + " \"format\": \"int64\"\n" + + " },\n" + " \"requiredNodeLabels\": {\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + @@ -1355,6 +1360,11 @@ func SwaggerJsonTemplate() string { " \"type\": \"number\",\n" + " \"format\": \"double\"\n" + " },\n" + + " \"queueTtlSeconds\": {\n" + + " \"description\": \"Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. 
Zero indicates an infinite lifetime.\",\n" + + " \"type\": \"string\",\n" + + " \"format\": \"int64\"\n" + + " },\n" + " \"requiredNodeLabels\": {\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + diff --git a/pkg/api/api.swagger.json b/pkg/api/api.swagger.json index 5b02e7c5baa..2fc4e172a61 100644 --- a/pkg/api/api.swagger.json +++ b/pkg/api/api.swagger.json @@ -699,6 +699,11 @@ "type": "string" } }, + "queueTtlSeconds": { + "description": "Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime.", + "type": "string", + "format": "int64" + }, "requiredNodeLabels": { "type": "object", "additionalProperties": { @@ -1344,6 +1349,11 @@ "type": "number", "format": "double" }, + "queueTtlSeconds": { + "description": "Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime.", + "type": "string", + "format": "int64" + }, "requiredNodeLabels": { "type": "object", "additionalProperties": { diff --git a/pkg/api/lookout/api.swagger.go b/pkg/api/lookout/api.swagger.go index b17782a69f7..837e9d52ab3 100644 --- a/pkg/api/lookout/api.swagger.go +++ b/pkg/api/lookout/api.swagger.go @@ -227,6 +227,11 @@ func SwaggerJsonTemplate() string { " \"type\": \"string\"\n" + " }\n" + " },\n" + + " \"queueTtlSeconds\": {\n" + + " \"description\": \"Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime.\",\n" + + " \"type\": \"string\",\n" + + " \"format\": \"int64\"\n" + + " },\n" + " \"requiredNodeLabels\": {\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + diff --git a/pkg/api/lookout/api.swagger.json b/pkg/api/lookout/api.swagger.json index dc70d5c1dc1..63de36899fa 100644 --- a/pkg/api/lookout/api.swagger.json +++ b/pkg/api/lookout/api.swagger.json @@ -216,6 +216,11 @@ "type": "string" } }, + "queueTtlSeconds": { + "description": "Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime.", + "type": "string", + "format": "int64" + }, "requiredNodeLabels": { "type": "object", "additionalProperties": { diff --git a/pkg/api/queue.pb.go b/pkg/api/queue.pb.go index c983bbc4d90..18301a73ef5 100644 --- a/pkg/api/queue.pb.go +++ b/pkg/api/queue.pb.go @@ -79,6 +79,8 @@ type Job struct { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. Scheduler string `protobuf:"bytes,20,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + QueueTtlSeconds int64 `protobuf:"varint,22,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` } func (m *Job) Reset() { *m = Job{} } @@ -262,6 +264,13 @@ func (m *Job) GetScheduler() string { return "" } +func (m *Job) GetQueueTtlSeconds() int64 { + if m != nil { + return m.QueueTtlSeconds + } + return 0 +} + // For the bidirectional streaming job lease request service. // For the first message, populate all fields except SubmittedJobs, which should be empty. // For subsequent messages, these fields may be left empty, in which case the last non-zero value received is used. 
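The hunk above adds Job.QueueTtlSeconds together with its generated getter. A minimal, hypothetical sketch of how calling code could consume the new accessor; only GetQueueTtlSeconds and the zero-means-infinite convention come from this patch, while the helper, the import path and the example values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/armadaproject/armada/pkg/api"
)

// queueTTL interprets Job.QueueTtlSeconds as a Go duration.
// A zero value means the job may queue indefinitely, per the field description.
func queueTTL(job *api.Job) (time.Duration, bool) {
	if secs := job.GetQueueTtlSeconds(); secs > 0 {
		return time.Duration(secs) * time.Second, true
	}
	return 0, false
}

func main() {
	job := &api.Job{QueueTtlSeconds: 300}
	if ttl, ok := queueTTL(job); ok {
		fmt.Printf("cancel if still queued after %s\n", ttl)
	}
}
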
@@ -1291,161 +1300,163 @@ func init() { func init() { proto.RegisterFile("pkg/api/queue.proto", fileDescriptor_d92c0c680df9617a) } var fileDescriptor_d92c0c680df9617a = []byte{ - // 2453 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xf7, 0xea, 0x83, 0xa2, 0x1e, 0xf5, 0x39, 0xfa, 0x5a, 0x53, 0x0e, 0xc9, 0xd0, 0xa8, 0xa3, - 0xb4, 0x36, 0x15, 0x2b, 0x4e, 0xe1, 0xf6, 0xd0, 0x40, 0xb4, 0xdd, 0x54, 0x8e, 0x13, 0x3b, 0x2b, - 0x25, 0x40, 0x83, 0x00, 0xeb, 0x25, 0x77, 0x4c, 0x8f, 0x44, 0xee, 0x6c, 0xf6, 0x43, 0x06, 0x7d, - 0x2a, 0xd2, 0x16, 0x28, 0x8a, 0x1e, 0x72, 0x08, 0xd0, 0x26, 0x40, 0xd1, 0x63, 0x81, 0x02, 0xfd, - 0x17, 0x7a, 0xce, 0x31, 0xb7, 0xe6, 0x52, 0xb6, 0xb5, 0x2f, 0x05, 0x8f, 0x3d, 0xf6, 0x50, 0x14, - 0xf3, 0xb1, 0xcb, 0xd9, 0xe5, 0x52, 0x52, 0x6a, 0xd9, 0xd0, 0x21, 0x27, 0x69, 0xde, 0x7b, 0xf3, - 0xde, 0x9b, 0x37, 0x6f, 0x7e, 0xef, 0xcd, 0x2c, 0x61, 0xc9, 0x3d, 0x68, 0x6d, 0x5a, 0x2e, 0xd9, - 0xfc, 0x38, 0xc4, 0x21, 0xae, 0xb9, 0x1e, 0x0d, 0x28, 0x1a, 0xb7, 0x5c, 0x52, 0x2c, 0xb7, 0x28, - 0x6d, 0xb5, 0xf1, 0x26, 0x27, 0x35, 0xc2, 0x07, 0x9b, 0x01, 0xe9, 0x60, 0x3f, 0xb0, 0x3a, 0xae, - 0x90, 0x2a, 0x56, 0x0f, 0xae, 0xfb, 0x35, 0x42, 0xf9, 0xec, 0x26, 0xf5, 0xf0, 0xe6, 0xe1, 0xd5, - 0xcd, 0x16, 0x76, 0xb0, 0x67, 0x05, 0xd8, 0x96, 0x32, 0x1b, 0x8a, 0x8c, 0x83, 0x83, 0x47, 0xd4, - 0x3b, 0x20, 0x4e, 0x2b, 0x4b, 0xf2, 0xda, 0x40, 0xb2, 0x63, 0x35, 0x1f, 0x12, 0x07, 0x7b, 0xdd, - 0xcd, 0xc8, 0x39, 0x0f, 0xfb, 0x34, 0xf4, 0x9a, 0x78, 0x68, 0xd6, 0x95, 0x16, 0x09, 0x1e, 0x86, - 0x8d, 0x5a, 0x93, 0x76, 0x36, 0x5b, 0xb4, 0x45, 0x07, 0xde, 0xb2, 0x11, 0x1f, 0xf0, 0xff, 0xa4, - 0xf8, 0x7a, 0x7a, 0x4d, 0xb8, 0xe3, 0x06, 0x5d, 0xc9, 0x5c, 0x8e, 0xac, 0xf9, 0x61, 0xa3, 0x43, - 0x02, 0x41, 0xad, 0x7e, 0x36, 0x0f, 0xe3, 0xb7, 0x69, 0x03, 0x55, 0x60, 0x8c, 0xd8, 0xba, 0x56, - 0xd1, 0x36, 0xa6, 0xeb, 0x0b, 0xfd, 0x5e, 0x79, 0x86, 0xd8, 0x97, 0x69, 0x87, 0x04, 0x5c, 0x83, - 0x31, 0x46, 0x6c, 0xf4, 0x3a, 0x4c, 0x37, 0xdb, 0x04, 0x3b, 0x81, 0x49, 0x6c, 0x7d, 0x96, 0x0b, - 0xae, 0xf6, 0x7b, 0x65, 0x24, 0x88, 0x3b, 0xaa, 0x78, 0x3e, 0xa2, 0xa1, 0x6b, 0x00, 0xfb, 0xb4, - 0x61, 0xfa, 0x98, 0xcf, 0x1a, 0x1b, 0xcc, 0xda, 0xa7, 0x8d, 0x5d, 0x9c, 0x9a, 0x15, 0xd1, 0xd0, - 0xab, 0x30, 0xc9, 0xf7, 0x4b, 0x1f, 0xe7, 0x13, 0x96, 0xfa, 0xbd, 0xf2, 0x3c, 0x27, 0x28, 0xd2, - 0x42, 0x02, 0xbd, 0x01, 0xd3, 0x8e, 0xd5, 0xc1, 0xbe, 0x6b, 0x35, 0xb1, 0x3e, 0xc5, 0xc5, 0xd7, - 0xfa, 0xbd, 0xf2, 0x52, 0x4c, 0x54, 0xa6, 0x0c, 0x24, 0x51, 0x1d, 0x72, 0x6d, 0xab, 0x81, 0xdb, - 0xbe, 0x3e, 0x5d, 0x19, 0xdf, 0x28, 0x6c, 0x2d, 0xd7, 0x2c, 0x97, 0xd4, 0x6e, 0xd3, 0x46, 0xed, - 0x0e, 0x27, 0xdf, 0x72, 0x02, 0xaf, 0x5b, 0x5f, 0xee, 0xf7, 0xca, 0x0b, 0x42, 0x4e, 0x51, 0x23, - 0x67, 0xa2, 0x0f, 0xa0, 0x60, 0x39, 0x0e, 0x0d, 0xac, 0x80, 0x50, 0xc7, 0xd7, 0x81, 0x2b, 0x3a, - 0x1f, 0x2b, 0xda, 0x1e, 0xf0, 0x84, 0xb6, 0xf3, 0xfd, 0x5e, 0x79, 0x45, 0x99, 0xa1, 0xa8, 0x54, - 0x15, 0xa1, 0x43, 0x58, 0xf6, 0xf0, 0xc7, 0x21, 0xf1, 0xb0, 0x6d, 0x3a, 0xd4, 0xc6, 0xa6, 0xf4, - 0xb4, 0xc0, 0x0d, 0x54, 0x62, 0x03, 0x86, 0x14, 0x7a, 0x97, 0xda, 0x58, 0xf5, 0xba, 0xda, 0xef, - 0x95, 0x2f, 0x78, 0x43, 0xcc, 0x81, 0x39, 0x5d, 0x33, 0xd0, 0x30, 0x9f, 0x45, 0x9d, 0x3e, 0x72, - 0xb0, 0xa7, 0xe7, 0x07, 0x51, 0xe7, 0x04, 0x35, 0xea, 0x9c, 0x80, 0x30, 0xac, 0xf3, 0xf0, 0x9b, - 0x7c, 0xe8, 0x3f, 0x24, 0xae, 0x19, 0xfa, 0xd8, 0x33, 0x5b, 0x1e, 0x0d, 0x5d, 0x5f, 0x9f, 0xaf, - 0x8c, 0x6f, 0x4c, 0xd7, 0x2f, 0xf5, 0x7b, 0xe5, 0x2a, 0x17, 0xbb, 0x1b, 0x49, 0xbd, 0xef, 
0x63, - 0xef, 0x2d, 0x2e, 0xa3, 0xe8, 0xd4, 0x47, 0xc9, 0xa0, 0x5f, 0x68, 0x70, 0xa9, 0x49, 0x3b, 0xae, - 0x87, 0x7d, 0x1f, 0xdb, 0xe6, 0x51, 0x26, 0x97, 0x2a, 0xda, 0xc6, 0x4c, 0xfd, 0xb5, 0x7e, 0xaf, - 0x7c, 0x79, 0x30, 0xe3, 0xbd, 0xe3, 0x8d, 0x57, 0x8f, 0x97, 0x46, 0x5b, 0x90, 0x77, 0x3d, 0x42, - 0x3d, 0x12, 0x74, 0xf5, 0x89, 0x8a, 0xb6, 0xa1, 0x89, 0x14, 0x8e, 0x68, 0x6a, 0x0a, 0x47, 0x34, - 0x74, 0x17, 0xf2, 0x2e, 0xb5, 0x4d, 0xdf, 0xc5, 0x4d, 0x7d, 0xb2, 0xa2, 0x6d, 0x14, 0xb6, 0xd6, - 0x6b, 0x02, 0x02, 0xf8, 0xfe, 0x31, 0x40, 0xa9, 0x1d, 0x5e, 0xad, 0xdd, 0xa3, 0xf6, 0xae, 0x8b, - 0x9b, 0x3c, 0x67, 0x17, 0x5d, 0x31, 0x48, 0x6c, 0xd4, 0x94, 0x24, 0xa2, 0x7b, 0x30, 0x1d, 0x29, - 0xf4, 0xf5, 0x19, 0x9e, 0x0a, 0x47, 0x6a, 0x14, 0x2e, 0x8a, 0x81, 0x9f, 0x70, 0x51, 0xd2, 0xd0, - 0x17, 0x1a, 0x54, 0xfc, 0xe6, 0x43, 0x6c, 0x87, 0x6d, 0xe2, 0xb4, 0xcc, 0x08, 0x84, 0x4c, 0x99, - 0x1a, 0x1d, 0xec, 0x04, 0xbe, 0xbe, 0xc2, 0x7d, 0xdf, 0xc8, 0xb2, 0x64, 0xc8, 0x09, 0x86, 0x22, - 0x5f, 0xbf, 0xf4, 0x65, 0xaf, 0x7c, 0xae, 0xdf, 0x2b, 0x97, 0x06, 0x9a, 0xb3, 0xe4, 0x8c, 0x63, - 0xf8, 0x68, 0x07, 0xa6, 0x9a, 0x1e, 0x66, 0x50, 0xa8, 0xe7, 0xb8, 0x0b, 0xc5, 0x9a, 0x00, 0xb7, - 0x5a, 0x04, 0x6e, 0xb5, 0xbd, 0x08, 0xb0, 0xeb, 0x4b, 0xd2, 0x68, 0x34, 0xe5, 0xd3, 0xbf, 0x97, - 0x35, 0x23, 0x1a, 0xa0, 0x1b, 0x30, 0x45, 0x9c, 0x16, 0xdb, 0x63, 0x7d, 0x8e, 0xc7, 0x0d, 0xf1, - 0x65, 0xec, 0x08, 0xda, 0x0d, 0xea, 0x3c, 0x20, 0xad, 0xfa, 0x0a, 0xdb, 0x00, 0x29, 0xa6, 0x44, - 0x2b, 0x9a, 0x89, 0x7e, 0x0c, 0x79, 0x1f, 0x7b, 0x87, 0xa4, 0x89, 0x7d, 0x7d, 0x41, 0xd1, 0xb2, - 0x2b, 0x88, 0x52, 0x0b, 0x0f, 0x7a, 0x24, 0xa7, 0x06, 0x3d, 0xa2, 0xa1, 0x8f, 0xa0, 0x70, 0x70, - 0xdd, 0x37, 0x23, 0x87, 0x16, 0xb9, 0xaa, 0x97, 0xd5, 0xf0, 0x0e, 0xea, 0x08, 0x0b, 0xb2, 0xf4, - 0xb2, 0xae, 0xf7, 0x7b, 0xe5, 0xe5, 0x83, 0xeb, 0xfe, 0xce, 0x90, 0x8b, 0x30, 0xa0, 0x32, 0x48, - 0x62, 0xda, 0xa5, 0x35, 0x1d, 0x8d, 0x4e, 0x13, 0xe9, 0x77, 0xac, 0x57, 0x8e, 0x53, 0x7a, 0x25, - 0x95, 0xa1, 0xac, 0xdc, 0x2f, 0xec, 0xe9, 0xcb, 0x03, 0x94, 0x8d, 0x89, 0x2a, 0xca, 0xc6, 0xc4, - 0xa2, 0x05, 0x05, 0x05, 0x98, 0xd0, 0x45, 0x18, 0x3f, 0xc0, 0x5d, 0x59, 0x64, 0x16, 0xfb, 0xbd, - 0xf2, 0xec, 0x01, 0x56, 0x4f, 0x0f, 0xe3, 0x32, 0x14, 0x3a, 0xb4, 0xda, 0x21, 0x96, 0xc5, 0x82, - 0xa3, 0x10, 0x27, 0xa8, 0x28, 0xc4, 0x09, 0x3f, 0x1c, 0xbb, 0xae, 0x15, 0x1f, 0xc0, 0x42, 0x1a, - 0x68, 0x9f, 0x8b, 0x9d, 0x0e, 0xac, 0x8d, 0xc0, 0xdb, 0xe7, 0x61, 0xae, 0xfa, 0xb7, 0x1c, 0xac, - 0xec, 0x06, 0x1e, 0xb6, 0x3a, 0xc4, 0x69, 0xdd, 0xc1, 0x96, 0xcf, 0x4f, 0x07, 0xf6, 0x03, 0xf4, - 0x7d, 0x80, 0x66, 0x3b, 0xf4, 0x03, 0xec, 0x99, 0x71, 0xc1, 0xe6, 0x7b, 0x21, 0xa9, 0x89, 0x92, - 0x3a, 0x1d, 0x13, 0xd1, 0x25, 0x98, 0x70, 0x29, 0x6d, 0x4b, 0xfb, 0xa8, 0xdf, 0x2b, 0xcf, 0xb1, - 0xb1, 0x22, 0xcc, 0xf9, 0xe8, 0x43, 0x98, 0x8e, 0x90, 0xc0, 0xd7, 0xc7, 0x79, 0x02, 0xbd, 0x2a, - 0x32, 0x3d, 0xcb, 0x9d, 0x18, 0x04, 0x64, 0xed, 0x59, 0x94, 0x27, 0x71, 0xa0, 0xc3, 0x18, 0xfc, - 0x8b, 0x08, 0xac, 0x44, 0xbe, 0xb7, 0x99, 0x12, 0xdb, 0xf4, 0xb0, 0x4b, 0xbd, 0x80, 0xa3, 0x6a, - 0x61, 0x4b, 0xe7, 0x76, 0x6e, 0x08, 0x09, 0x6e, 0xc5, 0x36, 0x38, 0xbf, 0xbe, 0x2e, 0xd5, 0x46, - 0x8b, 0x54, 0x99, 0x46, 0x16, 0x11, 0xb9, 0xb0, 0xd0, 0x21, 0x0e, 0xe9, 0x84, 0x1d, 0x93, 0x37, - 0x20, 0xe4, 0x31, 0xd6, 0x27, 0xf9, 0x6a, 0x6a, 0x47, 0xac, 0xe6, 0x1d, 0x31, 0xe5, 0x36, 0x6d, - 0xec, 0x92, 0xc7, 0x58, 0x2c, 0x69, 0x55, 0xda, 0x9e, 0xeb, 0x24, 0x98, 0x46, 0x6a, 0x8c, 0xb6, - 0x60, 0x92, 0x55, 0x6b, 0x5f, 0xcf, 0x71, 0x33, 0xb3, 0xdc, 0x0c, 0xcb, 0x95, 0x1d, 0xe7, 0x01, - 0xad, 0xcf, 0x4a, 
0x2d, 0x42, 0xc6, 0x10, 0x7f, 0xd0, 0x4d, 0x98, 0x33, 0x70, 0x13, 0x93, 0x43, - 0x6c, 0xdf, 0xa6, 0x8d, 0x1d, 0xdb, 0xd7, 0xa7, 0x78, 0xe9, 0xbc, 0xd0, 0xef, 0x95, 0xf5, 0x24, - 0x47, 0xd9, 0xa8, 0xd4, 0x9c, 0xe2, 0x67, 0x1a, 0x53, 0xa3, 0xee, 0xc3, 0xc9, 0x72, 0xf2, 0xa7, - 0x6a, 0x4e, 0xb2, 0xc0, 0x0c, 0x70, 0x22, 0xee, 0x51, 0x6b, 0xee, 0x41, 0x8b, 0xaf, 0x24, 0xda, - 0xc5, 0xda, 0x7b, 0xa1, 0xe5, 0x04, 0x24, 0xe8, 0x1e, 0x7b, 0x64, 0x3e, 0xd7, 0x60, 0x29, 0x23, - 0xa0, 0x67, 0xc1, 0xb7, 0xea, 0x5f, 0x17, 0x21, 0x1f, 0xed, 0x0d, 0x3b, 0x1a, 0xac, 0x33, 0x94, - 0x1e, 0xf1, 0xa3, 0xc1, 0xc6, 0xea, 0xd1, 0x60, 0x63, 0xb4, 0x0d, 0xb9, 0xc0, 0x22, 0xac, 0x2a, - 0x8e, 0xc9, 0x5e, 0x2f, 0x03, 0x58, 0xf7, 0x98, 0x44, 0x7d, 0x4e, 0x6e, 0xb7, 0x9c, 0x60, 0xc8, - 0xbf, 0xe8, 0xad, 0xb8, 0xef, 0x1c, 0x57, 0xda, 0xc5, 0xc8, 0x93, 0x6f, 0xd0, 0x7c, 0x3e, 0x86, - 0x15, 0xab, 0xdd, 0xa6, 0x4d, 0x2b, 0xb0, 0x1a, 0x6d, 0x6c, 0x0e, 0x8e, 0xec, 0x04, 0xd7, 0xfb, - 0x4a, 0x52, 0xef, 0xf6, 0x40, 0x34, 0x75, 0x60, 0x2f, 0x48, 0x47, 0x97, 0xad, 0x0c, 0x11, 0x23, - 0x93, 0x8a, 0x3c, 0x58, 0xb2, 0x0e, 0x2d, 0xd2, 0x4e, 0x59, 0x16, 0xc7, 0xeb, 0x3b, 0x29, 0xcb, - 0x91, 0x60, 0xca, 0x6e, 0x51, 0xda, 0x45, 0xd6, 0x90, 0x80, 0x91, 0x41, 0x43, 0x0d, 0x98, 0x0f, - 0x68, 0x60, 0xb5, 0x15, 0x7b, 0x39, 0x59, 0x3b, 0x13, 0xf6, 0xf6, 0x98, 0x50, 0xca, 0x56, 0x7c, - 0x82, 0x83, 0x04, 0xd3, 0x48, 0x8d, 0xf9, 0xba, 0xc4, 0x7a, 0x39, 0x32, 0x45, 0x76, 0xa6, 0x32, - 0xd7, 0x15, 0x09, 0x8e, 0x5c, 0xd7, 0x90, 0x80, 0x91, 0x41, 0x43, 0xf7, 0x61, 0xc1, 0x0b, 0x1d, - 0x93, 0xd8, 0xbe, 0xd9, 0xe8, 0x9a, 0x7e, 0x60, 0x05, 0x58, 0xcf, 0x2b, 0x8d, 0x7e, 0x6c, 0xd0, - 0x08, 0x9d, 0x1d, 0xdb, 0xaf, 0x77, 0x77, 0x99, 0x88, 0xb0, 0xb5, 0x22, 0x6d, 0xcd, 0x7a, 0x2a, - 0xcf, 0x48, 0x0e, 0xd1, 0xef, 0x34, 0x28, 0x39, 0xd4, 0x31, 0x2d, 0xaf, 0x63, 0xd9, 0x96, 0x99, - 0xb5, 0xc2, 0x69, 0x05, 0x18, 0x63, 0x83, 0xef, 0x52, 0x67, 0x9b, 0x4f, 0x19, 0xb5, 0xd4, 0x8b, - 0xd2, 0xfc, 0xba, 0x33, 0x5a, 0xd2, 0x38, 0x8a, 0x89, 0xb6, 0x61, 0x36, 0x74, 0x64, 0xbb, 0xc0, - 0xb6, 0x5b, 0x87, 0x8a, 0xb6, 0x91, 0xaf, 0xaf, 0xf7, 0x7b, 0xe5, 0xb5, 0x04, 0x43, 0x39, 0x00, - 0xc9, 0x19, 0xe8, 0x13, 0x0d, 0xd6, 0xe2, 0xce, 0x35, 0xf4, 0xad, 0x16, 0x66, 0x71, 0x14, 0xb7, - 0xc7, 0x42, 0xd6, 0x51, 0x88, 0xac, 0xbf, 0xcf, 0x64, 0xeb, 0x5d, 0xde, 0xf4, 0x0f, 0xee, 0x4d, - 0x25, 0x2f, 0x83, 0xad, 0x58, 0x5f, 0xce, 0xe2, 0xb3, 0xab, 0x31, 0xbf, 0xa8, 0x05, 0x5d, 0x17, - 0xeb, 0x33, 0x83, 0x4b, 0x2e, 0x23, 0xee, 0x75, 0x5d, 0x55, 0x41, 0x3e, 0xa2, 0xbd, 0x88, 0xe6, - 0xe8, 0x0f, 0x1a, 0x9c, 0x1f, 0x79, 0xf4, 0xcf, 0x44, 0x8d, 0xf8, 0xbd, 0x06, 0x6b, 0x23, 0x20, - 0xe2, 0xcc, 0xd4, 0xb0, 0x0c, 0x48, 0x39, 0x13, 0xbe, 0xfd, 0x9c, 0xc5, 0x2e, 0xfb, 0x6c, 0xaa, - 0xfe, 0x4d, 0x8e, 0xf4, 0xef, 0xcd, 0xa4, 0x7f, 0xe2, 0x0d, 0xe4, 0x06, 0xed, 0xb8, 0x61, 0x10, - 0xef, 0xc5, 0xb1, 0x5e, 0x3c, 0x02, 0x34, 0x0c, 0x4d, 0x27, 0x8b, 0xcf, 0x75, 0xd5, 0xfe, 0x9c, - 0xec, 0x98, 0x58, 0xab, 0xc0, 0xf4, 0x1c, 0x6b, 0xf8, 0x37, 0x1a, 0x54, 0x8e, 0xc3, 0xa8, 0x17, - 0x18, 0x87, 0x5f, 0x6a, 0x70, 0x7e, 0x24, 0xb6, 0x9c, 0x2c, 0x1e, 0xa7, 0xe1, 0x47, 0xf5, 0xb7, - 0x13, 0xa2, 0xb3, 0x61, 0x18, 0xa3, 0x74, 0x2c, 0xda, 0xb3, 0x77, 0x2c, 0x63, 0xa9, 0x8e, 0x85, - 0x59, 0x38, 0x8d, 0x8e, 0x65, 0x3c, 0x05, 0xd3, 0x5c, 0xef, 0xa9, 0x76, 0x2c, 0xdf, 0x62, 0x2d, - 0xcb, 0x8c, 0x3f, 0x4f, 0xc0, 0xba, 0xbc, 0x5c, 0xed, 0xc6, 0x8f, 0x2f, 0xac, 0x26, 0xca, 0x2b, - 0xd3, 0xb3, 0xde, 0x2c, 0xa7, 0x8e, 0xb9, 0x59, 0xee, 0x42, 0x41, 0x5c, 0xf7, 0xcc, 0x80, 0x74, - 0xa2, 0x45, 0x1e, 0xf5, 0xac, 0x13, 0xf5, 
0x6d, 0x20, 0xa6, 0x31, 0x06, 0x7f, 0xd9, 0x51, 0xc6, - 0xe8, 0x16, 0x40, 0x5c, 0x7a, 0xa3, 0x16, 0x74, 0x36, 0x91, 0x4a, 0xf2, 0x3d, 0x58, 0x8e, 0xfc, - 0xc4, 0x7b, 0x70, 0x44, 0x44, 0x87, 0x19, 0xd7, 0x45, 0xd1, 0x5f, 0x5e, 0x53, 0x2f, 0xa5, 0x59, - 0x71, 0x7b, 0x96, 0x4b, 0xe3, 0x99, 0xbe, 0x23, 0xfd, 0x7b, 0x02, 0x16, 0x39, 0x84, 0x25, 0x2e, - 0xd6, 0x27, 0xbd, 0x2c, 0x51, 0x58, 0x88, 0x8f, 0xb8, 0xbc, 0xed, 0x4b, 0x04, 0xf9, 0x1e, 0xf7, - 0x67, 0x48, 0xf3, 0xe0, 0x29, 0x41, 0x50, 0x45, 0x20, 0xd7, 0x64, 0x20, 0xe7, 0xbd, 0x24, 0xd7, - 0x48, 0x13, 0xd0, 0xe7, 0x1a, 0x5c, 0x48, 0x5b, 0x64, 0xbd, 0x60, 0xfc, 0x74, 0x2b, 0x70, 0xe6, - 0x8d, 0x93, 0x59, 0xaf, 0x77, 0xef, 0xc9, 0x79, 0xc2, 0x8f, 0x97, 0xa5, 0x1f, 0xe7, 0xbd, 0x51, - 0x72, 0xc6, 0x68, 0x56, 0xf1, 0x0b, 0x0d, 0x96, 0xb3, 0x96, 0x77, 0x26, 0xfa, 0x88, 0x5f, 0x6b, - 0x50, 0x3a, 0x7a, 0xf5, 0x2f, 0xae, 0x8c, 0x56, 0xff, 0xa5, 0xc1, 0x52, 0xc6, 0x0b, 0xd0, 0xff, - 0x0d, 0x4e, 0xcf, 0x05, 0x74, 0x6e, 0x42, 0x8e, 0xdf, 0x30, 0xa2, 0xda, 0xb5, 0x9a, 0x9d, 0x53, - 0xa2, 0x20, 0x0a, 0x49, 0xb5, 0x20, 0x0a, 0x4a, 0xf5, 0xbf, 0x1a, 0xcc, 0xa7, 0xc2, 0x83, 0xf6, - 0xd4, 0xd7, 0x37, 0x51, 0xb3, 0x2f, 0x66, 0xc5, 0xf1, 0x1b, 0xbd, 0xbb, 0x9d, 0xd1, 0x07, 0xa2, - 0xea, 0x5f, 0x34, 0x98, 0x89, 0x1f, 0x53, 0x89, 0xd3, 0x42, 0x6f, 0xa7, 0x5e, 0x47, 0x5e, 0x8a, - 0x81, 0x3c, 0x12, 0x39, 0x79, 0xbf, 0xf1, 0x02, 0x6a, 0x7e, 0xf5, 0x07, 0x90, 0xbf, 0x4d, 0x1b, - 0x7c, 0xcb, 0xd1, 0x15, 0x18, 0xdf, 0xa7, 0x0d, 0xb9, 0x67, 0xf9, 0xa8, 0x95, 0x15, 0x96, 0xf6, - 0x69, 0x43, 0xb5, 0xb4, 0x4f, 0x1b, 0xd5, 0x3f, 0x6a, 0xb0, 0x18, 0xbf, 0x41, 0x0e, 0x2b, 0xd1, - 0x4e, 0xa2, 0x04, 0x6d, 0xc2, 0x94, 0xc3, 0x0b, 0x87, 0xcf, 0x1d, 0x9e, 0x15, 0x5f, 0x31, 0x24, - 0x49, 0xfd, 0x8a, 0x21, 0x49, 0x68, 0x0b, 0xf2, 0x4e, 0xd8, 0xd9, 0x6e, 0x1e, 0x60, 0x9b, 0x7f, - 0x5b, 0x9d, 0x95, 0xf7, 0x54, 0x49, 0x4b, 0xdc, 0x53, 0x25, 0xad, 0x7a, 0x05, 0x72, 0x3b, 0xf6, - 0x1d, 0xe2, 0x07, 0x2c, 0x84, 0xc4, 0x16, 0x69, 0x29, 0x43, 0x48, 0x12, 0xef, 0x92, 0x8c, 0x5b, - 0x75, 0x61, 0xd1, 0xc0, 0x0e, 0x7e, 0x74, 0x2a, 0x8f, 0xd6, 0xd2, 0xe2, 0xd8, 0x91, 0x16, 0x7f, - 0x35, 0x09, 0xc8, 0xc0, 0x41, 0xe8, 0x39, 0xa7, 0x62, 0xf3, 0xbb, 0x90, 0x63, 0x2d, 0x40, 0xfc, - 0xb9, 0x9a, 0x27, 0xc1, 0x3e, 0x6d, 0x24, 0xe4, 0x27, 0x39, 0x01, 0xdd, 0x87, 0x45, 0xeb, 0x90, - 0x92, 0xe4, 0x77, 0x5a, 0xf1, 0x98, 0xbd, 0xc2, 0x77, 0xef, 0xae, 0x67, 0x63, 0x0f, 0xdb, 0xbb, - 0x81, 0x47, 0x9c, 0xd6, 0x3b, 0x96, 0x5b, 0x7f, 0x89, 0xd5, 0x10, 0x3e, 0x27, 0xeb, 0xcb, 0xac, - 0x31, 0x9f, 0x62, 0xa1, 0xcb, 0x90, 0xf3, 0xb0, 0xe5, 0x53, 0x87, 0x7f, 0x45, 0x9c, 0x16, 0x39, - 0x2f, 0x28, 0x6a, 0xce, 0x0b, 0x0a, 0x7a, 0x13, 0x66, 0x0f, 0xc2, 0x06, 0xf6, 0x1c, 0x1c, 0x60, - 0x9f, 0x2d, 0x21, 0xc7, 0x27, 0x15, 0xfb, 0xbd, 0xf2, 0xea, 0x80, 0x91, 0x58, 0xc9, 0x8c, 0x4a, - 0x47, 0x3b, 0xb0, 0xc8, 0x16, 0xef, 0x85, 0x8e, 0x69, 0x05, 0x5c, 0x02, 0xdb, 0xbc, 0xb1, 0xcb, - 0x0b, 0xcf, 0xf7, 0x69, 0xc3, 0x08, 0x9d, 0xed, 0x88, 0xa5, 0x7a, 0x9e, 0x62, 0xa1, 0x4f, 0x34, - 0x58, 0x0a, 0x3c, 0x8b, 0xe5, 0x90, 0xa9, 0x7e, 0x27, 0x17, 0xaf, 0x5b, 0x9b, 0x3c, 0x3c, 0xc3, - 0xdb, 0x56, 0xdb, 0x13, 0x53, 0x86, 0xbe, 0x9e, 0x57, 0xfa, 0xbd, 0xf2, 0x85, 0x60, 0x88, 0xa9, - 0x78, 0x80, 0x86, 0xb9, 0xc5, 0x0e, 0xac, 0x8d, 0x50, 0xf8, 0x5c, 0x00, 0xc1, 0x06, 0x24, 0xb6, - 0xfa, 0x6d, 0xdc, 0xfd, 0x80, 0x51, 0xef, 0x59, 0xc4, 0x3b, 0x6d, 0x4b, 0xd5, 0x8f, 0x60, 0x21, - 0x9d, 0x57, 0xe8, 0x27, 0x30, 0x85, 0x9d, 0xc0, 0x23, 0x71, 0xd9, 0x58, 0x8b, 0x3e, 0x73, 0xa4, - 0xbc, 0x11, 0x18, 0x21, 0x65, 0x55, 0x8c, 0x90, 0xa4, 0xad, 0xff, 
0x68, 0x30, 0xbf, 0xdd, 0x6a, - 0x79, 0xb8, 0xc5, 0xee, 0xd3, 0xe2, 0x81, 0xeb, 0x0e, 0x5f, 0x97, 0xf2, 0xc1, 0x84, 0xa3, 0x49, - 0x71, 0xf4, 0x97, 0x94, 0xe2, 0x6a, 0x92, 0x17, 0x21, 0xdc, 0x86, 0xf6, 0x9a, 0x86, 0xae, 0x02, - 0x0c, 0x20, 0x02, 0xad, 0xca, 0x4c, 0x48, 0x61, 0x46, 0xb1, 0x20, 0xbe, 0xd2, 0x0a, 0xe8, 0xf9, - 0x11, 0x14, 0x94, 0x5c, 0x41, 0x6b, 0x23, 0xb2, 0xa7, 0xb8, 0x3a, 0x54, 0xd9, 0x6f, 0xb1, 0xd5, - 0xa1, 0x4b, 0xcc, 0x24, 0xab, 0xc9, 0x37, 0xa9, 0x83, 0x91, 0xaa, 0x3a, 0x61, 0xa7, 0x7e, 0xff, - 0xeb, 0x7f, 0x96, 0xce, 0xfd, 0xec, 0x49, 0x49, 0xfb, 0xf2, 0x49, 0x49, 0xfb, 0xea, 0x49, 0x49, - 0xfb, 0xc7, 0x93, 0x92, 0xf6, 0xe9, 0xd3, 0xd2, 0xb9, 0xaf, 0x9e, 0x96, 0xce, 0x7d, 0xfd, 0xb4, - 0x74, 0xee, 0xc3, 0x57, 0x94, 0x9f, 0xe4, 0x88, 0x27, 0x55, 0xd7, 0xa3, 0xfb, 0xb8, 0x19, 0xc8, - 0x51, 0xf4, 0xa3, 0x9e, 0x3f, 0x8d, 0x2d, 0x8b, 0xa7, 0x89, 0x7b, 0x82, 0x5d, 0xdb, 0xa1, 0xb5, - 0x6d, 0x97, 0x34, 0x72, 0xdc, 0xb3, 0xd7, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x06, 0x0f, 0x22, - 0x0b, 0x9a, 0x24, 0x00, 0x00, + // 2489 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xc7, + 0xd1, 0xd7, 0xf0, 0xb1, 0x5c, 0x16, 0xdf, 0xcd, 0xd7, 0x70, 0x29, 0xef, 0xae, 0xd7, 0xf8, 0x64, + 0xfa, 0x8b, 0xb4, 0xb4, 0x68, 0x39, 0x50, 0x72, 0x88, 0xc1, 0x95, 0x14, 0x67, 0x65, 0xd9, 0x92, + 0x87, 0xb4, 0x81, 0x18, 0x06, 0x46, 0xb3, 0x3b, 0xad, 0x55, 0x93, 0xbb, 0xd3, 0xe3, 0x79, 0x50, + 0x58, 0x9d, 0x02, 0x27, 0x01, 0x82, 0x20, 0x07, 0x1f, 0x02, 0x24, 0x36, 0x10, 0xe4, 0x18, 0x20, + 0x40, 0xfe, 0x85, 0x9c, 0x7d, 0xf4, 0x2d, 0xbe, 0x64, 0x9d, 0x48, 0x97, 0x60, 0x8f, 0x39, 0xe6, + 0x10, 0x04, 0xfd, 0x98, 0xd9, 0x9e, 0xd9, 0x59, 0x92, 0x8e, 0x28, 0x81, 0x87, 0x9c, 0xc8, 0xae, + 0xaa, 0xae, 0xaa, 0xae, 0xee, 0xfe, 0x55, 0x55, 0xcf, 0xc2, 0xb2, 0x7b, 0xd8, 0xda, 0xb6, 0x5c, + 0xb2, 0xfd, 0x49, 0x88, 0x43, 0x5c, 0x75, 0x3d, 0x1a, 0x50, 0x34, 0x6e, 0xb9, 0xa4, 0x50, 0x6a, + 0x51, 0xda, 0x6a, 0xe3, 0x6d, 0x4e, 0x6a, 0x84, 0x0f, 0xb6, 0x03, 0xd2, 0xc1, 0x7e, 0x60, 0x75, + 0x5c, 0x21, 0x55, 0xa8, 0x1c, 0x5e, 0xf7, 0xab, 0x84, 0xf2, 0xd9, 0x4d, 0xea, 0xe1, 0xed, 0xa3, + 0xab, 0xdb, 0x2d, 0xec, 0x60, 0xcf, 0x0a, 0xb0, 0x2d, 0x65, 0xb6, 0x14, 0x19, 0x07, 0x07, 0x8f, + 0xa8, 0x77, 0x48, 0x9c, 0x56, 0x96, 0xe4, 0xb5, 0x81, 0x64, 0xc7, 0x6a, 0x3e, 0x24, 0x0e, 0xf6, + 0xba, 0xdb, 0x91, 0x73, 0x1e, 0xf6, 0x69, 0xe8, 0x35, 0xf1, 0xd0, 0xac, 0x2b, 0x2d, 0x12, 0x3c, + 0x0c, 0x1b, 0xd5, 0x26, 0xed, 0x6c, 0xb7, 0x68, 0x8b, 0x0e, 0xbc, 0x65, 0x23, 0x3e, 0xe0, 0xff, + 0x49, 0xf1, 0xcd, 0xf4, 0x9a, 0x70, 0xc7, 0x0d, 0xba, 0x92, 0xb9, 0x12, 0x59, 0xf3, 0xc3, 0x46, + 0x87, 0x04, 0x82, 0x5a, 0xf9, 0x66, 0x01, 0xc6, 0x6f, 0xd3, 0x06, 0x2a, 0xc3, 0x18, 0xb1, 0x75, + 0xad, 0xac, 0x6d, 0x4d, 0xd7, 0x16, 0xfb, 0xbd, 0xd2, 0x2c, 0xb1, 0x2f, 0xd3, 0x0e, 0x09, 0xb8, + 0x06, 0x63, 0x8c, 0xd8, 0xe8, 0x0d, 0x98, 0x6e, 0xb6, 0x09, 0x76, 0x02, 0x93, 0xd8, 0xfa, 0x1c, + 0x17, 0x5c, 0xeb, 0xf7, 0x4a, 0x48, 0x10, 0xeb, 0xaa, 0x78, 0x3e, 0xa2, 0xa1, 0x6b, 0x00, 0x07, + 0xb4, 0x61, 0xfa, 0x98, 0xcf, 0x1a, 0x1b, 0xcc, 0x3a, 0xa0, 0x8d, 0x3d, 0x9c, 0x9a, 0x15, 0xd1, + 0xd0, 0x6b, 0x30, 0xc9, 0xf7, 0x4b, 0x1f, 0xe7, 0x13, 0x96, 0xfb, 0xbd, 0xd2, 0x02, 0x27, 0x28, + 0xd2, 0x42, 0x02, 0xbd, 0x09, 0xd3, 0x8e, 0xd5, 0xc1, 0xbe, 0x6b, 0x35, 0xb1, 0x3e, 0xc5, 0xc5, + 0xd7, 0xfb, 0xbd, 0xd2, 0x72, 0x4c, 0x54, 0xa6, 0x0c, 0x24, 0x51, 0x0d, 0x72, 0x6d, 0xab, 0x81, + 0xdb, 0xbe, 0x3e, 0x5d, 0x1e, 0xdf, 0x9a, 0xd9, 0x59, 0xa9, 0x5a, 0x2e, 0xa9, 0xde, 0xa6, 0x8d, + 0xea, 0x1d, 
0x4e, 0xbe, 0xe5, 0x04, 0x5e, 0xb7, 0xb6, 0xd2, 0xef, 0x95, 0x16, 0x85, 0x9c, 0xa2, + 0x46, 0xce, 0x44, 0x1f, 0xc2, 0x8c, 0xe5, 0x38, 0x34, 0xb0, 0x02, 0x42, 0x1d, 0x5f, 0x07, 0xae, + 0x68, 0x23, 0x56, 0xb4, 0x3b, 0xe0, 0x09, 0x6d, 0x1b, 0xfd, 0x5e, 0x69, 0x55, 0x99, 0xa1, 0xa8, + 0x54, 0x15, 0xa1, 0x23, 0x58, 0xf1, 0xf0, 0x27, 0x21, 0xf1, 0xb0, 0x6d, 0x3a, 0xd4, 0xc6, 0xa6, + 0xf4, 0x74, 0x86, 0x1b, 0x28, 0xc7, 0x06, 0x0c, 0x29, 0xf4, 0x1e, 0xb5, 0xb1, 0xea, 0x75, 0xa5, + 0xdf, 0x2b, 0x5d, 0xf4, 0x86, 0x98, 0x03, 0x73, 0xba, 0x66, 0xa0, 0x61, 0x3e, 0x8b, 0x3a, 0x7d, + 0xe4, 0x60, 0x4f, 0xcf, 0x0f, 0xa2, 0xce, 0x09, 0x6a, 0xd4, 0x39, 0x01, 0x61, 0xd8, 0xe4, 0xe1, + 0x37, 0xf9, 0xd0, 0x7f, 0x48, 0x5c, 0x33, 0xf4, 0xb1, 0x67, 0xb6, 0x3c, 0x1a, 0xba, 0xbe, 0xbe, + 0x50, 0x1e, 0xdf, 0x9a, 0xae, 0x5d, 0xea, 0xf7, 0x4a, 0x15, 0x2e, 0x76, 0x37, 0x92, 0xfa, 0xc0, + 0xc7, 0xde, 0xdb, 0x5c, 0x46, 0xd1, 0xa9, 0x8f, 0x92, 0x41, 0x3f, 0xd3, 0xe0, 0x52, 0x93, 0x76, + 0x5c, 0x0f, 0xfb, 0x3e, 0xb6, 0xcd, 0xe3, 0x4c, 0x2e, 0x97, 0xb5, 0xad, 0xd9, 0xda, 0xeb, 0xfd, + 0x5e, 0xe9, 0xf2, 0x60, 0xc6, 0xfb, 0x27, 0x1b, 0xaf, 0x9c, 0x2c, 0x8d, 0x76, 0x20, 0xef, 0x7a, + 0x84, 0x7a, 0x24, 0xe8, 0xea, 0x13, 0x65, 0x6d, 0x4b, 0x13, 0x47, 0x38, 0xa2, 0xa9, 0x47, 0x38, + 0xa2, 0xa1, 0xbb, 0x90, 0x77, 0xa9, 0x6d, 0xfa, 0x2e, 0x6e, 0xea, 0x93, 0x65, 0x6d, 0x6b, 0x66, + 0x67, 0xb3, 0x2a, 0x20, 0x80, 0xef, 0x1f, 0x03, 0x94, 0xea, 0xd1, 0xd5, 0xea, 0x3d, 0x6a, 0xef, + 0xb9, 0xb8, 0xc9, 0xcf, 0xec, 0x92, 0x2b, 0x06, 0x89, 0x8d, 0x9a, 0x92, 0x44, 0x74, 0x0f, 0xa6, + 0x23, 0x85, 0xbe, 0x3e, 0xcb, 0x8f, 0xc2, 0xb1, 0x1a, 0x85, 0x8b, 0x62, 0xe0, 0x27, 0x5c, 0x94, + 0x34, 0xf4, 0x85, 0x06, 0x65, 0xbf, 0xf9, 0x10, 0xdb, 0x61, 0x9b, 0x38, 0x2d, 0x33, 0x02, 0x21, + 0x53, 0x1e, 0x8d, 0x0e, 0x76, 0x02, 0x5f, 0x5f, 0xe5, 0xbe, 0x6f, 0x65, 0x59, 0x32, 0xe4, 0x04, + 0x43, 0x91, 0xaf, 0x5d, 0xfa, 0xb2, 0x57, 0xba, 0xd0, 0xef, 0x95, 0x8a, 0x03, 0xcd, 0x59, 0x72, + 0xc6, 0x09, 0x7c, 0x54, 0x87, 0xa9, 0xa6, 0x87, 0x19, 0x14, 0xea, 0x39, 0xee, 0x42, 0xa1, 0x2a, + 0xc0, 0xad, 0x1a, 0x81, 0x5b, 0x75, 0x3f, 0x02, 0xec, 0xda, 0xb2, 0x34, 0x1a, 0x4d, 0xf9, 0xec, + 0x9b, 0x92, 0x66, 0x44, 0x03, 0x74, 0x03, 0xa6, 0x88, 0xd3, 0x62, 0x7b, 0xac, 0xcf, 0xf3, 0xb8, + 0x21, 0xbe, 0x8c, 0xba, 0xa0, 0xdd, 0xa0, 0xce, 0x03, 0xd2, 0xaa, 0xad, 0xb2, 0x0d, 0x90, 0x62, + 0x4a, 0xb4, 0xa2, 0x99, 0xe8, 0x87, 0x90, 0xf7, 0xb1, 0x77, 0x44, 0x9a, 0xd8, 0xd7, 0x17, 0x15, + 0x2d, 0x7b, 0x82, 0x28, 0xb5, 0xf0, 0xa0, 0x47, 0x72, 0x6a, 0xd0, 0x23, 0x1a, 0xfa, 0x18, 0x66, + 0x0e, 0xaf, 0xfb, 0x66, 0xe4, 0xd0, 0x12, 0x57, 0xf5, 0xb2, 0x1a, 0xde, 0x41, 0x1e, 0x61, 0x41, + 0x96, 0x5e, 0xd6, 0xf4, 0x7e, 0xaf, 0xb4, 0x72, 0x78, 0xdd, 0xaf, 0x0f, 0xb9, 0x08, 0x03, 0x2a, + 0x83, 0x24, 0xa6, 0x5d, 0x5a, 0xd3, 0xd1, 0xe8, 0x63, 0x22, 0xfd, 0x8e, 0xf5, 0xca, 0x71, 0x4a, + 0xaf, 0xa4, 0x32, 0x94, 0x95, 0xfb, 0x85, 0x3d, 0x7d, 0x65, 0x80, 0xb2, 0x31, 0x51, 0x45, 0xd9, + 0x98, 0x88, 0xea, 0xb0, 0x24, 0xee, 0x6c, 0x10, 0xb4, 0x4d, 0x1f, 0x37, 0xa9, 0x63, 0xfb, 0xfa, + 0x5a, 0x59, 0xdb, 0x1a, 0xaf, 0xbd, 0xd4, 0xef, 0x95, 0x36, 0x38, 0x73, 0x3f, 0x68, 0xef, 0x09, + 0x96, 0xa2, 0x64, 0x21, 0xc5, 0x2a, 0x58, 0x30, 0xa3, 0x60, 0x1c, 0x7a, 0x05, 0xc6, 0x0f, 0x71, + 0x57, 0xe6, 0xab, 0xa5, 0x7e, 0xaf, 0x34, 0x77, 0x88, 0xd5, 0x8b, 0xc8, 0xb8, 0x0c, 0xd0, 0x8e, + 0xac, 0x76, 0x88, 0x65, 0xde, 0xe1, 0x80, 0xc6, 0x09, 0x2a, 0xa0, 0x71, 0xc2, 0xf7, 0xc7, 0xae, + 0x6b, 0x85, 0x07, 0xb0, 0x98, 0xc6, 0xec, 0xe7, 0x62, 0xa7, 0x03, 0xeb, 0x23, 0xa0, 0xfb, 0x79, + 0x98, 0xab, 0xfc, 0x35, 0x07, 0xab, 
0x7b, 0x81, 0x87, 0xad, 0x0e, 0x71, 0x5a, 0x77, 0xb0, 0xe5, + 0xf3, 0x8b, 0x86, 0xfd, 0x00, 0x7d, 0x17, 0xa0, 0xd9, 0x0e, 0xfd, 0x00, 0x7b, 0x66, 0x9c, 0xfb, + 0xf9, 0xb6, 0x4a, 0x6a, 0x22, 0x3b, 0x4f, 0xc7, 0x44, 0x74, 0x09, 0x26, 0x5c, 0x4a, 0xdb, 0xd2, + 0x3e, 0xea, 0xf7, 0x4a, 0xf3, 0x6c, 0xac, 0x08, 0x73, 0x3e, 0xfa, 0x08, 0xa6, 0x23, 0x50, 0xf1, + 0xf5, 0x71, 0x7e, 0x16, 0x5f, 0x13, 0x97, 0x26, 0xcb, 0x9d, 0x18, 0x4f, 0x64, 0x1a, 0x5b, 0x92, + 0x97, 0x7a, 0xa0, 0xc3, 0x18, 0xfc, 0x8b, 0x08, 0xac, 0x46, 0xbe, 0xb7, 0x99, 0x12, 0xdb, 0xf4, + 0xb0, 0x4b, 0xbd, 0x80, 0x03, 0xf4, 0xcc, 0x8e, 0xce, 0xed, 0xdc, 0x10, 0x12, 0xdc, 0x8a, 0x6d, + 0x70, 0x7e, 0x6d, 0x53, 0xaa, 0x8d, 0x16, 0xa9, 0x32, 0x8d, 0x2c, 0x22, 0x72, 0x61, 0xb1, 0x43, + 0x1c, 0xd2, 0x09, 0x3b, 0x26, 0xaf, 0x65, 0xc8, 0x63, 0xac, 0x4f, 0xf2, 0xd5, 0x54, 0x8f, 0x59, + 0xcd, 0xbb, 0x62, 0xca, 0x6d, 0xda, 0xd8, 0x23, 0x8f, 0xb1, 0x58, 0xd2, 0x9a, 0xb4, 0x3d, 0xdf, + 0x49, 0x30, 0x8d, 0xd4, 0x18, 0xed, 0xc0, 0x24, 0x4b, 0xfc, 0xbe, 0x9e, 0xe3, 0x66, 0xe6, 0xb8, + 0x19, 0x76, 0x56, 0xea, 0xce, 0x03, 0x5a, 0x9b, 0x93, 0x5a, 0x84, 0x8c, 0x21, 0xfe, 0xa0, 0x9b, + 0x30, 0x6f, 0xe0, 0x26, 0x26, 0x47, 0xd8, 0xbe, 0x4d, 0x1b, 0x75, 0xdb, 0xd7, 0xa7, 0x78, 0x16, + 0xbe, 0xd8, 0xef, 0x95, 0xf4, 0x24, 0x47, 0xd9, 0xa8, 0xd4, 0x9c, 0xc2, 0xaf, 0x35, 0xa6, 0x46, + 0xdd, 0x87, 0xd3, 0x9d, 0xc9, 0x1f, 0xab, 0x67, 0x92, 0x05, 0x66, 0x00, 0x39, 0x71, 0xb9, 0x5b, + 0x75, 0x0f, 0x5b, 0x7c, 0x25, 0xd1, 0x2e, 0x56, 0xdf, 0x0f, 0x2d, 0x27, 0x20, 0x41, 0xf7, 0xc4, + 0x2b, 0xf3, 0xb9, 0x06, 0xcb, 0x19, 0x01, 0x3d, 0x0f, 0xbe, 0x55, 0xfe, 0xb2, 0x04, 0xf9, 0x68, + 0x6f, 0xd8, 0xd5, 0x60, 0x45, 0xa6, 0xf4, 0x88, 0x5f, 0x0d, 0x36, 0x56, 0xaf, 0x06, 0x1b, 0xa3, + 0x5d, 0xc8, 0x05, 0x16, 0x61, 0x09, 0x76, 0x4c, 0x96, 0x8d, 0x19, 0x18, 0xbd, 0xcf, 0x24, 0x6a, + 0xf3, 0x72, 0xbb, 0xe5, 0x04, 0x43, 0xfe, 0x45, 0x6f, 0xc7, 0x25, 0xec, 0xb8, 0x52, 0x79, 0x46, + 0x9e, 0x7c, 0x8b, 0x3a, 0xf6, 0x31, 0xac, 0x5a, 0xed, 0x36, 0x6d, 0x5a, 0x81, 0xd5, 0x68, 0x63, + 0x73, 0x70, 0x65, 0x27, 0xb8, 0xde, 0x57, 0x93, 0x7a, 0x77, 0x07, 0xa2, 0xa9, 0x0b, 0x7b, 0x51, + 0x3a, 0xba, 0x62, 0x65, 0x88, 0x18, 0x99, 0x54, 0xe4, 0xc1, 0xb2, 0x75, 0x64, 0x91, 0x76, 0xca, + 0xb2, 0xb8, 0x5e, 0xff, 0x97, 0xb2, 0x1c, 0x09, 0xa6, 0xec, 0x16, 0xa4, 0x5d, 0x64, 0x0d, 0x09, + 0x18, 0x19, 0x34, 0xd4, 0x80, 0x85, 0x80, 0x06, 0x56, 0x5b, 0xb1, 0x97, 0x93, 0x69, 0x38, 0x61, + 0x6f, 0x9f, 0x09, 0xa5, 0x6c, 0xc5, 0x37, 0x38, 0x48, 0x30, 0x8d, 0xd4, 0x98, 0xaf, 0x4b, 0xac, + 0x97, 0x23, 0x53, 0x64, 0x67, 0x2a, 0x73, 0x5d, 0x91, 0xe0, 0xc8, 0x75, 0x0d, 0x09, 0x18, 0x19, + 0x34, 0x74, 0x1f, 0x16, 0xbd, 0xd0, 0x31, 0x89, 0xed, 0x9b, 0x8d, 0xae, 0xe9, 0x07, 0x56, 0x80, + 0xf5, 0xbc, 0xd2, 0x33, 0xc4, 0x06, 0x8d, 0xd0, 0xa9, 0xdb, 0x7e, 0xad, 0xbb, 0xc7, 0x44, 0x84, + 0xad, 0x55, 0x69, 0x6b, 0xce, 0x53, 0x79, 0x46, 0x72, 0x88, 0x7e, 0xab, 0x41, 0xd1, 0xa1, 0x8e, + 0x69, 0x79, 0x1d, 0xcb, 0xb6, 0xcc, 0xac, 0x15, 0x4e, 0x2b, 0xc0, 0x18, 0x1b, 0x7c, 0x8f, 0x3a, + 0xbb, 0x7c, 0xca, 0xa8, 0xa5, 0xbe, 0x22, 0xcd, 0x6f, 0x3a, 0xa3, 0x25, 0x8d, 0xe3, 0x98, 0x68, + 0x17, 0xe6, 0x42, 0x47, 0x56, 0x1e, 0x6c, 0xbb, 0x75, 0x28, 0x6b, 0x5b, 0xf9, 0xda, 0x66, 0xbf, + 0x57, 0x5a, 0x4f, 0x30, 0x94, 0x0b, 0x90, 0x9c, 0x81, 0x3e, 0xd5, 0x60, 0x3d, 0x2e, 0x82, 0x43, + 0xdf, 0x6a, 0x61, 0x16, 0x47, 0xd1, 0x88, 0xce, 0x64, 0x5d, 0x85, 0xc8, 0xfa, 0x07, 0x4c, 0xb6, + 0xd6, 0xe5, 0xfd, 0xc3, 0xa0, 0x05, 0x2b, 0x7a, 0x19, 0x6c, 0xc5, 0xfa, 0x4a, 0x16, 0x9f, 0x75, + 0xd9, 0xbc, 0xe7, 0x0b, 0xba, 0x2e, 0xd6, 0x67, 0x07, 0xfd, 
0x32, 0x23, 0xee, 0x77, 0x5d, 0x55, + 0x41, 0x3e, 0xa2, 0xbd, 0x88, 0xe2, 0xe8, 0xf7, 0x1a, 0x6c, 0x8c, 0xbc, 0xfa, 0xe7, 0x22, 0x47, + 0xfc, 0x4e, 0x83, 0xf5, 0x11, 0x10, 0x71, 0x6e, 0x72, 0x58, 0x06, 0xa4, 0x9c, 0x0b, 0xdf, 0x7e, + 0xca, 0x62, 0x97, 0x7d, 0x37, 0x55, 0xff, 0x26, 0x47, 0xfa, 0xf7, 0x56, 0xd2, 0x3f, 0xf1, 0x9c, + 0x72, 0x83, 0x76, 0xdc, 0x30, 0x88, 0xf7, 0xe2, 0x44, 0x2f, 0x1e, 0x01, 0x1a, 0x86, 0xa6, 0xd3, + 0xc5, 0xe7, 0xba, 0x6a, 0x7f, 0x5e, 0x56, 0x4c, 0xac, 0x54, 0x60, 0x7a, 0x4e, 0x34, 0xfc, 0x2b, + 0x0d, 0xca, 0x27, 0x61, 0xd4, 0x0b, 0x8c, 0xc3, 0xcf, 0x35, 0xd8, 0x18, 0x89, 0x2d, 0xa7, 0x8b, + 0xc7, 0x59, 0xf8, 0x51, 0xf9, 0xcd, 0x84, 0xa8, 0x6c, 0x18, 0xc6, 0x28, 0x15, 0x8b, 0xf6, 0xec, + 0x15, 0xcb, 0x58, 0xaa, 0x62, 0x61, 0x16, 0xce, 0xa2, 0x62, 0x19, 0x4f, 0xc1, 0x34, 0xd7, 0x7b, + 0xa6, 0x15, 0xcb, 0xff, 0xb0, 0x96, 0x9d, 0x8c, 0x3f, 0x4d, 0xc0, 0xa6, 0x6c, 0xae, 0xf6, 0xe2, + 0x77, 0x1c, 0x96, 0x13, 0x65, 0xcb, 0xf4, 0xac, 0x9d, 0xe5, 0xd4, 0x09, 0x9d, 0xe5, 0x1e, 0xcc, + 0x88, 0x76, 0xcf, 0x0c, 0x48, 0x27, 0x5a, 0xe4, 0x71, 0x2f, 0x44, 0x51, 0xdd, 0x06, 0x62, 0x1a, + 0x63, 0xf0, 0x47, 0x22, 0x65, 0x8c, 0x6e, 0x01, 0xc4, 0xa9, 0x37, 0x2a, 0x41, 0xe7, 0x12, 0x47, + 0x49, 0x3e, 0x2d, 0xcb, 0x91, 0x9f, 0x78, 0x5a, 0x8e, 0x88, 0xe8, 0x28, 0xa3, 0x5d, 0x14, 0xf5, + 0xe5, 0x35, 0xb5, 0x29, 0xcd, 0x8a, 0xdb, 0xb3, 0x34, 0x8d, 0xe7, 0xba, 0x47, 0xfa, 0xe7, 0x04, + 0x2c, 0x71, 0x08, 0x4b, 0x34, 0xd6, 0xa7, 0x6d, 0x96, 0x28, 0x2c, 0xc6, 0x57, 0x5c, 0x76, 0xfb, + 0x12, 0x41, 0xbe, 0xc3, 0xfd, 0x19, 0xd2, 0x3c, 0x78, 0x4a, 0x10, 0x54, 0x11, 0xc8, 0x75, 0x19, + 0xc8, 0x05, 0x2f, 0xc9, 0x35, 0xd2, 0x04, 0xf4, 0xb9, 0x06, 0x17, 0xd3, 0x16, 0x59, 0x2d, 0x18, + 0xbf, 0x02, 0x0b, 0x9c, 0x79, 0xf3, 0x74, 0xd6, 0x6b, 0xdd, 0x7b, 0x72, 0x9e, 0xf0, 0xe3, 0x65, + 0xe9, 0xc7, 0x86, 0x37, 0x4a, 0xce, 0x18, 0xcd, 0x2a, 0x7c, 0xa1, 0xc1, 0x4a, 0xd6, 0xf2, 0xce, + 0x45, 0x1d, 0xf1, 0x4b, 0x0d, 0x8a, 0xc7, 0xaf, 0xfe, 0xc5, 0xa5, 0xd1, 0xca, 0x3f, 0x34, 0x58, + 0xce, 0x78, 0x01, 0xfa, 0xaf, 0xc1, 0xe9, 0xb9, 0x80, 0xce, 0x4d, 0xc8, 0xf1, 0x0e, 0x23, 0xca, + 0x5d, 0x6b, 0xd9, 0x67, 0x4a, 0x24, 0x44, 0x21, 0xa9, 0x26, 0x44, 0x41, 0xa9, 0xfc, 0x5b, 0x83, + 0x85, 0x54, 0x78, 0xd0, 0xbe, 0xfa, 0xfa, 0x26, 0x72, 0xf6, 0x2b, 0x59, 0x71, 0xfc, 0x56, 0xef, + 0x6e, 0xe7, 0xf4, 0x81, 0xa8, 0xf2, 0x67, 0x0d, 0x66, 0xe3, 0xc7, 0x54, 0xe2, 0xb4, 0xd0, 0x3b, + 0xa9, 0xd7, 0x91, 0x97, 0x62, 0x20, 0x8f, 0x44, 0x4e, 0x5f, 0x6f, 0xbc, 0x80, 0x9c, 0x5f, 0xf9, + 0x1e, 0xe4, 0x6f, 0xd3, 0x06, 0xdf, 0x72, 0x74, 0x05, 0xc6, 0x0f, 0x68, 0x43, 0xee, 0x59, 0x3e, + 0x2a, 0x65, 0x85, 0xa5, 0x03, 0xda, 0x50, 0x2d, 0x1d, 0xd0, 0x46, 0xe5, 0x0f, 0x1a, 0x2c, 0xc5, + 0x6f, 0x90, 0xc3, 0x4a, 0xb4, 0xd3, 0x28, 0x41, 0xdb, 0x30, 0xe5, 0xf0, 0xc4, 0xe1, 0x73, 0x87, + 0xe7, 0xc4, 0x07, 0x11, 0x49, 0x52, 0x3f, 0x88, 0x48, 0x12, 0xda, 0x81, 0xbc, 0x13, 0x76, 0x76, + 0x9b, 0x87, 0xd8, 0xe6, 0x9f, 0x69, 0xe7, 0x64, 0x9f, 0x2a, 0x69, 0x89, 0x3e, 0x55, 0xd2, 0x2a, + 0x57, 0x20, 0x57, 0xb7, 0xef, 0x10, 0x3f, 0x60, 0x21, 0x24, 0xb6, 0x38, 0x96, 0x32, 0x84, 0x24, + 0xf1, 0x2e, 0xc9, 0xb8, 0x15, 0x17, 0x96, 0x0c, 0xec, 0xe0, 0x47, 0x67, 0xf2, 0x68, 0x2d, 0x2d, + 0x8e, 0x1d, 0x6b, 0xf1, 0x17, 0x93, 0x80, 0x0c, 0x1c, 0x84, 0x9e, 0x73, 0x26, 0x36, 0xff, 0x1f, + 0x72, 0xac, 0x04, 0x88, 0xbf, 0x7c, 0xf3, 0x43, 0x70, 0x40, 0x1b, 0x09, 0xf9, 0x49, 0x4e, 0x40, + 0xf7, 0x61, 0xc9, 0x3a, 0xa2, 0x24, 0xf9, 0xc9, 0x57, 0x3c, 0x66, 0xaf, 0xf2, 0xdd, 0xbb, 0xeb, + 0xd9, 0xd8, 0xc3, 0xf6, 0x5e, 0xe0, 0x11, 0xa7, 0xf5, 0xae, 0xe5, 0x8a, 0x4f, 0x28, 
0x7c, 0x4e, + 0xd6, 0x47, 0x5e, 0x63, 0x21, 0xc5, 0x42, 0x97, 0x21, 0xe7, 0x61, 0xcb, 0xa7, 0x0e, 0xff, 0x20, + 0x39, 0x2d, 0xce, 0xbc, 0xa0, 0xa8, 0x67, 0x5e, 0x50, 0xd0, 0x5b, 0x30, 0x77, 0x18, 0x36, 0xb0, + 0xe7, 0xe0, 0x00, 0xfb, 0x6c, 0x09, 0x39, 0x3e, 0xa9, 0xd0, 0xef, 0x95, 0xd6, 0x06, 0x8c, 0xc4, + 0x4a, 0x66, 0x55, 0x3a, 0xaa, 0xc3, 0x12, 0x5b, 0xbc, 0x17, 0x3a, 0xa6, 0x15, 0x70, 0x09, 0x6c, + 0xf3, 0xc2, 0x2e, 0x2f, 0x3c, 0x3f, 0xa0, 0x0d, 0x23, 0x74, 0x76, 0x23, 0x96, 0xea, 0x79, 0x8a, + 0x85, 0x3e, 0xd5, 0x60, 0x39, 0xf0, 0x2c, 0x76, 0x86, 0x4c, 0xf5, 0x93, 0xbb, 0x78, 0xdd, 0xda, + 0xe6, 0xe1, 0x19, 0xde, 0xb6, 0xea, 0xbe, 0x98, 0x32, 0xf4, 0x21, 0xbe, 0xdc, 0xef, 0x95, 0x2e, + 0x06, 0x43, 0x4c, 0xc5, 0x03, 0x34, 0xcc, 0x2d, 0x74, 0x60, 0x7d, 0x84, 0xc2, 0xe7, 0x02, 0x08, + 0x36, 0x20, 0xb1, 0xd5, 0xef, 0xe0, 0xee, 0x87, 0x8c, 0x7a, 0xcf, 0x22, 0xde, 0x59, 0x5b, 0xaa, + 0x7c, 0x0c, 0x8b, 0xe9, 0x73, 0x85, 0x7e, 0x04, 0x53, 0xd8, 0x09, 0x3c, 0x12, 0xa7, 0x8d, 0xf5, + 0xe8, 0x33, 0x47, 0xca, 0x1b, 0x81, 0x11, 0x52, 0x56, 0xc5, 0x08, 0x49, 0xda, 0xf9, 0x97, 0x06, + 0x0b, 0xbb, 0xad, 0x96, 0x87, 0x5b, 0xac, 0x9f, 0x16, 0x0f, 0x5c, 0x77, 0xf8, 0xba, 0x94, 0x0f, + 0x26, 0x1c, 0x4d, 0x0a, 0xa3, 0xbf, 0xa4, 0x14, 0xd6, 0x92, 0xbc, 0x08, 0xe1, 0xb6, 0xb4, 0xd7, + 0x35, 0x74, 0x15, 0x60, 0x00, 0x11, 0x68, 0x4d, 0x9e, 0x84, 0x14, 0x66, 0x14, 0x66, 0xc4, 0x07, + 0x5f, 0x01, 0x3d, 0x3f, 0x80, 0x19, 0xe5, 0xac, 0xa0, 0xf5, 0x11, 0xa7, 0xa7, 0xb0, 0x36, 0x94, + 0xd9, 0x6f, 0xb1, 0xd5, 0xa1, 0x4b, 0xcc, 0x24, 0xcb, 0xc9, 0x37, 0xa9, 0x83, 0x91, 0xaa, 0x3a, + 0x61, 0xa7, 0x76, 0xff, 0xeb, 0xbf, 0x17, 0x2f, 0xfc, 0xe4, 0x49, 0x51, 0xfb, 0xf2, 0x49, 0x51, + 0xfb, 0xea, 0x49, 0x51, 0xfb, 0xdb, 0x93, 0xa2, 0xf6, 0xd9, 0xd3, 0xe2, 0x85, 0xaf, 0x9e, 0x16, + 0x2f, 0x7c, 0xfd, 0xb4, 0x78, 0xe1, 0xa3, 0x57, 0x95, 0x5f, 0xf7, 0x88, 0x27, 0x55, 0xd7, 0xa3, + 0x07, 0xb8, 0x19, 0xc8, 0x51, 0xf4, 0xfb, 0xa0, 0x3f, 0x8e, 0xad, 0x88, 0xa7, 0x89, 0x7b, 0x82, + 0x5d, 0xad, 0xd3, 0xea, 0xae, 0x4b, 0x1a, 0x39, 0xee, 0xd9, 0x1b, 0xff, 0x09, 0x00, 0x00, 0xff, + 0xff, 0xdc, 0x5e, 0xd7, 0xc8, 0xe5, 0x24, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
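The marshalling hunk below hand-writes the protobuf key for the new field, and the two magic bytes can be checked with ordinary wire-format arithmetic (nothing here is specific to this patch): the key for field number 22 with wire type 0 (varint) is (22 << 3) | 0 = 176 = 0xb0, whose varint encoding is the byte pair 0xb0 0x01; because MarshalToSizedBuffer fills dAtA from the back of the buffer, the generated code emits 0x1 first and 0xb0 second. A self-contained sketch of that computation:

package main

import "fmt"

func main() {
	// Protobuf key for field 22, wire type 0 (varint): (22 << 3) | 0 = 176.
	key := 22<<3 | 0
	// Varint-encode 176: low seven bits plus the continuation bit, then the rest.
	lo := byte(key&0x7f | 0x80) // 0xb0
	hi := byte(key >> 7)        // 0x01
	fmt.Printf("%#x %#x\n", lo, hi) // prints 0xb0 0x1
}
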
@@ -1689,6 +1700,13 @@ func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.QueueTtlSeconds != 0 { + i = encodeVarintQueue(dAtA, i, uint64(m.QueueTtlSeconds)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } { size, err := m.SchedulingResourceRequirements.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -3108,6 +3126,9 @@ func (m *Job) Size() (n int) { } l = m.SchedulingResourceRequirements.Size() n += 2 + l + sovQueue(uint64(l)) + if m.QueueTtlSeconds != 0 { + n += 2 + sovQueue(uint64(m.QueueTtlSeconds)) + } return n } @@ -3640,6 +3661,7 @@ func (this *Job) String() string { `CompressedQueueOwnershipUserGroups:` + fmt.Sprintf("%v", this.CompressedQueueOwnershipUserGroups) + `,`, `Scheduler:` + fmt.Sprintf("%v", this.Scheduler) + `,`, `SchedulingResourceRequirements:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SchedulingResourceRequirements), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `QueueTtlSeconds:` + fmt.Sprintf("%v", this.QueueTtlSeconds) + `,`, `}`, }, "") return s @@ -5037,6 +5059,25 @@ func (m *Job) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) + } + m.QueueTtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueue + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueTtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipQueue(dAtA[iNdEx:]) diff --git a/pkg/api/queue.proto b/pkg/api/queue.proto index 3fe9d3f0428..c2a0e684d8f 100644 --- a/pkg/api/queue.proto +++ b/pkg/api/queue.proto @@ -55,6 +55,8 @@ message Job { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. string scheduler = 20; + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + int64 queue_ttl_seconds = 22; } // For the bidirectional streaming job lease request service. diff --git a/pkg/api/submit.pb.go b/pkg/api/submit.pb.go index ae36a9a2837..c3aa832f664 100644 --- a/pkg/api/submit.pb.go +++ b/pkg/api/submit.pb.go @@ -135,6 +135,8 @@ type JobSubmitRequestItem struct { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. Scheduler string `protobuf:"bytes,11,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + QueueTtlSeconds int64 `protobuf:"varint,12,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` } func (m *JobSubmitRequestItem) Reset() { *m = JobSubmitRequestItem{} } @@ -248,6 +250,13 @@ func (m *JobSubmitRequestItem) GetScheduler() string { return "" } +func (m *JobSubmitRequestItem) GetQueueTtlSeconds() int64 { + if m != nil { + return m.QueueTtlSeconds + } + return 0 +} + type IngressConfig struct { Type IngressType `protobuf:"varint,1,opt,name=type,proto3,enum=api.IngressType" json:"type,omitempty"` // Deprecated: Do not use. 
Ports []uint32 `protobuf:"varint,2,rep,packed,name=ports,proto3" json:"ports,omitempty"` @@ -1583,143 +1592,145 @@ func init() { func init() { proto.RegisterFile("pkg/api/submit.proto", fileDescriptor_e998bacb27df16c1) } var fileDescriptor_e998bacb27df16c1 = []byte{ - // 2171 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0x1b, 0xd7, - 0x11, 0xd7, 0x92, 0x12, 0x45, 0xce, 0xea, 0x83, 0x7a, 0xfa, 0x5a, 0xad, 0x65, 0x52, 0xdd, 0x34, - 0xad, 0x2c, 0x24, 0x24, 0xac, 0x34, 0x80, 0xed, 0x06, 0x08, 0x4c, 0x89, 0x72, 0xe4, 0x18, 0xb4, - 0x2c, 0x59, 0x4d, 0xd2, 0x43, 0x99, 0x25, 0xf7, 0x89, 0x5a, 0x89, 0xdc, 0x5d, 0xef, 0x87, 0x0c, - 0xb5, 0x08, 0x10, 0xf4, 0x52, 0xf4, 0x56, 0xa0, 0xc7, 0x1e, 0xda, 0x73, 0xfa, 0x8f, 0xf4, 0x18, - 0xa0, 0x97, 0xf4, 0x42, 0xb4, 0x76, 0x3f, 0x00, 0xde, 0x7a, 0xef, 0xa1, 0x78, 0xf3, 0x76, 0xb9, - 0x6f, 0x49, 0xea, 0xcb, 0x80, 0xdb, 0x9b, 0xde, 0xef, 0xcd, 0xfc, 0x66, 0xe6, 0xbd, 0x79, 0x33, - 0xb3, 0x22, 0x2c, 0x38, 0xa7, 0xad, 0xb2, 0xee, 0x98, 0x65, 0x2f, 0x68, 0x74, 0x4c, 0xbf, 0xe4, - 0xb8, 0xb6, 0x6f, 0x93, 0xb4, 0xee, 0x98, 0xea, 0xad, 0x96, 0x6d, 0xb7, 0xda, 0xb4, 0x8c, 0x50, - 0x23, 0x38, 0x2a, 0xd3, 0x8e, 0xe3, 0x9f, 0x73, 0x09, 0x55, 0x3b, 0xbd, 0xe7, 0x95, 0x4c, 0x1b, - 0x55, 0x9b, 0xb6, 0x4b, 0xcb, 0x67, 0x77, 0xcb, 0x2d, 0x6a, 0x51, 0x57, 0xf7, 0xa9, 0x11, 0xca, - 0xac, 0x86, 0x04, 0x4c, 0x46, 0xb7, 0x2c, 0xdb, 0xd7, 0x7d, 0xd3, 0xb6, 0xbc, 0x70, 0xf7, 0xfd, - 0x96, 0xe9, 0x1f, 0x07, 0x8d, 0x52, 0xd3, 0xee, 0x94, 0x5b, 0x76, 0xcb, 0x8e, 0xed, 0xb0, 0x15, - 0x2e, 0xf0, 0xaf, 0x50, 0xbc, 0xef, 0xe8, 0x31, 0xd5, 0xdb, 0xfe, 0x31, 0x47, 0xb5, 0xdf, 0xe7, - 0x60, 0xe1, 0xb1, 0xdd, 0x38, 0x40, 0xe7, 0xf7, 0xe9, 0x8b, 0x80, 0x7a, 0xfe, 0xae, 0x4f, 0x3b, - 0x64, 0x13, 0xb2, 0x8e, 0x6b, 0xda, 0xae, 0xe9, 0x9f, 0x2b, 0xd2, 0x9a, 0xb4, 0x2e, 0x55, 0x96, - 0x7a, 0xdd, 0x22, 0x89, 0xb0, 0xf7, 0xec, 0x8e, 0xe9, 0x63, 0x3c, 0xfb, 0x7d, 0x39, 0xf2, 0x21, - 0xe4, 0x2c, 0xbd, 0x43, 0x3d, 0x47, 0x6f, 0x52, 0x25, 0xbd, 0x26, 0xad, 0xe7, 0x2a, 0xcb, 0xbd, - 0x6e, 0x71, 0xbe, 0x0f, 0x0a, 0x5a, 0xb1, 0x24, 0xf9, 0x00, 0x72, 0xcd, 0xb6, 0x49, 0x2d, 0xbf, - 0x6e, 0x1a, 0x4a, 0x16, 0xd5, 0xd0, 0x16, 0x07, 0x77, 0x0d, 0xd1, 0x56, 0x84, 0x91, 0x03, 0xc8, - 0xb4, 0xf5, 0x06, 0x6d, 0x7b, 0xca, 0xf8, 0x5a, 0x7a, 0x5d, 0xde, 0x7c, 0xb7, 0xa4, 0x3b, 0x66, - 0x69, 0x54, 0x28, 0xa5, 0x27, 0x28, 0x57, 0xb5, 0x7c, 0xf7, 0xbc, 0xb2, 0xd0, 0xeb, 0x16, 0xf3, - 0x5c, 0x51, 0xa0, 0x0d, 0xa9, 0x48, 0x0b, 0x64, 0xe1, 0x9c, 0x95, 0x09, 0x64, 0xde, 0xb8, 0x98, - 0xf9, 0x61, 0x2c, 0xcc, 0xe9, 0x57, 0x7a, 0xdd, 0xe2, 0xa2, 0x40, 0x21, 0xd8, 0x10, 0x99, 0xc9, - 0xaf, 0x24, 0x58, 0x70, 0xe9, 0x8b, 0xc0, 0x74, 0xa9, 0x51, 0xb7, 0x6c, 0x83, 0xd6, 0xc3, 0x60, - 0x32, 0x68, 0xf2, 0xee, 0xc5, 0x26, 0xf7, 0x43, 0xad, 0x9a, 0x6d, 0x50, 0x31, 0x30, 0xad, 0xd7, - 0x2d, 0xae, 0xba, 0x43, 0x9b, 0xb1, 0x03, 0x8a, 0xb4, 0x4f, 0x86, 0xf7, 0xc9, 0x53, 0xc8, 0x3a, - 0xb6, 0x51, 0xf7, 0x1c, 0xda, 0x54, 0x52, 0x6b, 0xd2, 0xba, 0xbc, 0x79, 0xab, 0xc4, 0x53, 0x13, - 0x7d, 0x60, 0xa9, 0x59, 0x3a, 0xbb, 0x5b, 0xda, 0xb3, 0x8d, 0x03, 0x87, 0x36, 0xf1, 0x3e, 0xe7, - 0x1c, 0xbe, 0x48, 0x70, 0x4f, 0x86, 0x20, 0xd9, 0x83, 0x5c, 0x44, 0xe8, 0x29, 0x93, 0x18, 0xce, - 0xa5, 0x8c, 0x3c, 0xad, 0xf8, 0xc2, 0x4b, 0xa4, 0x55, 0x88, 0x91, 0x2d, 0x98, 0x34, 0xad, 0x96, - 0x4b, 0x3d, 0x4f, 0xc9, 0x21, 0x1f, 0x41, 0xa2, 0x5d, 0x8e, 0x6d, 0xd9, 0xd6, 0x91, 0xd9, 0xaa, - 0x2c, 0x32, 0xc7, 0x42, 0x31, 0x81, 0x25, 0xd2, 0x24, 0x3b, 0x90, 0xf5, 0xa8, 0x7b, 0x66, 0x36, - 
0xa9, 0xa7, 0x80, 0xc0, 0x72, 0xc0, 0xc1, 0x90, 0x05, 0x9d, 0x89, 0xe4, 0x44, 0x67, 0x22, 0x8c, - 0xe5, 0xb8, 0xd7, 0x3c, 0xa6, 0x46, 0xd0, 0xa6, 0xae, 0x22, 0xc7, 0x39, 0xde, 0x07, 0xc5, 0x1c, - 0xef, 0x83, 0xaa, 0x0e, 0xb2, 0x70, 0x5b, 0xe4, 0x1d, 0x48, 0x9f, 0x52, 0xfe, 0xb0, 0x72, 0x95, - 0xb9, 0x5e, 0xb7, 0x38, 0x7d, 0x4a, 0xc5, 0x37, 0xc5, 0x76, 0xc9, 0x1d, 0x98, 0x38, 0xd3, 0xdb, - 0x01, 0xc5, 0x7b, 0xc9, 0x55, 0xe6, 0x7b, 0xdd, 0xe2, 0x2c, 0x02, 0x82, 0x20, 0x97, 0x78, 0x90, - 0xba, 0x27, 0xa9, 0x47, 0x90, 0x1f, 0xcc, 0xc7, 0xb7, 0x62, 0xa7, 0x03, 0xcb, 0x17, 0x24, 0xe1, - 0xdb, 0x30, 0xa7, 0xfd, 0x3b, 0x0d, 0xd3, 0x89, 0xab, 0x26, 0x0f, 0x60, 0xdc, 0x3f, 0x77, 0x28, - 0x9a, 0x99, 0xd9, 0xcc, 0x8b, 0xc9, 0xf0, 0xfc, 0xdc, 0xa1, 0xf8, 0xc6, 0x67, 0x98, 0x44, 0x22, - 0x41, 0x51, 0x87, 0x19, 0x77, 0x6c, 0xd7, 0xf7, 0x94, 0xd4, 0x5a, 0x7a, 0x7d, 0x9a, 0x1b, 0x47, - 0x40, 0x34, 0x8e, 0x00, 0xf9, 0x32, 0x59, 0x0c, 0xd2, 0x98, 0x34, 0xef, 0x0c, 0xa7, 0xde, 0x9b, - 0x57, 0x81, 0xfb, 0x20, 0xfb, 0x6d, 0xaf, 0x4e, 0x2d, 0xbd, 0xd1, 0xa6, 0x86, 0x32, 0xbe, 0x26, - 0xad, 0x67, 0x2b, 0x4a, 0xaf, 0x5b, 0x5c, 0xf0, 0xd9, 0x89, 0x22, 0x2a, 0xe8, 0x42, 0x8c, 0x62, - 0xcd, 0xa4, 0xae, 0x5f, 0x67, 0x55, 0x54, 0x99, 0x10, 0x6a, 0x26, 0x75, 0xfd, 0x9a, 0xde, 0xa1, - 0x89, 0x9a, 0x19, 0x62, 0xe4, 0x63, 0x98, 0x0e, 0x3c, 0x5a, 0x6f, 0xb6, 0x03, 0xcf, 0xa7, 0xee, - 0xee, 0x9e, 0x92, 0x41, 0x8b, 0x6a, 0xaf, 0x5b, 0x5c, 0x0a, 0x3c, 0xba, 0x15, 0xe1, 0x82, 0xf2, - 0x94, 0x88, 0xff, 0xaf, 0x52, 0x4c, 0xf3, 0x61, 0x3a, 0xf1, 0x2e, 0xc9, 0xbd, 0x11, 0x57, 0x1e, - 0x4a, 0xe0, 0x95, 0x93, 0xe1, 0x2b, 0xbf, 0xf1, 0x85, 0x6b, 0x7f, 0x91, 0x20, 0x3f, 0x58, 0x73, - 0x99, 0xfe, 0x8b, 0x80, 0x06, 0x34, 0x0c, 0x10, 0xf5, 0x11, 0x10, 0xf5, 0x11, 0x20, 0x3f, 0x02, - 0x38, 0xb1, 0x1b, 0x75, 0x8f, 0x62, 0x23, 0x4b, 0xc5, 0x97, 0x72, 0x62, 0x37, 0x0e, 0xe8, 0x40, - 0x23, 0x8b, 0x30, 0x62, 0xc0, 0x1c, 0xd3, 0x72, 0xb9, 0xbd, 0x3a, 0x13, 0x88, 0x92, 0x6d, 0xe5, - 0xc2, 0x36, 0x50, 0xb9, 0xdd, 0xeb, 0x16, 0x57, 0x4e, 0xec, 0x86, 0x80, 0x89, 0x11, 0xcd, 0x0e, - 0x6c, 0x69, 0xff, 0xe1, 0xb1, 0x6d, 0xe9, 0x56, 0x93, 0xb6, 0xa3, 0xd8, 0x36, 0x20, 0xc3, 0x4c, - 0x9b, 0x86, 0x18, 0xdc, 0x89, 0xdd, 0x48, 0x78, 0x3a, 0x81, 0xc0, 0x1b, 0x06, 0xd7, 0x3f, 0xbd, - 0xf4, 0x95, 0xa7, 0xf7, 0x3e, 0x4c, 0x72, 0x67, 0x78, 0x47, 0xcf, 0xf1, 0x56, 0x8d, 0xc6, 0x13, - 0xad, 0x9a, 0x23, 0xe4, 0x3d, 0xc8, 0xb8, 0x54, 0xf7, 0x6c, 0x2b, 0xcc, 0x7e, 0x94, 0xe6, 0x88, - 0x28, 0xcd, 0x11, 0xed, 0x1f, 0x12, 0xcc, 0x3f, 0x46, 0xa7, 0x92, 0x27, 0x90, 0x8c, 0x4a, 0xba, - 0x69, 0x54, 0xa9, 0x2b, 0xa3, 0xfa, 0x18, 0x32, 0x47, 0x66, 0xdb, 0xa7, 0x2e, 0x9e, 0x80, 0xbc, - 0x39, 0xd7, 0xbf, 0x52, 0xea, 0xef, 0xe0, 0x06, 0xf7, 0x9c, 0x0b, 0x89, 0x9e, 0x73, 0x44, 0x88, - 0x73, 0xfc, 0x1a, 0x71, 0x7e, 0x0a, 0x53, 0x22, 0x37, 0xf9, 0x31, 0x64, 0x3c, 0x5f, 0xf7, 0xa9, - 0xa7, 0x48, 0x6b, 0xe9, 0xf5, 0x99, 0xcd, 0xe9, 0xbe, 0x79, 0x86, 0x72, 0x32, 0x2e, 0x20, 0x92, - 0x71, 0x44, 0xfb, 0xa7, 0x04, 0x4b, 0x8f, 0x59, 0x1e, 0x85, 0x03, 0x9e, 0xf9, 0x73, 0x1a, 0x9d, - 0x9b, 0x70, 0x59, 0xd2, 0x35, 0x2e, 0xeb, 0xad, 0x27, 0xcf, 0x47, 0x30, 0x65, 0xd1, 0x97, 0xf5, - 0xfe, 0xc4, 0x3a, 0x8e, 0x13, 0x2b, 0xd6, 0x61, 0x8b, 0xbe, 0xdc, 0x1b, 0x1e, 0x5a, 0x65, 0x01, - 0xd6, 0xfe, 0x98, 0x82, 0xe5, 0xa1, 0x40, 0x3d, 0xc7, 0xb6, 0x3c, 0x4a, 0x7e, 0x27, 0x81, 0xe2, - 0xc6, 0x1b, 0x58, 0xf9, 0xea, 0x2e, 0xf5, 0x82, 0xb6, 0xcf, 0x63, 0x97, 0x37, 0xef, 0x47, 0x87, - 0x3a, 0x8a, 0xa0, 0xb4, 0x3f, 0xa0, 0xbc, 0xcf, 0x75, 0x79, 0xa7, 0x78, 0xb7, 0xd7, 0x2d, 0x7e, - 0xcf, 0x1d, 0x2d, 0x21, 
0x78, 0xbb, 0x7c, 0x81, 0x88, 0xea, 0xc2, 0xea, 0x65, 0xfc, 0x6f, 0xa5, - 0x38, 0x5b, 0xb0, 0x28, 0x94, 0x24, 0x1e, 0x25, 0x7e, 0x32, 0xdc, 0xa4, 0x9c, 0xdc, 0x81, 0x09, - 0xea, 0xba, 0xb6, 0x2b, 0xda, 0x44, 0x40, 0x14, 0x45, 0x40, 0xfb, 0x0a, 0xe6, 0x86, 0xec, 0x91, - 0x63, 0x20, 0xbc, 0x6a, 0xf2, 0x75, 0x58, 0x36, 0xf9, 0x7d, 0xa8, 0x83, 0x65, 0x33, 0xf6, 0xb1, - 0x52, 0xe8, 0x75, 0x8b, 0x2a, 0x16, 0xc7, 0x18, 0x14, 0x4f, 0x3a, 0x3f, 0xb8, 0xa7, 0x7d, 0x9d, - 0x81, 0x89, 0x67, 0x98, 0x64, 0x3f, 0x80, 0x71, 0x6c, 0xb7, 0x3c, 0x3a, 0x6c, 0x39, 0x56, 0xb2, - 0xd5, 0xe2, 0x3e, 0xa9, 0xc2, 0x6c, 0x94, 0x88, 0xf5, 0x23, 0xbd, 0xe9, 0x87, 0x51, 0x4a, 0x95, - 0xd5, 0x5e, 0xb7, 0xa8, 0x44, 0x5b, 0x3b, 0xb8, 0x23, 0x28, 0xcf, 0x24, 0x77, 0xd8, 0x74, 0x10, - 0x78, 0xd4, 0xad, 0xdb, 0x2f, 0x2d, 0xea, 0xf2, 0x96, 0x90, 0xe3, 0xd3, 0x01, 0x83, 0x9f, 0x22, - 0x2a, 0x4e, 0x07, 0x31, 0xca, 0x9e, 0x43, 0xcb, 0xb5, 0x03, 0x27, 0xd2, 0xe5, 0x05, 0x15, 0x9f, - 0x03, 0xe2, 0x43, 0xca, 0xb2, 0x00, 0x13, 0x0a, 0xb3, 0x2e, 0xf5, 0xec, 0xc0, 0x6d, 0xd2, 0x7a, - 0xdb, 0xec, 0x98, 0x7e, 0xf4, 0x25, 0x54, 0xc0, 0x83, 0xc5, 0xc3, 0x28, 0xed, 0x87, 0x12, 0x4f, - 0x50, 0x80, 0x67, 0x33, 0xc6, 0xe7, 0x26, 0x36, 0xc4, 0xf8, 0x92, 0x3b, 0xe4, 0x00, 0x64, 0x87, - 0xba, 0x1d, 0xd3, 0xf3, 0x70, 0xbe, 0xe2, 0x5f, 0x3e, 0x4b, 0x82, 0x89, 0xbd, 0x78, 0x97, 0xfb, - 0x2e, 0x88, 0x8b, 0xbe, 0x0b, 0xb0, 0xfa, 0x2f, 0x09, 0x64, 0x41, 0x8f, 0xec, 0x43, 0xd6, 0x0b, - 0x1a, 0x27, 0xb4, 0xd9, 0x7f, 0xad, 0x85, 0xd1, 0x16, 0x4a, 0x07, 0x5c, 0x2c, 0xfc, 0x04, 0x08, - 0x75, 0x12, 0x9f, 0x00, 0x21, 0x86, 0xef, 0x85, 0xba, 0x0d, 0x3e, 0x52, 0x44, 0xef, 0x85, 0x01, - 0x89, 0xf7, 0xc2, 0x00, 0xf5, 0x0b, 0x98, 0x0c, 0x79, 0x59, 0xf6, 0x9c, 0x9a, 0x96, 0x21, 0x66, - 0x0f, 0x5b, 0x8b, 0xd9, 0xc3, 0xd6, 0xfd, 0x2c, 0x4b, 0x5d, 0x9e, 0x65, 0xaa, 0x09, 0xf3, 0x23, - 0xee, 0xe0, 0x0d, 0x5e, 0xbc, 0x74, 0xe5, 0x8b, 0xaf, 0x42, 0x0e, 0xcf, 0xeb, 0x89, 0xe9, 0xf9, - 0xe4, 0x1e, 0x64, 0xb0, 0xe6, 0x46, 0xe7, 0x09, 0xf1, 0x79, 0xf2, 0x2e, 0xc0, 0x77, 0xc5, 0x2e, - 0xc0, 0x11, 0xed, 0x10, 0x08, 0xef, 0xbe, 0x6d, 0xa1, 0x50, 0xb1, 0xa1, 0xb4, 0xc9, 0x51, 0x6a, - 0x08, 0x0d, 0x05, 0x87, 0xd2, 0xfe, 0x46, 0xb2, 0xad, 0x4c, 0x89, 0xb8, 0x76, 0x1f, 0x66, 0xd1, - 0xfa, 0x23, 0xda, 0x1f, 0xda, 0xae, 0xf9, 0x52, 0xb5, 0x07, 0x90, 0x47, 0xd5, 0x5d, 0xeb, 0xc8, - 0xbe, 0xa9, 0xee, 0x47, 0x40, 0x50, 0x77, 0x9b, 0xb6, 0xa9, 0x4f, 0x6f, 0xaa, 0xfd, 0x6b, 0x29, - 0x3c, 0x53, 0x66, 0xfa, 0xda, 0x95, 0xe5, 0x39, 0xcc, 0xea, 0x4d, 0xdf, 0x3c, 0xa3, 0xf5, 0xb0, - 0x9d, 0xf2, 0x1c, 0x94, 0x37, 0x67, 0x85, 0xb1, 0x82, 0x31, 0x56, 0x6e, 0xf5, 0xba, 0xc5, 0x65, - 0x2e, 0xcb, 0x51, 0xf1, 0xfc, 0xa6, 0x13, 0x1b, 0xda, 0x37, 0x12, 0x40, 0xac, 0x7a, 0x6d, 0x67, - 0xee, 0x83, 0x8c, 0x17, 0x6b, 0x30, 0x67, 0x3c, 0x4c, 0xa5, 0x09, 0x5e, 0x9f, 0x38, 0xfc, 0xd8, - 0x4e, 0xbc, 0x08, 0x88, 0x51, 0xa6, 0xda, 0xa6, 0xba, 0x17, 0xa9, 0xa6, 0x63, 0x55, 0x0e, 0x0f, - 0xaa, 0xc6, 0xa8, 0xf6, 0x12, 0xe6, 0xf1, 0xdc, 0x0e, 0x1d, 0x43, 0xf7, 0xe3, 0x36, 0xfd, 0xa1, - 0x38, 0xa6, 0x27, 0x93, 0xf2, 0xb2, 0xb9, 0xe1, 0x06, 0x6d, 0x28, 0x00, 0xa5, 0xa2, 0xfb, 0xcd, - 0xe3, 0x51, 0xd6, 0xbf, 0x80, 0xe9, 0x23, 0xdd, 0x64, 0x09, 0x9c, 0x78, 0x1a, 0x4a, 0xec, 0x45, - 0x52, 0x81, 0x67, 0x37, 0x57, 0x79, 0x36, 0xf8, 0x5c, 0xa6, 0x44, 0xbc, 0x1f, 0xef, 0x96, 0x4b, - 0xff, 0x8f, 0xf1, 0x0e, 0x58, 0xbf, 0x3a, 0xde, 0xa4, 0xc2, 0xf5, 0xe3, 0xdd, 0x50, 0x41, 0x16, - 0xbe, 0xe5, 0x89, 0x0c, 0x93, 0xe1, 0x32, 0x3f, 0xb6, 0x71, 0x07, 0x64, 0xe1, 0xa3, 0x8f, 0x4c, - 0x41, 0xb6, 0x66, 0x1b, 0x74, 0xcf, 0x76, 0xfd, 
0xfc, 0x18, 0x5b, 0x7d, 0x42, 0x75, 0xa3, 0xcd, - 0x44, 0xa5, 0x8d, 0xcf, 0x21, 0x1b, 0x4d, 0xb9, 0x04, 0x20, 0xf3, 0xec, 0xb0, 0x7a, 0x58, 0xdd, - 0xce, 0x8f, 0x31, 0xbe, 0xbd, 0x6a, 0x6d, 0x7b, 0xb7, 0xf6, 0x28, 0x2f, 0xb1, 0xc5, 0xfe, 0x61, - 0xad, 0xc6, 0x16, 0x29, 0x32, 0x0d, 0xb9, 0x83, 0xc3, 0xad, 0xad, 0x6a, 0x75, 0xbb, 0xba, 0x9d, - 0x4f, 0x33, 0xa5, 0x9d, 0x87, 0xbb, 0x4f, 0xaa, 0xdb, 0xf9, 0x71, 0x26, 0x77, 0x58, 0xfb, 0xb4, - 0xf6, 0xf4, 0xb3, 0x5a, 0x7e, 0x62, 0xf3, 0x0f, 0x59, 0xc8, 0xf0, 0xc1, 0x82, 0xfc, 0x04, 0x80, - 0xff, 0x85, 0x49, 0xbd, 0x38, 0xf2, 0x6b, 0x4d, 0x5d, 0x1a, 0x3d, 0x8d, 0x68, 0x2b, 0xbf, 0xfc, - 0xf3, 0xdf, 0x7f, 0x9b, 0x9a, 0xd7, 0x66, 0xca, 0x67, 0x77, 0xcb, 0x27, 0x76, 0x23, 0xfc, 0x07, - 0xf2, 0x03, 0x69, 0x83, 0x7c, 0x06, 0xc0, 0x0b, 0x65, 0x92, 0x37, 0xf1, 0xe9, 0xa2, 0x2e, 0x23, - 0x3c, 0x5c, 0x50, 0x87, 0x89, 0x79, 0xb5, 0x64, 0xc4, 0x3f, 0x83, 0xa9, 0x3e, 0xf1, 0x01, 0xf5, - 0x89, 0x22, 0x94, 0x8d, 0x24, 0xfb, 0x52, 0x89, 0xff, 0xef, 0xb9, 0x14, 0xfd, 0x53, 0xb9, 0x54, - 0x65, 0x97, 0xa5, 0xad, 0x22, 0xf9, 0x92, 0x36, 0x17, 0x92, 0x7b, 0xd4, 0x17, 0xf8, 0x2d, 0xc8, - 0x8b, 0x33, 0x30, 0xba, 0x7f, 0x6b, 0xf4, 0x74, 0xcc, 0xcd, 0xac, 0x5e, 0x36, 0x3a, 0x6b, 0x45, - 0x34, 0xb6, 0xa2, 0x2d, 0x44, 0x91, 0x08, 0x63, 0x30, 0x65, 0xf6, 0x1e, 0x81, 0xcc, 0x13, 0x8d, - 0x0f, 0x68, 0xc2, 0x2b, 0xb8, 0x30, 0x80, 0x05, 0xe4, 0x9c, 0xd1, 0x72, 0x8c, 0x13, 0xb3, 0x97, - 0x11, 0x35, 0x61, 0x4a, 0x20, 0xf2, 0xc8, 0x4c, 0xcc, 0xc4, 0x9a, 0x9e, 0x7a, 0x1b, 0xd7, 0x17, - 0xbd, 0x07, 0xed, 0xfb, 0x48, 0x5a, 0xd0, 0x56, 0x18, 0x69, 0x83, 0x49, 0x51, 0xa3, 0xdc, 0x44, - 0x99, 0xf0, 0x85, 0x30, 0x23, 0x35, 0x90, 0x79, 0x19, 0xb8, 0xbe, 0xb7, 0xb7, 0x90, 0x78, 0x51, - 0xcd, 0xf7, 0xbd, 0x2d, 0xff, 0x82, 0x15, 0xdf, 0xaf, 0x42, 0xa7, 0x05, 0xbe, 0xab, 0x9d, 0x4e, - 0xd6, 0xa0, 0xc8, 0x69, 0x35, 0xe1, 0x74, 0x80, 0x32, 0x82, 0xd3, 0x9f, 0x83, 0xcc, 0x3b, 0x1c, - 0x77, 0x7a, 0x39, 0xb6, 0x91, 0x68, 0x7c, 0x17, 0x46, 0xa0, 0xa0, 0x15, 0xb2, 0x31, 0x14, 0x01, - 0xd9, 0x81, 0xec, 0x23, 0xea, 0x73, 0xda, 0x85, 0x98, 0x36, 0x6e, 0xe3, 0xaa, 0x70, 0x42, 0x11, - 0x0f, 0x19, 0xe6, 0x79, 0x0e, 0x53, 0x11, 0x0f, 0xf6, 0xaf, 0xc5, 0x58, 0x4b, 0xe8, 0xeb, 0xea, - 0x4c, 0x12, 0xd6, 0x6e, 0x23, 0xe1, 0x32, 0x59, 0x1c, 0x24, 0x2c, 0x9b, 0x8c, 0xe5, 0x01, 0x64, - 0x3e, 0xc1, 0x1f, 0x4a, 0xc8, 0x05, 0x91, 0xa9, 0xfc, 0xf1, 0x70, 0xa1, 0xad, 0x63, 0xda, 0x3c, - 0xed, 0x57, 0xbb, 0x2f, 0xbf, 0xfb, 0x5b, 0x61, 0xec, 0xeb, 0x57, 0x05, 0xe9, 0x4f, 0xaf, 0x0a, - 0xd2, 0xb7, 0xaf, 0x0a, 0xd2, 0x5f, 0x5f, 0x15, 0xa4, 0xdf, 0xbc, 0x2e, 0x8c, 0x7d, 0xfb, 0xba, - 0x30, 0xf6, 0xdd, 0xeb, 0xc2, 0xd8, 0x4f, 0x7f, 0x28, 0xfc, 0x76, 0xa3, 0xbb, 0x1d, 0xdd, 0xd0, - 0x1d, 0xd7, 0x66, 0x63, 0x62, 0xb8, 0x2a, 0x87, 0x3f, 0xd6, 0x7c, 0x93, 0x5a, 0x78, 0x88, 0xc0, - 0x1e, 0xdf, 0x2e, 0xed, 0xda, 0xa5, 0x87, 0x8e, 0xd9, 0xc8, 0xa0, 0x2f, 0x1f, 0xfc, 0x37, 0x00, - 0x00, 0xff, 0xff, 0x79, 0x14, 0x32, 0x29, 0x7e, 0x1a, 0x00, 0x00, + // 2205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x8a, 0x12, 0x25, 0x3e, 0xea, 0x83, 0x1a, 0x7d, 0xad, 0xd6, 0x32, 0xa9, 0x6e, 0x9a, + 0x56, 0x16, 0x12, 0x12, 0x56, 0x1a, 0xc0, 0x76, 0x03, 0x04, 0xa6, 0x44, 0x3b, 0x72, 0x0c, 0x5a, + 0x96, 0xac, 0x26, 0xe9, 0xa1, 0xcc, 0x72, 0x77, 0x44, 0xad, 0xb4, 0xdc, 0x5d, 0xef, 0x0e, 0x65, + 0xb8, 0x45, 0x80, 0xa0, 0x97, 0xa2, 0xb7, 0x02, 0x3d, 0xf6, 0xd2, 0x73, 0xfa, 0x8f, 0xf4, 0x18, + 0xa0, 0x97, 0xf4, 0x42, 0xb4, 0x76, 0x3f, 0x00, 0xde, 
0x7a, 0xef, 0xa1, 0x98, 0x37, 0xbb, 0xdc, + 0x59, 0x92, 0xb2, 0x25, 0x03, 0x6e, 0x6e, 0x9a, 0xdf, 0xbc, 0xf7, 0x7b, 0xef, 0xcd, 0xbc, 0x79, + 0xef, 0xad, 0x08, 0x4b, 0xfe, 0x59, 0xab, 0x62, 0xf8, 0x76, 0x25, 0xec, 0x34, 0xdb, 0x36, 0x2b, + 0xfb, 0x81, 0xc7, 0x3c, 0x92, 0x31, 0x7c, 0x5b, 0xbb, 0xd6, 0xf2, 0xbc, 0x96, 0x43, 0x2b, 0x08, + 0x35, 0x3b, 0xc7, 0x15, 0xda, 0xf6, 0xd9, 0x73, 0x21, 0xa1, 0xe9, 0x67, 0xb7, 0xc2, 0xb2, 0xed, + 0xa1, 0xaa, 0xe9, 0x05, 0xb4, 0x72, 0x7e, 0xb3, 0xd2, 0xa2, 0x2e, 0x0d, 0x0c, 0x46, 0xad, 0x48, + 0x66, 0x3d, 0x22, 0xe0, 0x32, 0x86, 0xeb, 0x7a, 0xcc, 0x60, 0xb6, 0xe7, 0x86, 0xd1, 0xee, 0xfb, + 0x2d, 0x9b, 0x9d, 0x74, 0x9a, 0x65, 0xd3, 0x6b, 0x57, 0x5a, 0x5e, 0xcb, 0x4b, 0xec, 0xf0, 0x15, + 0x2e, 0xf0, 0xaf, 0x48, 0xbc, 0xef, 0xe8, 0x09, 0x35, 0x1c, 0x76, 0x22, 0x50, 0xbd, 0x97, 0x83, + 0xa5, 0x07, 0x5e, 0xf3, 0x10, 0x9d, 0x3f, 0xa0, 0x4f, 0x3b, 0x34, 0x64, 0x7b, 0x8c, 0xb6, 0xc9, + 0x36, 0x4c, 0xfb, 0x81, 0xed, 0x05, 0x36, 0x7b, 0xae, 0x2a, 0x1b, 0xca, 0xa6, 0x52, 0x5d, 0xe9, + 0x75, 0x4b, 0x24, 0xc6, 0xde, 0xf3, 0xda, 0x36, 0xc3, 0x78, 0x0e, 0xfa, 0x72, 0xe4, 0x43, 0xc8, + 0xb9, 0x46, 0x9b, 0x86, 0xbe, 0x61, 0x52, 0x35, 0xb3, 0xa1, 0x6c, 0xe6, 0xaa, 0xab, 0xbd, 0x6e, + 0x69, 0xb1, 0x0f, 0x4a, 0x5a, 0x89, 0x24, 0xf9, 0x00, 0x72, 0xa6, 0x63, 0x53, 0x97, 0x35, 0x6c, + 0x4b, 0x9d, 0x46, 0x35, 0xb4, 0x25, 0xc0, 0x3d, 0x4b, 0xb6, 0x15, 0x63, 0xe4, 0x10, 0xb2, 0x8e, + 0xd1, 0xa4, 0x4e, 0xa8, 0x4e, 0x6c, 0x64, 0x36, 0xf3, 0xdb, 0xef, 0x96, 0x0d, 0xdf, 0x2e, 0x8f, + 0x0a, 0xa5, 0xfc, 0x10, 0xe5, 0x6a, 0x2e, 0x0b, 0x9e, 0x57, 0x97, 0x7a, 0xdd, 0x52, 0x41, 0x28, + 0x4a, 0xb4, 0x11, 0x15, 0x69, 0x41, 0x5e, 0x3a, 0x67, 0x75, 0x12, 0x99, 0xb7, 0x2e, 0x66, 0xbe, + 0x9b, 0x08, 0x0b, 0xfa, 0xb5, 0x5e, 0xb7, 0xb4, 0x2c, 0x51, 0x48, 0x36, 0x64, 0x66, 0xf2, 0x1b, + 0x05, 0x96, 0x02, 0xfa, 0xb4, 0x63, 0x07, 0xd4, 0x6a, 0xb8, 0x9e, 0x45, 0x1b, 0x51, 0x30, 0x59, + 0x34, 0x79, 0xf3, 0x62, 0x93, 0x07, 0x91, 0x56, 0xdd, 0xb3, 0xa8, 0x1c, 0x98, 0xde, 0xeb, 0x96, + 0xd6, 0x83, 0xa1, 0xcd, 0xc4, 0x01, 0x55, 0x39, 0x20, 0xc3, 0xfb, 0xe4, 0x11, 0x4c, 0xfb, 0x9e, + 0xd5, 0x08, 0x7d, 0x6a, 0xaa, 0xe3, 0x1b, 0xca, 0x66, 0x7e, 0xfb, 0x5a, 0x59, 0xa4, 0x26, 0xfa, + 0xc0, 0x53, 0xb3, 0x7c, 0x7e, 0xb3, 0xbc, 0xef, 0x59, 0x87, 0x3e, 0x35, 0xf1, 0x3e, 0x17, 0x7c, + 0xb1, 0x48, 0x71, 0x4f, 0x45, 0x20, 0xd9, 0x87, 0x5c, 0x4c, 0x18, 0xaa, 0x53, 0x18, 0xce, 0x2b, + 0x19, 0x45, 0x5a, 0x89, 0x45, 0x98, 0x4a, 0xab, 0x08, 0x23, 0x3b, 0x30, 0x65, 0xbb, 0xad, 0x80, + 0x86, 0xa1, 0x9a, 0x43, 0x3e, 0x82, 0x44, 0x7b, 0x02, 0xdb, 0xf1, 0xdc, 0x63, 0xbb, 0x55, 0x5d, + 0xe6, 0x8e, 0x45, 0x62, 0x12, 0x4b, 0xac, 0x49, 0xee, 0xc1, 0x74, 0x48, 0x83, 0x73, 0xdb, 0xa4, + 0xa1, 0x0a, 0x12, 0xcb, 0xa1, 0x00, 0x23, 0x16, 0x74, 0x26, 0x96, 0x93, 0x9d, 0x89, 0x31, 0x9e, + 0xe3, 0xa1, 0x79, 0x42, 0xad, 0x8e, 0x43, 0x03, 0x35, 0x9f, 0xe4, 0x78, 0x1f, 0x94, 0x73, 0xbc, + 0x0f, 0x92, 0x3d, 0x58, 0x78, 0xda, 0xa1, 0x1d, 0xda, 0x60, 0xcc, 0x69, 0x84, 0xd4, 0xf4, 0x5c, + 0x2b, 0x54, 0x67, 0x36, 0x94, 0xcd, 0x4c, 0xf5, 0x7a, 0xaf, 0x5b, 0x5a, 0xc3, 0xcd, 0x27, 0xcc, + 0x39, 0x14, 0x5b, 0x12, 0xc9, 0xfc, 0xc0, 0x96, 0x66, 0x40, 0x5e, 0xba, 0x78, 0xf2, 0x0e, 0x64, + 0xce, 0xa8, 0x78, 0xa3, 0xb9, 0xea, 0x42, 0xaf, 0x5b, 0x9a, 0x3d, 0xa3, 0xf2, 0xf3, 0xe4, 0xbb, + 0xe4, 0x06, 0x4c, 0x9e, 0x1b, 0x4e, 0x87, 0xe2, 0x15, 0xe7, 0xaa, 0x8b, 0xbd, 0x6e, 0x69, 0x1e, + 0x01, 0x49, 0x50, 0x48, 0xdc, 0x19, 0xbf, 0xa5, 0x68, 0xc7, 0x50, 0x18, 0x4c, 0xed, 0xb7, 0x62, + 0xa7, 0x0d, 0xab, 0x17, 0xe4, 0xf3, 0xdb, 0x30, 0xa7, 0xff, 0x27, 0x03, 0xb3, 
0xa9, 0xac, 0x21, + 0x77, 0x60, 0x82, 0x3d, 0xf7, 0x29, 0x9a, 0x99, 0xdb, 0x2e, 0xc8, 0x79, 0xf5, 0xe4, 0xb9, 0x4f, + 0xb1, 0x5c, 0xcc, 0x71, 0x89, 0x54, 0xae, 0xa3, 0x0e, 0x37, 0xee, 0x7b, 0x01, 0x0b, 0xd5, 0xf1, + 0x8d, 0xcc, 0xe6, 0xac, 0x30, 0x8e, 0x80, 0x6c, 0x1c, 0x01, 0xf2, 0x65, 0xba, 0xae, 0x64, 0x30, + 0xff, 0xde, 0x19, 0xce, 0xe2, 0x37, 0x2f, 0x28, 0xb7, 0x21, 0xcf, 0x9c, 0xb0, 0x41, 0x5d, 0xa3, + 0xe9, 0x50, 0x4b, 0x9d, 0xd8, 0x50, 0x36, 0xa7, 0xab, 0x6a, 0xaf, 0x5b, 0x5a, 0x62, 0xfc, 0x44, + 0x11, 0x95, 0x74, 0x21, 0x41, 0xb1, 0xfc, 0xd2, 0x80, 0x35, 0x78, 0x41, 0x56, 0x27, 0xa5, 0xf2, + 0x4b, 0x03, 0x56, 0x37, 0xda, 0x34, 0x55, 0x7e, 0x23, 0x8c, 0x7c, 0x0c, 0xb3, 0x9d, 0x90, 0x36, + 0x4c, 0xa7, 0x13, 0x32, 0x1a, 0xec, 0xed, 0xab, 0x59, 0xb4, 0xa8, 0xf5, 0xba, 0xa5, 0x95, 0x4e, + 0x48, 0x77, 0x62, 0x5c, 0x52, 0x9e, 0x91, 0xf1, 0xff, 0x57, 0x8a, 0xe9, 0x0c, 0x66, 0x53, 0x4f, + 0x9c, 0xdc, 0x1a, 0x71, 0xe5, 0x91, 0x04, 0x5e, 0x39, 0x19, 0xbe, 0xf2, 0x2b, 0x5f, 0xb8, 0xfe, + 0x57, 0x05, 0x0a, 0x83, 0xe5, 0x9b, 0xeb, 0xe3, 0x5b, 0x8e, 0x02, 0x44, 0x7d, 0x04, 0x64, 0x7d, + 0x04, 0xc8, 0x4f, 0x00, 0x4e, 0xbd, 0x66, 0x23, 0xa4, 0xd8, 0x13, 0xc7, 0x93, 0x4b, 0x39, 0xf5, + 0x9a, 0x87, 0x74, 0xa0, 0x27, 0xc6, 0x18, 0xb1, 0x60, 0x81, 0x6b, 0x05, 0xc2, 0x5e, 0x83, 0x0b, + 0xc4, 0xc9, 0xb6, 0x76, 0x61, 0x47, 0x11, 0xf5, 0xe7, 0xd4, 0x6b, 0x4a, 0x58, 0xaa, 0xfe, 0x0c, + 0x6c, 0xe9, 0xff, 0x15, 0xb1, 0xed, 0x18, 0xae, 0x49, 0x9d, 0x38, 0xb6, 0x2d, 0xc8, 0x72, 0xd3, + 0xb6, 0x25, 0x07, 0x77, 0xea, 0x35, 0x53, 0x9e, 0x4e, 0x22, 0xf0, 0x86, 0xc1, 0xf5, 0x4f, 0x2f, + 0xf3, 0xda, 0xd3, 0x7b, 0x1f, 0xa6, 0x84, 0x33, 0x62, 0x38, 0xc8, 0x89, 0xae, 0x8f, 0xc6, 0x53, + 0x5d, 0x5f, 0x20, 0xe4, 0x3d, 0xc8, 0x06, 0xd4, 0x08, 0x3d, 0x37, 0xca, 0x7e, 0x94, 0x16, 0x88, + 0x2c, 0x2d, 0x10, 0xfd, 0x9f, 0x0a, 0x2c, 0x3e, 0x40, 0xa7, 0xd2, 0x27, 0x90, 0x8e, 0x4a, 0xb9, + 0x6a, 0x54, 0xe3, 0xaf, 0x8d, 0xea, 0x63, 0xc8, 0x1e, 0xdb, 0x0e, 0xa3, 0x01, 0x9e, 0x40, 0x7e, + 0x7b, 0xa1, 0x7f, 0xa5, 0x94, 0xdd, 0xc3, 0x0d, 0xe1, 0xb9, 0x10, 0x92, 0x3d, 0x17, 0x88, 0x14, + 0xe7, 0xc4, 0x25, 0xe2, 0xfc, 0x14, 0x66, 0x64, 0x6e, 0xf2, 0x53, 0xc8, 0x86, 0xcc, 0x60, 0x34, + 0x54, 0x95, 0x8d, 0xcc, 0xe6, 0xdc, 0xf6, 0x6c, 0xdf, 0x3c, 0x47, 0x05, 0x99, 0x10, 0x90, 0xc9, + 0x04, 0xa2, 0xff, 0x4b, 0x81, 0x95, 0x07, 0x3c, 0x8f, 0xa2, 0x59, 0xd1, 0xfe, 0x25, 0x8d, 0xcf, + 0x4d, 0xba, 0x2c, 0xe5, 0x12, 0x97, 0xf5, 0xd6, 0x93, 0xe7, 0x23, 0x98, 0x71, 0xe9, 0xb3, 0x46, + 0x7f, 0xf8, 0x9d, 0xc0, 0xe1, 0x17, 0xeb, 0xb0, 0x4b, 0x9f, 0xed, 0x0f, 0xcf, 0xbf, 0x79, 0x09, + 0xd6, 0xff, 0x34, 0x0e, 0xab, 0x43, 0x81, 0x86, 0xbe, 0xe7, 0x86, 0x94, 0xfc, 0x41, 0x01, 0x35, + 0x48, 0x36, 0xb0, 0xf2, 0x35, 0x02, 0x1a, 0x76, 0x1c, 0x26, 0x62, 0xcf, 0x6f, 0xdf, 0x8e, 0x0f, + 0x75, 0x14, 0x41, 0xf9, 0x60, 0x40, 0xf9, 0x40, 0xe8, 0x8a, 0x4e, 0xf1, 0x6e, 0xaf, 0x5b, 0xfa, + 0x41, 0x30, 0x5a, 0x42, 0xf2, 0x76, 0xf5, 0x02, 0x11, 0x2d, 0x80, 0xf5, 0x57, 0xf1, 0xbf, 0x95, + 0xe2, 0xec, 0xc2, 0xb2, 0x54, 0x92, 0x44, 0x94, 0xf8, 0xf5, 0x71, 0x95, 0x72, 0x72, 0x03, 0x26, + 0x69, 0x10, 0x78, 0x81, 0x6c, 0x13, 0x01, 0x59, 0x14, 0x01, 0xfd, 0x2b, 0x58, 0x18, 0xb2, 0x47, + 0x4e, 0x80, 0x88, 0xaa, 0x29, 0xd6, 0x51, 0xd9, 0x14, 0xf7, 0xa1, 0x0d, 0x96, 0xcd, 0xc4, 0xc7, + 0x6a, 0xb1, 0xd7, 0x2d, 0x69, 0x58, 0x1c, 0x13, 0x50, 0x3e, 0xe9, 0xc2, 0xe0, 0x9e, 0xfe, 0x75, + 0x16, 0x26, 0x1f, 0x63, 0x92, 0xfd, 0x08, 0x26, 0xb0, 0xdd, 0x8a, 0xe8, 0xb0, 0xe5, 0xb8, 0xe9, + 0x56, 0x8b, 0xfb, 0xa4, 0x06, 0xf3, 0x71, 0x22, 0x36, 0x8e, 0x0d, 0x93, 0x45, 0x51, 0x2a, 0xd5, + 0xf5, 
0x5e, 0xb7, 0xa4, 0xc6, 0x5b, 0xf7, 0x70, 0x47, 0x52, 0x9e, 0x4b, 0xef, 0xf0, 0xe9, 0xa0, + 0x13, 0xd2, 0xa0, 0xe1, 0x3d, 0x73, 0x69, 0x20, 0x5a, 0x42, 0x4e, 0x4c, 0x07, 0x1c, 0x7e, 0x84, + 0xa8, 0x3c, 0x1d, 0x24, 0x28, 0x7f, 0x0e, 0xad, 0xc0, 0xeb, 0xf8, 0xb1, 0xae, 0x28, 0xa8, 0xf8, + 0x1c, 0x10, 0x1f, 0x52, 0xce, 0x4b, 0x30, 0xa1, 0x30, 0x1f, 0xd0, 0xd0, 0xeb, 0x04, 0x26, 0x6d, + 0x38, 0x76, 0xdb, 0x66, 0xf1, 0x47, 0x55, 0x11, 0x0f, 0x16, 0x0f, 0xa3, 0x7c, 0x10, 0x49, 0x3c, + 0x44, 0x01, 0x91, 0xcd, 0x18, 0x5f, 0x90, 0xda, 0x90, 0xe3, 0x4b, 0xef, 0x90, 0x43, 0xc8, 0xfb, + 0x34, 0x68, 0xdb, 0x61, 0x88, 0xf3, 0x95, 0xf8, 0x88, 0x5a, 0x91, 0x4c, 0xec, 0x27, 0xbb, 0xc2, + 0x77, 0x49, 0x5c, 0xf6, 0x5d, 0x82, 0xb5, 0x7f, 0x2b, 0x90, 0x97, 0xf4, 0xc8, 0x01, 0x4c, 0x87, + 0x9d, 0xe6, 0x29, 0x35, 0xfb, 0xaf, 0xb5, 0x38, 0xda, 0x42, 0xf9, 0x50, 0x88, 0x45, 0x5f, 0x13, + 0x91, 0x4e, 0xea, 0x6b, 0x22, 0xc2, 0xf0, 0xbd, 0xd0, 0xa0, 0x29, 0x46, 0x8a, 0xf8, 0xbd, 0x70, + 0x20, 0xf5, 0x5e, 0x38, 0xa0, 0x7d, 0x01, 0x53, 0x11, 0x2f, 0xcf, 0x9e, 0x33, 0xdb, 0xb5, 0xe4, + 0xec, 0xe1, 0x6b, 0x39, 0x7b, 0xf8, 0xba, 0x9f, 0x65, 0xe3, 0xaf, 0xce, 0x32, 0xcd, 0x86, 0xc5, + 0x11, 0x77, 0xf0, 0x06, 0x2f, 0x5e, 0x79, 0xed, 0x8b, 0xaf, 0x41, 0x0e, 0xcf, 0xeb, 0xa1, 0x1d, + 0x32, 0x72, 0x0b, 0xb2, 0x58, 0x73, 0xe3, 0xf3, 0x84, 0xe4, 0x3c, 0x45, 0x17, 0x10, 0xbb, 0x72, + 0x17, 0x10, 0x88, 0x7e, 0x04, 0x44, 0x74, 0x5f, 0x47, 0x2a, 0x54, 0x7c, 0x28, 0x35, 0x05, 0x4a, + 0x2d, 0xa9, 0xa1, 0xe0, 0x50, 0xda, 0xdf, 0x48, 0xb7, 0x95, 0x19, 0x19, 0xd7, 0x6f, 0xc3, 0x3c, + 0x5a, 0xbf, 0x4f, 0xfb, 0x43, 0xdb, 0x25, 0x5f, 0xaa, 0x7e, 0x07, 0x0a, 0xa8, 0xba, 0xe7, 0x1e, + 0x7b, 0x57, 0xd5, 0xfd, 0x08, 0x08, 0xea, 0xee, 0x52, 0x87, 0x32, 0x7a, 0x55, 0xed, 0xdf, 0x2a, + 0xd1, 0x99, 0x72, 0xd3, 0x97, 0xae, 0x2c, 0x4f, 0x60, 0xde, 0x30, 0x99, 0x7d, 0x4e, 0x1b, 0x51, + 0x3b, 0x15, 0x39, 0x98, 0xdf, 0x9e, 0x97, 0xc6, 0x0a, 0xce, 0x58, 0xbd, 0xd6, 0xeb, 0x96, 0x56, + 0x85, 0xac, 0x40, 0xe5, 0xf3, 0x9b, 0x4d, 0x6d, 0xe8, 0xdf, 0x28, 0x00, 0x89, 0xea, 0xa5, 0x9d, + 0xb9, 0x0d, 0x79, 0xbc, 0x58, 0x8b, 0x3b, 0x13, 0x62, 0x2a, 0x4d, 0x8a, 0xfa, 0x24, 0xe0, 0x07, + 0x5e, 0xea, 0x45, 0x40, 0x82, 0x72, 0x55, 0x87, 0x1a, 0x61, 0xac, 0x9a, 0x49, 0x54, 0x05, 0x3c, + 0xa8, 0x9a, 0xa0, 0xfa, 0x33, 0x58, 0xc4, 0x73, 0x3b, 0xf2, 0x2d, 0x83, 0x25, 0x6d, 0xfa, 0x43, + 0x79, 0x4c, 0x4f, 0x27, 0xe5, 0xab, 0xe6, 0x86, 0x2b, 0xb4, 0xa1, 0x0e, 0xa8, 0x55, 0x83, 0x99, + 0x27, 0xa3, 0xac, 0x7f, 0x01, 0xb3, 0xc7, 0x86, 0xcd, 0x13, 0x38, 0xf5, 0x34, 0xd4, 0xc4, 0x8b, + 0xb4, 0x82, 0xc8, 0x6e, 0xa1, 0xf2, 0x78, 0xf0, 0xb9, 0xcc, 0xc8, 0x78, 0x3f, 0xde, 0x9d, 0x80, + 0x7e, 0x8f, 0xf1, 0x0e, 0x58, 0x7f, 0x7d, 0xbc, 0x69, 0x85, 0xcb, 0xc7, 0xbb, 0xa5, 0x41, 0x5e, + 0xfa, 0x96, 0x27, 0x79, 0x98, 0x8a, 0x96, 0x85, 0xb1, 0xad, 0x1b, 0x90, 0x97, 0x3e, 0xfa, 0xc8, + 0x0c, 0x4c, 0xd7, 0x3d, 0x8b, 0xee, 0x7b, 0x01, 0x2b, 0x8c, 0xf1, 0xd5, 0x27, 0xd4, 0xb0, 0x1c, + 0x2e, 0xaa, 0x6c, 0x7d, 0x0e, 0xd3, 0xf1, 0x94, 0x4b, 0x00, 0xb2, 0x8f, 0x8f, 0x6a, 0x47, 0xb5, + 0xdd, 0xc2, 0x18, 0xe7, 0xdb, 0xaf, 0xd5, 0x77, 0xf7, 0xea, 0xf7, 0x0b, 0x0a, 0x5f, 0x1c, 0x1c, + 0xd5, 0xeb, 0x7c, 0x31, 0x4e, 0x66, 0x21, 0x77, 0x78, 0xb4, 0xb3, 0x53, 0xab, 0xed, 0xd6, 0x76, + 0x0b, 0x19, 0xae, 0x74, 0xef, 0xee, 0xde, 0xc3, 0xda, 0x6e, 0x61, 0x82, 0xcb, 0x1d, 0xd5, 0x3f, + 0xad, 0x3f, 0xfa, 0xac, 0x5e, 0x98, 0xdc, 0xfe, 0xe3, 0x34, 0x64, 0xc5, 0x60, 0x41, 0x7e, 0x06, + 0x20, 0xfe, 0xc2, 0xa4, 0x5e, 0x1e, 0xf9, 0xb5, 0xa6, 0xad, 0x8c, 0x9e, 0x46, 0xf4, 0xb5, 0x5f, + 0xff, 0xe5, 0x1f, 0xbf, 0x1f, 
0x5f, 0xd4, 0xe7, 0x2a, 0xe7, 0x37, 0x2b, 0xa7, 0x5e, 0x33, 0xfa, + 0x5f, 0xf4, 0x1d, 0x65, 0x8b, 0x7c, 0x06, 0x20, 0x0a, 0x65, 0x9a, 0x37, 0xf5, 0xe9, 0xa2, 0xad, + 0x22, 0x3c, 0x5c, 0x50, 0x87, 0x89, 0x45, 0xb5, 0xe4, 0xc4, 0xbf, 0x80, 0x99, 0x3e, 0xf1, 0x21, + 0x65, 0x44, 0x95, 0xca, 0x46, 0x9a, 0x7d, 0xa5, 0x2c, 0xfe, 0x8d, 0x5d, 0x8e, 0xff, 0x3f, 0x5d, + 0xae, 0xf1, 0xcb, 0xd2, 0xd7, 0x91, 0x7c, 0x45, 0x5f, 0x88, 0xc8, 0x43, 0xca, 0x24, 0x7e, 0x17, + 0x0a, 0xf2, 0x0c, 0x8c, 0xee, 0x5f, 0x1b, 0x3d, 0x1d, 0x0b, 0x33, 0xeb, 0xaf, 0x1a, 0x9d, 0xf5, + 0x12, 0x1a, 0x5b, 0xd3, 0x97, 0xe2, 0x48, 0xa4, 0x31, 0x98, 0x72, 0x7b, 0xf7, 0x21, 0x2f, 0x12, + 0x4d, 0x0c, 0x68, 0xd2, 0x2b, 0xb8, 0x30, 0x80, 0x25, 0xe4, 0x9c, 0xd3, 0x73, 0x9c, 0x13, 0xb3, + 0x97, 0x13, 0x99, 0x30, 0x23, 0x11, 0x85, 0x64, 0x2e, 0x61, 0xe2, 0x4d, 0x4f, 0xbb, 0x8e, 0xeb, + 0x8b, 0xde, 0x83, 0xfe, 0x43, 0x24, 0x2d, 0xea, 0x6b, 0x9c, 0xb4, 0xc9, 0xa5, 0xa8, 0x55, 0x31, + 0x51, 0x26, 0x7a, 0x21, 0xdc, 0x48, 0x1d, 0xf2, 0xa2, 0x0c, 0x5c, 0xde, 0xdb, 0x6b, 0x48, 0xbc, + 0xac, 0x15, 0xfa, 0xde, 0x56, 0x7e, 0xc5, 0x8b, 0xef, 0x57, 0x91, 0xd3, 0x12, 0xdf, 0xeb, 0x9d, + 0x4e, 0xd7, 0xa0, 0xd8, 0x69, 0x2d, 0xe5, 0x74, 0x07, 0x65, 0x24, 0xa7, 0x3f, 0x87, 0xbc, 0xe8, + 0x70, 0xc2, 0xe9, 0xd5, 0xc4, 0x46, 0xaa, 0xf1, 0x5d, 0x18, 0x81, 0x8a, 0x56, 0xc8, 0xd6, 0x50, + 0x04, 0xe4, 0x1e, 0x4c, 0xdf, 0xa7, 0x4c, 0xd0, 0x2e, 0x25, 0xb4, 0x49, 0x1b, 0xd7, 0xa4, 0x13, + 0x8a, 0x79, 0xc8, 0x30, 0xcf, 0x13, 0x98, 0x89, 0x79, 0xb0, 0x7f, 0x2d, 0x27, 0x5a, 0x52, 0x5f, + 0xd7, 0xe6, 0xd2, 0xb0, 0x7e, 0x1d, 0x09, 0x57, 0xc9, 0xf2, 0x20, 0x61, 0xc5, 0xe6, 0x2c, 0x77, + 0x20, 0xfb, 0x09, 0xfe, 0xe6, 0x42, 0x2e, 0x88, 0x4c, 0x13, 0x8f, 0x47, 0x08, 0xed, 0x9c, 0x50, + 0xf3, 0xac, 0x5f, 0xed, 0xbe, 0xfc, 0xee, 0xef, 0xc5, 0xb1, 0xaf, 0x5f, 0x14, 0x95, 0x3f, 0xbf, + 0x28, 0x2a, 0xdf, 0xbe, 0x28, 0x2a, 0x7f, 0x7b, 0x51, 0x54, 0x7e, 0xf7, 0xb2, 0x38, 0xf6, 0xed, + 0xcb, 0xe2, 0xd8, 0x77, 0x2f, 0x8b, 0x63, 0x3f, 0xff, 0xb1, 0xf4, 0x33, 0x90, 0x11, 0xb4, 0x0d, + 0xcb, 0xf0, 0x03, 0x8f, 0x8f, 0x89, 0xd1, 0xaa, 0x12, 0xfd, 0xee, 0xf3, 0xcd, 0xf8, 0xd2, 0x5d, + 0x04, 0xf6, 0xc5, 0x76, 0x79, 0xcf, 0x2b, 0xdf, 0xf5, 0xed, 0x66, 0x16, 0x7d, 0xf9, 0xe0, 0x7f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xde, 0x6b, 0x5b, 0xc9, 0x1a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -2218,6 +2229,11 @@ func (m *JobSubmitRequestItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.QueueTtlSeconds != 0 { + i = encodeVarintSubmit(dAtA, i, uint64(m.QueueTtlSeconds)) + i-- + dAtA[i] = 0x60 + } if len(m.Scheduler) > 0 { i -= len(m.Scheduler) copy(dAtA[i:], m.Scheduler) @@ -3521,6 +3537,9 @@ func (m *JobSubmitRequestItem) Size() (n int) { if l > 0 { n += 1 + l + sovSubmit(uint64(l)) } + if m.QueueTtlSeconds != 0 { + n += 1 + sovSubmit(uint64(m.QueueTtlSeconds)) + } return n } @@ -4068,6 +4087,7 @@ func (this *JobSubmitRequestItem) String() string { `Ingress:` + repeatedStringForIngress + `,`, `Services:` + repeatedStringForServices + `,`, `Scheduler:` + fmt.Sprintf("%v", this.Scheduler) + `,`, + `QueueTtlSeconds:` + fmt.Sprintf("%v", this.QueueTtlSeconds) + `,`, `}`, }, "") return s @@ -5076,6 +5096,25 @@ func (m *JobSubmitRequestItem) Unmarshal(dAtA []byte) error { } m.Scheduler = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) + } + m.QueueTtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueTtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipSubmit(dAtA[iNdEx:]) diff --git a/pkg/api/submit.proto b/pkg/api/submit.proto index 7e47f3ffab6..0de048b6ecd 100644 --- a/pkg/api/submit.proto +++ b/pkg/api/submit.proto @@ -29,6 +29,8 @@ message JobSubmitRequestItem { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. string scheduler = 11; + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + int64 queue_ttl_seconds = 12; } message IngressConfig { @@ -90,7 +92,7 @@ enum JobState { PENDING = 1; RUNNING = 2; SUCCEEDED = 3; - FAILED = 4; + FAILED = 4; UNKNOWN = 5; } diff --git a/pkg/api/testspec.pb.go b/pkg/api/testspec.pb.go index dd3e172cde4..ab9307f5034 100644 --- a/pkg/api/testspec.pb.go +++ b/pkg/api/testspec.pb.go @@ -93,6 +93,10 @@ type TestSpec struct { RandomClientId bool `protobuf:"varint,11,opt,name=random_client_id,json=randomClientId,proto3" json:"randomClientId,omitempty"` // Toggle should testsuite scrape Armada Job (pod) logs GetLogs bool `protobuf:"varint,12,opt,name=get_logs,json=getLogs,proto3" json:"getLogs,omitempty"` + // Value of the environment label set on exported Prometheus metrics. + Environment string `protobuf:"bytes,13,opt,name=environment,proto3" json:"environment,omitempty"` + // Value of the target label set on exported Prometheus metrics. 
+ Target string `protobuf:"bytes,14,opt,name=target,proto3" json:"target,omitempty"` } func (m *TestSpec) Reset() { *m = TestSpec{} } @@ -211,6 +215,20 @@ func (m *TestSpec) GetGetLogs() bool { return false } +func (m *TestSpec) GetEnvironment() string { + if m != nil { + return m.Environment + } + return "" +} + +func (m *TestSpec) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + func init() { proto.RegisterEnum("api.TestSpec_Cancel", TestSpec_Cancel_name, TestSpec_Cancel_value) proto.RegisterType((*TestSpec)(nil), "api.TestSpec") @@ -219,48 +237,50 @@ func init() { func init() { proto.RegisterFile("pkg/api/testspec.proto", fileDescriptor_38d601305b414287) } var fileDescriptor_38d601305b414287 = []byte{ - // 641 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xcf, 0x6e, 0xd3, 0x48, - 0x18, 0x8f, 0x93, 0xd6, 0x4d, 0xa6, 0xbb, 0x69, 0x3a, 0xc9, 0x76, 0x67, 0xab, 0x95, 0x1d, 0xf5, - 0x00, 0x41, 0x02, 0x07, 0xb5, 0x08, 0x89, 0x63, 0xdd, 0x16, 0x11, 0xfe, 0xab, 0xe9, 0x81, 0x72, - 0x31, 0x63, 0xe7, 0xc3, 0x9d, 0x10, 0x7b, 0xdc, 0xcc, 0xb8, 0x82, 0x9e, 0x78, 0x04, 0x8e, 0x3c, - 0x03, 0x12, 0xef, 0xd1, 0x63, 0x8f, 0x3d, 0x19, 0x48, 0x6f, 0x7e, 0x0a, 0x94, 0xb1, 0xdd, 0xba, - 0x27, 0x6e, 0xfe, 0xfd, 0xb5, 0x3d, 0xdf, 0x37, 0x68, 0x2d, 0xfa, 0xe0, 0xf7, 0x69, 0xc4, 0xfa, - 0x12, 0x84, 0x14, 0x11, 0x78, 0x56, 0x34, 0xe5, 0x92, 0xe3, 0x1a, 0x8d, 0xd8, 0xba, 0xe1, 0x73, - 0xee, 0x4f, 0xa0, 0xaf, 0x28, 0x37, 0x7e, 0xdf, 0x1f, 0xc5, 0x53, 0x2a, 0x19, 0x0f, 0x33, 0xd3, - 0xfa, 0x3d, 0x9f, 0xc9, 0xa3, 0xd8, 0xb5, 0x3c, 0x1e, 0xf4, 0x7d, 0xee, 0xf3, 0x6b, 0xe3, 0x1c, - 0x29, 0xa0, 0x9e, 0x72, 0x7b, 0xa7, 0x78, 0x97, 0x88, 0xdd, 0x80, 0xc9, 0x9c, 0x6d, 0x17, 0x2c, - 0x9c, 0x40, 0x98, 0x93, 0x1b, 0xdf, 0x75, 0x54, 0x3f, 0x00, 0x21, 0x87, 0x11, 0x78, 0x78, 0x1b, - 0x2d, 0x8c, 0xb9, 0x2b, 0x88, 0xd6, 0xad, 0xf5, 0x96, 0x37, 0xff, 0xb3, 0x68, 0xc4, 0xac, 0xa7, - 0xdc, 0x1d, 0xaa, 0x96, 0x7d, 0x38, 0x8e, 0x41, 0xc8, 0x81, 0x84, 0xc0, 0xc6, 0x69, 0x62, 0x36, - 0xe7, 0xd6, 0xbb, 0x3c, 0x60, 0x12, 0x82, 0x48, 0x7e, 0xda, 0x57, 0x51, 0xfc, 0x06, 0xad, 0xc0, - 0xc7, 0x08, 0x3c, 0x09, 0x23, 0x47, 0xbd, 0x47, 0x90, 0xaa, 0x6a, 0x5b, 0x55, 0x6d, 0x7b, 0x73, - 0xea, 0x05, 0x08, 0x41, 0x7d, 0xb0, 0xff, 0x4f, 0x13, 0x93, 0x14, 0x6e, 0xa5, 0x94, 0xfb, 0x9a, - 0x37, 0x15, 0x7c, 0x07, 0x2d, 0x1e, 0xc7, 0x10, 0x03, 0xa9, 0x75, 0xb5, 0x5e, 0xc3, 0x6e, 0xa7, - 0x89, 0xb9, 0xa2, 0x88, 0x52, 0x26, 0x73, 0xe0, 0x07, 0x08, 0x8d, 0xb9, 0xeb, 0x08, 0x90, 0x0e, - 0x1b, 0x91, 0x05, 0xe5, 0x5f, 0x4b, 0x13, 0x13, 0x8f, 0xb9, 0x3b, 0x04, 0x39, 0x18, 0x95, 0x22, - 0xf5, 0x82, 0xc3, 0x8f, 0xd0, 0x72, 0x18, 0x07, 0x8e, 0x4b, 0xa5, 0x77, 0x04, 0x82, 0x2c, 0x76, - 0xb5, 0xde, 0xdf, 0x36, 0x49, 0x13, 0xb3, 0x13, 0xc6, 0x81, 0x9d, 0xb1, 0xa5, 0x20, 0xba, 0x66, - 0xf1, 0x43, 0x84, 0x54, 0xcc, 0x11, 0xec, 0x14, 0x88, 0xae, 0x92, 0xff, 0xa6, 0x89, 0xd9, 0x56, - 0xec, 0x90, 0x9d, 0x96, 0x3f, 0xb2, 0x71, 0x45, 0xe2, 0x67, 0xa8, 0xce, 0x42, 0x09, 0xd3, 0x13, - 0x3a, 0x21, 0x4b, 0x5d, 0x4d, 0x1d, 0x7a, 0xb6, 0x0a, 0x56, 0x31, 0x61, 0x6b, 0x37, 0x5f, 0x05, - 0xbb, 0x73, 0x96, 0x98, 0x95, 0x34, 0x31, 0xaf, 0x22, 0x5f, 0x7f, 0x98, 0xda, 0xfe, 0x15, 0xc2, - 0x4f, 0xd0, 0x92, 0x64, 0x01, 0xf0, 0x58, 0x92, 0xfa, 0x9f, 0xba, 0xda, 0x79, 0x57, 0x91, 0x50, - 0x55, 0x05, 0xc0, 0x36, 0xd2, 0x3d, 0x1a, 0x7a, 0x30, 0x21, 0x8d, 0xae, 0xd6, 0x6b, 0x6e, 0x76, - 0xd4, 0xec, 0x8a, 0x35, 0xb1, 0x76, 0x94, 0x66, 0x77, 0xd2, 0xc4, 0x6c, 0x65, 0xbe, 0xd2, 0xdf, - 0xe5, 0x49, 0x7c, 
0x0b, 0x2d, 0x84, 0x34, 0x00, 0x82, 0xd4, 0xe9, 0xab, 0x85, 0x99, 0xe3, 0xf2, - 0xc2, 0xcc, 0x31, 0x7e, 0x8c, 0x5a, 0x53, 0x1a, 0x8e, 0x78, 0xe0, 0x78, 0x13, 0x06, 0xa1, 0x9a, - 0xd8, 0x72, 0x57, 0xeb, 0xd5, 0xb3, 0xf5, 0xc8, 0xb4, 0x1d, 0x25, 0xdd, 0x98, 0x5b, 0xf3, 0xa6, - 0x82, 0xef, 0xa3, 0xba, 0x0f, 0xd2, 0x99, 0x70, 0x5f, 0x90, 0xbf, 0x54, 0xfe, 0x9f, 0x34, 0x31, - 0x57, 0x7d, 0x90, 0xcf, 0xb9, 0x5f, 0x9e, 0xdb, 0x52, 0x4e, 0x6d, 0x6c, 0x21, 0x3d, 0xfb, 0x13, - 0xac, 0xa3, 0xea, 0xcb, 0x57, 0xad, 0x0a, 0x6e, 0xa0, 0x45, 0xfb, 0xd0, 0x19, 0xec, 0xb6, 0x34, - 0x8c, 0x90, 0x6e, 0x1f, 0x3a, 0xc3, 0xbd, 0x83, 0x56, 0x35, 0x7f, 0x1e, 0xec, 0x0e, 0x5b, 0x35, - 0xfb, 0xdd, 0xc5, 0x2f, 0xa3, 0xf2, 0x79, 0x66, 0x68, 0x67, 0x33, 0x43, 0x3b, 0x9f, 0x19, 0xda, - 0xcf, 0x99, 0xa1, 0x7d, 0xb9, 0x34, 0x2a, 0xe7, 0x97, 0x46, 0xe5, 0xe2, 0xd2, 0xa8, 0xbc, 0xbd, - 0x5d, 0xba, 0xab, 0x74, 0x1a, 0xd0, 0x11, 0x8d, 0xa6, 0x7c, 0x0c, 0x9e, 0xcc, 0x51, 0x3f, 0xbf, - 0x86, 0xdf, 0xaa, 0x9d, 0x6d, 0x45, 0xbc, 0xce, 0x64, 0x6b, 0xc0, 0xad, 0xed, 0x88, 0xb9, 0xba, - 0x9a, 0xd6, 0xd6, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x44, 0x62, 0xb0, 0x31, 0x04, 0x00, - 0x00, + // 683 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xcf, 0x4e, 0xdb, 0x40, + 0x10, 0xc6, 0xe3, 0x04, 0x9c, 0x64, 0x53, 0x42, 0xd8, 0x04, 0xba, 0xa0, 0xca, 0x8e, 0x38, 0xb4, + 0xa9, 0x44, 0x9d, 0x0a, 0xaa, 0x4a, 0x55, 0x4f, 0x18, 0xa8, 0x9a, 0xfe, 0x17, 0xe1, 0x50, 0x7a, + 0x71, 0x6d, 0x67, 0x6a, 0x36, 0x8d, 0xbd, 0xc6, 0x5e, 0xa3, 0x96, 0x53, 0x1f, 0xa1, 0x47, 0x9e, + 0xa1, 0x4f, 0xc2, 0x91, 0x23, 0x27, 0xb7, 0x0d, 0x37, 0x3f, 0x45, 0x95, 0xb5, 0x0d, 0xe6, 0xd4, + 0xdb, 0xce, 0x6f, 0xbe, 0x6f, 0x76, 0x33, 0x33, 0x31, 0x5a, 0xf1, 0xbf, 0x3a, 0x7d, 0xd3, 0xa7, + 0x7d, 0x0e, 0x21, 0x0f, 0x7d, 0xb0, 0x35, 0x3f, 0x60, 0x9c, 0xe1, 0x8a, 0xe9, 0xd3, 0x35, 0xc5, + 0x61, 0xcc, 0x99, 0x40, 0x5f, 0x20, 0x2b, 0xfa, 0xd2, 0x1f, 0x45, 0x81, 0xc9, 0x29, 0xf3, 0x52, + 0xd1, 0xda, 0x23, 0x87, 0xf2, 0xa3, 0xc8, 0xd2, 0x6c, 0xe6, 0xf6, 0x1d, 0xe6, 0xb0, 0x1b, 0xe1, + 0x2c, 0x12, 0x81, 0x38, 0x65, 0xf2, 0x4e, 0x7e, 0x57, 0x18, 0x59, 0x2e, 0xe5, 0x19, 0x6d, 0xe7, + 0x14, 0x4e, 0xc0, 0xcb, 0xe0, 0xfa, 0x59, 0x15, 0xd5, 0x0e, 0x20, 0xe4, 0x43, 0x1f, 0x6c, 0xbc, + 0x8d, 0xe6, 0xc6, 0xcc, 0x0a, 0x89, 0xd4, 0xad, 0xf4, 0x1a, 0x9b, 0xab, 0x9a, 0xe9, 0x53, 0xed, + 0x15, 0xb3, 0x86, 0xa2, 0xca, 0x3e, 0x1c, 0x47, 0x10, 0xf2, 0x01, 0x07, 0x57, 0xc7, 0x49, 0xac, + 0x36, 0x67, 0xd2, 0x0d, 0xe6, 0x52, 0x0e, 0xae, 0xcf, 0xbf, 0xef, 0x0b, 0x2b, 0xfe, 0x88, 0x16, + 0xe1, 0x9b, 0x0f, 0x36, 0x87, 0x91, 0x21, 0xee, 0x09, 0x49, 0x59, 0x54, 0x5b, 0x12, 0xd5, 0xf6, + 0x66, 0xe8, 0x2d, 0x84, 0xa1, 0xe9, 0x80, 0x7e, 0x2f, 0x89, 0x55, 0x92, 0xab, 0x45, 0xa6, 0x58, + 0xaf, 0x79, 0x3b, 0x83, 0x1f, 0xa2, 0xf9, 0xe3, 0x08, 0x22, 0x20, 0x95, 0xae, 0xd4, 0xab, 0xeb, + 0xed, 0x24, 0x56, 0x17, 0x05, 0x28, 0x78, 0x52, 0x05, 0x7e, 0x82, 0xd0, 0x98, 0x59, 0x46, 0x08, + 0xdc, 0xa0, 0x23, 0x32, 0x27, 0xf4, 0x2b, 0x49, 0xac, 0xe2, 0x31, 0xb3, 0x86, 0xc0, 0x07, 0xa3, + 0x82, 0xa5, 0x96, 0x33, 0xfc, 0x0c, 0x35, 0xbc, 0xc8, 0x35, 0x2c, 0x93, 0xdb, 0x47, 0x10, 0x92, + 0xf9, 0xae, 0xd4, 0x5b, 0xd0, 0x49, 0x12, 0xab, 0x1d, 0x2f, 0x72, 0xf5, 0x94, 0x16, 0x8c, 0xe8, + 0x86, 0xe2, 0xa7, 0x08, 0x09, 0x9b, 0x11, 0xd2, 0x53, 0x20, 0xb2, 0x70, 0xde, 0x4d, 0x62, 0xb5, + 0x2d, 0xe8, 0x90, 0x9e, 0x16, 0x1f, 0x59, 0xbf, 0x86, 0xf8, 0x35, 0xaa, 0x51, 0x8f, 0x43, 0x70, + 0x62, 0x4e, 0x48, 0xb5, 0x2b, 0x89, 0xa6, 0xa7, 0xab, 0xa0, 0xe5, 0x13, 0xd6, 0x76, 
0xb3, 0x55, + 0xd0, 0x3b, 0xe7, 0xb1, 0x5a, 0x4a, 0x62, 0xf5, 0xda, 0x72, 0xf6, 0x5b, 0x95, 0xf6, 0xaf, 0x23, + 0xfc, 0x12, 0x55, 0x39, 0x75, 0x81, 0x45, 0x9c, 0xd4, 0xfe, 0x57, 0xab, 0x9d, 0xd5, 0xca, 0x1d, + 0xa2, 0x54, 0x1e, 0x60, 0x1d, 0xc9, 0xb6, 0xe9, 0xd9, 0x30, 0x21, 0xf5, 0xae, 0xd4, 0x6b, 0x6e, + 0x76, 0xc4, 0xec, 0xf2, 0x35, 0xd1, 0x76, 0x44, 0x4e, 0xef, 0x24, 0xb1, 0xda, 0x4a, 0x75, 0x85, + 0x5f, 0x97, 0x39, 0xf1, 0x7d, 0x34, 0xe7, 0x99, 0x2e, 0x10, 0x24, 0xba, 0x2f, 0x16, 0x66, 0x16, + 0x17, 0x17, 0x66, 0x16, 0xe3, 0x17, 0xa8, 0x15, 0x98, 0xde, 0x88, 0xb9, 0x86, 0x3d, 0xa1, 0xe0, + 0x89, 0x89, 0x35, 0xba, 0x52, 0xaf, 0x96, 0xae, 0x47, 0x9a, 0xdb, 0x11, 0xa9, 0x5b, 0x73, 0x6b, + 0xde, 0xce, 0xe0, 0xc7, 0xa8, 0xe6, 0x00, 0x37, 0x26, 0xcc, 0x09, 0xc9, 0x1d, 0xe1, 0x5f, 0x4e, + 0x62, 0x75, 0xc9, 0x01, 0xfe, 0x86, 0x39, 0xc5, 0xb9, 0x55, 0x33, 0x84, 0x9f, 0xa3, 0x06, 0x78, + 0x27, 0x34, 0x60, 0x9e, 0x0b, 0x1e, 0x27, 0x0b, 0xe2, 0xa1, 0xab, 0x49, 0xac, 0x2e, 0x17, 0x70, + 0xc1, 0x58, 0x54, 0xe3, 0x0d, 0x24, 0x73, 0x33, 0x70, 0x80, 0x93, 0xa6, 0xf0, 0x89, 0x66, 0xa4, + 0xa4, 0xd8, 0x8c, 0x94, 0xac, 0x6f, 0x21, 0x39, 0x6d, 0x1a, 0x96, 0x51, 0xf9, 0xdd, 0xfb, 0x56, + 0x09, 0xd7, 0xd1, 0xbc, 0x7e, 0x68, 0x0c, 0x76, 0x5b, 0x12, 0x46, 0x48, 0xd6, 0x0f, 0x8d, 0xe1, + 0xde, 0x41, 0xab, 0x9c, 0x9d, 0x07, 0xbb, 0xc3, 0x56, 0x45, 0xff, 0x7c, 0xf9, 0x57, 0x29, 0xfd, + 0x98, 0x2a, 0xd2, 0xf9, 0x54, 0x91, 0x2e, 0xa6, 0x8a, 0xf4, 0x67, 0xaa, 0x48, 0x3f, 0xaf, 0x94, + 0xd2, 0xc5, 0x95, 0x52, 0xba, 0xbc, 0x52, 0x4a, 0x9f, 0x1e, 0x14, 0x3e, 0x0b, 0x66, 0xe0, 0x9a, + 0x23, 0xd3, 0x0f, 0xd8, 0x18, 0x6c, 0x9e, 0x45, 0xfd, 0xec, 0x1f, 0xff, 0xab, 0xdc, 0xd9, 0x16, + 0xe0, 0x43, 0x9a, 0xd6, 0x06, 0x4c, 0xdb, 0xf6, 0xa9, 0x25, 0x8b, 0xc5, 0xd8, 0xfa, 0x17, 0x00, + 0x00, 0xff, 0xff, 0x6e, 0x14, 0xc2, 0x91, 0x9c, 0x04, 0x00, 0x00, } func (m *TestSpec) Marshal() (dAtA []byte, err error) { @@ -283,6 +303,20 @@ func (m *TestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Target) > 0 { + i -= len(m.Target) + copy(dAtA[i:], m.Target) + i = encodeVarintTestspec(dAtA, i, uint64(len(m.Target))) + i-- + dAtA[i] = 0x72 + } + if len(m.Environment) > 0 { + i -= len(m.Environment) + copy(dAtA[i:], m.Environment) + i = encodeVarintTestspec(dAtA, i, uint64(len(m.Environment))) + i-- + dAtA[i] = 0x6a + } if m.GetLogs { i-- if m.GetLogs { @@ -446,6 +480,14 @@ func (m *TestSpec) Size() (n int) { if m.GetLogs { n += 2 } + l = len(m.Environment) + if l > 0 { + n += 1 + l + sovTestspec(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTestspec(uint64(l)) + } return n } @@ -482,6 +524,8 @@ func (this *TestSpec) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `RandomClientId:` + fmt.Sprintf("%v", this.RandomClientId) + `,`, `GetLogs:` + fmt.Sprintf("%v", this.GetLogs) + `,`, + `Environment:` + fmt.Sprintf("%v", this.Environment) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, `}`, }, "") return s @@ -850,6 +894,70 @@ func (m *TestSpec) Unmarshal(dAtA []byte) error { } } m.GetLogs = bool(v != 0) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTestspec + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTestspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Environment = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTestspec + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTestspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTestspec(dAtA[iNdEx:]) diff --git a/pkg/api/testspec.proto b/pkg/api/testspec.proto index 2d7e26f4870..64e8ff5264b 100644 --- a/pkg/api/testspec.proto +++ b/pkg/api/testspec.proto @@ -15,39 +15,43 @@ option (gogoproto.stringer_all) = true; // Defines a test case for the Armada test suite. // Defined as a proto message to enable unmarshalling oneof fields. message TestSpec { - // Jobs to submit. - // The n jobs herein are copied BatchSize times to produce n*BatchSize jobs. - // A batch of n*BatchSize such jobs are submitted in each API call. - // NumBatches such batches are submitted in total. + // Jobs to submit. + // The n jobs herein are copied BatchSize times to produce n*BatchSize jobs. + // A batch of n*BatchSize such jobs are submitted in each API call. + // NumBatches such batches are submitted in total. repeated JobSubmitRequestItem jobs = 1; // Events expected in response to submitting each job. repeated EventMessage expected_events = 2; // Queue to submit jobs to. - string queue = 3; + string queue = 3; // Job set to submit jobs to. - string job_set_id = 4; - // Number of batches of jobs to submit. - // If 0, will submit forever. - uint32 num_batches = 5; - // Number of copies of the provided jobs to submit per batch. - uint32 batch_size = 6; - // Time between batches. - // If 0, jobs are submitted as quickly as possible. + string job_set_id = 4; + // Number of batches of jobs to submit. + // If 0, will submit forever. + uint32 num_batches = 5; + // Number of copies of the provided jobs to submit per batch. + uint32 batch_size = 6; + // Time between batches. + // If 0, jobs are submitted as quickly as possible. google.protobuf.Duration interval = 7 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; - // Number of seconds to wait for jobs to finish. + // Number of seconds to wait for jobs to finish. google.protobuf.Duration timeout = 8 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; - // If the jobs in this spec. should be cancelled. - enum Cancel { - NO = 0; // Do not cancel jobs. - BY_ID = 1; // Cancel jobs individually. - BY_SET = 2; // Cancel all jobs in the job set in bulk. - BY_IDS = 3; // Cancel all jobs in the job set by multiple ids - } - Cancel cancel = 9; - // Test name. Defaults to the filename if not provided. - string name = 10; + // If the jobs in this spec. should be cancelled. + enum Cancel { + NO = 0; // Do not cancel jobs. + BY_ID = 1; // Cancel jobs individually. + BY_SET = 2; // Cancel all jobs in the job set in bulk. 
+ BY_IDS = 3; // Cancel all jobs in the job set by multiple ids + } + Cancel cancel = 9; + // Test name. Defaults to the filename if not provided. + string name = 10; // Randomize clientId if not provided bool random_client_id = 11; // Toggle should testsuite scrape Armada Job (pod) logs bool get_logs = 12; + // Value of the environment label set on exported Prometheus metrics. + string environment = 13; + // Value of the target label set on exported Prometheus metrics. + string target = 14; } \ No newline at end of file diff --git a/pkg/armadaevents/events.pb.go b/pkg/armadaevents/events.pb.go index 2e3fe18f078..3feeac79696 100644 --- a/pkg/armadaevents/events.pb.go +++ b/pkg/armadaevents/events.pb.go @@ -693,6 +693,8 @@ type SubmitJob struct { Scheduler string `protobuf:"bytes,11,opt,name=scheduler,proto3" json:"scheduler,omitempty"` // Indicates whether job is a duplicate IsDuplicate bool `protobuf:"varint,12,opt,name=isDuplicate,proto3" json:"isDuplicate,omitempty"` + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + QueueTtlSeconds int64 `protobuf:"varint,13,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` } func (m *SubmitJob) Reset() { *m = SubmitJob{} } @@ -812,6 +814,13 @@ func (m *SubmitJob) GetIsDuplicate() bool { return false } +func (m *SubmitJob) GetQueueTtlSeconds() int64 { + if m != nil { + return m.QueueTtlSeconds + } + return 0 +} + // Kubernetes objects that can serve as main objects for an Armada job. type KubernetesMainObject struct { ObjectMeta *ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` @@ -2364,6 +2373,7 @@ type Error struct { // *Error_PodLeaseReturned // *Error_PodTerminated // *Error_JobRunPreemptedError + // *Error_GangJobUnschedulable Reason isError_Reason `protobuf_oneof:"reason"` } @@ -2436,6 +2446,9 @@ type Error_PodTerminated struct { type Error_JobRunPreemptedError struct { JobRunPreemptedError *JobRunPreemptedError `protobuf:"bytes,11,opt,name=jobRunPreemptedError,proto3,oneof" json:"jobRunPreemptedError,omitempty"` } +type Error_GangJobUnschedulable struct { + GangJobUnschedulable *GangJobUnschedulable `protobuf:"bytes,12,opt,name=gangJobUnschedulable,proto3,oneof" json:"gangJobUnschedulable,omitempty"` +} func (*Error_KubernetesError) isError_Reason() {} func (*Error_ContainerError) isError_Reason() {} @@ -2447,6 +2460,7 @@ func (*Error_PodError) isError_Reason() {} func (*Error_PodLeaseReturned) isError_Reason() {} func (*Error_PodTerminated) isError_Reason() {} func (*Error_JobRunPreemptedError) isError_Reason() {} +func (*Error_GangJobUnschedulable) isError_Reason() {} func (m *Error) GetReason() isError_Reason { if m != nil { @@ -2532,6 +2546,13 @@ func (m *Error) GetJobRunPreemptedError() *JobRunPreemptedError { return nil } +func (m *Error) GetGangJobUnschedulable() *GangJobUnschedulable { + if x, ok := m.GetReason().(*Error_GangJobUnschedulable); ok { + return x.GangJobUnschedulable + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Error) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -2545,6 +2566,7 @@ func (*Error) XXX_OneofWrappers() []interface{} { (*Error_PodLeaseReturned)(nil), (*Error_PodTerminated)(nil), (*Error_JobRunPreemptedError)(nil), + (*Error_GangJobUnschedulable)(nil), } } @@ -3131,6 +3153,50 @@ func (m *JobRunPreemptedError) XXX_DiscardUnknown() { var xxx_messageInfo_JobRunPreemptedError proto.InternalMessageInfo +type GangJobUnschedulable struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *GangJobUnschedulable) Reset() { *m = GangJobUnschedulable{} } +func (m *GangJobUnschedulable) String() string { return proto.CompactTextString(m) } +func (*GangJobUnschedulable) ProtoMessage() {} +func (*GangJobUnschedulable) Descriptor() ([]byte, []int) { + return fileDescriptor_6aab92ca59e015f8, []int{38} +} +func (m *GangJobUnschedulable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GangJobUnschedulable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GangJobUnschedulable.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GangJobUnschedulable) XXX_Merge(src proto.Message) { + xxx_messageInfo_GangJobUnschedulable.Merge(m, src) +} +func (m *GangJobUnschedulable) XXX_Size() int { + return m.Size() +} +func (m *GangJobUnschedulable) XXX_DiscardUnknown() { + xxx_messageInfo_GangJobUnschedulable.DiscardUnknown(m) +} + +var xxx_messageInfo_GangJobUnschedulable proto.InternalMessageInfo + +func (m *GangJobUnschedulable) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + // Generated by the scheduler whenever it detects a SubmitJob message that includes a previously used deduplication id // (i.e., when it detects a duplicate job submission). 
type JobDuplicateDetected struct { @@ -3142,7 +3208,7 @@ func (m *JobDuplicateDetected) Reset() { *m = JobDuplicateDetected{} } func (m *JobDuplicateDetected) String() string { return proto.CompactTextString(m) } func (*JobDuplicateDetected) ProtoMessage() {} func (*JobDuplicateDetected) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{38} + return fileDescriptor_6aab92ca59e015f8, []int{39} } func (m *JobDuplicateDetected) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3201,7 +3267,7 @@ func (m *JobRunPreempted) Reset() { *m = JobRunPreempted{} } func (m *JobRunPreempted) String() string { return proto.CompactTextString(m) } func (*JobRunPreempted) ProtoMessage() {} func (*JobRunPreempted) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{39} + return fileDescriptor_6aab92ca59e015f8, []int{40} } func (m *JobRunPreempted) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3270,7 +3336,7 @@ func (m *PartitionMarker) Reset() { *m = PartitionMarker{} } func (m *PartitionMarker) String() string { return proto.CompactTextString(m) } func (*PartitionMarker) ProtoMessage() {} func (*PartitionMarker) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{40} + return fileDescriptor_6aab92ca59e015f8, []int{41} } func (m *PartitionMarker) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3323,7 +3389,7 @@ func (m *JobRunPreemptionRequested) Reset() { *m = JobRunPreemptionReque func (m *JobRunPreemptionRequested) String() string { return proto.CompactTextString(m) } func (*JobRunPreemptionRequested) ProtoMessage() {} func (*JobRunPreemptionRequested) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{41} + return fileDescriptor_6aab92ca59e015f8, []int{42} } func (m *JobRunPreemptionRequested) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3414,6 +3480,7 @@ func init() { proto.RegisterType((*LeaseExpired)(nil), "armadaevents.LeaseExpired") proto.RegisterType((*MaxRunsExceeded)(nil), "armadaevents.MaxRunsExceeded") proto.RegisterType((*JobRunPreemptedError)(nil), "armadaevents.JobRunPreemptedError") + proto.RegisterType((*GangJobUnschedulable)(nil), "armadaevents.GangJobUnschedulable") proto.RegisterType((*JobDuplicateDetected)(nil), "armadaevents.JobDuplicateDetected") proto.RegisterType((*JobRunPreempted)(nil), "armadaevents.JobRunPreempted") proto.RegisterType((*PartitionMarker)(nil), "armadaevents.PartitionMarker") @@ -3423,222 +3490,226 @@ func init() { func init() { proto.RegisterFile("pkg/armadaevents/events.proto", fileDescriptor_6aab92ca59e015f8) } var fileDescriptor_6aab92ca59e015f8 = []byte{ - // 3431 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5b, 0x4b, 0x6c, 0x1b, 0xc7, - 0xf9, 0xf7, 0x92, 0x12, 0x1f, 0x1f, 0x25, 0x91, 0x1e, 0xcb, 0x0a, 0xad, 0xd8, 0xa2, 0xb3, 0xce, - 0xff, 0x1f, 0x27, 0x48, 0xc8, 0xc4, 0x79, 0x20, 0x8f, 0x22, 0x81, 0x68, 0x2b, 0xb1, 0x1d, 0xcb, - 0x76, 0x28, 0x3b, 0x75, 0x83, 0x14, 0xcc, 0x92, 0x3b, 0xa2, 0xd6, 0x22, 0x77, 0x37, 0xfb, 0x90, - 0x25, 0x20, 0x87, 0xb6, 0x68, 0xd3, 0x5b, 0x6b, 0xa0, 0x3d, 0x14, 0xe8, 0x21, 0xbd, 0x36, 0x40, - 0x6f, 0x05, 0x7a, 0xee, 0x2d, 0x05, 0x8a, 0x22, 0xed, 0xa9, 0x27, 0xb6, 0x48, 0xd0, 0x43, 0x79, - 0xe8, 0xb9, 0xed, 0xa9, 0x98, 0xd7, 0xee, 0xcc, 0xee, 0xd2, 0x56, 0xfc, 0xa8, 0x53, 0xf8, 0x24, - 0xed, 0xef, 0x7b, 0xcd, 0xce, 0xcc, 0xf7, 0xed, 0xf7, 0x7d, 0x33, 0x84, 0x63, 0xee, 0xf6, 0xa0, - 0x65, 0x78, 0x23, 0xc3, 0x34, 0xf0, 
0x0e, 0xb6, 0x03, 0xbf, 0xc5, 0xfe, 0x34, 0x5d, 0xcf, 0x09, - 0x1c, 0x34, 0x27, 0x93, 0x96, 0xf5, 0xed, 0x97, 0xfd, 0xa6, 0xe5, 0xb4, 0x0c, 0xd7, 0x6a, 0xf5, - 0x1d, 0x0f, 0xb7, 0x76, 0x9e, 0x6b, 0x0d, 0xb0, 0x8d, 0x3d, 0x23, 0xc0, 0x26, 0x93, 0x58, 0x3e, - 0x29, 0xf1, 0xd8, 0x38, 0xb8, 0xe1, 0x78, 0xdb, 0x96, 0x3d, 0xc8, 0xe2, 0x6c, 0x0c, 0x1c, 0x67, - 0x30, 0xc4, 0x2d, 0xfa, 0xd4, 0x0b, 0x37, 0x5b, 0x81, 0x35, 0xc2, 0x7e, 0x60, 0x8c, 0x5c, 0xce, - 0xf0, 0x42, 0xac, 0x6a, 0x64, 0xf4, 0xb7, 0x2c, 0x1b, 0x7b, 0x7b, 0x2d, 0x3a, 0x5e, 0xd7, 0x6a, - 0x79, 0xd8, 0x77, 0x42, 0xaf, 0x8f, 0x53, 0x6a, 0x9f, 0x19, 0x58, 0xc1, 0x56, 0xd8, 0x6b, 0xf6, - 0x9d, 0x51, 0x6b, 0xe0, 0x0c, 0x9c, 0x58, 0x3f, 0x79, 0xa2, 0x0f, 0xf4, 0x3f, 0xce, 0xfe, 0xaa, - 0x65, 0x07, 0xd8, 0xb3, 0x8d, 0x61, 0xcb, 0xef, 0x6f, 0x61, 0x33, 0x1c, 0x62, 0x2f, 0xfe, 0xcf, - 0xe9, 0x5d, 0xc7, 0xfd, 0xc0, 0x4f, 0x01, 0x4c, 0x56, 0xbf, 0xb9, 0x08, 0xf3, 0x6b, 0x64, 0x6a, - 0x36, 0xf0, 0x87, 0x21, 0xb6, 0xfb, 0x18, 0x3d, 0x09, 0xb3, 0x1f, 0x86, 0x38, 0xc4, 0x75, 0xed, - 0xb8, 0x76, 0xb2, 0xdc, 0x3e, 0x34, 0x19, 0x37, 0xaa, 0x14, 0x78, 0xda, 0x19, 0x59, 0x01, 0x1e, - 0xb9, 0xc1, 0x5e, 0x87, 0x71, 0xa0, 0x57, 0x61, 0xee, 0xba, 0xd3, 0xeb, 0xfa, 0x38, 0xe8, 0xda, - 0xc6, 0x08, 0xd7, 0x73, 0x54, 0xa2, 0x3e, 0x19, 0x37, 0x16, 0xaf, 0x3b, 0xbd, 0x0d, 0x1c, 0x5c, - 0x34, 0x46, 0xb2, 0x18, 0xc4, 0x28, 0x7a, 0x06, 0x8a, 0xa1, 0x8f, 0xbd, 0xae, 0x65, 0xd6, 0xf3, - 0x54, 0x6c, 0x71, 0x32, 0x6e, 0xd4, 0x08, 0x74, 0xce, 0x94, 0x44, 0x0a, 0x0c, 0x41, 0x4f, 0x43, - 0x61, 0xe0, 0x39, 0xa1, 0xeb, 0xd7, 0x67, 0x8e, 0xe7, 0x05, 0x37, 0x43, 0x64, 0x6e, 0x86, 0xa0, - 0x4b, 0x50, 0x60, 0xeb, 0x5d, 0x9f, 0x3d, 0x9e, 0x3f, 0x59, 0x39, 0xf5, 0x58, 0x53, 0xde, 0x04, - 0x4d, 0xe5, 0x85, 0xd9, 0x13, 0x53, 0xc8, 0xe8, 0xb2, 0x42, 0xbe, 0x6d, 0xfe, 0x7e, 0x10, 0x66, - 0x29, 0x1f, 0xba, 0x04, 0xc5, 0xbe, 0x87, 0xc9, 0x62, 0xd5, 0xd1, 0x71, 0xed, 0x64, 0xe5, 0xd4, - 0x72, 0x93, 0x6d, 0x82, 0xa6, 0x58, 0xa4, 0xe6, 0x15, 0xb1, 0x09, 0xda, 0x47, 0x26, 0xe3, 0xc6, - 0x41, 0xce, 0x1e, 0x6b, 0xbd, 0xf9, 0x97, 0x86, 0xd6, 0x11, 0x5a, 0xd0, 0x65, 0x28, 0xfb, 0x61, - 0x6f, 0x64, 0x05, 0xe7, 0x9d, 0x1e, 0x9d, 0xf3, 0xca, 0xa9, 0x47, 0xd4, 0xe1, 0x6e, 0x08, 0x72, - 0xfb, 0x91, 0xc9, 0xb8, 0x71, 0x28, 0xe2, 0x8e, 0x35, 0x9e, 0x3d, 0xd0, 0x89, 0x95, 0xa0, 0x2d, - 0xa8, 0x7a, 0xd8, 0xf5, 0x2c, 0xc7, 0xb3, 0x02, 0xcb, 0xc7, 0x44, 0x6f, 0x8e, 0xea, 0x3d, 0xa6, - 0xea, 0xed, 0xa8, 0x4c, 0xed, 0x63, 0x93, 0x71, 0xe3, 0x48, 0x42, 0x52, 0xb1, 0x91, 0x54, 0x8b, - 0x02, 0x40, 0x09, 0x68, 0x03, 0x07, 0x74, 0x3d, 0x2b, 0xa7, 0x8e, 0xdf, 0xd2, 0xd8, 0x06, 0x0e, - 0xda, 0xc7, 0x27, 0xe3, 0xc6, 0xd1, 0xb4, 0xbc, 0x62, 0x32, 0x43, 0x3f, 0x1a, 0x42, 0x4d, 0x46, - 0x4d, 0xf2, 0x82, 0x33, 0xd4, 0xe6, 0xca, 0x74, 0x9b, 0x84, 0xab, 0xbd, 0x32, 0x19, 0x37, 0x96, - 0x93, 0xb2, 0x8a, 0xbd, 0x94, 0x66, 0xb2, 0x3e, 0x7d, 0xc3, 0xee, 0xe3, 0x21, 0x31, 0x33, 0x9b, - 0xb5, 0x3e, 0xa7, 0x05, 0x99, 0xad, 0x4f, 0xc4, 0xad, 0xae, 0x4f, 0x04, 0xa3, 0xf7, 0x61, 0x2e, - 0x7a, 0x20, 0xf3, 0x55, 0xe0, 0xfb, 0x28, 0x5b, 0x29, 0x99, 0xa9, 0xe5, 0xc9, 0xb8, 0xb1, 0x24, - 0xcb, 0x28, 0xaa, 0x15, 0x6d, 0xb1, 0xf6, 0x21, 0x9b, 0x99, 0xe2, 0x74, 0xed, 0x8c, 0x43, 0xd6, - 0x3e, 0x4c, 0xcf, 0x88, 0xa2, 0x8d, 0x68, 0x27, 0x4e, 0x1c, 0xf6, 0xfb, 0x18, 0x9b, 0xd8, 0xac, - 0x97, 0xb2, 0xb4, 0x9f, 0x97, 0x38, 0x98, 0x76, 0x59, 0x46, 0xd5, 0x2e, 0x53, 0xc8, 0x5c, 0x5f, - 0x77, 0x7a, 0x6b, 0x9e, 0xe7, 0x78, 0x7e, 0xbd, 0x9c, 0x35, 0xd7, 0xe7, 0x05, 0x99, 0xcd, 0x75, - 0xc4, 0xad, 0xce, 0x75, 0x04, 0xf3, 0xf1, 0x76, 0x42, 0xfb, 
0x02, 0x36, 0x7c, 0x6c, 0xd6, 0x61, - 0xca, 0x78, 0x23, 0x8e, 0x68, 0xbc, 0x11, 0x92, 0x1a, 0x6f, 0x44, 0x41, 0x26, 0x2c, 0xb0, 0xe7, - 0x55, 0xdf, 0xb7, 0x06, 0x36, 0x36, 0xeb, 0x15, 0xaa, 0xff, 0x68, 0x96, 0x7e, 0xc1, 0xd3, 0x3e, - 0x3a, 0x19, 0x37, 0xea, 0xaa, 0x9c, 0x62, 0x23, 0xa1, 0x13, 0x7d, 0x00, 0xf3, 0x0c, 0xe9, 0x84, - 0xb6, 0x6d, 0xd9, 0x83, 0xfa, 0x1c, 0x35, 0xf2, 0x68, 0x96, 0x11, 0xce, 0xd2, 0x7e, 0x74, 0x32, - 0x6e, 0x3c, 0xa2, 0x48, 0x29, 0x26, 0x54, 0x85, 0x24, 0x62, 0x30, 0x20, 0x5e, 0xd8, 0xf9, 0xac, - 0x88, 0x71, 0x5e, 0x65, 0x62, 0x11, 0x23, 0x21, 0xa9, 0x46, 0x8c, 0x04, 0x31, 0x5e, 0x0f, 0xbe, - 0xc8, 0x0b, 0xd3, 0xd7, 0x83, 0xaf, 0xb3, 0xb4, 0x1e, 0x19, 0x4b, 0xad, 0x68, 0x43, 0x1f, 0x01, - 0xf9, 0xf0, 0x9c, 0x09, 0xdd, 0xa1, 0xd5, 0x37, 0x02, 0x7c, 0x06, 0x07, 0xb8, 0x4f, 0x22, 0x75, - 0x95, 0x5a, 0xd1, 0x53, 0x56, 0x52, 0x9c, 0x6d, 0x7d, 0x32, 0x6e, 0xac, 0x64, 0xe9, 0x50, 0xac, - 0x66, 0x5a, 0x41, 0xdf, 0xd1, 0xe0, 0xb0, 0x1f, 0x18, 0xb6, 0x69, 0x0c, 0x1d, 0x1b, 0x9f, 0xb3, - 0x07, 0x1e, 0xf6, 0xfd, 0x73, 0xf6, 0xa6, 0x53, 0xaf, 0x51, 0xfb, 0x27, 0x12, 0x61, 0x3d, 0x8b, - 0xb5, 0x7d, 0x62, 0x32, 0x6e, 0x34, 0x32, 0xb5, 0x28, 0x23, 0xc8, 0x36, 0x84, 0x76, 0xe1, 0x90, - 0xc8, 0x2a, 0xae, 0x06, 0xd6, 0xd0, 0xf2, 0x8d, 0xc0, 0x72, 0xec, 0xfa, 0x41, 0x6a, 0xff, 0xb1, - 0x64, 0x74, 0x4c, 0x31, 0xb6, 0x1f, 0x9b, 0x8c, 0x1b, 0xc7, 0x32, 0x34, 0x28, 0xb6, 0xb3, 0x4c, - 0xc4, 0x5b, 0xe8, 0xb2, 0x87, 0x09, 0x23, 0x36, 0xeb, 0x87, 0xa6, 0x6f, 0xa1, 0x88, 0x49, 0xde, - 0x42, 0x11, 0x98, 0xb5, 0x85, 0x22, 0x22, 0xb1, 0xe4, 0x1a, 0x5e, 0x60, 0x11, 0xb3, 0xeb, 0x86, - 0xb7, 0x8d, 0xbd, 0xfa, 0x62, 0x96, 0xa5, 0xcb, 0x2a, 0x13, 0xb3, 0x94, 0x90, 0x54, 0x2d, 0x25, - 0x88, 0xe8, 0xa6, 0x06, 0xea, 0xd0, 0x2c, 0xc7, 0xee, 0x90, 0xb4, 0xc1, 0x27, 0xaf, 0x77, 0x98, - 0x1a, 0x7d, 0xe2, 0x16, 0xaf, 0x27, 0xb3, 0xb7, 0x9f, 0x98, 0x8c, 0x1b, 0x27, 0xa6, 0x6a, 0x53, - 0x06, 0x32, 0xdd, 0x28, 0xba, 0x06, 0x15, 0x42, 0xc4, 0x34, 0x01, 0x33, 0xeb, 0x4b, 0x74, 0x0c, - 0x47, 0xd2, 0x63, 0xe0, 0x0c, 0x34, 0x03, 0x39, 0x2c, 0x49, 0x28, 0x76, 0x64, 0x55, 0xed, 0x22, - 0xcc, 0x52, 0x79, 0x7d, 0x52, 0x80, 0x43, 0x19, 0x7b, 0x03, 0xbd, 0x0e, 0x05, 0x2f, 0xb4, 0x49, - 0xc2, 0xc6, 0xb2, 0x14, 0xa4, 0x5a, 0xbd, 0x1a, 0x5a, 0x26, 0xcb, 0x16, 0xbd, 0xd0, 0x56, 0x72, - 0xb8, 0x59, 0x0a, 0x10, 0x79, 0x92, 0x2d, 0x5a, 0x26, 0xcf, 0x46, 0xa6, 0xca, 0x5f, 0x77, 0x7a, - 0xaa, 0x3c, 0x05, 0x10, 0x86, 0x79, 0xb1, 0xf1, 0xba, 0x16, 0xf1, 0x2a, 0x96, 0x67, 0x3c, 0xae, - 0xaa, 0x79, 0x3b, 0xec, 0x61, 0xcf, 0xc6, 0x01, 0xf6, 0xc5, 0x3b, 0x50, 0xb7, 0xa2, 0x51, 0xc4, - 0x93, 0x10, 0x49, 0xff, 0x9c, 0x8c, 0xa3, 0x9f, 0x6a, 0x50, 0x1f, 0x19, 0xbb, 0x5d, 0x01, 0xfa, - 0xdd, 0x4d, 0xc7, 0xeb, 0xba, 0xd8, 0xb3, 0x1c, 0x93, 0x26, 0x9f, 0x95, 0x53, 0xdf, 0xb8, 0xad, - 0x23, 0x35, 0xd7, 0x8d, 0x5d, 0x01, 0xfb, 0x6f, 0x3a, 0xde, 0x65, 0x2a, 0xbe, 0x66, 0x07, 0xde, - 0x5e, 0xfb, 0xd8, 0x67, 0xe3, 0xc6, 0x01, 0xb2, 0x2c, 0xa3, 0x2c, 0x9e, 0x4e, 0x36, 0x8c, 0x7e, - 0xac, 0xc1, 0x52, 0xe0, 0x04, 0xc6, 0xb0, 0xdb, 0x0f, 0x47, 0xe1, 0xd0, 0x08, 0xac, 0x1d, 0xdc, - 0x0d, 0x7d, 0x63, 0x80, 0x79, 0x8e, 0xfb, 0xda, 0xed, 0x07, 0x75, 0x85, 0xc8, 0x9f, 0x8e, 0xc4, - 0xaf, 0x12, 0x69, 0x36, 0xa6, 0xa3, 0x7c, 0x4c, 0x8b, 0x41, 0x06, 0x4b, 0x27, 0x13, 0x5d, 0xfe, - 0x85, 0x06, 0xcb, 0xd3, 0x5f, 0x13, 0x9d, 0x80, 0xfc, 0x36, 0xde, 0xe3, 0x55, 0xc4, 0xc1, 0xc9, - 0xb8, 0x31, 0xbf, 0x8d, 0xf7, 0xa4, 0x59, 0x27, 0x54, 0xf4, 0x2d, 0x98, 0xdd, 0x31, 0x86, 0x21, - 0xe6, 0x5b, 0xa2, 0xd9, 0x64, 0xf5, 0x52, 0x53, 0xae, 0x97, 0x9a, 0xee, 0xf6, 0x80, 
0x00, 0x4d, - 0xb1, 0x22, 0xcd, 0x77, 0x42, 0xc3, 0x0e, 0xac, 0x60, 0x8f, 0x6d, 0x17, 0xaa, 0x40, 0xde, 0x2e, - 0x14, 0x78, 0x35, 0xf7, 0xb2, 0xb6, 0xfc, 0x89, 0x06, 0x47, 0xa6, 0xbe, 0xf4, 0xd7, 0x61, 0x84, - 0x7a, 0x17, 0x66, 0xc8, 0xc6, 0x27, 0xf5, 0xcd, 0x96, 0x35, 0xd8, 0x7a, 0xe9, 0x05, 0x3a, 0x9c, - 0x02, 0x2b, 0x47, 0x18, 0x22, 0x97, 0x23, 0x0c, 0x21, 0x35, 0xda, 0xd0, 0xb9, 0xf1, 0xd2, 0x0b, - 0x74, 0x50, 0x05, 0x66, 0x84, 0x02, 0xb2, 0x11, 0x0a, 0xe8, 0xbf, 0x2e, 0x40, 0x39, 0x2a, 0x20, - 0x24, 0x1f, 0xd4, 0xee, 0xc8, 0x07, 0xcf, 0x42, 0xcd, 0xc4, 0x26, 0xff, 0xf2, 0x59, 0x8e, 0x2d, - 0xbc, 0xb9, 0xcc, 0xa2, 0xab, 0x42, 0x53, 0xe4, 0xab, 0x09, 0x12, 0x3a, 0x05, 0x25, 0x9e, 0x68, - 0xef, 0x51, 0x47, 0x9e, 0x6f, 0x2f, 0x4d, 0xc6, 0x0d, 0x24, 0x30, 0x49, 0x34, 0xe2, 0x43, 0x1d, - 0x00, 0x56, 0xbd, 0xae, 0xe3, 0xc0, 0xe0, 0x29, 0x7f, 0x5d, 0x7d, 0x83, 0x4b, 0x11, 0x9d, 0xd5, - 0xa1, 0x31, 0xbf, 0x5c, 0x87, 0xc6, 0x28, 0x7a, 0x1f, 0x60, 0x64, 0x58, 0x36, 0x93, 0xe3, 0xf9, - 0xbd, 0x3e, 0x2d, 0xa4, 0xac, 0x47, 0x9c, 0x4c, 0x7b, 0x2c, 0x29, 0x6b, 0x8f, 0x51, 0x52, 0x2d, - 0xf2, 0x7a, 0xbb, 0x5e, 0xa0, 0x5e, 0xba, 0x32, 0x4d, 0x35, 0x57, 0x7b, 0x98, 0x54, 0x8c, 0x5c, - 0x44, 0xd2, 0x29, 0xb4, 0x90, 0x69, 0x1b, 0x5a, 0x9b, 0x38, 0xb0, 0x46, 0x98, 0x66, 0xf6, 0x7c, - 0xda, 0x04, 0x26, 0x4f, 0x9b, 0xc0, 0xd0, 0xcb, 0x00, 0x46, 0xb0, 0xee, 0xf8, 0xc1, 0x25, 0xbb, - 0x8f, 0x69, 0xc6, 0x5e, 0x62, 0xc3, 0x8f, 0x51, 0x79, 0xf8, 0x31, 0x8a, 0x5e, 0x83, 0x8a, 0xcb, - 0x3f, 0x42, 0xbd, 0x21, 0xa6, 0x19, 0x79, 0x89, 0x7d, 0x52, 0x24, 0x58, 0x92, 0x95, 0xb9, 0xd1, - 0x5b, 0x50, 0xed, 0x3b, 0x76, 0x3f, 0xf4, 0x3c, 0x6c, 0xf7, 0xf7, 0x36, 0x8c, 0x4d, 0x4c, 0xb3, - 0xef, 0x12, 0xdb, 0x2a, 0x09, 0x92, 0xbc, 0x55, 0x12, 0x24, 0xf4, 0x22, 0x94, 0xa3, 0xee, 0x05, - 0x4d, 0xb0, 0xcb, 0xbc, 0x10, 0x16, 0xa0, 0x24, 0x1c, 0x73, 0x92, 0xc1, 0x5b, 0x7e, 0x94, 0xa5, - 0xd1, 0xa4, 0x99, 0x0f, 0x5e, 0x82, 0xe5, 0xc1, 0x4b, 0xb0, 0xfe, 0x7b, 0x0d, 0x16, 0xb3, 0xd6, - 0x3d, 0xb1, 0x07, 0xb5, 0x7b, 0xb2, 0x07, 0xdf, 0x85, 0x92, 0xeb, 0x98, 0x5d, 0xdf, 0xc5, 0x7d, - 0x1e, 0x66, 0x12, 0x3b, 0xf0, 0xb2, 0x63, 0x6e, 0xb8, 0xb8, 0xff, 0x4d, 0x2b, 0xd8, 0x5a, 0xdd, - 0x71, 0x2c, 0xf3, 0x82, 0xe5, 0xf3, 0xad, 0xe2, 0x32, 0x8a, 0xf2, 0x59, 0x2f, 0x72, 0xb0, 0x5d, - 0x82, 0x02, 0xb3, 0xa2, 0xff, 0x21, 0x0f, 0xb5, 0xe4, 0x5e, 0xfb, 0x5f, 0x7a, 0x15, 0x74, 0x0d, - 0x8a, 0x16, 0xcb, 0x73, 0xf9, 0x67, 0xff, 0xff, 0xa4, 0x40, 0xdc, 0x8c, 0xbb, 0x74, 0xcd, 0x9d, - 0xe7, 0x9a, 0x3c, 0x21, 0xa6, 0x53, 0x40, 0x35, 0x73, 0x49, 0x55, 0x33, 0x07, 0x51, 0x07, 0x8a, - 0x3e, 0xf6, 0x76, 0xac, 0x3e, 0xe6, 0x11, 0xa5, 0x21, 0x6b, 0xee, 0x3b, 0x1e, 0x26, 0x3a, 0x37, - 0x18, 0x4b, 0xac, 0x93, 0xcb, 0xa8, 0x3a, 0x39, 0x88, 0xde, 0x85, 0x72, 0xdf, 0xb1, 0x37, 0xad, - 0xc1, 0xba, 0xe1, 0xf2, 0x98, 0x72, 0x2c, 0x4b, 0xeb, 0x69, 0xc1, 0xc4, 0x3b, 0x07, 0xe2, 0x31, - 0xd1, 0x39, 0x88, 0xb8, 0xe2, 0x05, 0xfd, 0xc7, 0x0c, 0x40, 0xbc, 0x38, 0xe8, 0x15, 0xa8, 0xe0, - 0x5d, 0xdc, 0x0f, 0x03, 0xc7, 0x13, 0xc1, 0x9d, 0x37, 0xe2, 0x04, 0xac, 0x44, 0x63, 0x88, 0x51, - 0xe2, 0x5d, 0xb6, 0x31, 0xc2, 0xbe, 0x6b, 0xf4, 0x45, 0x07, 0x8f, 0x0e, 0x26, 0x02, 0x65, 0xef, - 0x8a, 0x40, 0xf4, 0xff, 0x30, 0x43, 0x7b, 0x7e, 0xac, 0x79, 0x87, 0x26, 0xe3, 0xc6, 0x82, 0xad, - 0x76, 0xfb, 0x28, 0x1d, 0xbd, 0x01, 0xf3, 0xdb, 0xd1, 0xc6, 0x23, 0x63, 0x9b, 0xa1, 0x02, 0x34, - 0x1f, 0x8b, 0x09, 0xca, 0xe8, 0xe6, 0x64, 0x1c, 0x6d, 0x42, 0xc5, 0xb0, 0x6d, 0x27, 0xa0, 0x1f, - 0x0e, 0xd1, 0xd0, 0x7b, 0x72, 0xda, 0x36, 0x6d, 0xae, 0xc6, 0xbc, 0x2c, 0xb5, 0xa1, 0x1e, 0x2f, - 0x69, 0x90, 
0x3d, 0x5e, 0x82, 0x51, 0x07, 0x0a, 0x43, 0xa3, 0x87, 0x87, 0x22, 0x52, 0x3f, 0x3e, - 0xd5, 0xc4, 0x05, 0xca, 0xc6, 0xb4, 0xd3, 0xef, 0x34, 0x93, 0x93, 0xbf, 0xd3, 0x0c, 0x59, 0xde, - 0x84, 0x5a, 0x72, 0x3c, 0xfb, 0xcb, 0x3a, 0x9e, 0x94, 0xb3, 0x8e, 0xf2, 0x6d, 0xf3, 0x1c, 0x03, - 0x2a, 0xd2, 0xa0, 0xee, 0x87, 0x09, 0xfd, 0x97, 0x1a, 0x2c, 0x66, 0xf9, 0x2e, 0x5a, 0x97, 0x3c, - 0x5e, 0xe3, 0x8d, 0x89, 0x8c, 0xad, 0xce, 0x65, 0xa7, 0xb8, 0x7a, 0xec, 0xe8, 0x6d, 0x58, 0xb0, - 0x1d, 0x13, 0x77, 0x0d, 0x62, 0x60, 0x68, 0xf9, 0x41, 0x3d, 0x47, 0x1b, 0xbe, 0xb4, 0xa1, 0x41, - 0x28, 0xab, 0x82, 0x20, 0x49, 0xcf, 0x2b, 0x04, 0xfd, 0x07, 0x1a, 0x54, 0x13, 0xfd, 0xc6, 0xbb, - 0xce, 0x7c, 0xe4, 0x7c, 0x25, 0xb7, 0xbf, 0x7c, 0x45, 0xff, 0x49, 0x0e, 0x2a, 0x52, 0x31, 0x76, - 0xd7, 0x63, 0xb8, 0x0e, 0x55, 0xfe, 0x79, 0xb3, 0xec, 0x01, 0xab, 0x81, 0x72, 0xbc, 0xb3, 0x90, - 0x6a, 0xef, 0x9f, 0x77, 0x7a, 0x1b, 0x11, 0x2f, 0x2d, 0x81, 0x68, 0xdb, 0xc9, 0x57, 0x30, 0xc9, - 0xc4, 0x82, 0x4a, 0x41, 0xd7, 0x60, 0x29, 0x74, 0x4d, 0x23, 0xc0, 0x5d, 0x9f, 0x37, 0xca, 0xbb, - 0x76, 0x38, 0xea, 0x61, 0x8f, 0x7a, 0xfc, 0x2c, 0x6b, 0x94, 0x30, 0x0e, 0xd1, 0x49, 0xbf, 0x48, - 0xe9, 0x92, 0xce, 0xc5, 0x2c, 0xba, 0x7e, 0x16, 0x50, 0xba, 0x19, 0xac, 0xcc, 0xaf, 0xb6, 0xcf, - 0xf9, 0xfd, 0x58, 0x83, 0x5a, 0xb2, 0xc7, 0xfb, 0x40, 0x16, 0x7a, 0x0f, 0xca, 0x51, 0xbf, 0xf6, - 0xae, 0x07, 0xf0, 0x34, 0x14, 0x3c, 0x6c, 0xf8, 0x8e, 0xcd, 0x3d, 0x93, 0x86, 0x18, 0x86, 0xc8, - 0x21, 0x86, 0x21, 0xfa, 0x15, 0x98, 0x63, 0x33, 0xf8, 0xa6, 0x35, 0x0c, 0xb0, 0x87, 0xce, 0x40, - 0xc1, 0x0f, 0x8c, 0x00, 0xfb, 0x75, 0xed, 0x78, 0xfe, 0xe4, 0xc2, 0xa9, 0xa5, 0x74, 0x6b, 0x96, - 0x90, 0x99, 0x56, 0xc6, 0x29, 0x6b, 0x65, 0x88, 0xfe, 0x3d, 0x0d, 0xe6, 0xe4, 0x0e, 0xf4, 0xbd, - 0x51, 0xfb, 0x15, 0x5f, 0xed, 0x23, 0x31, 0x86, 0xe1, 0xbd, 0x59, 0xd9, 0xaf, 0x66, 0xfd, 0x37, - 0x1a, 0x9b, 0xd9, 0xa8, 0x75, 0x79, 0xb7, 0xe6, 0x07, 0x71, 0xff, 0x82, 0x78, 0x98, 0x4f, 0x03, - 0xdb, 0x7e, 0xfb, 0x17, 0x34, 0xfc, 0x29, 0xe2, 0x72, 0xf8, 0x53, 0x08, 0xfa, 0x9f, 0x72, 0x74, - 0xe4, 0x71, 0x9b, 0xfa, 0x41, 0x77, 0x6e, 0x12, 0xd9, 0x49, 0xfe, 0x2b, 0x64, 0x27, 0xcf, 0x40, - 0x91, 0x7e, 0x0e, 0xa2, 0xc4, 0x81, 0x2e, 0x1a, 0x81, 0xd4, 0x63, 0x42, 0x86, 0xdc, 0x22, 0x6a, - 0xcd, 0xde, 0x65, 0xd4, 0xfa, 0x97, 0x06, 0x0b, 0x6a, 0x1f, 0xff, 0x81, 0x4f, 0x6b, 0x6a, 0x43, - 0xe5, 0xef, 0xd3, 0x86, 0xfa, 0xa7, 0x06, 0xf3, 0xca, 0xf1, 0xc2, 0xc3, 0xf3, 0xea, 0x3f, 0xcb, - 0xc1, 0x52, 0xb6, 0x9a, 0xfb, 0x52, 0x3e, 0x9d, 0x05, 0x92, 0x08, 0x9d, 0x8b, 0xbf, 0xec, 0x87, - 0x53, 0xd5, 0x13, 0x7d, 0x05, 0x91, 0x45, 0xa5, 0xce, 0x05, 0x84, 0x38, 0xba, 0x06, 0x15, 0x4b, - 0x3a, 0x81, 0xc8, 0x67, 0x35, 0x8a, 0xe5, 0x73, 0x07, 0x56, 0x18, 0x4f, 0x39, 0x6d, 0x90, 0x55, - 0xb5, 0x0b, 0x30, 0x43, 0x52, 0x0f, 0x7d, 0x07, 0x8a, 0x7c, 0x38, 0xe8, 0x79, 0x28, 0x53, 0x2f, - 0xa5, 0x15, 0x01, 0x4b, 0x3b, 0xe9, 0x47, 0x93, 0x80, 0x89, 0x3b, 0x00, 0x25, 0x81, 0xa1, 0x97, - 0x00, 0x48, 0xe2, 0xc8, 0xfd, 0x33, 0x47, 0xfd, 0x93, 0x56, 0x1e, 0xae, 0x63, 0xa6, 0x9c, 0xb2, - 0x1c, 0x81, 0xfa, 0xaf, 0x72, 0x50, 0x91, 0xcf, 0x3c, 0xee, 0xc8, 0xf8, 0x47, 0x20, 0xaa, 0xc2, - 0xae, 0x61, 0x9a, 0xe4, 0x2f, 0x16, 0x01, 0xb9, 0x35, 0x75, 0x92, 0xc4, 0xff, 0xab, 0x42, 0x82, - 0xd5, 0x00, 0xf4, 0x54, 0xd9, 0x4a, 0x90, 0x24, 0xab, 0xb5, 0x24, 0x6d, 0x79, 0x1b, 0x0e, 0x67, - 0xaa, 0x92, 0x33, 0xf7, 0xd9, 0x7b, 0x95, 0xb9, 0xff, 0x76, 0x16, 0x0e, 0x67, 0x9e, 0x35, 0x3d, - 0x70, 0x2f, 0x56, 0x3d, 0x28, 0x7f, 0x4f, 0x3c, 0xe8, 0x63, 0x2d, 0x6b, 0x65, 0x59, 0xdf, 0xfe, - 0x95, 0x7d, 0x1c, 0xc0, 0xdd, 0xab, 
0x35, 0x56, 0xb7, 0xe5, 0xec, 0x1d, 0xf9, 0x44, 0x61, 0xbf, - 0x3e, 0x81, 0x9e, 0x65, 0x45, 0x18, 0xb5, 0x55, 0xa4, 0xb6, 0x44, 0x84, 0x48, 0x98, 0x2a, 0x72, - 0x88, 0xd4, 0xe5, 0x42, 0x82, 0x95, 0xfe, 0xa5, 0xb8, 0x2e, 0xe7, 0x3c, 0xc9, 0xea, 0x7f, 0x4e, - 0xc6, 0xff, 0xbb, 0x7b, 0xf8, 0xdf, 0x1a, 0x54, 0x13, 0x87, 0xcf, 0x0f, 0xcf, 0x37, 0xe8, 0x47, - 0x1a, 0x94, 0xa3, 0x7b, 0x0f, 0x77, 0x9d, 0x86, 0xae, 0x42, 0x01, 0xb3, 0xb3, 0x77, 0x16, 0xee, - 0x0e, 0x25, 0xee, 0x46, 0x11, 0x1a, 0xbf, 0x0d, 0x95, 0x38, 0x6e, 0xef, 0x70, 0x41, 0xfd, 0x8f, - 0x9a, 0x48, 0x30, 0xe3, 0x31, 0x3d, 0xd0, 0xa5, 0x88, 0xdf, 0x29, 0x7f, 0xa7, 0xef, 0xf4, 0xbb, - 0x12, 0xcc, 0x52, 0x3e, 0x52, 0x00, 0x06, 0xd8, 0x1b, 0x59, 0xb6, 0x31, 0xa4, 0xaf, 0x53, 0x62, - 0x7e, 0x2b, 0x30, 0xd9, 0x6f, 0x05, 0x86, 0xb6, 0xa0, 0x1a, 0x37, 0xad, 0xa8, 0x9a, 0xec, 0x2b, - 0x57, 0x6f, 0xab, 0x4c, 0xac, 0x15, 0x9e, 0x90, 0x54, 0xcf, 0xa4, 0x13, 0x44, 0x64, 0xc2, 0x42, - 0xdf, 0xb1, 0x03, 0xc3, 0xb2, 0xb1, 0xc7, 0x0c, 0xe5, 0xb3, 0xae, 0x9c, 0x9c, 0x56, 0x78, 0x58, - 0xed, 0xaf, 0xca, 0xa9, 0x57, 0x4e, 0x54, 0x1a, 0xfa, 0x00, 0xe6, 0x45, 0x12, 0xce, 0x8c, 0xcc, - 0x64, 0x5d, 0x39, 0x59, 0x93, 0x59, 0xd8, 0x96, 0x56, 0xa4, 0xd4, 0x2b, 0x27, 0x0a, 0x09, 0x0d, - 0xa1, 0xe6, 0x3a, 0xe6, 0x55, 0x9b, 0xb7, 0x1d, 0x8c, 0xde, 0x10, 0xf3, 0x4e, 0xe9, 0x4a, 0x2a, - 0xe5, 0x51, 0xb8, 0x58, 0x28, 0x4e, 0xca, 0xaa, 0x97, 0xb8, 0x92, 0x54, 0xf4, 0x3e, 0xcc, 0x0d, - 0x49, 0x2d, 0xb4, 0xb6, 0xeb, 0x5a, 0x1e, 0x36, 0xb3, 0xaf, 0x5c, 0x5d, 0x90, 0x38, 0x58, 0x20, - 0x94, 0x65, 0xd4, 0x6b, 0x27, 0x32, 0x85, 0xac, 0xfe, 0xc8, 0xd8, 0xed, 0x84, 0xb6, 0xbf, 0xb6, - 0xcb, 0xaf, 0xcf, 0x14, 0xb3, 0x56, 0x7f, 0x5d, 0x65, 0x62, 0xab, 0x9f, 0x90, 0x54, 0x57, 0x3f, - 0x41, 0x44, 0x17, 0x68, 0x9c, 0x67, 0x4b, 0xc2, 0xae, 0x5e, 0x2d, 0xa5, 0x66, 0x8b, 0xad, 0x06, - 0x6b, 0x5a, 0xf0, 0x27, 0x45, 0x69, 0xa4, 0x81, 0xaf, 0x01, 0x7d, 0xed, 0x0e, 0x0e, 0x42, 0xcf, - 0xc6, 0x26, 0xbf, 0x75, 0x95, 0x5e, 0x03, 0x85, 0x2b, 0x5a, 0x03, 0x05, 0x4d, 0xad, 0x81, 0x42, - 0x25, 0x7b, 0xca, 0x75, 0xcc, 0x2b, 0xcc, 0x65, 0x82, 0xe8, 0x2e, 0xd6, 0xa3, 0x29, 0x53, 0x31, - 0x0b, 0xdb, 0x53, 0x8a, 0x94, 0xba, 0xa7, 0x14, 0x12, 0xbf, 0xfe, 0x23, 0x5f, 0x16, 0x61, 0x33, - 0x55, 0x99, 0x72, 0xfd, 0x27, 0xc5, 0x19, 0x5d, 0xff, 0x49, 0x51, 0x52, 0xd7, 0x7f, 0xd2, 0xb2, - 0x25, 0xd1, 0x5e, 0xd0, 0x3f, 0xd1, 0xa0, 0x9a, 0xf0, 0x74, 0xf4, 0x3a, 0x44, 0xd7, 0x0c, 0xae, - 0xec, 0xb9, 0x22, 0x51, 0x55, 0xae, 0x25, 0x10, 0x3c, 0xeb, 0x5a, 0x02, 0xc1, 0xd1, 0x05, 0x80, - 0xe8, 0xab, 0x70, 0xab, 0x30, 0x49, 0xb3, 0xa4, 0x98, 0x53, 0xce, 0x92, 0x62, 0x54, 0xff, 0x3c, - 0x0f, 0x25, 0xb1, 0x55, 0xee, 0x4b, 0x21, 0xd3, 0x82, 0xe2, 0x08, 0xfb, 0xf4, 0x7a, 0x42, 0x2e, - 0xce, 0x47, 0x38, 0x24, 0xe7, 0x23, 0x1c, 0x52, 0xd3, 0xa5, 0xfc, 0x1d, 0xa5, 0x4b, 0x33, 0xfb, - 0x4e, 0x97, 0x30, 0x3d, 0x9a, 0x94, 0x02, 0x9e, 0x38, 0x57, 0xb8, 0x75, 0x14, 0x15, 0x07, 0x97, - 0xb2, 0x60, 0xe2, 0xe0, 0x52, 0x26, 0xa1, 0x6d, 0x38, 0x28, 0x9d, 0x7d, 0xf0, 0xde, 0x13, 0x09, - 0x3d, 0x0b, 0xd3, 0xcf, 0x81, 0x3b, 0x94, 0x8b, 0x39, 0xd8, 0x76, 0x02, 0x95, 0xf3, 0xcd, 0x24, - 0x4d, 0xff, 0x5b, 0x0e, 0x16, 0xd4, 0xf1, 0xde, 0x97, 0x85, 0x7d, 0x1e, 0xca, 0x78, 0xd7, 0x0a, - 0xba, 0x7d, 0xc7, 0xc4, 0xbc, 0x68, 0xa3, 0xeb, 0x44, 0xc0, 0xd3, 0x8e, 0xa9, 0xac, 0x93, 0xc0, - 0xe4, 0xdd, 0x90, 0xdf, 0xd7, 0x6e, 0x88, 0x5b, 0x75, 0x33, 0xb7, 0x6f, 0xd5, 0x65, 0xcf, 0x73, - 0xf9, 0x3e, 0xcd, 0xf3, 0xcd, 0x1c, 0xd4, 0x92, 0xf1, 0xf0, 0xeb, 0xe1, 0x42, 0xaa, 0x37, 0xe4, - 0xf7, 0xed, 0x0d, 0x6f, 0xc0, 0x3c, 0xc9, 0xde, 0x8c, 0x20, 
0xe0, 0x17, 0xf7, 0x66, 0x68, 0xd6, - 0xc3, 0x62, 0x53, 0x68, 0xaf, 0x0a, 0x5c, 0x89, 0x4d, 0x12, 0xae, 0x7f, 0x37, 0x07, 0xf3, 0x4a, - 0xdc, 0x7e, 0xf8, 0x42, 0x8a, 0x5e, 0x85, 0x79, 0x25, 0x1d, 0xd2, 0xbf, 0xcf, 0xf6, 0x89, 0x9a, - 0x87, 0x3c, 0x7c, 0xf3, 0xb2, 0x00, 0x73, 0x72, 0x5e, 0xa5, 0xb7, 0xa1, 0x9a, 0x48, 0x83, 0xe4, - 0x17, 0xd0, 0xf6, 0xf3, 0x02, 0xfa, 0x12, 0x2c, 0x66, 0x7d, 0xbd, 0xf5, 0x4f, 0x35, 0x4a, 0x48, - 0xdf, 0xcc, 0x3d, 0x0b, 0x60, 0xe3, 0x1b, 0xdd, 0xdb, 0xd6, 0x4d, 0x6c, 0x1a, 0xf0, 0x8d, 0xf3, - 0x89, 0x32, 0xa3, 0x24, 0x30, 0xa2, 0xc9, 0x19, 0x9a, 0xdd, 0xdb, 0x56, 0x2b, 0x54, 0x93, 0x33, - 0x34, 0x53, 0x9a, 0x04, 0xa6, 0xff, 0x30, 0x2f, 0x4a, 0xda, 0xf8, 0x6a, 0xeb, 0x7b, 0x50, 0x73, - 0xc5, 0xc3, 0xed, 0x47, 0x4b, 0x93, 0xfa, 0x88, 0x3f, 0x69, 0x69, 0x41, 0xa5, 0xa8, 0xba, 0x79, - 0xb5, 0x96, 0xdb, 0xa7, 0xee, 0x4e, 0xa2, 0x6c, 0x5b, 0x50, 0x29, 0xe8, 0xdb, 0x70, 0x50, 0xdc, - 0xfc, 0xd9, 0xc1, 0x62, 0xe0, 0xf9, 0xa9, 0xca, 0xd9, 0x4d, 0xdc, 0x48, 0x20, 0x39, 0xf2, 0x6a, - 0x82, 0x94, 0x50, 0xcf, 0xc7, 0x3e, 0xb3, 0x5f, 0xf5, 0xc9, 0xc1, 0x57, 0x13, 0x24, 0x52, 0x5f, - 0x57, 0x13, 0x97, 0x85, 0xd1, 0x19, 0x28, 0xd1, 0xdf, 0x12, 0xdd, 0x7a, 0x05, 0xe8, 0x46, 0xa5, - 0x7c, 0x8a, 0x85, 0x22, 0x87, 0xd0, 0x8b, 0x50, 0x8e, 0xee, 0x14, 0xf3, 0xc3, 0x44, 0xe6, 0x33, - 0x02, 0x54, 0x7c, 0x46, 0x80, 0xfa, 0xcf, 0x35, 0x38, 0x32, 0xf5, 0x22, 0xf1, 0x83, 0x2e, 0xb6, - 0x9f, 0x7a, 0x16, 0x4a, 0xe2, 0xb8, 0x0f, 0x01, 0x14, 0xde, 0xb9, 0xba, 0x76, 0x75, 0xed, 0x4c, - 0xed, 0x00, 0xaa, 0x40, 0xf1, 0xf2, 0xda, 0xc5, 0x33, 0xe7, 0x2e, 0xbe, 0x55, 0xd3, 0xc8, 0x43, - 0xe7, 0xea, 0xc5, 0x8b, 0xe4, 0x21, 0xf7, 0xd4, 0x05, 0xf9, 0xf2, 0x11, 0xfb, 0x8c, 0xa2, 0x39, - 0x28, 0xad, 0xba, 0x2e, 0xf5, 0x5b, 0x26, 0xbb, 0xb6, 0x63, 0x11, 0x5f, 0xad, 0x69, 0xa8, 0x08, - 0xf9, 0x4b, 0x97, 0xd6, 0x6b, 0x39, 0xb4, 0x08, 0xb5, 0x33, 0xd8, 0x30, 0x87, 0x96, 0x8d, 0x45, - 0xb0, 0xa8, 0xe5, 0xdb, 0xd7, 0x3f, 0xfb, 0x62, 0x45, 0xfb, 0xfc, 0x8b, 0x15, 0xed, 0xaf, 0x5f, - 0xac, 0x68, 0x37, 0xbf, 0x5c, 0x39, 0xf0, 0xf9, 0x97, 0x2b, 0x07, 0xfe, 0xfc, 0xe5, 0xca, 0x81, - 0xf7, 0x9e, 0x95, 0x7e, 0x37, 0xc7, 0xde, 0xc9, 0xf5, 0x1c, 0x12, 0x27, 0xf9, 0x53, 0x2b, 0xf9, - 0x4b, 0xc1, 0x4f, 0x73, 0xc7, 0x56, 0xe9, 0xe3, 0x65, 0xc6, 0xd7, 0x3c, 0xe7, 0x34, 0x19, 0x40, - 0x7f, 0xec, 0xe5, 0xf7, 0x0a, 0xf4, 0x47, 0x5d, 0xcf, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0xb4, - 0x0d, 0xed, 0xa1, 0x64, 0x38, 0x00, 0x00, + // 3498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5b, 0x4b, 0x6c, 0x1b, 0xd7, + 0xb9, 0xf6, 0x90, 0x12, 0x1f, 0x3f, 0x25, 0x91, 0x3e, 0x96, 0x15, 0x5a, 0xb1, 0x45, 0x67, 0x9c, + 0x7b, 0xe3, 0x04, 0x09, 0x99, 0x38, 0x0f, 0xe4, 0x71, 0x91, 0x40, 0xb4, 0x15, 0x3f, 0x62, 0xd9, + 0x0e, 0x65, 0xe7, 0xfa, 0x06, 0xb9, 0x60, 0x86, 0x9c, 0x23, 0x6a, 0xac, 0xe1, 0xcc, 0x64, 0x1e, + 0xb2, 0x04, 0x64, 0x71, 0xef, 0xc5, 0xbd, 0xb9, 0xbb, 0xd4, 0x40, 0xbb, 0x28, 0xd0, 0x45, 0xba, + 0x6d, 0x80, 0xae, 0xbb, 0xee, 0xaa, 0x59, 0x14, 0x45, 0xda, 0x55, 0x57, 0x6c, 0x91, 0xa0, 0x8b, + 0x72, 0xd1, 0x75, 0xdb, 0x4d, 0x8b, 0xf3, 0x9a, 0x39, 0x67, 0x38, 0xb4, 0xe5, 0x57, 0x9d, 0xc2, + 0x2b, 0x69, 0xbe, 0xff, 0x39, 0xe7, 0xf1, 0xcf, 0xff, 0xff, 0xe7, 0x10, 0x8e, 0x79, 0xdb, 0x83, + 0x96, 0xe1, 0x0f, 0x0d, 0xd3, 0xc0, 0x3b, 0xd8, 0x09, 0x83, 0x16, 0xfb, 0xd3, 0xf4, 0x7c, 0x37, + 0x74, 0xd1, 0x9c, 0x4c, 0x5a, 0xd6, 0xb7, 0x5f, 0x0f, 0x9a, 0x96, 0xdb, 0x32, 0x3c, 0xab, 0xd5, + 0x77, 0x7d, 0xdc, 0xda, 0x79, 0xa9, 0x35, 0xc0, 0x0e, 0xf6, 0x8d, 0x10, 0x9b, 0x4c, 0x62, 
0xf9, + 0xa4, 0xc4, 0xe3, 0xe0, 0xf0, 0xa6, 0xeb, 0x6f, 0x5b, 0xce, 0x20, 0x8b, 0xb3, 0x31, 0x70, 0xdd, + 0x81, 0x8d, 0x5b, 0xf4, 0xa9, 0x17, 0x6d, 0xb6, 0x42, 0x6b, 0x88, 0x83, 0xd0, 0x18, 0x7a, 0x9c, + 0xe1, 0x95, 0x44, 0xd5, 0xd0, 0xe8, 0x6f, 0x59, 0x0e, 0xf6, 0xf7, 0x5a, 0xd4, 0x5f, 0xcf, 0x6a, + 0xf9, 0x38, 0x70, 0x23, 0xbf, 0x8f, 0x27, 0xd4, 0xbe, 0x30, 0xb0, 0xc2, 0xad, 0xa8, 0xd7, 0xec, + 0xbb, 0xc3, 0xd6, 0xc0, 0x1d, 0xb8, 0x89, 0x7e, 0xf2, 0x44, 0x1f, 0xe8, 0x7f, 0x9c, 0xfd, 0x4d, + 0xcb, 0x09, 0xb1, 0xef, 0x18, 0x76, 0x2b, 0xe8, 0x6f, 0x61, 0x33, 0xb2, 0xb1, 0x9f, 0xfc, 0xe7, + 0xf6, 0x6e, 0xe0, 0x7e, 0x18, 0x4c, 0x00, 0x4c, 0x56, 0xbf, 0xb5, 0x08, 0xf3, 0x6b, 0x64, 0x68, + 0x36, 0xf0, 0x27, 0x11, 0x76, 0xfa, 0x18, 0x3d, 0x0b, 0xb3, 0x9f, 0x44, 0x38, 0xc2, 0x75, 0xed, + 0xb8, 0x76, 0xb2, 0xdc, 0x3e, 0x34, 0x1e, 0x35, 0xaa, 0x14, 0x78, 0xde, 0x1d, 0x5a, 0x21, 0x1e, + 0x7a, 0xe1, 0x5e, 0x87, 0x71, 0xa0, 0x37, 0x61, 0xee, 0x86, 0xdb, 0xeb, 0x06, 0x38, 0xec, 0x3a, + 0xc6, 0x10, 0xd7, 0x73, 0x54, 0xa2, 0x3e, 0x1e, 0x35, 0x16, 0x6f, 0xb8, 0xbd, 0x0d, 0x1c, 0x5e, + 0x32, 0x86, 0xb2, 0x18, 0x24, 0x28, 0x7a, 0x01, 0x8a, 0x51, 0x80, 0xfd, 0xae, 0x65, 0xd6, 0xf3, + 0x54, 0x6c, 0x71, 0x3c, 0x6a, 0xd4, 0x08, 0x74, 0xde, 0x94, 0x44, 0x0a, 0x0c, 0x41, 0xcf, 0x43, + 0x61, 0xe0, 0xbb, 0x91, 0x17, 0xd4, 0x67, 0x8e, 0xe7, 0x05, 0x37, 0x43, 0x64, 0x6e, 0x86, 0xa0, + 0xcb, 0x50, 0x60, 0xf3, 0x5d, 0x9f, 0x3d, 0x9e, 0x3f, 0x59, 0x39, 0xf5, 0x54, 0x53, 0x5e, 0x04, + 0x4d, 0xe5, 0x85, 0xd9, 0x13, 0x53, 0xc8, 0xe8, 0xb2, 0x42, 0xbe, 0x6c, 0xfe, 0x78, 0x10, 0x66, + 0x29, 0x1f, 0xba, 0x0c, 0xc5, 0xbe, 0x8f, 0xc9, 0x64, 0xd5, 0xd1, 0x71, 0xed, 0x64, 0xe5, 0xd4, + 0x72, 0x93, 0x2d, 0x82, 0xa6, 0x98, 0xa4, 0xe6, 0x55, 0xb1, 0x08, 0xda, 0x47, 0xc6, 0xa3, 0xc6, + 0x41, 0xce, 0x9e, 0x68, 0xbd, 0xf5, 0xbb, 0x86, 0xd6, 0x11, 0x5a, 0xd0, 0x15, 0x28, 0x07, 0x51, + 0x6f, 0x68, 0x85, 0x17, 0xdc, 0x1e, 0x1d, 0xf3, 0xca, 0xa9, 0x27, 0x54, 0x77, 0x37, 0x04, 0xb9, + 0xfd, 0xc4, 0x78, 0xd4, 0x38, 0x14, 0x73, 0x27, 0x1a, 0xcf, 0x1d, 0xe8, 0x24, 0x4a, 0xd0, 0x16, + 0x54, 0x7d, 0xec, 0xf9, 0x96, 0xeb, 0x5b, 0xa1, 0x15, 0x60, 0xa2, 0x37, 0x47, 0xf5, 0x1e, 0x53, + 0xf5, 0x76, 0x54, 0xa6, 0xf6, 0xb1, 0xf1, 0xa8, 0x71, 0x24, 0x25, 0xa9, 0xd8, 0x48, 0xab, 0x45, + 0x21, 0xa0, 0x14, 0xb4, 0x81, 0x43, 0x3a, 0x9f, 0x95, 0x53, 0xc7, 0x6f, 0x6b, 0x6c, 0x03, 0x87, + 0xed, 0xe3, 0xe3, 0x51, 0xe3, 0xe8, 0xa4, 0xbc, 0x62, 0x32, 0x43, 0x3f, 0xb2, 0xa1, 0x26, 0xa3, + 0x26, 0x79, 0xc1, 0x19, 0x6a, 0x73, 0x65, 0xba, 0x4d, 0xc2, 0xd5, 0x5e, 0x19, 0x8f, 0x1a, 0xcb, + 0x69, 0x59, 0xc5, 0xde, 0x84, 0x66, 0x32, 0x3f, 0x7d, 0xc3, 0xe9, 0x63, 0x9b, 0x98, 0x99, 0xcd, + 0x9a, 0x9f, 0xd3, 0x82, 0xcc, 0xe6, 0x27, 0xe6, 0x56, 0xe7, 0x27, 0x86, 0xd1, 0x47, 0x30, 0x17, + 0x3f, 0x90, 0xf1, 0x2a, 0xf0, 0x75, 0x94, 0xad, 0x94, 0x8c, 0xd4, 0xf2, 0x78, 0xd4, 0x58, 0x92, + 0x65, 0x14, 0xd5, 0x8a, 0xb6, 0x44, 0xbb, 0xcd, 0x46, 0xa6, 0x38, 0x5d, 0x3b, 0xe3, 0x90, 0xb5, + 0xdb, 0x93, 0x23, 0xa2, 0x68, 0x23, 0xda, 0xc9, 0x26, 0x8e, 0xfa, 0x7d, 0x8c, 0x4d, 0x6c, 0xd6, + 0x4b, 0x59, 0xda, 0x2f, 0x48, 0x1c, 0x4c, 0xbb, 0x2c, 0xa3, 0x6a, 0x97, 0x29, 0x64, 0xac, 0x6f, + 0xb8, 0xbd, 0x35, 0xdf, 0x77, 0xfd, 0xa0, 0x5e, 0xce, 0x1a, 0xeb, 0x0b, 0x82, 0xcc, 0xc6, 0x3a, + 0xe6, 0x56, 0xc7, 0x3a, 0x86, 0xb9, 0xbf, 0x9d, 0xc8, 0xb9, 0x88, 0x8d, 0x00, 0x9b, 0x75, 0x98, + 0xe2, 0x6f, 0xcc, 0x11, 0xfb, 0x1b, 0x23, 0x13, 0xfe, 0xc6, 0x14, 0x64, 0xc2, 0x02, 0x7b, 0x5e, + 0x0d, 0x02, 0x6b, 0xe0, 0x60, 0xb3, 0x5e, 0xa1, 0xfa, 0x8f, 0x66, 0xe9, 0x17, 0x3c, 0xed, 0xa3, + 0xe3, 0x51, 0xa3, 
0xae, 0xca, 0x29, 0x36, 0x52, 0x3a, 0xd1, 0xc7, 0x30, 0xcf, 0x90, 0x4e, 0xe4, + 0x38, 0x96, 0x33, 0xa8, 0xcf, 0x51, 0x23, 0x4f, 0x66, 0x19, 0xe1, 0x2c, 0xed, 0x27, 0xc7, 0xa3, + 0xc6, 0x13, 0x8a, 0x94, 0x62, 0x42, 0x55, 0x48, 0x22, 0x06, 0x03, 0x92, 0x89, 0x9d, 0xcf, 0x8a, + 0x18, 0x17, 0x54, 0x26, 0x16, 0x31, 0x52, 0x92, 0x6a, 0xc4, 0x48, 0x11, 0x93, 0xf9, 0xe0, 0x93, + 0xbc, 0x30, 0x7d, 0x3e, 0xf8, 0x3c, 0x4b, 0xf3, 0x91, 0x31, 0xd5, 0x8a, 0x36, 0xf4, 0x29, 0x90, + 0x0f, 0xcf, 0x99, 0xc8, 0xb3, 0xad, 0xbe, 0x11, 0xe2, 0x33, 0x38, 0xc4, 0x7d, 0x12, 0xa9, 0xab, + 0xd4, 0x8a, 0x3e, 0x61, 0x65, 0x82, 0xb3, 0xad, 0x8f, 0x47, 0x8d, 0x95, 0x2c, 0x1d, 0x8a, 0xd5, + 0x4c, 0x2b, 0xe8, 0xbf, 0x34, 0x38, 0x1c, 0x84, 0x86, 0x63, 0x1a, 0xb6, 0xeb, 0xe0, 0xf3, 0xce, + 0xc0, 0xc7, 0x41, 0x70, 0xde, 0xd9, 0x74, 0xeb, 0x35, 0x6a, 0xff, 0x44, 0x2a, 0xac, 0x67, 0xb1, + 0xb6, 0x4f, 0x8c, 0x47, 0x8d, 0x46, 0xa6, 0x16, 0xc5, 0x83, 0x6c, 0x43, 0x68, 0x17, 0x0e, 0x89, + 0xac, 0xe2, 0x5a, 0x68, 0xd9, 0x56, 0x60, 0x84, 0x96, 0xeb, 0xd4, 0x0f, 0x52, 0xfb, 0x4f, 0xa5, + 0xa3, 0xe3, 0x04, 0x63, 0xfb, 0xa9, 0xf1, 0xa8, 0x71, 0x2c, 0x43, 0x83, 0x62, 0x3b, 0xcb, 0x44, + 0xb2, 0x84, 0xae, 0xf8, 0x98, 0x30, 0x62, 0xb3, 0x7e, 0x68, 0xfa, 0x12, 0x8a, 0x99, 0xe4, 0x25, + 0x14, 0x83, 0x59, 0x4b, 0x28, 0x26, 0x12, 0x4b, 0x9e, 0xe1, 0x87, 0x16, 0x31, 0xbb, 0x6e, 0xf8, + 0xdb, 0xd8, 0xaf, 0x2f, 0x66, 0x59, 0xba, 0xa2, 0x32, 0x31, 0x4b, 0x29, 0x49, 0xd5, 0x52, 0x8a, + 0x88, 0x6e, 0x69, 0xa0, 0xba, 0x66, 0xb9, 0x4e, 0x87, 0xa4, 0x0d, 0x01, 0x79, 0xbd, 0xc3, 0xd4, + 0xe8, 0x33, 0xb7, 0x79, 0x3d, 0x99, 0xbd, 0xfd, 0xcc, 0x78, 0xd4, 0x38, 0x31, 0x55, 0x9b, 0xe2, + 0xc8, 0x74, 0xa3, 0xe8, 0x3a, 0x54, 0x08, 0x11, 0xd3, 0x04, 0xcc, 0xac, 0x2f, 0x51, 0x1f, 0x8e, + 0x4c, 0xfa, 0xc0, 0x19, 0x68, 0x06, 0x72, 0x58, 0x92, 0x50, 0xec, 0xc8, 0xaa, 0xda, 0x45, 0x98, + 0xa5, 0xf2, 0xfa, 0xb8, 0x00, 0x87, 0x32, 0xd6, 0x06, 0x7a, 0x1b, 0x0a, 0x7e, 0xe4, 0x90, 0x84, + 0x8d, 0x65, 0x29, 0x48, 0xb5, 0x7a, 0x2d, 0xb2, 0x4c, 0x96, 0x2d, 0xfa, 0x91, 0xa3, 0xe4, 0x70, + 0xb3, 0x14, 0x20, 0xf2, 0x24, 0x5b, 0xb4, 0x4c, 0x9e, 0x8d, 0x4c, 0x95, 0xbf, 0xe1, 0xf6, 0x54, + 0x79, 0x0a, 0x20, 0x0c, 0xf3, 0x62, 0xe1, 0x75, 0x2d, 0xb2, 0xab, 0x58, 0x9e, 0xf1, 0xb4, 0xaa, + 0xe6, 0xbd, 0xa8, 0x87, 0x7d, 0x07, 0x87, 0x38, 0x10, 0xef, 0x40, 0xb7, 0x15, 0x8d, 0x22, 0xbe, + 0x84, 0x48, 0xfa, 0xe7, 0x64, 0x1c, 0xfd, 0x40, 0x83, 0xfa, 0xd0, 0xd8, 0xed, 0x0a, 0x30, 0xe8, + 0x6e, 0xba, 0x7e, 0xd7, 0xc3, 0xbe, 0xe5, 0x9a, 0x34, 0xf9, 0xac, 0x9c, 0xfa, 0xb7, 0x3b, 0x6e, + 0xa4, 0xe6, 0xba, 0xb1, 0x2b, 0xe0, 0xe0, 0x5d, 0xd7, 0xbf, 0x42, 0xc5, 0xd7, 0x9c, 0xd0, 0xdf, + 0x6b, 0x1f, 0xfb, 0x6a, 0xd4, 0x38, 0x40, 0xa6, 0x65, 0x98, 0xc5, 0xd3, 0xc9, 0x86, 0xd1, 0xf7, + 0x34, 0x58, 0x0a, 0xdd, 0xd0, 0xb0, 0xbb, 0xfd, 0x68, 0x18, 0xd9, 0x46, 0x68, 0xed, 0xe0, 0x6e, + 0x14, 0x18, 0x03, 0xcc, 0x73, 0xdc, 0xb7, 0xee, 0xec, 0xd4, 0x55, 0x22, 0x7f, 0x3a, 0x16, 0xbf, + 0x46, 0xa4, 0x99, 0x4f, 0x47, 0xb9, 0x4f, 0x8b, 0x61, 0x06, 0x4b, 0x27, 0x13, 0x5d, 0xfe, 0xb1, + 0x06, 0xcb, 0xd3, 0x5f, 0x13, 0x9d, 0x80, 0xfc, 0x36, 0xde, 0xe3, 0x55, 0xc4, 0xc1, 0xf1, 0xa8, + 0x31, 0xbf, 0x8d, 0xf7, 0xa4, 0x51, 0x27, 0x54, 0xf4, 0x1f, 0x30, 0xbb, 0x63, 0xd8, 0x11, 0xe6, + 0x4b, 0xa2, 0xd9, 0x64, 0xf5, 0x52, 0x53, 0xae, 0x97, 0x9a, 0xde, 0xf6, 0x80, 0x00, 0x4d, 0x31, + 0x23, 0xcd, 0xf7, 0x23, 0xc3, 0x09, 0xad, 0x70, 0x8f, 0x2d, 0x17, 0xaa, 0x40, 0x5e, 0x2e, 0x14, + 0x78, 0x33, 0xf7, 0xba, 0xb6, 0xfc, 0x85, 0x06, 0x47, 0xa6, 0xbe, 0xf4, 0x77, 0xc1, 0x43, 0xbd, + 0x0b, 0x33, 0x64, 0xe1, 0x93, 0xfa, 0x66, 
0xcb, 0x1a, 0x6c, 0xbd, 0xf6, 0x0a, 0x75, 0xa7, 0xc0, + 0xca, 0x11, 0x86, 0xc8, 0xe5, 0x08, 0x43, 0x48, 0x8d, 0x66, 0xbb, 0x37, 0x5f, 0x7b, 0x85, 0x3a, + 0x55, 0x60, 0x46, 0x28, 0x20, 0x1b, 0xa1, 0x80, 0xfe, 0xb7, 0x02, 0x94, 0xe3, 0x02, 0x42, 0xda, + 0x83, 0xda, 0x3d, 0xed, 0xc1, 0x73, 0x50, 0x33, 0xb1, 0xc9, 0xbf, 0x7c, 0x96, 0xeb, 0x88, 0xdd, + 0x5c, 0x66, 0xd1, 0x55, 0xa1, 0x29, 0xf2, 0xd5, 0x14, 0x09, 0x9d, 0x82, 0x12, 0x4f, 0xb4, 0xf7, + 0xe8, 0x46, 0x9e, 0x6f, 0x2f, 0x8d, 0x47, 0x0d, 0x24, 0x30, 0x49, 0x34, 0xe6, 0x43, 0x1d, 0x00, + 0x56, 0xbd, 0xae, 0xe3, 0xd0, 0xe0, 0x29, 0x7f, 0x5d, 0x7d, 0x83, 0xcb, 0x31, 0x9d, 0xd5, 0xa1, + 0x09, 0xbf, 0x5c, 0x87, 0x26, 0x28, 0xfa, 0x08, 0x60, 0x68, 0x58, 0x0e, 0x93, 0xe3, 0xf9, 0xbd, + 0x3e, 0x2d, 0xa4, 0xac, 0xc7, 0x9c, 0x4c, 0x7b, 0x22, 0x29, 0x6b, 0x4f, 0x50, 0x52, 0x2d, 0xf2, + 0x7a, 0xbb, 0x5e, 0xa0, 0xbb, 0x74, 0x65, 0x9a, 0x6a, 0xae, 0xf6, 0x30, 0xa9, 0x18, 0xb9, 0x88, + 0xa4, 0x53, 0x68, 0x21, 0xc3, 0x66, 0x5b, 0x9b, 0x38, 0xb4, 0x86, 0x98, 0x66, 0xf6, 0x7c, 0xd8, + 0x04, 0x26, 0x0f, 0x9b, 0xc0, 0xd0, 0xeb, 0x00, 0x46, 0xb8, 0xee, 0x06, 0xe1, 0x65, 0xa7, 0x8f, + 0x69, 0xc6, 0x5e, 0x62, 0xee, 0x27, 0xa8, 0xec, 0x7e, 0x82, 0xa2, 0xb7, 0xa0, 0xe2, 0xf1, 0x8f, + 0x50, 0xcf, 0xc6, 0x34, 0x23, 0x2f, 0xb1, 0x4f, 0x8a, 0x04, 0x4b, 0xb2, 0x32, 0x37, 0x3a, 0x0b, + 0xd5, 0xbe, 0xeb, 0xf4, 0x23, 0xdf, 0xc7, 0x4e, 0x7f, 0x6f, 0xc3, 0xd8, 0xc4, 0x34, 0xfb, 0x2e, + 0xb1, 0xa5, 0x92, 0x22, 0xc9, 0x4b, 0x25, 0x45, 0x42, 0xaf, 0x42, 0x39, 0xee, 0x5e, 0xd0, 0x04, + 0xbb, 0xcc, 0x0b, 0x61, 0x01, 0x4a, 0xc2, 0x09, 0x27, 0x71, 0xde, 0x0a, 0xe2, 0x2c, 0x8d, 0x26, + 0xcd, 0xdc, 0x79, 0x09, 0x96, 0x9d, 0x97, 0x60, 0x74, 0x1e, 0x0e, 0xd2, 0xef, 0x62, 0x37, 0x0c, + 0xed, 0x6e, 0x80, 0xfb, 0xae, 0x63, 0x06, 0x34, 0x27, 0xce, 0x33, 0xf7, 0x29, 0xf1, 0x6a, 0x68, + 0x6f, 0x30, 0x92, 0xec, 0x7e, 0x8a, 0xa4, 0xff, 0x52, 0x83, 0xc5, 0xac, 0x25, 0x94, 0x5a, 0xce, + 0xda, 0x03, 0x59, 0xce, 0x1f, 0x40, 0xc9, 0x73, 0xcd, 0x6e, 0xe0, 0xe1, 0x3e, 0x8f, 0x58, 0xa9, + 0xc5, 0x7c, 0xc5, 0x35, 0x37, 0x3c, 0xdc, 0xff, 0x77, 0x2b, 0xdc, 0x5a, 0xdd, 0x71, 0x2d, 0xf3, + 0xa2, 0x15, 0xf0, 0x55, 0xe7, 0x31, 0x8a, 0x92, 0x21, 0x14, 0x39, 0xd8, 0x2e, 0x41, 0x81, 0x59, + 0xd1, 0x7f, 0x95, 0x87, 0x5a, 0x7a, 0xd9, 0xfe, 0x33, 0xbd, 0x0a, 0xba, 0x0e, 0x45, 0x8b, 0xa5, + 0xcc, 0x3c, 0x83, 0xf8, 0x17, 0x29, 0xa6, 0x37, 0x93, 0x86, 0x5f, 0x73, 0xe7, 0xa5, 0x26, 0xcf, + 0xad, 0xe9, 0x10, 0x50, 0xcd, 0x5c, 0x52, 0xd5, 0xcc, 0x41, 0xd4, 0x81, 0x62, 0x80, 0xfd, 0x1d, + 0xab, 0x8f, 0x79, 0x70, 0x6a, 0xc8, 0x9a, 0xfb, 0xae, 0x8f, 0x89, 0xce, 0x0d, 0xc6, 0x92, 0xe8, + 0xe4, 0x32, 0xaa, 0x4e, 0x0e, 0xa2, 0x0f, 0xa0, 0xdc, 0x77, 0x9d, 0x4d, 0x6b, 0xb0, 0x6e, 0x78, + 0x3c, 0x3c, 0x1d, 0xcb, 0xd2, 0x7a, 0x5a, 0x30, 0xf1, 0x26, 0x84, 0x78, 0x4c, 0x35, 0x21, 0x62, + 0xae, 0x64, 0x42, 0xff, 0x34, 0x03, 0x90, 0x4c, 0x0e, 0x7a, 0x03, 0x2a, 0x78, 0x17, 0xf7, 0xa3, + 0xd0, 0xf5, 0xc5, 0x77, 0x82, 0xf7, 0xf4, 0x04, 0xac, 0x04, 0x76, 0x48, 0x50, 0xb2, 0x51, 0x1d, + 0x63, 0x88, 0x03, 0xcf, 0xe8, 0x8b, 0x66, 0x20, 0x75, 0x26, 0x06, 0xe5, 0x8d, 0x1a, 0x83, 0xe8, + 0x5f, 0x61, 0x86, 0xb6, 0x0f, 0x59, 0x1f, 0x10, 0x8d, 0x47, 0x8d, 0x05, 0x47, 0x6d, 0x1c, 0x52, + 0x3a, 0x7a, 0x07, 0xe6, 0xb7, 0xe3, 0x85, 0x47, 0x7c, 0x9b, 0xa1, 0x02, 0x34, 0xb5, 0x4b, 0x08, + 0x8a, 0x77, 0x73, 0x32, 0x8e, 0x36, 0xa1, 0x62, 0x38, 0x8e, 0x1b, 0xd2, 0x6f, 0x90, 0xe8, 0x0d, + 0x3e, 0x3b, 0x6d, 0x99, 0x36, 0x57, 0x13, 0x5e, 0x96, 0x25, 0xd1, 0xe0, 0x21, 0x69, 0x90, 0x83, + 0x87, 0x04, 0xa3, 0x0e, 0x14, 0x6c, 0xa3, 0x87, 0x6d, 0x11, 0xf4, 
0x9f, 0x9e, 0x6a, 0xe2, 0x22, + 0x65, 0x63, 0xda, 0xe9, 0x27, 0x9f, 0xc9, 0xc9, 0x9f, 0x7c, 0x86, 0x2c, 0x6f, 0x42, 0x2d, 0xed, + 0xcf, 0xfe, 0x12, 0x98, 0x67, 0xe5, 0x04, 0xa6, 0x7c, 0xc7, 0x94, 0xc9, 0x80, 0x8a, 0xe4, 0xd4, + 0xc3, 0x30, 0xa1, 0xff, 0x44, 0x83, 0xc5, 0xac, 0xbd, 0x8b, 0xd6, 0xa5, 0x1d, 0xaf, 0xf1, 0x1e, + 0x47, 0xc6, 0x52, 0xe7, 0xb2, 0x53, 0xb6, 0x7a, 0xb2, 0xd1, 0xdb, 0xb0, 0xe0, 0xb8, 0x26, 0xee, + 0x1a, 0xc4, 0x80, 0x6d, 0x05, 0x61, 0x3d, 0x47, 0x7b, 0xc7, 0xb4, 0x37, 0x42, 0x28, 0xab, 0x82, + 0x20, 0x49, 0xcf, 0x2b, 0x04, 0xfd, 0xff, 0x34, 0xa8, 0xa6, 0x5a, 0x97, 0xf7, 0x9d, 0x44, 0xc9, + 0xa9, 0x4f, 0x6e, 0x7f, 0xa9, 0x8f, 0xfe, 0xfd, 0x1c, 0x54, 0xa4, 0xba, 0xee, 0xbe, 0x7d, 0xb8, + 0x01, 0x55, 0xfe, 0xa5, 0xb4, 0x9c, 0x01, 0x2b, 0xa7, 0x72, 0xbc, 0x49, 0x31, 0x71, 0x52, 0x70, + 0xc1, 0xed, 0x6d, 0xc4, 0xbc, 0xb4, 0x9a, 0xa2, 0x1d, 0xac, 0x40, 0xc1, 0x24, 0x13, 0x0b, 0x2a, + 0x05, 0x5d, 0x87, 0xa5, 0xc8, 0x33, 0x8d, 0x10, 0x77, 0x03, 0xde, 0x73, 0xef, 0x3a, 0xd1, 0xb0, + 0x87, 0x7d, 0xba, 0xe3, 0x67, 0x59, 0xcf, 0x85, 0x71, 0x88, 0xa6, 0xfc, 0x25, 0x4a, 0x97, 0x74, + 0x2e, 0x66, 0xd1, 0xf5, 0x73, 0x80, 0x26, 0xfb, 0xca, 0xca, 0xf8, 0x6a, 0xfb, 0x1c, 0xdf, 0xcf, + 0x34, 0xa8, 0xa5, 0xdb, 0xc5, 0x8f, 0x64, 0xa2, 0xf7, 0xa0, 0x1c, 0xb7, 0x7e, 0xef, 0xdb, 0x81, + 0xe7, 0xa1, 0xe0, 0x63, 0x23, 0x70, 0x1d, 0xbe, 0x33, 0x69, 0x88, 0x61, 0x88, 0x1c, 0x62, 0x18, + 0xa2, 0x5f, 0x85, 0x39, 0x36, 0x82, 0xef, 0x5a, 0x76, 0x88, 0x7d, 0x74, 0x06, 0x0a, 0x41, 0x68, + 0x84, 0x38, 0xa8, 0x6b, 0xc7, 0xf3, 0x27, 0x17, 0x4e, 0x2d, 0x4d, 0x76, 0x79, 0x09, 0x99, 0x69, + 0x65, 0x9c, 0xb2, 0x56, 0x86, 0xe8, 0xff, 0xa3, 0xc1, 0x9c, 0xdc, 0xcc, 0x7e, 0x30, 0x6a, 0xef, + 0xf2, 0xd5, 0x3e, 0x15, 0x3e, 0xd8, 0x0f, 0x66, 0x66, 0xef, 0xce, 0xfa, 0xcf, 0x34, 0x36, 0xb2, + 0x71, 0x17, 0xf4, 0x7e, 0xcd, 0x0f, 0x92, 0x56, 0x08, 0xd9, 0x61, 0x01, 0x0d, 0x6c, 0xfb, 0x6d, + 0x85, 0xd0, 0xf0, 0xa7, 0x88, 0xcb, 0xe1, 0x4f, 0x21, 0xe8, 0xbf, 0xc9, 0x51, 0xcf, 0x93, 0x8e, + 0xf7, 0xa3, 0x6e, 0x02, 0xa5, 0xb2, 0x93, 0xfc, 0x5d, 0x64, 0x27, 0x2f, 0x40, 0x91, 0x7e, 0x0e, + 0xe2, 0xc4, 0x81, 0x4e, 0x1a, 0x81, 0xd4, 0x13, 0x47, 0x86, 0xdc, 0x26, 0x6a, 0xcd, 0xde, 0x67, + 0xd4, 0xfa, 0x8b, 0x06, 0x0b, 0xea, 0x91, 0xc0, 0x23, 0x1f, 0xd6, 0x89, 0x05, 0x95, 0x7f, 0x48, + 0x0b, 0xea, 0xcf, 0x1a, 0xcc, 0x2b, 0x27, 0x15, 0x8f, 0xcf, 0xab, 0xff, 0x30, 0x07, 0x4b, 0xd9, + 0x6a, 0x1e, 0x4a, 0xf9, 0x74, 0x0e, 0x48, 0x22, 0x74, 0x3e, 0xf9, 0xb2, 0x1f, 0x9e, 0xa8, 0x9e, + 0xe8, 0x2b, 0x88, 0x2c, 0x6a, 0xe2, 0x88, 0x41, 0x88, 0xa3, 0xeb, 0x50, 0xb1, 0xa4, 0xc3, 0x8c, + 0x7c, 0x56, 0xcf, 0x59, 0x3e, 0xc2, 0x60, 0x35, 0xf6, 0x94, 0x83, 0x0b, 0x59, 0x55, 0xbb, 0x00, + 0x33, 0x24, 0xf5, 0xd0, 0x77, 0xa0, 0xc8, 0xdd, 0x41, 0x2f, 0x43, 0x99, 0xee, 0x52, 0x5a, 0x11, + 0xb0, 0xb4, 0x93, 0x7e, 0x34, 0x09, 0x98, 0xba, 0x4e, 0x50, 0x12, 0x18, 0x7a, 0x0d, 0x80, 0x24, + 0x8e, 0x7c, 0x7f, 0xe6, 0xe8, 0xfe, 0xa4, 0x95, 0x87, 0xe7, 0x9a, 0x13, 0x9b, 0xb2, 0x1c, 0x83, + 0xfa, 0x4f, 0x73, 0x50, 0x91, 0x8f, 0x4f, 0xee, 0xc9, 0xf8, 0xa7, 0x20, 0xaa, 0xc2, 0xae, 0x61, + 0x9a, 0xe4, 0x2f, 0x16, 0x01, 0xb9, 0x35, 0x75, 0x90, 0xc4, 0xff, 0xab, 0x42, 0x82, 0xd5, 0x00, + 0xf4, 0x80, 0xda, 0x4a, 0x91, 0x24, 0xab, 0xb5, 0x34, 0x6d, 0x79, 0x1b, 0x0e, 0x67, 0xaa, 0x92, + 0x33, 0xf7, 0xd9, 0x07, 0x95, 0xb9, 0xff, 0x7c, 0x16, 0x0e, 0x67, 0x1e, 0x5b, 0x3d, 0xf2, 0x5d, + 0xac, 0xee, 0xa0, 0xfc, 0x03, 0xd9, 0x41, 0x9f, 0x69, 0x59, 0x33, 0xcb, 0x8e, 0x00, 0xde, 0xd8, + 0xc7, 0x59, 0xde, 0x83, 0x9a, 0x63, 0x75, 0x59, 0xce, 0xde, 0xd3, 0x9e, 0x28, 0xec, 0x77, 
0x4f, + 0xa0, 0x17, 0x59, 0x11, 0x46, 0x6d, 0x15, 0xa9, 0x2d, 0x11, 0x21, 0x52, 0xa6, 0x8a, 0x1c, 0x22, + 0x75, 0xb9, 0x90, 0x60, 0xa5, 0x7f, 0x29, 0xa9, 0xcb, 0x39, 0x4f, 0xba, 0xfa, 0x9f, 0x93, 0xf1, + 0x7f, 0xec, 0x1a, 0xfe, 0xab, 0x06, 0xd5, 0xd4, 0x39, 0xf6, 0xe3, 0xf3, 0x0d, 0xfa, 0x5c, 0x83, + 0x72, 0x7c, 0x85, 0xe2, 0xbe, 0xd3, 0xd0, 0x55, 0x28, 0x60, 0x76, 0x8c, 0xcf, 0xc2, 0xdd, 0xa1, + 0xd4, 0x35, 0x2b, 0x42, 0xe3, 0x17, 0xab, 0x52, 0x27, 0xf7, 0x1d, 0x2e, 0xa8, 0xff, 0x5a, 0x13, + 0x09, 0x66, 0xe2, 0xd3, 0x23, 0x9d, 0x8a, 0xe4, 0x9d, 0xf2, 0xf7, 0xfa, 0x4e, 0xbf, 0x28, 0xc3, + 0x2c, 0xe5, 0x23, 0x05, 0x60, 0x88, 0xfd, 0xa1, 0xe5, 0x18, 0x36, 0x7d, 0x9d, 0x12, 0xdb, 0xb7, + 0x02, 0x93, 0xf7, 0xad, 0xc0, 0xd0, 0x16, 0x54, 0x93, 0xa6, 0x15, 0x55, 0x93, 0x7d, 0x7b, 0xeb, + 0x3d, 0x95, 0x89, 0xb5, 0xa5, 0x53, 0x92, 0xea, 0xf1, 0x76, 0x8a, 0x88, 0x4c, 0x58, 0xe8, 0xbb, + 0x4e, 0x68, 0x58, 0x0e, 0xf6, 0x99, 0xa1, 0x7c, 0xd6, 0xed, 0x95, 0xd3, 0x0a, 0x0f, 0xab, 0xfd, + 0x55, 0x39, 0xf5, 0xf6, 0x8a, 0x4a, 0x43, 0x1f, 0xc3, 0xbc, 0x48, 0xc2, 0x99, 0x91, 0x99, 0xac, + 0xdb, 0x2b, 0x6b, 0x32, 0x0b, 0x5b, 0xd2, 0x8a, 0x94, 0x7a, 0x7b, 0x45, 0x21, 0x21, 0x1b, 0x6a, + 0x9e, 0x6b, 0x5e, 0x73, 0x78, 0xdb, 0xc1, 0xe8, 0xd9, 0x98, 0x77, 0x4a, 0x57, 0x26, 0x52, 0x1e, + 0x85, 0x8b, 0x85, 0xe2, 0xb4, 0xac, 0x7a, 0x1f, 0x2c, 0x4d, 0x45, 0x1f, 0xc1, 0x9c, 0x4d, 0x6a, + 0xa1, 0xb5, 0x5d, 0xcf, 0xf2, 0xb1, 0x99, 0x7d, 0x7b, 0xeb, 0xa2, 0xc4, 0xc1, 0x02, 0xa1, 0x2c, + 0xa3, 0xde, 0x60, 0x91, 0x29, 0x64, 0xf6, 0x87, 0xc6, 0x6e, 0x27, 0x72, 0x82, 0xb5, 0x5d, 0x7e, + 0x13, 0xa7, 0x98, 0x35, 0xfb, 0xeb, 0x2a, 0x13, 0x9b, 0xfd, 0x94, 0xa4, 0x3a, 0xfb, 0x29, 0x22, + 0xba, 0x48, 0xe3, 0x3c, 0x9b, 0x12, 0x76, 0x8b, 0x6b, 0x69, 0x62, 0xb4, 0xd8, 0x6c, 0xb0, 0xa6, + 0x05, 0x7f, 0x52, 0x94, 0xc6, 0x1a, 0xf8, 0x1c, 0xd0, 0xd7, 0xee, 0xe0, 0x30, 0xf2, 0x1d, 0x6c, + 0xf2, 0x0b, 0x5c, 0x93, 0x73, 0xa0, 0x70, 0xc5, 0x73, 0xa0, 0xa0, 0x13, 0x73, 0xa0, 0x50, 0xc9, + 0x9a, 0xf2, 0x5c, 0xf3, 0x2a, 0xdb, 0x32, 0x61, 0x7c, 0xad, 0xeb, 0xc9, 0x09, 0x53, 0x09, 0x0b, + 0x5b, 0x53, 0x8a, 0x94, 0xba, 0xa6, 0x14, 0x12, 0xbf, 0x49, 0x24, 0xdf, 0x3b, 0x61, 0x23, 0x55, + 0x99, 0x72, 0x93, 0x68, 0x82, 0x33, 0xbe, 0x49, 0x34, 0x41, 0x99, 0xb8, 0x49, 0x34, 0xc1, 0x41, + 0xac, 0x0f, 0x0c, 0x67, 0x70, 0xc1, 0xed, 0xa9, 0xab, 0x7a, 0x2e, 0xcb, 0xfa, 0xd9, 0x0c, 0x4e, + 0x66, 0x3d, 0x4b, 0x87, 0x6a, 0x3d, 0x8b, 0xa3, 0x5d, 0x12, 0xcd, 0x0d, 0xfd, 0x0b, 0x0d, 0xaa, + 0xa9, 0x38, 0x83, 0xde, 0x86, 0xf8, 0xbe, 0xc4, 0xd5, 0x3d, 0x4f, 0xa4, 0xc9, 0xca, 0xfd, 0x0a, + 0x82, 0x67, 0xdd, 0xaf, 0x20, 0x38, 0xba, 0x08, 0x10, 0x7f, 0x93, 0x6e, 0x17, 0xa4, 0x69, 0x8e, + 0x96, 0x70, 0xca, 0x39, 0x5a, 0x82, 0xea, 0x5f, 0xe7, 0xa1, 0x24, 0x16, 0xea, 0x43, 0x29, 0xa3, + 0x5a, 0x50, 0x1c, 0xe2, 0x80, 0xde, 0xb3, 0xc8, 0x25, 0xd9, 0x10, 0x87, 0xe4, 0x6c, 0x88, 0x43, + 0x6a, 0xb2, 0x96, 0xbf, 0xa7, 0x64, 0x6d, 0x66, 0xdf, 0xc9, 0x1a, 0xa6, 0x67, 0xac, 0x52, 0xb8, + 0x15, 0xa7, 0x1a, 0xb7, 0x8f, 0xe1, 0xe2, 0x04, 0x56, 0x16, 0x4c, 0x9d, 0xc0, 0xca, 0x24, 0xb4, + 0x0d, 0x07, 0xa5, 0x93, 0x17, 0xde, 0xf9, 0x22, 0x81, 0x6f, 0x61, 0xfa, 0x81, 0x76, 0x87, 0x72, + 0xb1, 0xed, 0xbd, 0x9d, 0x42, 0xe5, 0x6c, 0x37, 0x4d, 0xd3, 0xff, 0x90, 0x83, 0x05, 0xd5, 0xdf, + 0x87, 0x32, 0xb1, 0x2f, 0x43, 0x19, 0xef, 0x5a, 0x61, 0xb7, 0xef, 0x9a, 0x98, 0x97, 0x8c, 0x74, + 0x9e, 0x08, 0x78, 0xda, 0x35, 0x95, 0x79, 0x12, 0x98, 0xbc, 0x1a, 0xf2, 0xfb, 0x5a, 0x0d, 0x49, + 0xa3, 0x70, 0xe6, 0xce, 0x8d, 0xc2, 0xec, 0x71, 0x2e, 0x3f, 0xa4, 0x71, 0xbe, 0x95, 0x83, 0x5a, + 0x3a, 0x1a, 0x7f, 
0x37, 0xb6, 0x90, 0xba, 0x1b, 0xf2, 0xfb, 0xde, 0x0d, 0xef, 0xc0, 0x3c, 0xc9, + 0x1d, 0x8d, 0x30, 0xe4, 0x37, 0x10, 0x67, 0x68, 0xce, 0xc5, 0x62, 0x53, 0xe4, 0xac, 0x0a, 0x5c, + 0x89, 0x4d, 0x12, 0xae, 0xff, 0x77, 0x0e, 0xe6, 0x95, 0xaf, 0xc6, 0xe3, 0x17, 0x52, 0xf4, 0x2a, + 0xcc, 0x2b, 0xc9, 0x98, 0xfe, 0xbf, 0x6c, 0x9d, 0xa8, 0x59, 0xd0, 0xe3, 0x37, 0x2e, 0x0b, 0x30, + 0x27, 0x67, 0x75, 0x7a, 0x1b, 0xaa, 0xa9, 0x24, 0x4c, 0x7e, 0x01, 0x6d, 0x3f, 0x2f, 0xa0, 0x2f, + 0xc1, 0x62, 0x56, 0xee, 0xa0, 0x9f, 0x85, 0xc5, 0xac, 0xaf, 0xfa, 0xdd, 0x1b, 0xf8, 0x52, 0xa3, + 0x16, 0x26, 0xef, 0x2a, 0x9f, 0x03, 0x70, 0xf0, 0xcd, 0xee, 0x1d, 0xcb, 0x3f, 0x36, 0x9e, 0xf8, + 0xe6, 0x85, 0x54, 0xb5, 0x54, 0x12, 0x18, 0xd1, 0xe4, 0xda, 0x66, 0xf7, 0x8e, 0x45, 0x17, 0xd5, + 0xe4, 0xda, 0xe6, 0x84, 0x26, 0x81, 0xe9, 0xff, 0x9f, 0x17, 0x95, 0x79, 0x72, 0xd9, 0xf7, 0x43, + 0xa8, 0x79, 0xe2, 0xe1, 0xce, 0xde, 0xd2, 0xda, 0x24, 0xe6, 0x4f, 0x5b, 0x5a, 0x50, 0x29, 0xaa, + 0x6e, 0x5e, 0x74, 0xe6, 0xf6, 0xa9, 0xbb, 0x93, 0xaa, 0x3e, 0x17, 0x54, 0x0a, 0xfa, 0x4f, 0x38, + 0x28, 0xee, 0x42, 0xed, 0x60, 0xe1, 0x78, 0x7e, 0xaa, 0x72, 0x76, 0x37, 0x39, 0x16, 0x48, 0x7b, + 0x5e, 0x4d, 0x91, 0x52, 0xea, 0xb9, 0xef, 0x33, 0xfb, 0x55, 0x9f, 0x76, 0xbe, 0x9a, 0x22, 0xe9, + 0x9f, 0x6b, 0x50, 0x4d, 0x5d, 0x9f, 0x46, 0x67, 0xa0, 0x44, 0x7f, 0x5d, 0x75, 0xfb, 0x19, 0xa0, + 0x0b, 0x92, 0xf2, 0x29, 0x16, 0x8a, 0x1c, 0x42, 0xaf, 0x42, 0x39, 0xbe, 0x65, 0xcd, 0xcf, 0x44, + 0xd9, 0xe6, 0x13, 0xa0, 0xb2, 0xf9, 0x04, 0xa8, 0xff, 0x48, 0x83, 0x23, 0x53, 0xaf, 0x56, 0x3f, + 0xea, 0x9e, 0xc1, 0x73, 0x2f, 0x42, 0x49, 0x9c, 0x5a, 0x22, 0x80, 0xc2, 0xfb, 0xd7, 0xd6, 0xae, + 0xad, 0x9d, 0xa9, 0x1d, 0x40, 0x15, 0x28, 0x5e, 0x59, 0xbb, 0x74, 0xe6, 0xfc, 0xa5, 0xb3, 0x35, + 0x8d, 0x3c, 0x74, 0xae, 0x5d, 0xba, 0x44, 0x1e, 0x72, 0xcf, 0x5d, 0x94, 0xef, 0x50, 0xb1, 0xef, + 0x31, 0x9a, 0x83, 0xd2, 0xaa, 0xe7, 0xd1, 0x00, 0xc0, 0x64, 0xd7, 0x76, 0x2c, 0xb2, 0x57, 0x6b, + 0x1a, 0x2a, 0x42, 0xfe, 0xf2, 0xe5, 0xf5, 0x5a, 0x0e, 0x2d, 0x42, 0xed, 0x0c, 0x36, 0x4c, 0xdb, + 0x72, 0xb0, 0x88, 0x3a, 0xb5, 0x7c, 0xfb, 0xc6, 0x57, 0xdf, 0xac, 0x68, 0x5f, 0x7f, 0xb3, 0xa2, + 0xfd, 0xfe, 0x9b, 0x15, 0xed, 0xd6, 0xb7, 0x2b, 0x07, 0xbe, 0xfe, 0x76, 0xe5, 0xc0, 0x6f, 0xbf, + 0x5d, 0x39, 0xf0, 0xe1, 0x8b, 0xd2, 0x2f, 0x09, 0xd9, 0x3b, 0x79, 0xbe, 0x4b, 0x02, 0x2e, 0x7f, + 0x6a, 0xa5, 0x7f, 0x3b, 0xf9, 0x65, 0xee, 0xd8, 0x2a, 0x7d, 0xbc, 0xc2, 0xf8, 0x9a, 0xe7, 0xdd, + 0x26, 0x03, 0xe8, 0xcf, 0xdf, 0x82, 0x5e, 0x81, 0xfe, 0xcc, 0xed, 0xe5, 0xbf, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x2c, 0x75, 0x86, 0x00, 0x76, 0x39, 0x00, 0x00, } func (m *EventSequence) Marshal() (dAtA []byte, err error) { @@ -4367,6 +4438,11 @@ func (m *SubmitJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.QueueTtlSeconds != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.QueueTtlSeconds)) + i-- + dAtA[i] = 0x68 + } if m.IsDuplicate { i-- if m.IsDuplicate { @@ -6073,6 +6149,27 @@ func (m *Error_JobRunPreemptedError) MarshalToSizedBuffer(dAtA []byte) (int, err } return len(dAtA) - i, nil } +func (m *Error_GangJobUnschedulable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Error_GangJobUnschedulable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GangJobUnschedulable != nil { + { + size, err := m.GangJobUnschedulable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + 
return len(dAtA) - i, nil +} func (m *KubernetesError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6511,6 +6608,36 @@ func (m *JobRunPreemptedError) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *GangJobUnschedulable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GangJobUnschedulable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GangJobUnschedulable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *JobDuplicateDetected) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7132,6 +7259,9 @@ func (m *SubmitJob) Size() (n int) { if m.IsDuplicate { n += 2 } + if m.QueueTtlSeconds != 0 { + n += 1 + sovEvents(uint64(m.QueueTtlSeconds)) + } return n } @@ -7835,6 +7965,18 @@ func (m *Error_JobRunPreemptedError) Size() (n int) { } return n } +func (m *Error_GangJobUnschedulable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GangJobUnschedulable != nil { + l = m.GangJobUnschedulable.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} func (m *KubernetesError) Size() (n int) { if m == nil { return 0 @@ -8023,6 +8165,19 @@ func (m *JobRunPreemptedError) Size() (n int) { return n } +func (m *GangJobUnschedulable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + func (m *JobDuplicateDetected) Size() (n int) { if m == nil { return 0 @@ -9976,6 +10131,25 @@ func (m *SubmitJob) Unmarshal(dAtA []byte) error { } } m.IsDuplicate = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) + } + m.QueueTtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueTtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -14126,6 +14300,41 @@ func (m *Error) Unmarshal(dAtA []byte) error { } m.Reason = &Error_JobRunPreemptedError{v} iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GangJobUnschedulable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GangJobUnschedulable{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Reason = &Error_GangJobUnschedulable{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -15402,6 +15611,88 @@ func (m *JobRunPreemptedError) 
Unmarshal(dAtA []byte) error { } return nil } +func (m *GangJobUnschedulable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GangJobUnschedulable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GangJobUnschedulable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *JobDuplicateDetected) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/armadaevents/events.proto b/pkg/armadaevents/events.proto index 448bdecab3f..dff09a6783f 100644 --- a/pkg/armadaevents/events.proto +++ b/pkg/armadaevents/events.proto @@ -163,6 +163,8 @@ message SubmitJob { string scheduler = 11; // Indicates whether job is a duplicate bool isDuplicate = 12; + // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. + int64 queue_ttl_seconds = 13; } // Kubernetes objects that can serve as main objects for an Armada job. @@ -404,6 +406,7 @@ message Error { PodLeaseReturned podLeaseReturned = 9; PodTerminated podTerminated = 10; JobRunPreemptedError jobRunPreemptedError = 11; + GangJobUnschedulable gangJobUnschedulable = 12; } } @@ -486,6 +489,10 @@ message MaxRunsExceeded { message JobRunPreemptedError{ } +message GangJobUnschedulable{ + string message = 1; +} + // Generated by the scheduler whenever it detects a SubmitJob message that includes a previously used deduplication id // (i.e., when it detects a duplicate job submission). message JobDuplicateDetected { diff --git a/scripts/common.sh b/scripts/common.sh new file mode 100755 index 00000000000..1a83d8311b4 --- /dev/null +++ b/scripts/common.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# This script defines common variables and functions for the other scripts. 
+ +export docker_registry="gresearch" +export image_names=( + "armada-bundle" + "armada-lookout-bundle" + "armada-full-bundle" + "armada-server" + "armada-executor" + "armada-fakeexecutor" + "armada-lookout-ingester" + "armada-lookout-ingester-v2" + "armada-lookout" + "armada-lookout-v2" + "armada-event-ingester" + "armada-scheduler" + "armada-scheduler-ingester" + "armada-binoculars" + "armada-jobservice" + "armadactl" +) diff --git a/scripts/docker-push.sh b/scripts/docker-push.sh index 920c6384d19..7756f79954c 100755 --- a/scripts/docker-push.sh +++ b/scripts/docker-push.sh @@ -1,38 +1,34 @@ #!/bin/bash -docker_registry="gresearch" + +if ! source "$(dirname "$0")/common.sh"; then + echo "::error ::failed to source common.sh" + exit 1 +fi + docker_tag="" -image_names=( - "armada-bundle" - "armada-lookout-bundle" - "armada-full-bundle" - "armada-server" - "armada-executor" - "armada-fakeexecutor" - "armada-lookout-ingester" - "armada-lookout-ingester-v2" - "armada-lookout" - "armada-lookout-v2" - "armada-event-ingester" - "armada-scheduler" - "armada-scheduler-ingester" - "armada-binoculars" - "armada-jobservice" - "armadactl" -) +use_tarballs=false print_usage() { - echo "Usage: $0 [-t|--tag <tag>] [-r|--registry <registry>]" + echo "Usage: $0 [-t|--tag <tag>] [-r|--registry <registry>] [-i|--images-dir <images-dir>] [-u|--use-tarballs true|false]" echo "" echo "Options:" - echo " -t|--tag Docker tag (required)" - echo " -r|--registry Docker registry (default: 'gresearch')" - echo " -h|--help Display this help message" + echo " -u|--use-tarballs Load images from tarballs in the images dir before pushing (true|false)" + echo " -i|--images-dir Directory with image tarballs to push" + echo " -t|--tag Docker tag (required)" + echo " -r|--registry Docker registry (default: '$docker_registry')" + echo " -h|--help Display this help message" } # parse command-line arguments while [[ $# -gt 0 ]]; do case "$1" in + -i|--images-dir) + images_dir=$2 + images_dir=${images_dir%/} + shift + shift + ;; -t|--tag) docker_tag=$2 shift @@ -43,6 +39,11 @@ while [[ $# -gt 0 ]]; do shift shift ;; + -u|--use-tarballs) + use_tarballs=$2 + shift + shift + ;; -h|--help) print_usage exit 0 @@ -60,14 +61,31 @@ if [ -z "$docker_tag" ]; then exit 1 fi +if [ "$use_tarballs" = true ]; then + if [ -z "$images_dir" ]; then + echo "::error ::tarball images dir must be provided with -i|--images-dir option" + exit 1 + fi +fi + # iterate over image names, check existence and push them for image_name in "${image_names[@]}"; do - full_image_name="${docker_registry}/${image_name}:${docker_tag}" - echo "checking existence of $full_image_name..." - # Check if the image with the tag exists - if ! docker image inspect "$full_image_name" > /dev/null 2>&1; then - echo "::error ::image $full_image_name does not exist locally" - exit 1 + echo "::group::validating $image_name..." + if [ "$use_tarballs" = true ]; then + tarball_image="${images_dir}/${image_name}.tar" + if [ ! -f "$tarball_image" ]; then + echo "::error ::image $tarball_image does not exist" + exit 1 + fi + else + full_image_name="${docker_registry}/${image_name}:${docker_tag}" + echo "checking existence of $full_image_name..." + # Check if the image with the tag exists + if ! docker image inspect "$full_image_name" > /dev/null 2>&1; then + echo "::error ::image $full_image_name does not exist locally" + exit 1 + fi fi + echo "::endgroup::" done @@ -75,11 +93,18 @@ echo "pushing Armada images to $docker_registry with tag $docker_tag..." # iterate over image names and push them for image_name in "${image_names[@]}"; do + echo "::group::pushing $image_name..."
full_image_name="${docker_registry}/${image_name}:${docker_tag}" + if [ "$use_tarballs" = true ]; then + tarball_image="${images_dir}/${image_name}.tar" + echo "loading tarball image $tarball_image..." + docker load --input "$tarball_image" + fi echo "pushing $full_image_name..." docker push $full_image_name if [ $? -ne 0 ]; then echo "::error ::failed to push $full_image_name" exit 1 fi + echo "::endgroup::" done diff --git a/scripts/docker-save.sh b/scripts/docker-save.sh new file mode 100755 index 00000000000..056bd0a342d --- /dev/null +++ b/scripts/docker-save.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +if ! source "$(dirname "$0")/common.sh"; then + echo "::error ::failed to source common.sh" + exit 1 +fi + +docker_tag="" +output_dir="." + +while [[ $# -gt 0 ]]; do + case "$1" in + -t|--tag) + docker_tag=$2 + shift + shift + ;; + -o|--output) + output_dir=$2 + shift + shift + ;; + esac +done + +# validate that docker_tag is provided +if [ -z "$docker_tag" ]; then + echo "::error ::docker tag is must be provided with -t|--tag option" + exit 1 +fi + +# Check if output directory exists, if not create it +if [[ ! -d $output_dir ]]; then + if ! mkdir -p "$output_dir"; then + echo "::error ::failed to create output directory $output_dir" + exit 1 + fi +fi + +for image_name in "${image_names[@]}"; do + output_tarball="$output_dir/$image_name.tar" + echo "::group::saving $docker_registry/$image_name:$docker_tag to $output_tarball" + docker save -o "$output_tarball" "$docker_registry/$image_name:$docker_tag" + echo "::endgroup::" +done diff --git a/third_party/airflow/README.md b/third_party/airflow/README.md index 73a9b36c525..573b3861e5b 100644 --- a/third_party/airflow/README.md +++ b/third_party/airflow/README.md @@ -42,7 +42,7 @@ Airflow with the Armada operator can be run alongside the other Armada services mage airflow start ``` -Airflow's web UI will then be accessible at http://localhost:8081 (login with admin/admin). +Airflow's web UI will then be accessible at http://localhost:8081/login/ (login with airflow/airflow). You can install the package via `pip3 install third_party/airflow`. diff --git a/third_party/airflow/armada/operators/armada_deferrable.py b/third_party/airflow/armada/operators/armada_deferrable.py index 2f53a702228..f7aa1413637 100644 --- a/third_party/airflow/armada/operators/armada_deferrable.py +++ b/third_party/airflow/armada/operators/armada_deferrable.py @@ -103,6 +103,25 @@ def __init__( self.lookout_url_template = lookout_url_template self.poll_interval = poll_interval + def serialize(self) -> dict: + """ + Get a serialized version of this object. + + :return: A dict of keyword arguments used when instantiating + this object. + """ + + return { + "task_id": self.task_id, + "name": self.name, + "armada_channel_args": self.armada_channel_args.serialize(), + "job_service_channel_args": self.job_service_channel_args.serialize(), + "armada_queue": self.armada_queue, + "job_request_items": self.job_request_items, + "lookout_url_template": self.lookout_url_template, + "poll_interval": self.poll_interval, + } + def execute(self, context) -> None: """ Executes the Armada Operator. Only meant to be called by airflow. @@ -156,6 +175,7 @@ def execute(self, context) -> None: armada_queue=self.armada_queue, job_set_id=context["run_id"], airflow_task_name=self.name, + poll_interval=self.poll_interval, ), method_name="resume_job_complete", kwargs={"job_id": job_id}, @@ -216,6 +236,7 @@ class ArmadaJobCompleteTrigger(BaseTrigger): :param job_set_id: The ID of the job set. 
:param airflow_task_name: Name of the airflow task to which this trigger belongs. + :param poll_interval: How often to poll jobservice to get status. :return: An armada job complete trigger instance. """ @@ -226,6 +247,7 @@ def __init__( armada_queue: str, job_set_id: str, airflow_task_name: str, + poll_interval: int = 30, ) -> None: super().__init__() self.job_id = job_id @@ -233,6 +255,7 @@ def __init__( self.armada_queue = armada_queue self.job_set_id = job_set_id self.airflow_task_name = airflow_task_name + self.poll_interval = poll_interval def serialize(self) -> tuple: return ( @@ -243,9 +266,21 @@ def serialize(self) -> tuple: "armada_queue": self.armada_queue, "job_set_id": self.job_set_id, "airflow_task_name": self.airflow_task_name, + "poll_interval": self.poll_interval, }, ) + def __eq__(self, o): + return ( + self.task_id == o.task_id + and self.job_id == o.job_id + and self.job_service_channel_args == o.job_service_channel_args + and self.armada_queue == o.armada_queue + and self.job_set_id == o.job_set_id + and self.airflow_task_name == o.airflow_task_name + and self.poll_interval == o.poll_interval + ) + async def run(self): """ Runs the trigger. Meant to be called by an airflow triggerer process. @@ -255,12 +290,12 @@ async def run(self): ) job_state, job_message = await search_for_job_complete_async( - job_service_client=job_service_client, armada_queue=self.armada_queue, job_set_id=self.job_set_id, airflow_task_name=self.airflow_task_name, job_id=self.job_id, - poll_interval=self.poll_interval, + job_service_client=job_service_client, log=self.log, + poll_interval=self.poll_interval, ) yield TriggerEvent({"job_state": job_state, "job_message": job_message}) diff --git a/third_party/airflow/armada/operators/grpc.py b/third_party/airflow/armada/operators/grpc.py index bebb0f98835..3e146ccce07 100644 --- a/third_party/airflow/armada/operators/grpc.py +++ b/third_party/airflow/armada/operators/grpc.py @@ -78,10 +78,19 @@ def __init__( self.options = options self.compression = compression self.credentials_callback = None + self.credentials_callback_args = credentials_callback_args if credentials_callback_args is not None: self.credentials_callback = CredentialsCallback(**credentials_callback_args) + def __eq__(self, o): + return ( + self.target == o.target + and self.options == o.options + and self.compression == o.compression + and self.credentials_callback_args == o.credentials_callback_args + ) + def channel(self) -> grpc.Channel: """ Create a grpc.Channel based on arguments supplied to this object. diff --git a/third_party/airflow/armada/operators/utils.py b/third_party/airflow/armada/operators/utils.py index e3c68beb321..1ab7fa35d04 100644 --- a/third_party/airflow/armada/operators/utils.py +++ b/third_party/airflow/armada/operators/utils.py @@ -217,6 +217,7 @@ async def search_for_job_complete_async( job_id: str, job_service_client: JobServiceAsyncIOClient, log, + poll_interval: int, time_out_for_failure: int = 7200, ) -> Tuple[JobState, str]: """ @@ -231,6 +232,7 @@ async def search_for_job_complete_async( :param job_id: The name of the job id that armada assigns to it :param job_service_client: A JobServiceClient that is used for polling. It is optional only for testing + :param poll_interval: How often to poll jobservice to get status. 
:param time_out_for_failure: The amount of time a job can be in job_id_not_found before we decide it was a invalid job @@ -251,7 +253,7 @@ async def search_for_job_complete_async( job_state = job_state_from_pb(job_status_return.state) log.debug(f"Got job state '{job_state.name}' for job {job_id}") - await asyncio.sleep(3) + await asyncio.sleep(poll_interval) if job_state == JobState.SUCCEEDED: job_message = f"Armada {airflow_task_name}:{job_id} succeeded" diff --git a/third_party/airflow/examples/big_armada.py b/third_party/airflow/examples/big_armada.py index f1196307227..dc64cdc76b2 100644 --- a/third_party/airflow/examples/big_armada.py +++ b/third_party/airflow/examples/big_armada.py @@ -57,7 +57,7 @@ def submit_sleep_job(): with DAG( dag_id="big_armada", start_date=pendulum.datetime(2016, 1, 1, tz="UTC"), - schedule_interval="@daily", + schedule="@daily", catchup=False, default_args={"retries": 2}, ) as dag: diff --git a/third_party/airflow/pyproject.toml b/third_party/airflow/pyproject.toml index bd9814cc10c..d3fb7abfa6f 100644 --- a/third_party/airflow/pyproject.toml +++ b/third_party/airflow/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "armada_airflow" -version = "0.5.3" +version = "0.5.6" description = "Armada Airflow Operator" requires-python = ">=3.7" # Note(JayF): This dependency value is not suitable for release. Whatever @@ -10,9 +10,9 @@ requires-python = ">=3.7" dependencies = [ "armada-client", "apache-airflow>=2.6.3", - "grpcio>=1.46.3", - "grpcio-tools>=1.46.3", - "types-protobuf>=3.19.22" + "grpcio==1.58.0", + "grpcio-tools==1.58.0", + "types-protobuf==4.24.0.1" ] authors = [{name = "Armada-GROSS", email = "armada@armadaproject.io"}] license = { text = "Apache Software License" } @@ -20,7 +20,7 @@ readme = "README.md" [project.optional-dependencies] format = ["black==23.7.0", "flake8==6.1.0", "pylint==2.17.5"] -test = ["pytest==7.3.1", "coverage>=6.5.0", "pytest-asyncio==0.21.1"] +test = ["pytest==7.3.1", "coverage==7.3.1", "pytest-asyncio==0.21.1"] # note(JayF): sphinx-jekyll-builder was broken by sphinx-markdown-builder 0.6 -- so pin to 0.5.5 docs = ["sphinx==7.1.2", "sphinx-jekyll-builder==0.3.0", "sphinx-toolbox==3.2.0b1", "sphinx-markdown-builder==0.5.5"] diff --git a/third_party/airflow/tests/unit/test_airflow_operator_mock.py b/third_party/airflow/tests/unit/test_airflow_operator_mock.py index 4634e644795..1ab2d37ced1 100644 --- a/third_party/airflow/tests/unit/test_airflow_operator_mock.py +++ b/third_party/airflow/tests/unit/test_airflow_operator_mock.py @@ -170,7 +170,7 @@ def test_annotate_job_request_items(): dag = DAG( dag_id="hello_armada", start_date=pendulum.datetime(2016, 1, 1, tz="UTC"), - schedule_interval="@daily", + schedule="@daily", catchup=False, default_args={"retries": 2}, ) @@ -204,7 +204,7 @@ def test_parameterize_armada_operator(): dag = DAG( dag_id="hello_armada", start_date=pendulum.datetime(2016, 1, 1, tz="UTC"), - schedule_interval="@daily", + schedule="@daily", catchup=False, default_args={"retries": 2}, ) diff --git a/third_party/airflow/tests/unit/test_armada_deferrable_operator.py b/third_party/airflow/tests/unit/test_armada_deferrable_operator.py new file mode 100644 index 00000000000..0f156ed177e --- /dev/null +++ b/third_party/airflow/tests/unit/test_armada_deferrable_operator.py @@ -0,0 +1,171 @@ +import copy + +import pytest + +from armada_client.armada import submit_pb2 +from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1 +from armada_client.k8s.io.apimachinery.pkg.api.resource import ( + generated_pb2 as 
api_resource, +) +from armada.operators.armada_deferrable import ArmadaDeferrableOperator +from armada.operators.grpc import CredentialsCallback + + +def test_serialize_armada_deferrable(): + grpc_chan_args = { + "target": "localhost:443", + "credentials_callback_args": { + "module_name": "channel_test", + "function_name": "get_credentials", + "function_kwargs": { + "example_arg": "test", + }, + }, + } + + pod = core_v1.PodSpec( + containers=[ + core_v1.Container( + name="sleep", + image="busybox", + args=["sleep", "10s"], + securityContext=core_v1.SecurityContext(runAsUser=1000), + resources=core_v1.ResourceRequirements( + requests={ + "cpu": api_resource.Quantity(string="120m"), + "memory": api_resource.Quantity(string="510Mi"), + }, + limits={ + "cpu": api_resource.Quantity(string="120m"), + "memory": api_resource.Quantity(string="510Mi"), + }, + ), + ) + ], + ) + + job_requests = [ + submit_pb2.JobSubmitRequestItem( + priority=1, + pod_spec=pod, + namespace="personal-anonymous", + annotations={"armadaproject.io/hello": "world"}, + ) + ] + + source = ArmadaDeferrableOperator( + task_id="test_task_id", + name="test task", + armada_channel_args=grpc_chan_args, + job_service_channel_args=grpc_chan_args, + armada_queue="test-queue", + job_request_items=job_requests, + lookout_url_template="https://lookout.test.domain/", + poll_interval=5, + ) + + serialized = source.serialize() + assert serialized["name"] == source.name + + reconstituted = ArmadaDeferrableOperator(**serialized) + assert reconstituted == source + + +get_lookout_url_test_cases = [ + ( + "http://localhost:8089/jobs?job_id=", + "test_id", + "http://localhost:8089/jobs?job_id=test_id", + ), + ( + "https://lookout.armada.domain/jobs?job_id=", + "test_id", + "https://lookout.armada.domain/jobs?job_id=test_id", + ), + ("", "test_id", ""), + (None, "test_id", ""), +] + + +@pytest.mark.parametrize( + "lookout_url_template, job_id, expected_url", get_lookout_url_test_cases +) +def test_get_lookout_url(lookout_url_template, job_id, expected_url): + armada_channel_args = {"target": "127.0.0.1:50051"} + job_service_channel_args = {"target": "127.0.0.1:60003"} + + operator = ArmadaDeferrableOperator( + task_id="test_task_id", + name="test_task", + armada_channel_args=armada_channel_args, + job_service_channel_args=job_service_channel_args, + armada_queue="test_queue", + job_request_items=[], + lookout_url_template=lookout_url_template, + ) + + assert operator._get_lookout_url(job_id) == expected_url + + +def test_deepcopy_operator(): + armada_channel_args = {"target": "127.0.0.1:50051"} + job_service_channel_args = {"target": "127.0.0.1:60003"} + + operator = ArmadaDeferrableOperator( + task_id="test_task_id", + name="test_task", + armada_channel_args=armada_channel_args, + job_service_channel_args=job_service_channel_args, + armada_queue="test_queue", + job_request_items=[], + lookout_url_template="http://localhost:8089/jobs?job_id=", + ) + + try: + copy.deepcopy(operator) + except Exception as e: + assert False, f"{e}" + + +def test_deepcopy_operator_with_grpc_credentials_callback(): + armada_channel_args = { + "target": "127.0.0.1:50051", + "credentials_callback_args": { + "module_name": "tests.unit.test_armada_operator", + "function_name": "__example_test_callback", + "function_kwargs": { + "test_arg": "fake_arg", + }, + }, + } + job_service_channel_args = {"target": "127.0.0.1:60003"} + + operator = ArmadaDeferrableOperator( + task_id="test_task_id", + name="test_task", + armada_channel_args=armada_channel_args, + 
job_service_channel_args=job_service_channel_args, + armada_queue="test_queue", + job_request_items=[], + lookout_url_template="http://localhost:8089/jobs?job_id=", + ) + + try: + copy.deepcopy(operator) + except Exception as e: + assert False, f"{e}" + + +def __example_test_callback(foo=None): + return f"fake_cred {foo}" + + +def test_credentials_callback(): + callback = CredentialsCallback( + module_name="test_armada_operator", + function_name="__example_test_callback", + function_kwargs={"foo": "bar"}, + ) + + result = callback.call() + assert result == "fake_cred bar" diff --git a/third_party/airflow/tests/unit/test_grpc.py b/third_party/airflow/tests/unit/test_grpc.py new file mode 100644 index 00000000000..1e12b566067 --- /dev/null +++ b/third_party/airflow/tests/unit/test_grpc.py @@ -0,0 +1,26 @@ +import armada.operators.grpc + + +def test_serialize_grpc_channel(): + src_chan_args = { + "target": "localhost:443", + "credentials_callback_args": { + "module_name": "channel_test", + "function_name": "get_credentials", + "function_kwargs": { + "example_arg": "test", + }, + }, + } + + source = armada.operators.grpc.GrpcChannelArguments(**src_chan_args) + + serialized = source.serialize() + assert serialized["target"] == src_chan_args["target"] + assert ( + serialized["credentials_callback_args"] + == src_chan_args["credentials_callback_args"] + ) + + reconstituted = armada.operators.grpc.GrpcChannelArguments(**serialized) + assert reconstituted == source diff --git a/third_party/airflow/tests/unit/test_search_for_job_complete_asyncio.py b/third_party/airflow/tests/unit/test_search_for_job_complete_asyncio.py index 83cc3e220aa..a842fa994d3 100644 --- a/third_party/airflow/tests/unit/test_search_for_job_complete_asyncio.py +++ b/third_party/airflow/tests/unit/test_search_for_job_complete_asyncio.py @@ -71,6 +71,7 @@ async def test_failed_event(js_aio_client): job_service_client=js_aio_client, time_out_for_failure=5, log=logging.getLogger(), + poll_interval=1, ) assert job_complete[0] == JobState.FAILED assert ( @@ -89,6 +90,7 @@ async def test_successful_event(js_aio_client): job_service_client=js_aio_client, time_out_for_failure=5, log=logging.getLogger(), + poll_interval=1, ) assert job_complete[0] == JobState.SUCCEEDED assert job_complete[1] == "Armada test:test_succeeded succeeded" @@ -104,6 +106,7 @@ async def test_cancelled_event(js_aio_client): job_service_client=js_aio_client, time_out_for_failure=5, log=logging.getLogger(), + poll_interval=1, ) assert job_complete[0] == JobState.CANCELLED assert job_complete[1] == "Armada test:test_cancelled cancelled" @@ -119,6 +122,7 @@ async def test_job_id_not_found(js_aio_client): time_out_for_failure=5, job_service_client=js_aio_client, log=logging.getLogger(), + poll_interval=1, ) assert job_complete[0] == JobState.JOB_ID_NOT_FOUND assert ( @@ -142,6 +146,7 @@ async def test_error_retry(js_aio_retry_client): job_service_client=js_aio_retry_client, time_out_for_failure=5, log=logging.getLogger(), + poll_interval=1, ) assert job_complete[0] == JobState.SUCCEEDED assert job_complete[1] == "Armada test:test_succeeded succeeded" diff --git a/tools.yaml b/tools.yaml index e50e8859b2b..6a04fd8b55b 100644 --- a/tools.yaml +++ b/tools.yaml @@ -7,7 +7,7 @@ tools: - github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.16.0 - github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.16.0 - github.com/jstemmer/go-junit-report@v1.0.0 -- github.com/kyleconroy/sqlc/cmd/sqlc@v1.16.0 +- github.com/sqlc-dev/sqlc/cmd/sqlc@v1.22.0 - 
github.com/matryer/moq@v0.3.0 - github.com/mitchellh/gox@v1.0.1 - github.com/wlbr/templify@v0.0.0-20210816202250-7b8044ca19e9
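For readers of the proto and generated-code changes above, the following is a minimal, hypothetical Go sketch of how a producer might set the new queue_ttl_seconds field on SubmitJob and report the new GangJobUnschedulable error reason. The import path github.com/armadaproject/armada/pkg/armadaevents is an assumption inferred from the file paths in this diff, and the example program is illustrative only; it uses just the types and Marshal/Unmarshal methods introduced or shown in the hunks above, not any other part of the Armada API.

package main

import (
	"fmt"

	// Assumed import path, inferred from the pkg/armadaevents file paths in this diff.
	"github.com/armadaproject/armada/pkg/armadaevents"
)

func main() {
	// SubmitJob gains queue_ttl_seconds (field 13): the job is cancelled if it is
	// still queued after this many seconds; zero means no queue TTL.
	job := &armadaevents.SubmitJob{
		QueueTtlSeconds: 3600,
	}

	// Error gains a new oneof reason, gangJobUnschedulable (field 12), carrying a
	// human-readable message.
	jobErr := &armadaevents.Error{
		Reason: &armadaevents.Error_GangJobUnschedulable{
			GangJobUnschedulable: &armadaevents.GangJobUnschedulable{
				Message: "gang member could not be scheduled",
			},
		},
	}

	// Round-trip a SubmitJob through the generated marshalling code shown above.
	raw, err := job.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded armadaevents.SubmitJob
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.QueueTtlSeconds) // 3600

	rawErr, err := jobErr.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(rawErr)) // length-delimited: tag byte 0x62, then the nested message
}

The tag bytes written by the new generated code follow the usual protobuf rule tag = field_number<<3 | wire_type: 13<<3|0 = 0x68 for the varint queue_ttl_seconds field and 12<<3|2 = 0x62 for the length-delimited gangJobUnschedulable reason, matching the constants in the MarshalToSizedBuffer hunks above.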