diff --git a/.github/actions/go-lint/Dockerfile b/.github/actions/go-lint/Dockerfile index 8c617196f..df8621810 100644 --- a/.github/actions/go-lint/Dockerfile +++ b/.github/actions/go-lint/Dockerfile @@ -1,7 +1,7 @@ FROM golang:1.15.1-buster -LABEL author="Rodrigo Pavan" -LABEL maintainer="Daitan Digital Solutions" +LABEL author="Everton Haise Taques" +LABEL maintainer="netboxlabs" LABEL version="1.0.0" RUN mkdir -p /workspace @@ -16,4 +16,4 @@ RUN apt-get update \ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b /golangci-lint v1.38.0 \ && chmod +x /github-commenter /entrypoint.sh /golangci-lint -ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file +ENTRYPOINT ["/entrypoint.sh"] diff --git a/.github/actions/go-lint/action.yml b/.github/actions/go-lint/action.yml index bde52d7ea..b61e63858 100644 --- a/.github/actions/go-lint/action.yml +++ b/.github/actions/go-lint/action.yml @@ -1,7 +1,7 @@ name: 'go-lint' -author: 'Rodrigo Pavan' -description: 'Daitan Digital Solutions Docker Actions' +author: 'Everton Haise Taques' +description: 'netboxlabs' runs: using: 'docker' - image: 'Dockerfile' \ No newline at end of file + image: 'Dockerfile' diff --git a/.github/actions/go-report/Dockerfile b/.github/actions/go-report/Dockerfile index 561a0a806..8e45517b6 100644 --- a/.github/actions/go-report/Dockerfile +++ b/.github/actions/go-report/Dockerfile @@ -1,15 +1,16 @@ FROM golang:1.20-alpine -LABEL author="Everton Taques" -LABEL maintainer="ns1labs" +LABEL author="Everton Haise Taques" +LABEL maintainer="netboxlabs" LABEL version="1.0.0" RUN mkdir -p /workspace COPY ./entrypoint.sh /entrypoint.sh +COPY ./github-commenter_linux_amd64 /github-commenter_linux_amd64 RUN apk add git make curl jq && \ -wget https://github.com/cloudposse/github-commenter/releases/download/0.7.0/github-commenter_linux_amd64 -O /github-commenter && \ +mv /github-commenter_linux_amd64 /github-commenter && \ chmod +x /github-commenter /entrypoint.sh RUN git clone https://github.com/gojp/goreportcard.git && \ diff --git a/.github/actions/go-report/action.yml b/.github/actions/go-report/action.yml index 445092e17..bdbaf0315 100644 --- a/.github/actions/go-report/action.yml +++ b/.github/actions/go-report/action.yml @@ -9,7 +9,8 @@ inputs: github_token: description: " github token" - required: true + required: false + default: "" github_owner: description: " github owner" diff --git a/.github/actions/go-report/entrypoint.sh b/.github/actions/go-report/entrypoint.sh index 3d9b2d340..5143eccb1 100644 --- a/.github/actions/go-report/entrypoint.sh +++ b/.github/actions/go-report/entrypoint.sh @@ -3,7 +3,6 @@ function validateParams() { echo "========================= Checking parameters =========================" [[ -z $INPUT_GO_REPORT_THRESHOLD ]] && echo "Threshold of failure is required" && exit 1 echo " Threshold of failure present" - [[ -z $INPUT_GITHUB_TOKEN ]] && echo "GITHUB TOKEN is required" && exit 1 echo " GITHUB TOKEN present" [[ -z $INPUT_GITHUB_OWNER ]] && echo "GITHUB OWNER is required" && exit 1 echo " GITHUB OWNER present" [[ -z $INPUT_GITHUB_REPO ]] && echo "GITHUB REPO is required" && exit 1 echo " GITHUB REPO present" diff --git a/.github/actions/go-report/github-commenter_linux_amd64 b/.github/actions/go-report/github-commenter_linux_amd64 new file mode 100644 index 000000000..e6dddbc9f Binary files /dev/null and b/.github/actions/go-report/github-commenter_linux_amd64 differ diff --git a/.github/actions/slack-post/Dockerfile 
b/.github/actions/slack-post/Dockerfile index 50ba1f9c7..5362fd75a 100644 --- a/.github/actions/slack-post/Dockerfile +++ b/.github/actions/slack-post/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 -LABEL author="Everton Taques" -LABEL maintainer="ns1labs" +LABEL author="Everton Haise Taques" +LABEL maintainer="netboxlabs" LABEL version="1.0.0" RUN mkdir -p /workspace @@ -15,4 +15,4 @@ RUN chmod a+x /entrypoint.sh RUN apt-get update \ && apt-get install jq git curl -y -ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file +ENTRYPOINT ["/entrypoint.sh"] diff --git a/.github/actions/slack-post/action.yml b/.github/actions/slack-post/action.yml index 321db08b6..ad98e3528 100644 --- a/.github/actions/slack-post/action.yml +++ b/.github/actions/slack-post/action.yml @@ -1,6 +1,6 @@ name: 'slack-post' author: 'Everton Taques' -description: 'ns1labs' +description: 'netboxlabs' inputs: branch: @@ -25,4 +25,4 @@ inputs: runs: using: 'docker' - image: 'Dockerfile' \ No newline at end of file + image: 'Dockerfile' diff --git a/.github/workflows/agent.yml b/.github/workflows/agent.yml index 9b68d05f1..82bdad1a0 100644 --- a/.github/workflows/agent.yml +++ b/.github/workflows/agent.yml @@ -88,12 +88,6 @@ jobs: - name: Debug version run: echo ${{ env.VERSION }} - - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Replace crashpad and geo-db params run: | @@ -118,8 +112,6 @@ jobs: - name: Build orb-agent shell: bash - env: - IMAGE_NAME: ns1labs/orb-agent run: | if [ "${{ github.event.inputs.pktvisor_tag }}" == "" ]; then make agent @@ -128,7 +120,14 @@ jobs: PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_debug fi + + - name: Login to Docker Hub orbcommunity + if: github.event_name != 'pull_request' + uses: docker/login-action@v2 + with: + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push agent container - run: | - docker push -a ns1labs/orb-agent + - name: Push agent container orbcommunity + if: github.event_name != 'pull_request' + run: docker push -a orbcommunity/orb-agent diff --git a/.github/workflows/deploy-prod.yml b/.github/workflows/deploy-prod.yml deleted file mode 100644 index 4efa947a3..000000000 --- a/.github/workflows/deploy-prod.yml +++ /dev/null @@ -1,186 +0,0 @@ -name: Deploy Production - -on: - workflow_dispatch: - inputs: - develop_image_id: - description: 'The docker image tag that you want to promote to production' - required: true - default: 'develop' - -jobs: - check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - agent: - - 'agent/**' - - 'cmd/agent/**' - orb: - - 'fleet/**' - - 'cmd/fleet/**' - - 'policies/**' - - 'cmd/policies/**' - - 'sinks/**' - - 'cmd/sinks/**' - - 'sinker/**' - - 'cmd/sinker/**' - ui: - - 'ui/**' - - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: Build services (go build only) - run: make services - - - name: Build orb agent (go build only) - run: make agent_bin - - - name: Go unit tests - run: make test - - - name: Build UI - if: steps.filter.outputs.ui == 'true' - run: make ui - - update-image-tag-push: - needs: check - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - agent: - - 'agent/**' - - 
'cmd/agent/**' - orb: - - 'fleet/**' - - 'cmd/fleet/**' - - 'policies/**' - - 'cmd/policies/**' - - 'sinks/**' - - 'cmd/sinks/**' - - 'sinker/**' - - 'cmd/sinker/**' - ui: - - 'ui/**' - - - name: Set branch name - shell: bash - run: | - echo "BRANCH_NAME=production" >> $GITHUB_ENV - - - name: Generate ref tag (production) - run: | - echo "REF_TAG=production" >> $GITHUB_ENV - - - name: Append suffix on VERSION file for production build - run: | - echo "`cat ${{github.workspace}}/VERSION`-${{ env.REF_TAG }}" > VERSION - - - name: Get VERSION - run: | - echo "VERSION=`cat ${{github.workspace}}/VERSION`" >> $GITHUB_ENV - - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - - - name: Debug VERSION - run: | - echo ${{ env.VERSION }} - echo ${{ env.VERSION }}-${{ steps.commit_hash.outputs.sha_short }} - - - name: Commit orb-ui-live on orb-live repo production - run: | - git config --global user.email "${{secrets.GH_ORB_EMAIL}}" - git config --global user.name "${{secrets.GH_ORB_USER}}" - git config --global credential.helper cache - git clone -b main https://${{secrets.GH_ORB_USER}}:${{secrets.GH_ORB_ACCESS_TOKEN}}@github.com/ns1labs/orb-live.git - cd orb-live - rm -rf ui - git add . - git commit -m "[NS1 Orb Bot] clean ui folder" - cp -rf ../ui . - cp -rf ../VERSION . - echo "${{ steps.commit_hash.outputs.sha_short }}" > COMMIT_HASH - git add . - git commit -m "[NS1 Orb Bot] Update Orb Live UI for production" - git push origin main - - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build orb-agent - # make this as "latest" tag - shell: bash - env: - IMAGE_NAME: ns1labs/orb-agent - run: | - if [ "${{ github.event.inputs.pktvisor_tag }}" == "" ]; then - make agent_production - make agent_debug_production - else - PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_production - PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_debug_production - fi - - - name: Update - run: | - ${{ env.VERSION }} - docker push -a ns1labs/orb-agent - - docker pull ns1labs/orb-fleet:develop - eval FLEET_IMG_ID=$(docker images ns1labs/orb-fleet:develop --format "{{.ID}}") - docker tag $(FLEET_IMG_ID) ns1labs/orb-fleet:production - docker push ns1labs/orb-fleet:production - - docker pull ns1labs/orb-policies:develop - eval POLICIES_IMG_ID=$(docker images ns1labs/orb-policies:develop --format "{{.ID}}") - docker tag $(POLICIES_IMG_ID) ns1labs/orb-policies:production - docker push ns1labs/orb-policies:production - - docker pull ns1labs/orb-sinks:develop - eval SINKS_IMG_ID=$(docker images ns1labs/orb-sinks:develop --format "{{.ID}}") - docker tag $(SINKS_IMG_ID) ns1labs/orb-sinks:production - docker push ns1labs/orb-sinks:production - - docker pull ns1labs/orb-sinker:develop - eval SINKER_IMG_ID=$(docker images ns1labs/orb-sinker:develop --format "{{.ID}}") - docker tag $(SINKER_IMG_ID) ns1labs/orb-sinker:production - docker push -a ns1labs/orb-sinker:production - - docker pull ns1labs/orb-ui:develop - eval UI_IMG_ID=$(docker images ns1labs/orb-ui:develop --format "{{.ID}}") - docker tag $(UI_IMG_ID) ns1labs/orb-ui:production - docker push -a ns1labs/orb-ui:production - - - - name: Commit image tag on orb-live-manifest environment prod - run: | - git config --global user.email "${{secrets.GH_ORB_EMAIL}}" - git config --global user.name "${{secrets.GH_ORB_USER}}" - git config --global 
credential.helper cache - git clone https://${{secrets.GH_ORB_USER}}:${{secrets.GH_ORB_ACCESS_TOKEN}}@github.com/ns1labs/orb-live-manifest.git - cd orb-live-manifest/stg - mv values.yaml .template/values.old - cat .template/values.yaml.tpl >> values.yaml - sed -i -e "s/IMAGE_TAG/${{ env.VERSION }}-${{ steps.commit_hash.outputs.sha_short }}/g" values.yaml - git add values.yaml - git add .template/values.old - git commit -m "[NS1 Orb Bot] Update image tag on prod environment" - git push origin main diff --git a/.github/workflows/deploy-stg.yml b/.github/workflows/deploy-stg.yml deleted file mode 100644 index f7e92faec..000000000 --- a/.github/workflows/deploy-stg.yml +++ /dev/null @@ -1,690 +0,0 @@ -name: Update Staging with a branch - -on: - workflow_dispatch: - inputs: - branch_source: - description: 'branch to deploy on stg' - required: true - default: 'develop' - -jobs: - prebuild: - runs-on: ubuntu-latest - outputs: - agent: ${{ steps.filter.outputs.agent }} - orb: ${{ steps.filter.outputs.orb }} - migrate: ${{ steps.filter.outputs.migrate }} - ui: ${{ steps.filter.outputs.ui }} - docs: ${{ steps.filter.outputs.docs }} - VERSION: ${{ env.VERSION }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - migrate: - - 'migrate/**' - - 'cmd/migrate/**' - agent: - - 'agent/**' - - 'cmd/agent/**' - orb: - - 'fleet/**' - - 'cmd/fleet/**' - - 'policies/**' - - 'cmd/policies/**' - - 'sinks/**' - - 'cmd/sinks/**' - - 'sinker/**' - - 'cmd/sinker/**' - - 'maestro/**' - - 'cmd/maestro/**' - ui: - - 'ui/**' - docs: - - 'fleet/api/http/openapi.yaml' - - 'sinks/api/http/openapi.yaml' - - 'policies/api/openapi.yaml' - - - name: Set branch name - shell: bash - run: | - echo "BRANCH_NAME=develop" >> $GITHUB_ENV - - - name: Generate ref tag (develop) - run: | - echo "REF_TAG=develop" >> $GITHUB_ENV - - - name: Append suffix on VERSION file for develop build - run: | - echo "`cat ${{github.workspace}}/VERSION`-${{ env.REF_TAG }}" > VERSION - - - name: Get VERSION - run: | - echo "VERSION=`cat ${{github.workspace}}/VERSION`" >> $GITHUB_ENV - - - name: Debug VERSION - run: echo ${{ env.VERSION }} - - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - - go-report: - needs: prebuild - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - name: workspace - - - name: Run go report - uses: ./.github/actions/go-report - with: - go_report_threshold: 90.1 #grade A+ - - - name: refresh go-report - uses: creekorful/goreportcard-action@v1.0 - - update-api-docs: - needs: prebuild - runs-on: ubuntu-latest - steps: - - name: Repository Dispatch - uses: peter-evans/repository-dispatch@v2 - if: ${{ needs.prebuild.outputs.docs == 'true' && github.event_name != 'pull_request' }} - with: - token: ${{ secrets.GH_ORB_ACCESS_TOKEN }} - repository: ns1labs/orb-website - event-type: build-docs - client-payload: '{"branch_name": "main"}' - - test-agent: - runs-on: ubuntu-latest - needs: prebuild - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.19 - - - name: Go unit tests - run: | - SERVICE=agent make test_service_cov - - - name: Install dependencies - run: | - go mod tidy - sudo apt update && sudo apt install -y build-essential jq - go install 
github.com/axw/gocov/gocov@latest - go install github.com/AlekSi/gocov-xml@latest - - - name: coverage to xml - run: | - echo "Current directory: ${PWD}" - echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV - gocov convert ./coverage.out | gocov-xml > ./coverage.xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - files: coverage.xml - name: orb - verbose: true - - test-fleet: - runs-on: ubuntu-latest - needs: prebuild - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.19 - - - name: Go unit tests - run: | - SERVICE=fleet make test_service_cov - - - name: Install dependencies - run: | - go mod tidy - sudo apt update && sudo apt install -y build-essential jq - go install github.com/axw/gocov/gocov@latest - go install github.com/AlekSi/gocov-xml@latest - - - name: coverage to xml - run: | - echo "Current directory: ${PWD}" - echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV - gocov convert ./coverage.out | gocov-xml > ./coverage.xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - files: coverage.xml - name: orb - verbose: true - - test-policies: - runs-on: ubuntu-latest - needs: prebuild - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.19 - - - name: Go unit tests - run: | - SERVICE=policies make test_service_cov - - - name: Install dependencies - run: | - go mod tidy - sudo apt update && sudo apt install -y build-essential jq - go install github.com/axw/gocov/gocov@latest - go install github.com/AlekSi/gocov-xml@latest - - - name: coverage to xml - run: | - echo "Current directory: ${PWD}" - echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV - gocov convert ./coverage.out | gocov-xml > ./coverage.xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - files: coverage.xml - name: orb - verbose: true - - test-sinks: - runs-on: ubuntu-latest - needs: prebuild - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.19 - - - name: Go unit tests - run: | - SERVICE=sinks make test_service_cov - - - name: Install dependencies - run: | - go mod tidy - sudo apt update && sudo apt install -y build-essential jq - go install github.com/axw/gocov/gocov@latest - go install github.com/AlekSi/gocov-xml@latest - - - name: coverage to xml - run: | - echo "Current directory: ${PWD}" - echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV - gocov convert ./coverage.out | gocov-xml > ./coverage.xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - files: coverage.xml - name: orb - verbose: true - - test-sinker: - runs-on: ubuntu-latest - needs: prebuild - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.19 - - - name: Go unit tests - run: | - SERVICE=sinker make test_service_cov - - - name: Install dependencies - run: | - go mod tidy - sudo apt update && sudo apt install -y build-essential jq - go install 
github.com/axw/gocov/gocov@latest - go install github.com/AlekSi/gocov-xml@latest - - - name: coverage to xml - run: | - echo "Current directory: ${PWD}" - echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV - gocov convert ./coverage.out | gocov-xml > ./coverage.xml - - - name: Upload coverage to Codecov -# if: ${{ needs.prebuild.outputs.orb == 'true' }} - uses: codecov/codecov-action@v3 - with: - files: coverage.xml - name: orb - verbose: true - - test-maestro: - runs-on: ubuntu-latest - needs: prebuild - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.19 - - - name: Go unit tests - run: | - SERVICE=maestro make test_service_cov - - - name: Install dependencies - run: | - go mod tidy - sudo apt update && sudo apt install -y build-essential jq - go install github.com/axw/gocov/gocov@latest - go install github.com/AlekSi/gocov-xml@latest - - - name: coverage to xml - run: | - echo "Current directory: ${PWD}" - echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV - gocov convert ./coverage.out | gocov-xml > ./coverage.xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - files: coverage.xml - name: orb - verbose: true - - package-agent: - needs: - - prebuild - - test-agent - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Replace crashpad and geo-db params - run: | - ESCAPED_REPLACE_LINE1_NEW=$(printf '%s\n' "pvOptions = append(pvOptions, \"--cp-token\", \"${{ secrets.CRASHPAD_TOKEN }}\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE2_NEW=$(printf '%s\n' "pvOptions = append(pvOptions, \"--cp-url\", \"${{ secrets.CRASHPAD_URL }}\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE3_NEW=$(printf '%s\n' "pvOptions = append(pvOptions, \"--cp-path\", \"/usr/local/sbin/crashpad_handler\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE4_NEW=$(printf '%s\n' "pvOptions = append(pvOptions, \"--default-geo-city\", \"/geo-db/city.mmdb\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE5_NEW=$(printf '%s\n' "pvOptions = append(pvOptions, \"--default-geo-asn\", \"/geo-db/asn.mmdb\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE6_NEW=$(printf '%s\n' "pvOptions = append(pvOptions, \"--default-service-registry\", \"/iana/custom-iana.csv\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE1_OLD=$(printf '%s\n' "// pvOptions = append(pvOptions, \"--cp-token\", PKTVISOR_CP_TOKEN)" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE2_OLD=$(printf '%s\n' "// pvOptions = append(pvOptions, \"--cp-url\", PKTVISOR_CP_URL)" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE3_OLD=$(printf '%s\n' "// pvOptions = append(pvOptions, \"--cp-path\", PKTVISOR_CP_PATH)" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE4_OLD=$(printf '%s\n' "// pvOptions = append(pvOptions, \"--default-geo-city\", \"/geo-db/city.mmdb\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE5_OLD=$(printf '%s\n' "// pvOptions = append(pvOptions, \"--default-geo-asn\", \"/geo-db/asn.mmdb\")" | sed -e 's/[\/&]/\\&/g') - ESCAPED_REPLACE_LINE6_OLD=$(printf '%s\n' "// pvOptions = append(pvOptions, 
\"--default-service-registry\", \"/iana/custom-iana.csv\")" | sed -e 's/[\/&]/\\&/g') - sed -i -e "s/$ESCAPED_REPLACE_LINE1_OLD/$ESCAPED_REPLACE_LINE1_NEW/g" agent/backend/pktvisor/pktvisor.go - sed -i -e "s/$ESCAPED_REPLACE_LINE2_OLD/$ESCAPED_REPLACE_LINE2_NEW/g" agent/backend/pktvisor/pktvisor.go - sed -i -e "s/$ESCAPED_REPLACE_LINE3_OLD/$ESCAPED_REPLACE_LINE3_NEW/g" agent/backend/pktvisor/pktvisor.go - sed -i -e "s/$ESCAPED_REPLACE_LINE4_OLD/$ESCAPED_REPLACE_LINE4_NEW/g" agent/backend/pktvisor/pktvisor.go - sed -i -e "s/$ESCAPED_REPLACE_LINE5_OLD/$ESCAPED_REPLACE_LINE5_NEW/g" agent/backend/pktvisor/pktvisor.go - sed -i -e "s/$ESCAPED_REPLACE_LINE6_OLD/$ESCAPED_REPLACE_LINE6_NEW/g" agent/backend/pktvisor/pktvisor.go - - - name: Build orb-agent - shell: bash - env: - IMAGE_NAME: ns1labs/orb-agent - run: | - if [ "${{ github.event.inputs.pktvisor_tag }}" == "" ]; then - make agent - make agent_debug - else - PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent - PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_debug - fi - - - name: Login to Docker Hub - if: github.event_name != 'pull_request' - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push agent container - if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-agent - - package-fleet: - needs: - - prebuild - - test-fleet - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Build service containers - run: SERVICE=fleet make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v2 - if: github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push service containers - if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-fleet - - package-policies: - needs: - - prebuild - - test-policies - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Build service containers - run: SERVICE=policies make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v2 - if: github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push service containers - if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-policies - - package-sinker: - needs: - - prebuild - - test-sinker - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Build service containers - run: SERVICE=sinker make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v2 - if: github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ 
secrets.DOCKERHUB_TOKEN }} - - - name: Push service containers - if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinker - - package-sinks: - needs: - - prebuild - - test-sinks - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Build service containers - run: SERVICE=sinks make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v2 - if: github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push service containers - if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinks - - package-maestro: - needs: - - prebuild - - test-maestro - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Build service containers - run: SERVICE=maestro make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v2 - if: github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push service containers - if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-maestro - - package-ui-dependencies: - needs: - - prebuild - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - yarn: - ui/package.json - - - name: Login to Docker Hub - if: ${{ steps.filter.outputs.yarn == 'true' }} - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build orb yarn image - if: ${{ steps.filter.outputs.yarn == 'true' }} - env: - IMAGE_NAME: ns1labs/orb-ui-modules - run: | - make ui-modules - - name: Push ui image - if: ${{ steps.filter.outputs.yarn == 'true' }} - run: | - docker push -a ns1labs/orb-ui-modules - - package-ui: - needs: - - prebuild - - package-ui-dependencies - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Build orb-ui - env: - IMAGE_NAME: ns1labs/orb-ui - run: | - make ui - - - name: Commit orb-ui-live on orb-live repo develop - if: github.event_name != 'pull_request' - run: | - git config --global user.email "${{secrets.GH_ORB_EMAIL}}" - git config --global user.name "${{secrets.GH_ORB_USER}}" - git config --global credential.helper cache - git clone -b develop https://${{secrets.GH_ORB_USER}}:${{secrets.GH_ORB_ACCESS_TOKEN}}@github.com/ns1labs/orb-live.git - cd orb-live - rm -rf ui - git add . - git commit -m "[NS1 Orb Bot] clean ui folder" - cp -rf ../ui . - cp -rf ../VERSION . - echo "${{ steps.commit_hash.outputs.sha_short }}" > COMMIT_HASH - git add . 
- git commit -m "[NS1 Orb Bot] Update Orb Live UI for develop" - git push origin develop - - - name: Login to Docker Hub - uses: docker/login-action@v1 - if: github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push ui container - if: github.event_name != 'pull_request' - run: | - docker push -a ns1labs/orb-ui - - package-migrate: - needs: - - package-ui-dependencies - - package-ui - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - - name: Login to Docker Hub - if: ${{ needs.prebuild.outputs.migrate == 'true' }} - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build orb migrate service image - if: ${{ needs.prebuild.outputs.migrate == 'true' }} - env: - IMAGE_NAME: ns1labs/orb-migrate - run: | - SERVICE=migrate make build_docker - - - name: Push orb migrate service image - if: ${{ needs.prebuild.outputs.migrate == 'true' }} - run: | - docker push -a ns1labs/orb-migrate - - publish-orb-live-stg: - needs: - - prebuild - - package-fleet - - package-policies - - package-sinker - - package-sinks - - package-ui - - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.branch_source }} - - - name: Debug values - run: | - echo ${{ needs.prebuild.outputs.VERSION }} - echo ${{ github.event.inputs.pktvisor_tag }} - env: - VERSION: ${{ needs.prebuild.env.VERSION }} - - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Get short commit hash to a variable - id: commit_hash - run: | - echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" - echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - - - name: Commit image tag on orb-live-manifest environment dev - run: | - git config --global user.email "${{secrets.GH_ORB_EMAIL}}" - git config --global user.name "${{secrets.GH_ORB_USER}}" - git config --global credential.helper cache - git clone https://${{secrets.GH_ORB_USER}}:${{secrets.GH_ORB_ACCESS_TOKEN}}@github.com/ns1labs/orb-live-manifest.git - cd orb-live-manifest/stg - mv values.yaml .template/values.old - cat .template/values.yaml.tpl >> values.yaml - sed -i -e "s/IMAGE_TAG/${{ needs.prebuild.outputs.VERSION }}-${{ steps.commit_hash.outputs.sha_short }}/g" values.yaml - git add values.yaml - git add .template/values.old - git commit -m "[NS1 Orb Bot] Update image tag on stg environment" - git push origin main diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index f0731221f..02cd7b8e7 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -378,8 +378,6 @@ jobs: - name: Build orb-agent shell: bash - env: - IMAGE_NAME: ns1labs/orb-agent run: | if [ "${{ github.event.inputs.pktvisor_tag }}" == "" ]; then make agent @@ -388,17 +386,17 @@ jobs: PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_debug fi - - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity if: github.event_name != 'pull_request' uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: 
${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push agent container + - name: Push agent container orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-agent + run: docker push -a orbcommunity/orb-agent package-fleet: needs: @@ -416,16 +414,16 @@ jobs: - name: Build service containers run: SERVICE=fleet make build_docker - - name: Login to Docker Hub + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-fleet + run: docker push -a orbcommunity/orb-fleet package-policies: needs: @@ -442,17 +440,17 @@ jobs: - name: Build service containers run: SERVICE=policies make build_docker - - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-policies + run: docker push -a orbcommunity/orb-policies package-sinker: needs: @@ -469,17 +467,17 @@ jobs: - name: Build service containers run: SERVICE=sinker make build_docker - - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinker + run: docker push -a orbcommunity/orb-sinker package-sinks: needs: @@ -496,17 +494,17 @@ jobs: - name: Build service containers run: SERVICE=sinks make build_docker - - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinks + run: docker push -a orbcommunity/orb-sinks package-maestro: needs: @@ -522,18 +520,18 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers - run: SERVICE=maestro make build_docker + run: SERVICE=maestro make build_docker - - name: Login to Docker Hub + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: 
github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-maestro + run: docker push -a orbcommunity/orb-maestro package-ui-dependencies: needs: @@ -548,22 +546,22 @@ jobs: yarn: ui/package.json - - name: Login to Docker Hub - if: ${{ steps.filter.outputs.yarn == 'true' }} - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build orb yarn image if: ${{ steps.filter.outputs.yarn == 'true' }} - env: - IMAGE_NAME: ns1labs/orb-ui-modules run: | make ui-modules - - name: Push ui image + + - name: Login to Docker Hub orbcommunity + if: ${{ steps.filter.outputs.yarn == 'true' }} + uses: docker/login-action@v2 + with: + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} + + - name: Push ui image orbcommunity if: ${{ steps.filter.outputs.yarn == 'true' }} run: | - docker push -a ns1labs/orb-ui-modules + docker push -a orbcommunity/orb-ui-modules package-ui: needs: @@ -579,8 +577,6 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build orb-ui - env: - IMAGE_NAME: ns1labs/orb-ui run: | make ui @@ -601,18 +597,18 @@ jobs: git add . git commit -m "[NS1 Orb Bot] Update Orb Live UI for develop" git push origin develop - - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v1 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push ui container + - name: Push ui container orbcommunity if: github.event_name != 'pull_request' run: | - docker push -a ns1labs/orb-ui + docker push -a orbcommunity/orb-ui package-migrate: needs: @@ -622,24 +618,23 @@ jobs: steps: - uses: actions/checkout@v2 - - name: Login to Docker Hub + - name: Build orb migrate service image + if: ${{ needs.prebuild.outputs.migrate == 'true' }} + run: | + SERVICE=migrate make build_docker + + - name: Login to Docker Hub orbcommunity if: ${{ needs.prebuild.outputs.migrate == 'true' }} uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Build orb migrate service image + - name: Push orb migrate service image orbcommunity if: ${{ needs.prebuild.outputs.migrate == 'true' }} - env: - IMAGE_NAME: ns1labs/orb-migrate run: | - SERVICE=migrate make build_docker + docker push -a orbcommunity/orb-migrate - - name: Push orb migrate service image - if: ${{ needs.prebuild.outputs.migrate == 'true' }} - run: | - docker push -a ns1labs/orb-migrate publish-orb-live-stg: needs: @@ -662,12 +657,6 @@ jobs: env: VERSION: ${{ needs.prebuild.env.VERSION }} - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Get short commit hash to a variable id: commit_hash run: | diff --git a/.github/workflows/go-main.yml b/.github/workflows/go-main.yml index ecfc98267..0f7c4c045 100644 --- a/.github/workflows/go-main.yml +++ b/.github/workflows/go-main.yml @@ -125,6 +125,21 @@ jobs: - name: Go unit tests if: ${{ needs.prebuild.outputs.orb == 'true' }} run: SERVICE=sinker make test_service + + test-maestro: + runs-on: ubuntu-latest + needs: prebuild + steps: + - uses: actions/checkout@v2 + - name: 
Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.19 + + - name: Go unit tests + if: ${{ needs.prebuild.outputs.orb == 'true' }} + run: | + SERVICE=maestro make test_service_cov package-agent: # This is just for debug agent @@ -163,23 +178,23 @@ jobs: - name: Build orb-agent shell: bash - env: - IMAGE_NAME: ns1labs/orb-agent run: | if [ "${{ github.event.inputs.pktvisor_tag }}" == "" ]; then make agent_debug else PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_debug fi - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + if: github.event_name != 'pull_request' + uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push agent container - run: docker push -a ns1labs/orb-agent + - name: Push agent container orbcommunity + if: github.event_name != 'pull_request' + run: docker push -a orbcommunity/orb-agent package-fleet: needs: @@ -195,17 +210,17 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=fleet make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-fleet + run: docker push -a orbcommunity/orb-fleet package-policies: needs: @@ -221,17 +236,17 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=policies make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-policies + run: docker push -a orbcommunity/orb-policies package-sinker: needs: @@ -247,17 +262,17 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=sinker make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinker + run: docker push -a orbcommunity/orb-sinker package-sinks: needs: @@ -273,17 +288,43 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=sinks make build_docker + + - name: Login to Docker Hub 
orbcommunity + uses: docker/login-action@v2 + if: github.event_name != 'pull_request' + with: + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Login to Docker Hub - uses: docker/login-action@v1 + - name: Push service containers orbcommunity + if: github.event_name != 'pull_request' + run: docker push -a orbcommunity/orb-sinks + + package-maestro: + needs: + - prebuild + - test-maestro + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Get short commit hash to a variable + id: commit_hash + run: | + echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" + echo ${{ needs.prebuild.outputs.VERSION }} > VERSION + - name: Build service containers + run: SERVICE=maestro make build_docker + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinks + run: docker push -a orbcommunity/orb-maestro package-ui-dependencies: needs: @@ -297,23 +338,23 @@ jobs: filters: | yarn: ui/package.json - - name: Login to Docker Hub - if: ${{ steps.filter.outputs.yarn == 'true' }} - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build orb yarn image if: ${{ steps.filter.outputs.yarn == 'true' }} - env: - IMAGE_NAME: ns1labs/orb-ui-modules run: | make ui-modules - - name: Push ui image + + - name: Login to Docker Hub orbcommunity + if: ${{ steps.filter.outputs.yarn == 'true' }} + uses: docker/login-action@v2 + with: + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} + + - name: Push ui image orbcommunity if: ${{ steps.filter.outputs.yarn == 'true' }} run: | - docker push -a ns1labs/orb-ui-modules + docker push -a orbcommunity/orb-ui-modules package-ui: needs: @@ -327,21 +368,21 @@ jobs: run: | echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" echo ${{ needs.prebuild.outputs.VERSION }} > VERSION + - name: Build orb-ui - env: - IMAGE_NAME: ns1labs/orb-ui run: make ui - - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v1 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push ui container + - name: Push ui container orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-ui + run: | + docker push -a orbcommunity/orb-ui release: needs: diff --git a/.github/workflows/go-production.yml b/.github/workflows/go-production.yml index 9fce66047..1cc773efe 100644 --- a/.github/workflows/go-production.yml +++ b/.github/workflows/go-production.yml @@ -186,7 +186,7 @@ jobs: - name: Build orb-agent shell: bash env: - IMAGE_NAME: ns1labs/orb-agent + IMAGE_NAME: orbcommunity/orb-agent run: | if [ "${{ github.event.inputs.pktvisor_tag }}" == "" ]; then make agent_production @@ -195,16 +195,27 @@ jobs: PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} make agent_production PKTVISOR_TAG=${{ github.event.inputs.pktvisor_tag }} 
make agent_debug_production fi - - name: Login to Docker Hub + - name: Login to Docker Hub orbcommunity if: github.event_name != 'pull_request' uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Push agent container + - name: Push agent container orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-agent + run: docker push -a orbcommunity/orb-agent + + - name: Login to Docker Hub orbcommunity + if: github.event_name != 'pull_request' + uses: docker/login-action@v2 + with: + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} + + - name: Push agent container orbcommunity + if: github.event_name != 'pull_request' + run: docker push -a orbcommunity/orb-agent package-fleet: needs: @@ -220,17 +231,17 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=fleet make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-fleet + run: docker push -a orbcommunity/orb-fleet package-policies: needs: @@ -247,16 +258,16 @@ jobs: - name: Build service containers run: SERVICE=policies make build_docker - - name: Login to Docker Hub - uses: docker/login-action@v1 + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-policies + run: docker push -a orbcommunity/orb-policies package-sinker: needs: @@ -272,17 +283,17 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=sinker make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinker + run: docker push -a orbcommunity/orb-sinker package-sinks: needs: @@ -298,17 +309,17 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build service containers run: SERVICE=sinks make build_docker - - - name: Login to Docker Hub - uses: docker/login-action@v1 + + - name: Login to Docker Hub orbcommunity + uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ 
secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-sinks + run: docker push -a orbcommunity/orb-sinks package-maestro: needs: @@ -325,16 +336,16 @@ jobs: - name: Build service containers run: SERVICE=maestro make build_docker - - name: Login to Docker Hub + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v2 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push service containers + - name: Push service containers orbcommunity if: github.event_name != 'pull_request' - run: docker push -a ns1labs/orb-maestro + run: docker push -a orbcommunity/orb-maestro package-ui-dependencies: needs: @@ -348,23 +359,24 @@ jobs: filters: | yarn: ui/package.json - - name: Login to Docker Hub - if: ${{ steps.filter.outputs.yarn == 'true' }} - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build orb yarn image if: ${{ steps.filter.outputs.yarn == 'true' }} - env: - IMAGE_NAME: ns1labs/orb-ui-modules run: | make ui-modules - - name: Push ui image + + - name: Login to Docker Hub orbcommunity + if: ${{ steps.filter.outputs.yarn == 'true' }} + uses: docker/login-action@v2 + with: + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} + + - name: Push ui image orbcommunity if: ${{ steps.filter.outputs.yarn == 'true' }} run: | - docker push -a ns1labs/orb-ui-modules + docker push -a orbcommunity/orb-ui-modules + package-ui: needs: - prebuild @@ -379,7 +391,7 @@ jobs: echo ${{ needs.prebuild.outputs.VERSION }} > VERSION - name: Build orb-ui env: - IMAGE_NAME: ns1labs/orb-ui + IMAGE_NAME: orbcommunity/orb-ui run: | make ui - name: Commit orb-ui-live on orb-live repo production @@ -398,17 +410,18 @@ jobs: git add . 
git commit -m "[NS1 Orb Bot] Update Orb Live UI for production" git push origin main - - name: Login to Docker Hub + + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v1 if: github.event_name != 'pull_request' with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - - name: Push ui container + - name: Push ui container orbcommunity if: github.event_name != 'pull_request' run: | - docker push -a ns1labs/orb-ui + docker push -a orbcommunity/orb-ui publish-orb-live-prd: needs: @@ -431,12 +444,6 @@ jobs: env: VERSION: ${{ needs.prebuild.env.VERSION }} - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Get short commit hash to a variable id: commit_hash run: | @@ -520,11 +527,11 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to Docker Hub + - name: Login to Docker Hub orbcommunity uses: docker/login-action@v1 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} + password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - name: Replace crashpad and geo-db params run: | @@ -549,8 +556,8 @@ jobs: - name: Build + push - agent (multi-arch) env: - IMAGE_NAME: ns1labs/orb-agent:latest - IMAGE_NAME_2: ns1labs/orb-agent:${{ env.VERSION }}-${{ steps.commit_hash.outputs.sha_short }} + IMAGE_NAME: orbcommunity/orb-agent:latest + IMAGE_NAME_2: orbcommunity/orb-agent:${{ env.VERSION }}-${{ steps.commit_hash.outputs.sha_short }} uses: docker/build-push-action@v3 with: builder: ${{ steps.buildx.outputs.name }} diff --git a/Makefile b/Makefile index 8fb38f7ec..851ea600c 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,8 @@ DEBUG_REF_TAG ?= develop-debug PKTVISOR_TAG ?= latest-develop PKTVISOR_DEBUG_TAG ?= latest-develop-debug DOCKER_IMAGE_NAME_PREFIX ?= orb -DOCKERHUB_REPO = ns1labs +DOCKERHUB_REPO = orbcommunity +ORB_DOCKERHUB_REPO = orbcommunity BUILD_DIR = build SERVICES = fleet policies sinks sinker migrate maestro DOCKERS = $(addprefix docker_,$(SERVICES)) @@ -51,9 +52,9 @@ define make_docker --build-arg SVC=$(SERVICE) \ --build-arg GOARCH=$(GOARCH) \ --build-arg GOARM=$(GOARM) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(SERVICE):$(REF_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(SERVICE):$(ORB_VERSION) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(SERVICE):$(ORB_VERSION)-$(COMMIT_HASH) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(SERVICE):$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(SERVICE):$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(SERVICE):$(ORB_VERSION)-$(COMMIT_HASH) \ -f docker/Dockerfile . 
$(eval SERVICE="") endef @@ -62,9 +63,9 @@ define make_docker_dev docker build \ --no-cache \ --build-arg SVC=$(svc) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(svc):$(REF_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(svc):$(ORB_VERSION) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(svc):$(ORB_VERSION)-$(COMMIT_HASH) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(svc):$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(svc):$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-$(svc):$(ORB_VERSION)-$(COMMIT_HASH) \ -f docker/Dockerfile.dev ./build $(eval svc="") endef @@ -170,13 +171,13 @@ kind-delete-cluster: kind delete cluster kind-load-images: - kind load docker-image ns1labs/orb-fleet:develop - kind load docker-image ns1labs/orb-policies:develop - kind load docker-image ns1labs/orb-sinks:develop - kind load docker-image ns1labs/orb-sinker:develop - kind load docker-image ns1labs/orb-migrate:develop - kind load docker-image ns1labs/orb-maestro:develop - kind load docker-image ns1labs/orb-ui:develop + kind load docker-image orbcommunity/orb-fleet:develop + kind load docker-image orbcommunity/orb-policies:develop + kind load docker-image orbcommunity/orb-sinks:develop + kind load docker-image orbcommunity/orb-sinker:develop + kind load docker-image orbcommunity/orb-migrate:develop + kind load docker-image orbcommunity/orb-maestro:develop + kind load docker-image orbcommunity/orb-ui:develop kind-install-orb: kubectl create namespace orb @@ -211,29 +212,30 @@ agent_bin: agent: docker build --no-cache \ --build-arg PKTVISOR_TAG=$(PKTVISOR_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(REF_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION)-$(COMMIT_HASH) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION)-$(COMMIT_HASH) \ -f agent/docker/Dockerfile . agent_debug: docker build \ --build-arg PKTVISOR_TAG=$(PKTVISOR_DEBUG_TAG) \ --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(DEBUG_REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(DEBUG_REF_TAG) \ -f agent/docker/Dockerfile . agent_production: docker build \ --build-arg PKTVISOR_TAG=$(PKTVISOR_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(PRODUCTION_AGENT_REF_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION)-$(COMMIT_HASH) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(PRODUCTION_AGENT_REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION)-$(COMMIT_HASH) \ -f agent/docker/Dockerfile . agent_debug_production: docker build \ --build-arg PKTVISOR_TAG=$(PKTVISOR_DEBUG_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(PRODUCTION_AGENT_DEBUG_REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(PRODUCTION_AGENT_DEBUG_REF_TAG) \ -f agent/docker/Dockerfile . 
test_ui: @@ -241,9 +243,9 @@ test_ui: ui-modules: cd ui/ && docker build \ - --tag=$(DOCKERHUB_REPO)/orb-ui-modules:latest \ - --tag=$(DOCKERHUB_REPO)/orb-ui-modules:$(REF_TAG) \ - --tag=$(DOCKERHUB_REPO)/orb-ui-modules:$(ORB_VERSION)-$(COMMIT_HASH) \ + --tag=$(ORB_DOCKERHUB_REPO)/orb-ui-modules:latest \ + --tag=$(ORB_DOCKERHUB_REPO)/orb-ui-modules:$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/orb-ui-modules:$(ORB_VERSION)-$(COMMIT_HASH) \ -f docker/Dockerfile.buildyarn . ui: @@ -251,9 +253,9 @@ ui: --build-arg ENV_PS_SID=${PS_SID} \ --build-arg ENV_PS_GROUP_KEY=${PS_GROUP_KEY} \ --build-arg ENV=${ENVIRONMENT} \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-ui:$(REF_TAG) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-ui:$(ORB_VERSION) \ - --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-ui:$(ORB_VERSION)-$(COMMIT_HASH) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-ui:$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-ui:$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-ui:$(ORB_VERSION)-$(COMMIT_HASH) \ -f docker/Dockerfile . platform: dockers_dev agent ui diff --git a/RFCs/2021-04-16-5-orb-data-model.md b/RFCs/2021-04-16-5-orb-data-model.md index 7dd7962f7..4f8c886ec 100644 --- a/RFCs/2021-04-16-5-orb-data-model.md +++ b/RFCs/2021-04-16-5-orb-data-model.md @@ -1,7 +1,7 @@ ## Orb Data Model Orb manages pktvisor configuration in a central control plane. The only configuration that remains at the edge with the -agent are the Tap configuration (ns1labs/pktvisor#75) and edge Tags configuration (below) because they are host +agent are the [Tap configuration](https://github.com/orb-community/pktvisor/blob/develop/RFCs/2021-04-16-75-taps.md) and edge Tags configuration (below) because they are host specific. 
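To make the split concrete, here is a minimal sketch of the Tap configuration that stays on the agent host. It mirrors the `default_pcap` tap from `cmd/agent/agent.example.yaml` elsewhere in this patch; the tap name and interface value are illustrative.

```yaml
# Agent-side configuration: Taps remain at the edge because they reference
# host-specific resources such as a capture interface.
version: "1.0"
visor:
  taps:
    default_pcap:       # tap name that centrally managed policies can reference
      input_type: pcap
      config:
        iface: "auto"   # host specific; "auto" lets pktvisor pick an interface
```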
### Tags and Group Configurations diff --git a/VERSION b/VERSION index ca222b7cf..2094a100c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.23.0 +0.24.0 diff --git a/agent/backend/pktvisor/pktvisor.go b/agent/backend/pktvisor/pktvisor.go index 13019bfaa..a778169fe 100644 --- a/agent/backend/pktvisor/pktvisor.go +++ b/agent/backend/pktvisor/pktvisor.go @@ -8,8 +8,10 @@ import ( "context" "errors" "fmt" + "net" "net/http" "os/exec" + "strconv" "strings" "time" @@ -79,11 +81,26 @@ type pktvisorBackend struct { // OpenTelemetry management scrapeOtel bool otelReceiverType string + otelReceiverHost string + otelReceiverPort int receiver map[string]component.MetricsReceiver exporter map[string]component.MetricsExporter routineMap map[string]context.CancelFunc } +func (p *pktvisorBackend) getFreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} + func (p *pktvisorBackend) addScraperProcess(ctx context.Context, cancel context.CancelFunc, policyID string, policyName string) { attributeCtx := context.WithValue(ctx, "policy_name", policyName) attributeCtx = context.WithValue(attributeCtx, "policy_id", policyID) @@ -175,6 +192,15 @@ func (p *pktvisorBackend) Start(ctx context.Context, cancelFunc context.CancelFu if p.scrapeOtel && p.otelReceiverType == Otlp { pvOptions = append(pvOptions, "--otel") + pvOptions = append(pvOptions, "--otel-host", p.otelReceiverHost) + if p.otelReceiverPort == 0 { + p.otelReceiverPort, err = p.getFreePort() + if err != nil { + p.logger.Error("pktvisor otlp startup error", zap.Error(err)) + return err + } + } + pvOptions = append(pvOptions, "--otel-port", strconv.Itoa(p.otelReceiverPort)) } // the macros should be properly configured to enable crashpad @@ -330,6 +356,11 @@ func (p *pktvisorBackend) Configure(logger *zap.Logger, repo policies.PolicyRepo if v.(string) == Otlp { p.logger.Info("OTLP receiver enabled") } + + case "Host": + p.otelReceiverHost = v.(string) + case "Port": + p.otelReceiverPort = v.(int) } } diff --git a/agent/backend/pktvisor/scrape.go b/agent/backend/pktvisor/scrape.go index ea8d8fca8..d50a971e2 100644 --- a/agent/backend/pktvisor/scrape.go +++ b/agent/backend/pktvisor/scrape.go @@ -18,6 +18,7 @@ import ( "github.com/orb-community/orb/agent/otel/pktvisorreceiver" "github.com/orb-community/orb/fleet" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.uber.org/zap" ) @@ -177,6 +178,11 @@ func (p *pktvisorBackend) receiveOtlp() { } pFactory := otlpreceiver.NewFactory() cfg := pFactory.CreateDefaultConfig() + cfg.(*otlpreceiver.Config).Protocols = otlpreceiver.Protocols{ + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: p.otelReceiverHost + ":" + strconv.Itoa(p.otelReceiverPort), + }, + } set := pktvisorreceiver.CreateDefaultSettings(p.logger) p.receiver[policyID], err = pFactory.CreateMetricsReceiver(exeCtx, set, cfg, p.exporter[policyID]) if err != nil { @@ -207,21 +213,23 @@ func (p *pktvisorBackend) receiveOtlp() { } } } - select { - case <-exeCtx.Done(): - p.ctx.Done() - p.cancelFunc() - case <-p.ctx.Done(): - err := p.exporter[policyID].Shutdown(exeCtx) - if err != nil { - return - } - err = p.receiver[policyID].Shutdown(exeCtx) - if err != nil { + for { + select { + case <-exeCtx.Done(): + p.ctx.Done() + p.cancelFunc() + case 
<-p.ctx.Done(): + err := p.exporter[policyID].Shutdown(exeCtx) + if err != nil { + return + } + err = p.receiver[policyID].Shutdown(exeCtx) + if err != nil { + return + } + p.logger.Info("stopped Orb OpenTelemetry agent collector") return } - p.logger.Info("stopped Orb OpenTelemetry agent collector") - return } }() } diff --git a/agent/config/types.go b/agent/config/types.go index 2a1cd8a43..46e3e450b 100644 --- a/agent/config/types.go +++ b/agent/config/types.go @@ -38,6 +38,8 @@ type Cloud struct { type Opentelemetry struct { Enable bool `mapstructure:"enable"` ReceiverType string `mapstructure:"receiver_type"` + Host string `mapstructure:"host"` + Port int `mapstructure:"port"` } type Debug struct { diff --git a/agent/docker/Dockerfile b/agent/docker/Dockerfile index 0f6bf27b9..fbb041741 100644 --- a/agent/docker/Dockerfile +++ b/agent/docker/Dockerfile @@ -1,20 +1,20 @@ ARG PKTVISOR_TAG=latest-develop FROM golang:1.19-alpine AS builder -WORKDIR /go/src/github.com/ns1labs/orb +WORKDIR /go/src/github.com/orbcommunity/orb COPY go.mod . RUN go mod tidy COPY . . RUN apk update && apk add make build-base git RUN mkdir /tmp/build && CGO_ENABLED=1 make agent_bin && mv build/orb-agent /tmp/build/orb-agent -FROM ns1labs/pktvisor:${PKTVISOR_TAG} +FROM orbcommunity/pktvisor:${PKTVISOR_TAG} RUN mkdir /opt/orb COPY --from=builder /tmp/build/orb-agent /usr/local/bin/orb-agent -COPY --from=builder /go/src/github.com/ns1labs/orb/agent/docker/agent_default.yaml /opt/orb/agent_default.yaml -COPY --from=builder /go/src/github.com/ns1labs/orb/agent/docker/orb-agent-entry.sh /usr/local/bin/orb-agent-entry.sh -COPY --from=builder /go/src/github.com/ns1labs/orb/agent/docker/run-agent.sh /run-agent.sh +COPY --from=builder /go/src/github.com/orbcommunity/orb/agent/docker/agent_default.yaml /opt/orb/agent_default.yaml +COPY --from=builder /go/src/github.com/orbcommunity/orb/agent/docker/orb-agent-entry.sh /usr/local/bin/orb-agent-entry.sh +COPY --from=builder /go/src/github.com/orbcommunity/orb/agent/docker/run-agent.sh /run-agent.sh RUN chmod a+x /run-agent.sh diff --git a/agent/otel/otlpmqttexporter/otlp.go b/agent/otel/otlpmqttexporter/otlp.go index d2a4bed3c..7b0f91cd3 100644 --- a/agent/otel/otlpmqttexporter/otlp.go +++ b/agent/otel/otlpmqttexporter/otlp.go @@ -236,7 +236,11 @@ func (e *exporter) injectScopeAttribute(metricsScope pmetric.ScopeMetrics, attri metricItem.Summary().DataPoints().At(i).Attributes().PutStr(attribute, value) } default: - continue + e.logger.Warn("not supported metric type", zap.String("name", metricItem.Name()), + zap.String("type", metricItem.Type().String())) + metrics.RemoveIf(func(m pmetric.Metric) bool { + return m.Name() == metricItem.Name() + }) } } return metricsScope @@ -344,6 +348,7 @@ func (e *exporter) pushAllMetrics(ctx context.Context, md pmetric.Metrics) error for key, value := range agentData.AgentTags { scope = e.injectScopeAttribute(scope, key, value) } + e.logger.Info("Scrapping policy via OTLP", zap.String("policyName", policyName)) scope.CopyTo(ref.ScopeMetrics().AppendEmpty()) } diff --git a/cmd/agent/agent.example.yaml b/cmd/agent/agent.example.yaml index 317cb716d..4ee5ebb11 100644 --- a/cmd/agent/agent.example.yaml +++ b/cmd/agent/agent.example.yaml @@ -4,7 +4,7 @@ version: "1.0" # see https://github.com/orb-community/pktvisor/blob/develop/RFCs/2021-04-16-75-taps.md visor: taps: - mydefault: + default_pcap: input_type: pcap config: iface: "auto" diff --git a/cmd/agent/main.go b/cmd/agent/main.go index 5854a47e7..929bcac89 100644 --- a/cmd/agent/main.go +++ 
b/cmd/agent/main.go @@ -7,6 +7,11 @@ package main import ( "context" "fmt" + "os" + "os/signal" + "strings" + "syscall" + "github.com/orb-community/orb/agent" "github.com/orb-community/orb/agent/backend/pktvisor" "github.com/orb-community/orb/agent/config" @@ -15,10 +20,6 @@ import ( "github.com/spf13/viper" "go.uber.org/zap" "go.uber.org/zap/zapcore" - "os" - "os/signal" - "strings" - "syscall" ) const ( @@ -145,8 +146,10 @@ func mergeOrError(path string) { v.SetDefault("orb.cloud.mqtt.channel_id", "") v.SetDefault("orb.db.file", "./orb-agent.db") v.SetDefault("orb.tls.verify", true) - v.SetDefault("orb.otel.enable", false) + v.SetDefault("orb.otel.enable", true) v.SetDefault("orb.otel.receiver_type", "prometheus") + v.SetDefault("orb.otel.host", "localhost") + v.SetDefault("orb.otel.port", 0) v.SetDefault("orb.debug.enable", false) v.SetDefault("orb.backends.pktvisor.binary", "/usr/local/sbin/pktvisord") diff --git a/cmd/sinker/main.go index 5feddea57..078e1f710 100644 --- a/cmd/sinker/main.go +++ b/cmd/sinker/main.go @@ -168,7 +168,7 @@ func main() { sinksGRPCClient := sinksgrpc.NewClient(tracer, sinksGRPCConn, sinksGRPCTimeout, logger) configRepo := cacheconfig.NewSinkerCache(cacheClient, logger) - configRepo = producer.NewEventStoreMiddleware(configRepo, esClient) + configRepo = producer.NewEventStoreMiddleware(configRepo, esClient, logger) gauge := kitprometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: "sinker", Subsystem: "sink", diff --git a/docker/Dockerfile index a419b6ca0..3a740b963 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -3,7 +3,7 @@ ARG SVC ARG GOARCH ARG GOARM -WORKDIR /go/src/github.com/ns1labs/orb +WORKDIR /go/src/github.com/orbcommunity/orb COPY . . RUN apk update \ && apk add make diff --git a/docs/Orb-OpenTelemetry.md index 33dbde597..df4155203 100644 --- a/docs/Orb-OpenTelemetry.md +++ b/docs/Orb-OpenTelemetry.md @@ -2,7 +2,7 @@ Orb Agent fetches information from Pktvisor using a receiver pktvisorreceiver in package that implements a customized receiver from opentelemetry. -In the PR [1428](https://github.com/ns1labs/orb/pull/1428), the orb-agent has now the opentelemetry exporter that will, pass the otlp through MQTT, through the usual channels that orb-sinker receives the information. +Since PR [1428](https://github.com/orb-community/orb/pull/1428), the orb-agent includes an OpenTelemetry exporter that passes OTLP data over MQTT, through the same channels on which orb-sinker receives information.
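For reference, the new `orb.otel.*` defaults added to `mergeOrError` in `cmd/agent/main.go` earlier in this patch map onto the following block of the agent configuration file. This is a minimal sketch assuming the usual nested YAML layout that these viper keys correspond to; the values shown are simply the new defaults.

```yaml
orb:
  otel:
    enable: true               # changed default: OpenTelemetry support is now on by default
    receiver_type: prometheus  # default receiver; an OTLP receiver type is also supported
    host: localhost            # passed to pktvisor as --otel-host when the OTLP receiver is used
    port: 0                    # 0 asks the backend to resolve a free local port at startup
```

When `port` is left at 0 and the OTLP receiver type is selected, the pktvisor backend calls the `getFreePort` helper introduced above and hands the resolved port to pktvisor via `--otel-port`.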
In a sequence Diagram as follows ```mermaid diff --git a/fleet/redis/producer/streams.go b/fleet/redis/producer/streams.go index a03e2b465..07e2d431d 100644 --- a/fleet/redis/producer/streams.go +++ b/fleet/redis/producer/streams.go @@ -103,9 +103,10 @@ func (es eventStore) RemoveAgentGroup(ctx context.Context, token string, groupID token: token, } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: true, + Values: event.encode(), } err = es.client.XAdd(ctx, record).Err() if err != nil { diff --git a/kind/README.md b/kind/README.md index aafc51dc4..ea6487b08 100644 --- a/kind/README.md +++ b/kind/README.md @@ -171,7 +171,7 @@ make kind-load-images Load just one image to the kind cluster ```shell -kind load docker-image ns1labs/orb-maestro:0.22.0-088bee14 +kind load docker-image orbcommunity/orb-maestro:0.22.0-088bee14 ``` > **💡 Note:** Do not forget to change **kind/values.yaml** manifest to use your image tag diff --git a/kind/values.yaml b/kind/values.yaml index 44775a650..40662d11e 100644 --- a/kind/values.yaml +++ b/kind/values.yaml @@ -1,9 +1,9 @@ orb: defaults: - logLevel: "info" + logLevel: "debug" image: pullPolicy: "IfNotPresent" - repository: "ns1labs" + repository: "orbcommunity" tag: "develop" mfRepository: "mainflux" mfTag: "0.13.0" @@ -13,28 +13,28 @@ orb: image: name: "orb-sinks" pullPolicy: "IfNotPresent" - repository: "ns1labs" + repository: "orbcommunity" tag: "develop" policies: image: name: "orb-policies" pullPolicy: "IfNotPresent" - repository: "ns1labs" + repository: "orbcommunity" tag: "develop" fleet: image: name: "orb-fleet" pullPolicy: "IfNotPresent" - repository: "ns1labs" + repository: "orbcommunity" tag: "develop" sinker: image: name: "orb-sinker" pullPolicy: "IfNotPresent" - repository: "ns1labs" + repository: "orbcommunity" tag: "develop" sinkerOtel: @@ -107,7 +107,7 @@ orb: image: name: "orb-maestro" pullPolicy: "IfNotPresent" - repository: ns1labs + repository: orbcommunity tag: "develop" #0.22.0-develop-478d318 rbac: serviceAccountName: "k8s-maestro-role" @@ -119,7 +119,7 @@ orb: image: name: "orb-migrate" pullPolicy: "IfNotPresent" - repository: ns1labs + repository: orbcommunity tag: "develop" autoMigrate: false logLevel: "debug" diff --git a/maestro/config/config_builder.go b/maestro/config/config_builder.go index f0d4e004a..d6f32278b 100644 --- a/maestro/config/config_builder.go +++ b/maestro/config/config_builder.go @@ -455,7 +455,6 @@ func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig, sinkId, sinkUrl s := strings.ReplaceAll(returnedString, "\"", "") s = strings.ReplaceAll(s, "\n", `\n`) return s, nil - } type OtelConfigFile struct { @@ -481,11 +480,12 @@ type Processors struct { } type Extensions struct { - HealthCheckExtConfig *HealthCheckExtension `json:"health_check,omitempty" yaml:"health_check,omitempty"` - PProf *PProfExtension `json:"pprof,omitempty" yaml:"pprof,omitempty"` - ZPages *ZPagesExtension `json:"zpages,omitempty" yaml:"zpages,omitempty"` + HealthCheckExtConfig *HealthCheckExtension `json:"health_check,omitempty" yaml:"health_check,omitempty" :"health_check_ext_config"` + PProf *PProfExtension `json:"pprof,omitempty" yaml:"pprof,omitempty" :"p_prof"` + ZPages *ZPagesExtension `json:"zpages,omitempty" yaml:"zpages,omitempty" :"z_pages"` // Exporters Authentication - BasicAuth *BasicAuthenticationExtension `json:"basicauth/exporter,omitempty" yaml:"basicauth/exporter,omitempty"` + BasicAuth *BasicAuthenticationExtension 
`json:"basicauth/exporter,omitempty" yaml:"basicauth/exporter,omitempty" :"basic_auth"` + //BearerAuth *BearerAuthExtension `json:"bearerauth/exporter,omitempty" yaml:"bearerauth/exporter,omitempty" :"bearer_auth"` } type HealthCheckExtension struct { @@ -515,6 +515,12 @@ type BasicAuthenticationExtension struct { } `json:"client_auth" yaml:"client_auth"` } +type BearerAuthExtension struct { + BearerAuth *struct { + Token string `json:"token" yaml:"token"` + } `json:"client_auth" yaml:"client_auth"` +} + type Exporters struct { PrometheusRemoteWrite *PrometheusRemoteWriteExporterConfig `json:"prometheusremotewrite,omitempty" yaml:"prometheusremotewrite,omitempty"` LoggingExporter *LoggingExporterConfig `json:"logging,omitempty" yaml:"logging,omitempty"` diff --git a/maestro/config/config_builder_test.go b/maestro/config/config_builder_test.go index 3b13e0585..5af7030e7 100644 --- a/maestro/config/config_builder_test.go +++ b/maestro/config/config_builder_test.go @@ -2,6 +2,7 @@ package config import ( "context" + "fmt" "testing" ) @@ -37,8 +38,9 @@ func TestReturnConfigYamlFromSink(t *testing.T) { t.Errorf("ReturnConfigYamlFromSink() error = %v, wantErr %v", err, tt.wantErr) return } + fmt.Printf("%s\n", got) if got != tt.want { - t.Errorf("ReturnConfigYamlFromSink() got = %v, want %v", got, tt.want) + t.Errorf("ReturnConfigYamlFromSink() got = \n%v\n, want \n%v", got, tt.want) } }) } diff --git a/maestro/config/types.go b/maestro/config/types.go index b24ead999..74df590b4 100644 --- a/maestro/config/types.go +++ b/maestro/config/types.go @@ -11,6 +11,7 @@ type SinkData struct { Url string `json:"remote_host"` User string `json:"username"` Password string `json:"password"` + Token string `json:"token"` OpenTelemetry string `json:"opentelemetry"` State PrometheusState `json:"state,omitempty"` Migrate string `json:"migrate,omitempty"` diff --git a/maestro/redis/consumer/hashset.go b/maestro/redis/consumer/hashset.go index 7e26aa30b..25d04512c 100644 --- a/maestro/redis/consumer/hashset.go +++ b/maestro/redis/consumer/hashset.go @@ -20,6 +20,7 @@ import ( const ( deploymentKey = "orb.sinks.deployment" activityPrefix = "sinker_activity" + streamLen = 1000 ) func (es eventStore) GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error) { @@ -61,8 +62,15 @@ func (es eventStore) handleSinksCreateCollector(ctx context.Context, event redis return err } sinkUrl := data.Url - sinkUsername := data.User - sinkPassword := data.Password + var sinkUsername string + var sinkPassword string + if data.User != "" { + sinkUsername = data.User + sinkPassword = data.Password + } else { + sinkPassword = data.Token + } + err2 := es.CreateDeploymentEntry(ctx, event.SinkID, sinkUrl, sinkUsername, sinkPassword) if err2 != nil { return err2 @@ -187,6 +195,8 @@ func (es eventStore) PublishSinkStateChange(sink *sinkspb.SinkRes, status string record := &redis2.XAddArgs{ Stream: streamID, Values: event.Encode(), + MaxLen: streamLen, + Approx: true, } err = es.streamRedisClient.XAdd(context.Background(), record).Err() if err != nil { diff --git a/policies/redis/producer/streams.go b/policies/redis/producer/streams.go index 5358e4822..999c20910 100644 --- a/policies/redis/producer/streams.go +++ b/policies/redis/producer/streams.go @@ -62,9 +62,10 @@ func (e eventStore) RemoveDataset(ctx context.Context, token string, dsID string datasetID: ds.ID, } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: 
true, + Values: event.Encode(), } err = e.client.XAdd(ctx, record).Err() @@ -107,9 +108,10 @@ func (e eventStore) EditDataset(ctx context.Context, token string, ds policies.D } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: true, + Values: event.Encode(), } err = e.client.XAdd(ctx, record).Err() if err != nil { @@ -158,9 +160,10 @@ func (e eventStore) RemovePolicy(ctx context.Context, token string, policyID str groupIDs: strings.Join(groupsIDs, ","), } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: true, + Values: event.Encode(), } err = e.client.XAdd(ctx, record).Err() if err != nil { @@ -202,9 +205,10 @@ func (e eventStore) EditPolicy(ctx context.Context, token string, pol policies.P groupIDs: strings.Join(groupsIDs, ","), } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: true, + Values: event.Encode(), } err = e.client.XAdd(ctx, record).Err() if err != nil { @@ -250,9 +254,10 @@ func (e eventStore) AddDataset(ctx context.Context, token string, d policies.Dat sinkIDs: strings.Join(*ds.SinkIDs, ","), } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: true, + Values: event.Encode(), } err = e.client.XAdd(ctx, record).Err() if err != nil { @@ -296,9 +301,10 @@ func (e eventStore) InactivateDatasetByIDInternal(ctx context.Context, ownerID s turnedInvalid: true, } record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), + Stream: streamID, + MaxLen: streamLen, + Approx: true, + Values: event.Encode(), } err = e.client.XAdd(ctx, record).Err() diff --git a/python-test/README.md b/python-test/README.md index 58be9f9bb..4e25c4e8f 100644 --- a/python-test/README.md +++ b/python-test/README.md @@ -73,7 +73,7 @@ Then fill in the correct values: - Default value: `True` - **agent_docker_image**: - Docker image of the orb agent. - - Default value: `ns1labs/orb-agent` + - Default value: `orbcommunity/orb-agent` - **agent_docker_tag**: - Tag of the Orb agent docker image. - Default value: `latest` @@ -91,7 +91,7 @@ Then fill in the correct values: - **enable_otel**: - Bool - Value to be used in variable "ORB_OTEL_ENABLE". Note that `include_otel_env_var` parameter must be `true` if this variable is true. - - Default value: `false` + - Default value: `true` - **use_orb_live_address_pattern**: - Bool - If true, uses orb_address as base to api and mqtt address using orb.live pattern. If false, requires you to add the corresponding addresses. 
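As a rough illustration, the parameters above end up looking like the snippet below. It is shown in YAML syntax purely for readability; the test suite's actual configuration file format and full parameter list may differ, and the values are just the documented defaults plus one example.

```yaml
# Illustrative only: parameter names come from the README section above.
agent_docker_image: orbcommunity/orb-agent   # default image after this patch
agent_docker_tag: latest                     # tag of the Orb agent docker image
enable_otel: true                            # new default; include_otel_env_var must also be true
use_orb_live_address_pattern: true           # example value; if false, API and MQTT addresses must be set explicitly
```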
diff --git a/python-test/features/agentsProvider.feature b/python-test/features/agentsProvider.feature index 59742498d..52fa51e02 100644 --- a/python-test/features/agentsProvider.feature +++ b/python-test/features/agentsProvider.feature @@ -145,7 +145,7 @@ Scenario: Forced remove agent container And the agent container is started on an available port And the agent status is online When forced remove the orb-agent container - Then the agent status in Orb should be stale within 360 seconds + Then the agent status in Orb should be stale after 360 seconds @smoke diff --git a/python-test/features/integration.feature b/python-test/features/integration.feature index b271abf38..aa7cbea1c 100644 --- a/python-test/features/integration.feature +++ b/python-test/features/integration.feature @@ -92,7 +92,7 @@ Scenario: Apply two simple policies to an agent And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds -@smoke +@sanity Scenario: apply one policy using multiple datasets to the same group Given the Orb user has a registered account And the Orb user logs in @@ -1120,3 +1120,263 @@ Scenario: Check new policies applied after pktvisor stop running And this agent's heartbeat shows that 10 policies are applied and all has status running When 2 mixed policies are applied to the group Then this agent's heartbeat shows that 12 policies are applied and all has status running + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the name of this sink is updated + Then the name updates to the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + + @sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink description + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the description of this sink is updated + Then the description updates to 
the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink tags + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the tags of this sink is updated + Then the tags updates to the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink configs + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have error state on response within 120 seconds + When the config of this sink is updated + Then the config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name and description + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the name and description of this sink is updated + Then the name and description updates to the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name and configs + Given the Orb user has a registered account + And 
the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have error state on response within 120 seconds + When the name and config of this sink is updated + Then the name and config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name and tags + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the name and tags of this sink is updated + Then the name and tags updates to the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink description and tags + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the description and tags of this sink is updated + Then the description and tags updates to the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink description and configs + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are 
matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have error state on response within 360 seconds + When the description and config of this sink is updated + Then the description and config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink tags and configs + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have error state on response within 120 seconds + When the tags and config of this sink is updated + Then the tags and config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name, description and tags + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 2 simple policies are applied to the group + And this agent's heartbeat shows that 2 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds + And referred sink must have active state on response within 120 seconds + When the name, description and tags of this sink is updated + Then the name, description and tags updates to the new value and other fields remains the same + And referred sink must have active state on response after 360 seconds + + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name, description and configs + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And
referred sink must have error state on response within 120 seconds + When the name, description and config of this sink is updated + Then the name, description and config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink name, tags and configs + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have error state on response within 120 seconds + When the name, tags and config of this sink is updated + Then the name, tags and config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds + + +@sanity @sink_partial_update +Scenario: Partial Update: sink status after updating only sink description, tags and configs + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink with invalid password already exists + And 3 simple policies are applied to the group + And this agent's heartbeat shows that 3 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have error state on response within 120 seconds + When the description, tags and config of this sink is updated + Then the description, tags and config updates to the new value and other fields remains the same + And referred sink must have active state on response within 360 seconds diff --git a/python-test/features/integration_config_file.feature b/python-test/features/integration_config_file.feature index e9bc88426..96428ce26 100644 --- a/python-test/features/integration_config_file.feature +++ b/python-test/features/integration_config_file.feature @@ -2,7 +2,7 @@ Feature: Integration tests using agent provided via config file -@private +@private @auto_provision Scenario: General smoke test to validate private agent image - using configuration files Given the Orb user has a registered account And the Orb user logs in @@ -23,7 +23,7 @@ Scenario: General smoke test to validate private agent image - using configurati ########### provisioning agents without specify pktvisor configs on backend -@smoke @config_file @pktvisor_configs +@smoke @config_file @pktvisor_configs @auto_provision Scenario: provisioning agent without specify pktvisor binary path and path to config file (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -43,7 +43,7 @@ Scenario: provisioning agent without specify pktvisor binary path and path to co And remove the agent .yaml generated on each scenario -@smoke @config_file 
@pktvisor_configs +@smoke @config_file @pktvisor_configs @auto_provision Scenario: provisioning agent without specify pktvisor binary path (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -63,7 +63,7 @@ Scenario: provisioning agent without specify pktvisor binary path (config file - And remove the agent .yaml generated on each scenario -@smoke @config_file @pktvisor_configs +@smoke @config_file @pktvisor_configs @auto_provision Scenario: provisioning agent without specify pktvisor path to config file (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -149,7 +149,7 @@ Scenario: provisioning agent without specify pktvisor path to config file (confi ########### tap_selector -@smoke @config_file +@smoke @config_file @auto_provision Scenario: tap_selector - any - matching 0 of all tags from an agent Given the Orb user has a registered account And the Orb user logs in @@ -165,7 +165,7 @@ Scenario: tap_selector - any - matching 0 of all tags from an agent And the policy application error details must show that 422 no tap match found for specified 'input.tap_selector' tags And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: tap_selector - any - matching 1 of all tags from an agent Given the Orb user has a registered account And the Orb user logs in @@ -183,7 +183,7 @@ Scenario: tap_selector - any - matching 1 of all tags from an agent And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: tap_selector - any - matching 1 of all tags (plus 1 random tag) from an agent Given the Orb user has a registered account And the Orb user logs in @@ -201,7 +201,7 @@ Scenario: tap_selector - any - matching 1 of all tags (plus 1 random tag) from a And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: tap_selector - all - matching 0 of all tags from an agent Given the Orb user has a registered account And the Orb user logs in @@ -217,7 +217,7 @@ Scenario: tap_selector - all - matching 0 of all tags from an agent And the policy application error details must show that 422 no tap match found for specified 'input.tap_selector' tags And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: tap_selector - all - matching 1 of all tags from an agent Given the Orb user has a registered account And the Orb user logs in @@ -236,7 +236,7 @@ Scenario: tap_selector - all - matching 1 of all tags from an agent And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: tap_selector - all - matching all tags from an agent Given the Orb user has a registered account And the Orb user logs in @@ -256,7 +256,7 @@ Scenario: tap_selector - all - matching all tags from an agent ########### pcap -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent pcap with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ 
-275,7 +275,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent pcap with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -295,7 +295,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent pcap with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -315,7 +315,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent pcap with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -416,7 +416,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat ########### flow -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent flow with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -435,7 +435,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent flow with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -455,7 +455,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent flow with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -475,7 +475,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent flow with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -577,7 +577,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat ########### dnstap -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent dnstap with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -596,7 +596,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke 
@config_file @auto_provision Scenario: agent dnstap with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -616,7 +616,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent dnstap with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -636,7 +636,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And remove the agent .yaml generated on each scenario -@smoke @config_file +@smoke @config_file @auto_provision Scenario: agent dnstap with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -738,7 +738,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre ########### netprobe -@smoke @config_file @netprobe +@smoke @config_file @netprobe @auto_provision Scenario: agent netprobe with only agent tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -757,7 +757,7 @@ Scenario: agent netprobe with only agent tags subscription to a group with polic And remove the agent .yaml generated on each scenario -@smoke @config_file @netprobe +@smoke @config_file @netprobe @auto_provision Scenario: agent netprobe with only agent tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -777,7 +777,7 @@ Scenario: agent netprobe with only agent tags subscription to a group with polic And remove the agent .yaml generated on each scenario -@smoke @config_file @netprobe +@smoke @config_file @netprobe @auto_provision Scenario: agent netprobe with mixed tags subscription to a group with policies created after provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in @@ -797,7 +797,7 @@ Scenario: agent netprobe with mixed tags subscription to a group with policies c And remove the agent .yaml generated on each scenario -@smoke @config_file @netprobe +@smoke @config_file @netprobe @auto_provision Scenario: agent netprobe with mixed tags subscription to a group with policies created before provision the agent (config file - auto_provision=true) Given the Orb user has a registered account And the Orb user logs in diff --git a/python-test/features/metrics.feature b/python-test/features/metrics.feature index 2fcc5dcbd..56497dd38 100644 --- a/python-test/features/metrics.feature +++ b/python-test/features/metrics.feature @@ -3,7 +3,7 @@ Feature: Integration tests validating metric groups #### netprobe -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -15,13 +15,13 @@ Scenario: netprobe handler with default metric groups configuration And 1 dataset(s) have validity valid and 0 
have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -33,13 +33,13 @@ Scenario: netprobe handler with all metric groups enabled And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -51,13 +51,12 @@ Scenario: netprobe handler with all metric groups disabled And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with only counters metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -69,13 +68,13 @@ Scenario: netprobe handler with only counters metric groups enabled And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message 
"scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with only quantiles metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -87,13 +86,13 @@ Scenario: netprobe handler with only quantiles metric groups enabled And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with only histograms metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -105,13 +104,13 @@ Scenario: netprobe handler with only histograms metric groups enabled And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with counters and histograms metric groups enabled and quantiles disabled Given the Orb user has a registered account And the Orb user logs in @@ -123,13 +122,13 @@ Scenario: netprobe handler with counters and histograms metric groups enabled an And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on 
response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with counters and quantiles metric groups enabled and histograms disabled Given the Orb user has a registered account And the Orb user logs in @@ -141,13 +140,13 @@ Scenario: netprobe handler with counters and quantiles metric groups enabled and And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_netprobe +@sanity @metric_groups @metrics_netprobe @auto_provision Scenario: netprobe handler with histograms and quantiles metric groups enabled and counters disabled Given the Orb user has a registered account And the Orb user logs in @@ -159,14 +158,14 @@ Scenario: netprobe handler with histograms and quantiles metric groups enabled a And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for netprobe handler And remove the agent .yaml generated on each scenario #### flow netflow -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type netflow with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -181,13 +180,13 @@ Scenario: flow handler type netflow with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for 
policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type netflow with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -202,13 +201,13 @@ Scenario: flow handler type netflow with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type netflow with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -223,13 +222,12 @@ Scenario: flow handler type netflow with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type netflow with only cardinality metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -239,18 +237,18 @@ Scenario: flow handler type netflow with only cardinality metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, counters, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, top_tos, counters, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type netflow with only counters metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -260,189 +258,760 @@ Scenario: flow handler type netflow with only counters metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, cardinality, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, top_tos, cardinality, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only counters and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters, by_bytes metric_groups enabled, top_tos, cardinality, by_packets, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only counters and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters, by_packets metric_groups enabled, top_tos, cardinality, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type netflow with only by_packets metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_packets metric_groups enabled, top_tos, counters, cardinality, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_bytes metric_groups enabled, top_tos, counters, by_packets, cardinality, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_geo metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo metric_groups enabled, top_tos, counters, by_packets, by_bytes, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_geo and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo, by_bytes metric_groups enabled, top_tos, counters, by_packets, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_geo and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo, by_packets metric_groups enabled, top_tos, counters, by_bytes, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only conversations metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations metric_groups enabled, top_tos, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only conversations and cardinality metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations, cardinality metric_groups enabled, top_tos, counters, by_packets, by_bytes, top_geo, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only conversations and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations, by_bytes metric_groups enabled, top_tos, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only conversations and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations, by_packets metric_groups enabled, top_tos, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ports metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ports and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ports and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ips_ports metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ips_ports and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False.
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ips_ports and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_interfaces metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_interfaces and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_interfaces and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ips metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ips and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_ips and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type netflow with only top_tos metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True]
+ And pktvisor state is running
+ And 1 Agent Group(s) is created with all tags contained in the agent
+ And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_tos metric_groups enabled, top_ips, by_bytes, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group
+ And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+ And this agent's heartbeat shows that 1 groups are matching the agent
+ And this agent's heartbeat shows that 1 policies are applied and all has status running
+ And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch
+ And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
+ Then metrics must be correctly generated for flow handler
+ And remove the agent .yaml generated on each scenario
+
+
+@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision
+Scenario: flow handler type netflow with only top_tos and by_bytes metric groups enabled
+ Given the Orb user has a registered account
+ And the Orb user logs in
+ And that a sink already exists
+ And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24
+ And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\"
+ When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True]
+ And pktvisor state is running
+ And 1 Agent Group(s) is created with all tags contained in the agent
+ And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_tos, by_bytes metric_groups enabled, top_ips, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group
+ And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+ And this agent's heartbeat shows that 1 groups are matching the agent
+ And this agent's heartbeat shows that 1 policies are applied and all has status running
+ And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch
+ And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
+ And referred sink must have active state on response within 240 seconds
+ Then metrics must be correctly generated for flow handler
+ And remove the agent .yaml generated on each scenario
+
+
+@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision
+Scenario: flow handler type netflow with only top_tos and by_packets metric groups enabled
+ Given the Orb user has a registered account
+ And the Orb user logs in
+ And that a sink already exists
+ And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24
+ And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\"
+ When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True]
+ And pktvisor state is running
+ And 1 Agent Group(s) is created with all tags contained in the agent
+ And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_tos, by_packets metric_groups enabled, top_ips, by_bytes, conversations, counters, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group
+ And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds
+ And this agent's heartbeat shows that 1 groups are matching the agent
+ And this agent's heartbeat shows that 1 policies are applied and all has status running
+ And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch
+ And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds
+ And referred sink must have active state on response within 240 seconds
+ Then metrics must be correctly generated for flow handler
+ And remove the agent .yaml generated on each scenario
+
+
+#### flow sflow
+
+@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision
+Scenario: flow handler type sflow with default metric groups configuration
+ Given the Orb user has a registered account
+ And the Orb user logs in
+ And that a sink already exists
+ And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24
+ And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\"
+ When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False.
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, default metric_groups enabled, default metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with all metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_packets metric_groups enabled, counters, cardinality, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, all metric_groups enabled, none metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only by_bytes metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface 
@auto_provision +Scenario: flow handler type sflow with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_bytes metric_groups enabled, counters, by_packets, cardinality, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, none metric_groups enabled, all metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only top_geo metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only cardinality metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo metric_groups enabled, counters, by_packets, by_bytes, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, top_tos, counters, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only conversations metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only counters metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations metric_groups enabled, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, top_tos, cardinality, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only top_ports metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only counters and by_bytes metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters, by_bytes metric_groups enabled, top_tos, cardinality, by_packets, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only top_ips_ports metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only counters and by_packets metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters, by_packets metric_groups enabled, top_tos, cardinality, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only top_interfaces metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only by_packets metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_packets metric_groups enabled, top_tos, counters, cardinality, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type netflow with only top_ips metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only by_bytes metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 - And a virtual switch is configured and is up with type netflow and target \"192.168.100.2:available\" - When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"netflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_bytes metric_groups enabled, top_tos, counters, by_packets, cardinality, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -#### flow sflow +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_geo metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo metric_groups enabled, top_tos, counters, by_packets, by_bytes, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with default metric groups configuration +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_geo and by_bytes metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -451,19 +1020,19 @@ Scenario: flow handler type sflow with default metric groups configuration When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, default metric_groups enabled, default metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo, by_bytes metric_groups enabled, top_tos, counters, by_packets, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with all metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_geo and by_packets metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -472,19 +1041,19 @@ Scenario: flow handler type sflow with all metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, all metric_groups enabled, none metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo, by_packets metric_groups enabled, top_tos, counters, by_bytes, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with all metric groups disabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only conversations metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -493,19 +1062,18 @@ Scenario: flow handler type sflow with all metric groups disabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, none metric_groups enabled, all metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations metric_groups enabled, top_tos, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only cardinality metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only conversations and cardinality metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -514,19 +1082,18 @@ Scenario: flow handler type sflow with only cardinality metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, counters, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations, cardinality metric_groups enabled, top_tos, counters, by_packets, by_bytes, top_geo, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only counters metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only conversations and by_bytes metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -535,19 +1102,19 @@ Scenario: flow handler type sflow with only counters metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, cardinality, by_packets, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations, by_bytes metric_groups enabled, top_tos, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only by_packets metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only conversations and by_packets metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -556,19 +1123,19 @@ Scenario: flow handler type sflow with only by_packets metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_packets metric_groups enabled, counters, cardinality, by_bytes, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations, by_packets metric_groups enabled, top_tos, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only by_bytes metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ports metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -577,19 +1144,17 @@ Scenario: flow handler type sflow with only by_bytes metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, by_bytes metric_groups enabled, counters, by_packets, cardinality, top_geo, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario - -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only top_geo metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ports and by_bytes metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -598,19 +1163,19 @@ Scenario: flow handler type sflow with only top_geo metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_geo metric_groups enabled, counters, by_packets, by_bytes, cardinality, conversations, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only conversations metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ports and by_packets metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -619,19 +1184,19 @@ Scenario: flow handler type sflow with only conversations metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, conversations metric_groups enabled, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only top_ports metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ips_ports metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -640,19 +1205,18 @@ Scenario: flow handler type sflow with only top_ports metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ips_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface -Scenario: flow handler type sflow with only top_ips_ports metric groups enabled +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ips_ports and by_bytes metric groups enabled Given the Orb user has a registered account And the Orb user logs in And that a sink already exists @@ -661,18 +1225,38 @@ Scenario: flow handler type sflow with only top_ips_ports metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ips_ports and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips_ports, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_interfaces, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type sflow with only top_interfaces metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -682,18 +1266,58 @@ Scenario: flow handler type sflow with only top_interfaces metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_interfaces and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is 
up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_interfaces and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_interfaces, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_ips metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_flow @root @mocked_interface +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision Scenario: flow handler type sflow with only top_ips metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -703,20 +1327,122 @@ Scenario: flow handler type sflow with only top_ips metric groups enabled When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips metric_groups enabled, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips metric_groups enabled, top_tos, conversations, counters, by_packets, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ips and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips, by_bytes metric_groups enabled, top_tos, conversations, counters, by_packets, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_ips and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_ips, by_packets metric_groups enabled, top_tos, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_tos metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_tos metric_groups enabled, top_ips, by_packets, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_tos and by_bytes metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_tos, by_bytes metric_groups enabled, top_ips, by_packets, conversations, counters, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for flow handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_flow @root @mocked_interface @auto_provision +Scenario: flow handler type sflow with only top_tos and by_packets metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: 192.168.100.2/24 + And a virtual switch is configured and is up with type sflow and target \"192.168.100.2:available\" + When an agent(input_type:flow, settings: {"bind":"192.168.100.2", "port":"switch", "flow_type":"sflow"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a flow policy flow with tap_selector matching all tag(s) of the tap from an agent, top_tos, by_packets metric_groups enabled, top_ips, conversations, counters, by_bytes, top_geo, cardinality, top_ports, top_ips_ports, top_interfaces metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual switch - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for flow handler And remove the agent .yaml generated on each scenario #### pcap -@sanity @metric_groups @metrics_pcap @root @mocked_interface +@sanity @metric_groups @metrics_pcap @root @mocked_interface @auto_provision Scenario: pcap handler with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -730,13 +1456,13 @@ Scenario: pcap handler with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for pcap handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_pcap @root @mocked_interface +@sanity @metric_groups @metrics_pcap @root @mocked_interface @auto_provision Scenario: pcap handler with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -750,13 +1476,13 @@ Scenario: pcap handler with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And 
referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for pcap handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_pcap @root @mocked_interface +@sanity @metric_groups @metrics_pcap @root @mocked_interface @auto_provision Scenario: pcap handler with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -770,15 +1496,15 @@ Scenario: pcap handler with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for pcap handler And remove the agent .yaml generated on each scenario #### bgp -@sanity @metric_groups @metrics_bgp @root @mocked_interface +@sanity @metric_groups @metrics_bgp @root @mocked_interface @auto_provision Scenario: bgp handler with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -792,13 +1518,13 @@ Scenario: bgp handler with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data bgp.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for bgp handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_bgp @root @mocked_interface +@sanity @metric_groups @metrics_bgp @root @mocked_interface @auto_provision Scenario: bgp handler with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -812,13 +1538,13 @@ Scenario: bgp handler with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data bgp.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 
seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for bgp handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_bgp @root @mocked_interface +@sanity @metric_groups @metrics_bgp @root @mocked_interface @auto_provision Scenario: bgp handler with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -832,15 +1558,15 @@ Scenario: bgp handler with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data bgp.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for bgp handler And remove the agent .yaml generated on each scenario #### dhcp -@sanity @metric_groups @metrics_dhcp @root @mocked_interface +@sanity @metric_groups @metrics_dhcp @root @mocked_interface @auto_provision Scenario: dhcp handler with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -854,13 +1580,13 @@ Scenario: dhcp handler with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dhcp handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dhcp @root @mocked_interface +@sanity @metric_groups @metrics_dhcp @root @mocked_interface @auto_provision Scenario: dhcp handler with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -874,13 +1600,13 @@ Scenario: dhcp handler with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - 
And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dhcp handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dhcp @root @mocked_interface +@sanity @metric_groups @metrics_dhcp @root @mocked_interface @auto_provision Scenario: dhcp handler with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -894,15 +1620,15 @@ Scenario: dhcp handler with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dhcpv6.pcap, dhcp-flow.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dhcp handler And remove the agent .yaml generated on each scenario #### net v1.0 -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -916,13 +1642,13 @@ Scenario: net handler with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -936,13 +1662,13 @@ Scenario: net handler with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, 
dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -956,13 +1682,12 @@ Scenario: net handler with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with only cardinality metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -976,13 +1701,13 @@ Scenario: net handler with only cardinality metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with only counters metric groups enabled Given the Orb user has a registered account And the Orb user 
logs in @@ -996,13 +1721,13 @@ Scenario: net handler with only counters metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with only top_geo metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1016,13 +1741,13 @@ Scenario: net handler with only top_geo metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_net @root @mocked_interface +@sanity @metric_groups @metrics_net @root @mocked_interface @auto_provision Scenario: net handler with only top_ips metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1036,15 +1761,15 @@ Scenario: net handler with only top_ips metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred 
sink must have active state on response within 240 seconds Then metrics must be correctly generated for net handler And remove the agent .yaml generated on each scenario #### dns v1.0 -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with default metric groups configuration Given the Orb user has a registered account And the Orb user logs in @@ -1058,13 +1783,13 @@ Scenario: dns handler with default metric groups configuration And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with all metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1078,13 +1803,13 @@ Scenario: dns handler with all metric groups enabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with all metric groups disabled Given the Orb user has a registered account And the Orb user logs in @@ -1098,13 +1823,12 @@ Scenario: dns handler with all metric groups disabled And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped 
metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only top_ecs metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1113,18 +1837,18 @@ Scenario: dns handler with only top_ecs metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ecs metric_groups enabled, top_qnames_details, cardinality, counters, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ecs metric_groups enabled, quantiles, top_qnames_details, cardinality, counters, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only top_qnames_details metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1133,18 +1857,18 @@ Scenario: dns handler with only top_qnames_details metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_qnames_details metric_groups enabled, top_ecs, cardinality, counters, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_qnames_details metric_groups enabled, quantiles, top_ecs, cardinality, counters, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only cardinality metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1153,18 +1877,18 @@ Scenario: dns handler with only cardinality metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, top_qnames_details, top_ecs, counters, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, cardinality metric_groups enabled, quantiles, top_qnames_details, top_ecs, counters, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only counters metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1173,18 +1897,18 @@ Scenario: dns handler with only counters metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, top_qnames_details, cardinality, top_ecs, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, counters metric_groups enabled, quantiles, top_qnames_details, cardinality, top_ecs, dns_transaction, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only dns_transaction metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1193,18 +1917,18 @@ Scenario: dns handler with only dns_transaction metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, dns_transaction metric_groups enabled, top_qnames_details, cardinality, counters, top_ecs, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, dns_transaction metric_groups enabled, quantiles, top_qnames_details, cardinality, counters, top_ecs, top_qnames, top_ports metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only top_qnames metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1213,18 +1937,18 @@ Scenario: dns handler with only top_qnames metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_qnames metric_groups enabled, top_qnames_details, cardinality, counters, dns_transaction, top_ecs, top_ports metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_qnames metric_groups enabled, quantiles, top_qnames_details, cardinality, counters, dns_transaction, top_ecs, top_ports metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns @root @mocked_interface +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision Scenario: dns handler with only top_ports metric groups enabled Given the Orb user has a registered account And the Orb user logs in @@ -1233,20 +1957,78 @@ Scenario: dns handler with only top_ports metric groups enabled When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent - And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, top_qnames_details, cardinality, counters, dns_transaction, top_qnames, top_ecs metric_groups disabled and settings: default is applied to the group + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, top_ports metric_groups enabled, quantiles, top_qnames_details, cardinality, counters, dns_transaction, top_qnames, top_ecs metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for dns handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision +Scenario: dns handler with only histograms metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. 
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, histograms metric_groups enabled, quantiles, top_ports, top_qnames_details, cardinality, counters, dns_transaction, top_qnames, top_ecs metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + Then metrics must be correctly generated for dns handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision +Scenario: dns handler with only histograms and dns_transaction metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False. Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, histograms, dns_transaction metric_groups enabled, quantiles, top_ports, top_qnames_details, cardinality, counters, top_qnames, top_ecs metric_groups disabled and settings: default is applied to the group + And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + And this agent's heartbeat shows that 1 groups are matching the agent + And this agent's heartbeat shows that 1 policies are applied and all has status running + And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds + Then metrics must be correctly generated for dns handler + And remove the agent .yaml generated on each scenario + + +@sanity @metric_groups @metrics_dns @root @mocked_interface @auto_provision +Scenario: dns handler with only quantiles metric groups enabled + Given the Orb user has a registered account + And the Orb user logs in + And that a sink already exists + And a mocked interface is configured with mtu: 65000 and ip: None + When an agent(input_type:pcap, settings: {"iface":"mocked", "host_spec":"fe80::a00:27ff:fed4:10bb/48,192.168.0.0/24,75.75.75.75/32"}) is self-provisioned via a configuration file on port available with 1 agent tags and has status online. [Overwrite default: False.
Paste only file: True] + And pktvisor state is running + And 1 Agent Group(s) is created with all tags contained in the agent + And a dns policy pcap with tap_selector matching all tag(s) of the tap from an agent, quantiles metric_groups enabled, histograms, top_ports, top_qnames_details, cardinality, counters, dns_transaction, top_qnames, top_ecs metric_groups disabled and settings: default is applied to the group And 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for dns handler And remove the agent .yaml generated on each scenario #### dns v2.0 -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with default metric groups configuration (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1260,13 +2042,13 @@ Scenario: dns handler with default metric groups configuration (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with all metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1280,13 +2062,13 @@ Scenario: dns handler with all metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied 
policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with all metric groups disabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1300,13 +2082,12 @@ Scenario: dns handler with all metric groups disabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only top_ecs metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1320,13 +2101,13 @@ Scenario: dns handler with only top_ecs metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only top_ports metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1340,13 +2121,13 @@ Scenario: dns handler with only top_ports metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, 
dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only top_size metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1360,13 +2141,13 @@ Scenario: dns handler with only top_size metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only xact_times metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1380,13 +2161,13 @@ Scenario: dns handler with only xact_times metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only cardinality metric groups enabled (v2) Given the Orb user has a registered account And 
the Orb user logs in @@ -1400,13 +2181,13 @@ Scenario: dns handler with only cardinality metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only counters metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1420,13 +2201,13 @@ Scenario: dns handler with only counters metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only top_qnames metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1440,13 +2221,13 @@ Scenario: dns handler with only top_qnames metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then 
metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only quantiles metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1460,13 +2241,13 @@ Scenario: dns handler with only quantiles metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only top_qtypes metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1480,13 +2261,13 @@ Scenario: dns handler with only top_qtypes metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario -@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface +@sanity @metric_groups @metrics_dns_v2 @root @mocked_interface @auto_provision Scenario: dns handler with only top_rcodes metric groups enabled (v2) Given the Orb user has a registered account And the Orb user logs in @@ -1500,8 +2281,8 @@ Scenario: dns handler with only top_rcodes metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied 
contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for dns-v2 handler And remove the agent .yaml generated on each scenario @@ -1522,8 +2303,8 @@ Scenario: net handler with default metric groups configuration (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario @@ -1542,8 +2323,8 @@ Scenario: net handler with all metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario @@ -1562,8 +2343,7 @@ Scenario: net handler with all metric groups disabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent 
.yaml generated on each scenario @@ -1582,8 +2362,8 @@ Scenario: net handler with only cardinality metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario @@ -1602,8 +2382,8 @@ Scenario: net handler with only counters metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario @@ -1622,8 +2402,8 @@ Scenario: net handler with only top_geo metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario @@ -1642,8 +2422,8 @@ Scenario: net handler with only top_ips metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, 
dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario @@ -1662,7 +2442,7 @@ Scenario: net handler with only quantiles metric groups enabled (v2) And this agent's heartbeat shows that 1 groups are matching the agent And this agent's heartbeat shows that 1 policies are applied and all has status running And run mocked data dns_ipv4_tcp.pcap, dns_ipv4_udp.pcap, dns_ipv6_tcp.pcap, dns_ipv6_udp.pcap, dns_udp_mixed_rcode.pcap, dns_udp_tcp_random.pcap, ecs.pcap, ipfix.pcap, ecmp.pcap, ipfix.pcap, nf9.pcap, dnssec.pcap, dhcpv6.pcap on the created virtual interface - And the container logs that were output after all policies have been applied contain the message "scraped metrics for policy" referred to each applied policy within 180 seconds - And referred sink must have active state on response within 120 seconds + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And referred sink must have active state on response within 240 seconds Then metrics must be correctly generated for net-v2 handler And remove the agent .yaml generated on each scenario \ No newline at end of file diff --git a/python-test/features/migration.feature b/python-test/features/migration.feature index 1b414dec6..0c4d3c085 100644 --- a/python-test/features/migration.feature +++ b/python-test/features/migration.feature @@ -17,7 +17,7 @@ Scenario: Agent legacy + sink legacy -> sink OTEL And referred sink must have active state on response within 120 seconds And 2 dataset(s) have validity valid and 0 have validity invalid in 30 seconds When the sink is updated and OTEL is enabled - And referred sink must have active state on response after 10 seconds + And referred sink must have active state on response after 120 seconds @pre-migration diff --git a/python-test/features/sinks.feature b/python-test/features/sinks.feature index f95533720..e3ff8309a 100644 --- a/python-test/features/sinks.feature +++ b/python-test/features/sinks.feature @@ -29,3 +29,143 @@ Scenario: Edit sink using an already existent name (conflict) And a new sink is created When the name of last Sink is edited using an already existent one Then the error message on response is entity already exists + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink name + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the name of this sink is updated + Then the name updates to the new value and other fields remains the same + + + @sanity @sink_partial_update +Scenario: Partial Update: updating only sink description + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When 
the description of this sink is updated + Then the description updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink tags + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the tags of this sink is updated + Then the tags updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink configs + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the config of this sink is updated + Then the config updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink name and description + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the name and description of this sink is updated + Then the name and description updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink name and configs + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the name and config of this sink is updated + Then the name and config updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink name and tags + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the name and tags of this sink is updated + Then the name and tags updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink description and tags + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the description and tags of this sink is updated + Then the description and tags updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink description and configs + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the description and config of this sink is updated + Then the description and config updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink tags and configs + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the tags and config of this sink is updated + Then the tags and config updates to the new value and other fields remains the same + + +@sanity @sink_partial_update +Scenario: Partial Update: updating only sink name, description and tags + Given that the user has the prometheus/grafana credentials + And the Orb user has a registered account + And the Orb user logs in + And a new sink is created + When the name, description and 
tags of this sink is updated
+  Then the name, description and tags updates to the new value and other fields remains the same
+
+
+@sanity @sink_partial_update
+Scenario: Partial Update: updating only sink name, description and configs
+  Given that the user has the prometheus/grafana credentials
+  And the Orb user has a registered account
+  And the Orb user logs in
+  And a new sink is created
+  When the name, description and config of this sink is updated
+  Then the name, description and config updates to the new value and other fields remains the same
+
+
+@sanity @sink_partial_update
+Scenario: Partial Update: updating only sink name, tags and configs
+  Given that the user has the prometheus/grafana credentials
+  And the Orb user has a registered account
+  And the Orb user logs in
+  And a new sink is created
+  When the name, tags and config of this sink is updated
+  Then the name, tags and config updates to the new value and other fields remains the same
+
+
+@sanity @sink_partial_update
+Scenario: Partial Update: updating only sink description, tags and configs
+  Given that the user has the prometheus/grafana credentials
+  And the Orb user has a registered account
+  And the Orb user logs in
+  And a new sink is created
+  When the description, tags and config of this sink is updated
+  Then the description, tags and config updates to the new value and other fields remains the same
diff --git a/python-test/features/steps/agent_config_file.py b/python-test/features/steps/agent_config_file.py
index 7ceb84062..8ffc1d77e 100644
--- a/python-test/features/steps/agent_config_file.py
+++ b/python-test/features/steps/agent_config_file.py
@@ -14,7 +14,7 @@ def __init__(self):
     def config_file_of_orb_agent(cls, name, token, iface, orb_url, base_orb_mqtt, tap_name, tls_verify=True,
                                  auto_provision=True, orb_cloud_mqtt_id=None, orb_cloud_mqtt_key=None,
                                  orb_cloud_mqtt_channel_id=None, input_type="pcap", input_tags='3', settings=None,
-                                 include_otel_env_var=False, enable_otel=False, overwrite_default=False):
+                                 include_otel_env_var=False, enable_otel=True, overwrite_default=False):
         if isinstance(include_otel_env_var, str):
             assert_that(include_otel_env_var.lower(), any_of("true", "false"), "Unexpected value for "
                                                                                "'include_otel_env_var'.")
diff --git a/python-test/features/steps/control_plane_agent_groups.py b/python-test/features/steps/control_plane_agent_groups.py
index 7d2eb70db..d5489c781 100644
--- a/python-test/features/steps/control_plane_agent_groups.py
+++ b/python-test/features/steps/control_plane_agent_groups.py
@@ -49,7 +49,7 @@ def create_agent_group_matching_agent(context, amount_of_agent_groups, amount_of
 @step("{amount_of_agent_groups} Agent Group(s) is created with {orb_tags} orb tag(s) (lower case)")
-# this step is temporary because of issue https://github.com/ns1labs/orb/issues/1053
+# this step is temporary because of issue https://github.com/orb-community/orb/issues/1053
 def create_group_with_tags_lower_case(context, amount_of_agent_groups, orb_tags):
     create_new_agent_group(context, amount_of_agent_groups, orb_tags, tags_lower_case=True)
@@ -325,7 +325,7 @@ def create_agent_group(token, name, description, tags, expected_status_code=201)
     try:
         response_json = response.json()
     except ValueError:
-        response_json = ValueError
+        response_json = response.text
     assert_that(response.status_code, equal_to(expected_status_code),
                 f"Request to create agent group failed with status= {str(response.status_code)}. Response="
                 f" {str(response_json)}. Json used: {json_request}")
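A recurring pattern in these step changes is the try/except fallback from response.json() to response.text so that assertion messages survive non-JSON error bodies. If further cleanup is ever wanted, the repetition could be collapsed into a tiny helper; a possible sketch, with a name that is illustrative and not introduced by this diff:

# Sketch only, not part of this change set. Mirrors the repeated
# "response_json = response.json() / except ValueError: response.text" pattern.
def safe_response_body(response):
    """Return the decoded JSON body, or the raw text when decoding fails."""
    try:
        return response.json()
    except ValueError:
        return response.text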
Json used: {json_request}") @@ -469,15 +469,15 @@ def edit_agent_group(token, agent_group_id, name, description, tags, expected_st if tags == {} or name == {}: expected_status_code = 400 - assert_that(group_edited_response.status_code, equal_to(expected_status_code), - 'Request to edit agent group failed with status=' + "status code =" + - str(group_edited_response.status_code) + "response =" + str(group_edited_response.json()) + - " json used: " + str(json_request)) - try: response_json = group_edited_response.json() except ValueError: - response_json = ValueError + response_json = group_edited_response.text + + assert_that(group_edited_response.status_code, equal_to(expected_status_code), + 'Request to edit agent group failed with status=' + "status code =" + + str(group_edited_response.status_code) + "response =" + str(response_json) + + " json used: " + str(json_request)) return response_json, group_edited_response.status_code diff --git a/python-test/features/steps/control_plane_agents.py b/python-test/features/steps/control_plane_agents.py index 54e0abd5c..37e44064f 100644 --- a/python-test/features/steps/control_plane_agents.py +++ b/python-test/features/steps/control_plane_agents.py @@ -92,7 +92,7 @@ def agent_is_created_matching_group(context, amount_of_group): @then('the agent status in Orb should be {status} within {seconds} seconds') -def check_agent_online(context, status, seconds): +def check_agent_status_within_seconds(context, status, seconds): timeout = int(seconds) token = context.token agent_status, context.agent = wait_until_expected_agent_status(token, context.agent['id'], status, timeout=timeout) @@ -102,6 +102,23 @@ def check_agent_online(context, status, seconds): f"\n Agent: {context.agent}. \nAgent logs: {logs}") +@then('the agent status in Orb should be {status} after {seconds} seconds') +def check_agent_status_after_seconds(context, status, seconds): + event = threading.Event() + event.wait(int(seconds)) + event.set() + if event.is_set() is True: + token = context.token + agent_status, context.agent = wait_until_expected_agent_status(token, context.agent['id'], status, timeout=120) + try: + logs = get_orb_agent_logs(context.container_id) + except Exception as e: + logs = e + assert_that(agent_status, is_(equal_to(status)), + f"Agent did not get '{status}' after {str(seconds)} seconds, but was '{agent_status}'." + f"\n Agent: {context.agent}. 
\nAgent logs: {logs}") + + @step('the agent status is {status}') def check_agent_status(context, status): timeout = 30 @@ -390,7 +407,8 @@ def provision_agent_using_config_file(context, input_type, settings, provision, if "config_file" in kwargs['pkt_config'].keys(): pkt_configs["config_file"] = kwargs['pkt_config']["config_file"] context.agent_file_name, tags_on_agent, context.tap, safe_config_file = \ - create_agent_config_file(context.token, agent_name, interface, agent_tags, orb_url, base_orb_address, context.port, + create_agent_config_file(context.token, agent_name, interface, agent_tags, orb_url, base_orb_address, + context.port, context.agent_groups, tap_name, input_type, input_tags, auto_provision, orb_cloud_mqtt_id, orb_cloud_mqtt_key, orb_cloud_mqtt_channel_id, settings, overwrite_default, paste_only_file, pkt_configs['binary'], pkt_configs['config_file']) @@ -669,8 +687,12 @@ def list_up_to_limit_agents(token, limit=100, offset=0): response = requests.get(orb_url + '/api/v1/agents', headers={'Authorization': f'Bearer {token}'}, params={"limit": limit, "offset": offset}, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(200), - f"Request to list agents failed with status= {str(response.status_code)}:{str(response.json())}") + f"Request to list agents failed with status= {str(response.status_code)}:{str(response_json)}") agents_as_json = response.json() return agents_as_json['agents'], agents_as_json['total'], agents_as_json['offset'] @@ -713,7 +735,7 @@ def wait_until_agent_being_created(token, name, tags, expected_status_code=201, try: response_json = response.json() except ValueError: - response_json = ValueError + response_json = response.text if response.status_code == expected_status_code: event.set() return response, response_json @@ -752,10 +774,14 @@ def edit_agent(token, agent_id, name, tags, expected_status_code=200): 'Authorization': f'Bearer {token}'} response = requests.put(orb_url + '/api/v1/agents/' + agent_id, json=json_request, headers=headers_request, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), - 'Request to edit agent failed with status=' + str(response.status_code) + ":" + str(response.json())) + 'Request to edit agent failed with status=' + str(response.status_code) + ":" + str(response_json)) - return response.json() + return response_json @threading_wait_until diff --git a/python-test/features/steps/control_plane_datasets.py b/python-test/features/steps/control_plane_datasets.py index 461c43254..4e650dec5 100644 --- a/python-test/features/steps/control_plane_datasets.py +++ b/python-test/features/steps/control_plane_datasets.py @@ -183,7 +183,7 @@ def create_dataset(token, name_label, policy_id, agent_group_id, sink_id, expect try: response_json = response.json() except ValueError: - response_json = ValueError + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), 'Request to create dataset failed with status=' + str(response.status_code) + ': ' + str(response_json)) @@ -210,10 +210,14 @@ def edit_dataset(token, dataset_id, name_label, policy_id, agent_group_id, sink_ response = requests.put(f"{orb_url}/api/v1/policies/dataset/{dataset_id}", json=json_request, headers=header_request, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: 
+ response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), - 'Request to edit dataset failed with status=' + str(response.status_code) + ': ' + str(response.json())) + 'Request to edit dataset failed with status=' + str(response.status_code) + ': ' + str(response_json)) - return response.json() + return response_json @then('cleanup datasets') @@ -262,9 +266,13 @@ def list_up_to_limit_datasets(token, limit=100, offset=0): response = requests.get(orb_url + '/api/v1/policies/dataset', headers={'Authorization': f'Bearer {token}'}, params={"limit": limit, "offset": offset}, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(200), - 'Request to list datasets failed with status=' + str(response.status_code) + ':' + str(response.json())) + 'Request to list datasets failed with status=' + str(response.status_code) + ':' + str(response_json)) datasets_as_json = response.json() return datasets_as_json['datasets'], datasets_as_json['total'], datasets_as_json['offset'] @@ -313,7 +321,7 @@ def get_dataset(token, dataset_id, expected_status_code=200): try: response_json = get_dataset_response.json() except ValueError: - response_json = ValueError + response_json = get_dataset_response.text assert_that(get_dataset_response.status_code, equal_to(expected_status_code), 'Request to get policy id=' + dataset_id + ' failed with status=' + diff --git a/python-test/features/steps/control_plane_policies.py b/python-test/features/steps/control_plane_policies.py index 238f4c592..42e6c57c8 100644 --- a/python-test/features/steps/control_plane_policies.py +++ b/python-test/features/steps/control_plane_policies.py @@ -488,12 +488,16 @@ def create_duplicated_policy(token, policy_id, new_policy_name=None, status_code 'Authorization': f'Bearer {token}'} post_url = f"{orb_url}/api/v1/policies/agent/{policy_id}/duplicate" response = requests.post(post_url, json=json_request, headers=headers_request, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(status_code), 'Request to create duplicated policy failed with status=' + str(response.status_code) + ': ' - + str(response.json())) + + str(response_json)) if status_code == 201: compare_two_policies(token, policy_id, response.json()['id']) - return response.json() + return response_json def compare_two_policies(token, id_policy_one, id_policy_two): @@ -531,7 +535,7 @@ def create_policy(token, json_request, expected_status_code=201): try: response_json = response.json() except ValueError: - response_json = ValueError + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), 'Request to create policy failed with status=' + str(response.status_code) + ': ' + str(response_json)) @@ -556,7 +560,7 @@ def edit_policy(token, policy_id, json_request, expected_status_code=200): try: response_json = response.json() except ValueError: - response_json = ValueError + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), 'Request to editing policy failed with status=' + str(response.status_code) + ': ' + str(response_json)) @@ -758,7 +762,7 @@ def get_policy(token, policy_id, expected_status_code=200): try: response_json = get_policy_response.json() except ValueError: - response_json = ValueError + response_json = get_policy_response.text 
assert_that(get_policy_response.status_code, equal_to(expected_status_code), 'Request to get policy id=' + policy_id + ' failed with status= ' + str(get_policy_response.status_code) + " response= " + str(response_json)) @@ -800,12 +804,16 @@ def list_up_to_limit_policies(token, limit=100, offset=0): response = requests.get(orb_url + '/api/v1/policies/agent', headers={'Authorization': f'Bearer {token}'}, params={'limit': limit, 'offset': offset}, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(200), 'Request to list policies failed with status=' + str(response.status_code) + ': ' - + str(response.json())) + + str(response_json)) - policies_as_json = response.json() + policies_as_json = response_json return policies_as_json['data'], policies_as_json['total'], policies_as_json['offset'] diff --git a/python-test/features/steps/control_plane_sink.py b/python-test/features/steps/control_plane_sink.py index d4d70a5b0..932e5e30b 100644 --- a/python-test/features/steps/control_plane_sink.py +++ b/python-test/features/steps/control_plane_sink.py @@ -1,4 +1,4 @@ -from behave import given, then, step +from behave import given, when, then, step from test_config import TestConfig from utils import random_string, filter_list_by_parameter_start_with, threading_wait_until, validate_json from hamcrest import * @@ -153,9 +153,9 @@ def create_invalid_sink(context, credential): @step("referred sink must have {status} state on response within {time_to_wait} seconds") def check_sink_status(context, status, time_to_wait): sink_id = context.sink["id"] - get_sink_response = get_sink_status_and_check(context.token, sink_id, status, timeout=time_to_wait) + context.sink = get_sink_status_and_check(context.token, sink_id, status, timeout=time_to_wait, wait_time=10) - assert_that(get_sink_response['state'], equal_to(status), f"Sink {context.sink} state failed") + assert_that(context.sink['state'], equal_to(status), f"Sink {context.sink} state failed") @step("referred sink must have {status} state on response after {time_to_wait} seconds") @@ -164,9 +164,9 @@ def check_sink_status(context, status, time_to_wait): assert_that(time_to_wait.isdigit(), is_(True), f"Invalid type: 'time_to_wait' must be an int and is {time_to_wait}") time_to_wait = int(time_to_wait) threading.Event().wait(time_to_wait) - get_sink_response = get_sink_status_and_check(context.token, sink_id, status) + context.sink = get_sink_status_and_check(context.token, sink_id, status) - assert_that(get_sink_response['state'], equal_to(status), f"Sink {context.sink} state failed") + assert_that(context.sink['state'], equal_to(status), f"Sink {context.sink} state failed") # this step is only necessary for OTEL migration tests, so we can exclude it after the migration @@ -194,7 +194,7 @@ def check_all_sinks_status(context): if "migrated" in sink["config"].keys() and sink["config"]["migrated"] == "m3": migrated_sinks.append(sink["id"]) - print(f"{len(migrated_sinks)} sinks were migrated") + print(f"{len(migrated_sinks)} of {len(all_sinks)} sinks have migration tags") assert_that(len(otel_sinks), equal_to(len(all_sinks)), f"{len(legacy_sinks)} sinks are not with otel tag enabled: " f"{legacy_sinks}") @@ -225,6 +225,63 @@ def edit_sink_field(context, field_to_edit, type_of_field): context.sink = edit_sink(context.token, context.sink['id'], sink) +@when("the {sink_keys} of this sink is updated") +def sink_partial_update(context, sink_keys): + sink_keys = 
sink_keys.replace(" and ", ", ") + keys_to_update = sink_keys.split(", ") + update_sink_body = get_sink(context.token, context.sink['id']) + keys_to_not_update = list(set(update_sink_body.keys()).symmetric_difference(set(keys_to_update))) + all_sink_keys = list(update_sink_body.keys()) + for sink_key in all_sink_keys: + if sink_key in keys_to_not_update: + del update_sink_body[sink_key] + else: + assert_that(sink_key, any_of("name", "description", "tags", "config"), f"Unexpected key for sink") + remote_host = context.remote_prometheus_endpoint + username = context.prometheus_username + password = context.prometheus_key + include_otel_env_var = configs.get("include_otel_env_var") + enable_otel = configs.get("enable_otel") + otel_map = {"true": "enabled", "false": "disabled"} + assert_that(enable_otel, any_of("true", "false"), "Unexpected value for 'enable_otel' on sinks " + "partial update") + assert_that(include_otel_env_var, any_of("true", "false"), "Unexpected value for 'include_otel_env_var' on " + "sinks partial update") + if include_otel_env_var == "true": + sink_configs = {"remote_host": remote_host, "username": username, "password": password, + "opentelemetry": otel_map[enable_otel]} + else: + sink_configs = {"remote_host": remote_host, "username": username, "password": password} + context.values_to_use_to_update_sink = {"name": f"{context.sink['name']}_updated", + "description": "this sink has been updated", + "tags": {"sink": "updated", "new": "tag"}, + "config": sink_configs} + update_sink_body[sink_key] = context.values_to_use_to_update_sink[sink_key] + context.sink_before_update = get_sink(context.token, context.sink['id']) + context.sink = edit_sink(context.token, context.sink['id'], update_sink_body, 200) + + +@then("the {sink_keys} updates to the new value and other fields remains the same") +def verify_sink_after_update(context, sink_keys): + sink_keys = sink_keys.replace(" and ", ", ") + updated_keys = sink_keys.split(", ") + all_sink_keys = list(context.sink.keys()) + assert_that(set(context.sink.keys()), equal_to(set(context.sink_before_update)), + f"Sink keys are not the same after sink partial update:" + f"Sink before update: {context.sink_before_update}. Sink after update: {context.sink}") + for sink_key in all_sink_keys: + if sink_key in updated_keys and sink_key != "config": # config returns empty as a security action + assert_that(context.sink[sink_key], equal_to(context.values_to_use_to_update_sink[sink_key]), + f"{sink_key} was not correctly updated on sink. Sink before update: " + f"{context.sink_before_update}. Sink after update: {context.sink}") + elif sink_key != "ts_created": + assert_that(context.sink[sink_key], equal_to(context.sink_before_update[sink_key]), + f"Unexpected value for {sink_key} after sink partial update." + f"Sink before update: {context.sink_before_update}. 
Sink after update: {context.sink}") + else: + pass # insert validation to ts_created > must be higher than before + + @then('cleanup sinks') def clean_sinks(context): """ @@ -270,11 +327,12 @@ def create_new_sink(token, name_label, remote_host, username, password, descript headers_request = {'Content-type': 'application/json', 'Accept': '*/*', 'Authorization': f'Bearer {token}'} - response = requests.post(orb_url + '/api/v1/sinks', json=json_request, headers=headers_request, verify=verify_ssl_bool) + response = requests.post(orb_url + '/api/v1/sinks', json=json_request, headers=headers_request, + verify=verify_ssl_bool) try: response_json = response.json() except ValueError: - response_json = ValueError + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), 'Request to create sink failed with status=' + str(response.status_code) + ': ' + str(response_json)) @@ -296,7 +354,7 @@ def get_sink(token, sink_id): try: response_json = get_sink_response.json() except ValueError: - response_json = ValueError + response_json = get_sink_response.text assert_that(get_sink_response.status_code, equal_to(200), 'Request to get sink id=' + sink_id + ' failed with status=' + str(get_sink_response.status_code) + ': ' @@ -338,10 +396,15 @@ def list_up_to_limit_sinks(token, limit=100, offset=0): response = requests.get(orb_url + '/api/v1/sinks', headers={'Authorization': f'Bearer {token}'}, params={'limit': limit, 'offset': offset}, verify=verify_ssl_bool) + + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(200), 'Request to list sinks failed with status=' + str(response.status_code) + ': ' - + str(response.json())) + + str(response_json)) sinks_as_json = response.json() return sinks_as_json['sinks'], sinks_as_json['total'], sinks_as_json['offset'] @@ -405,8 +468,12 @@ def edit_sink(token, sink_id, sink_body, expected_status_code=200): response = requests.put(orb_url + '/api/v1/sinks/' + sink_id, json=sink_body, headers=headers_request, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), - 'Request to edit sink failed with status=' + str(response.status_code) + ":" + str(response.json())) + 'Request to edit sink failed with status=' + str(response.status_code) + ":" + str(response_json)) return response.json() diff --git a/python-test/features/steps/local_agent.py b/python-test/features/steps/local_agent.py index 038e3aacf..18f72e0b3 100644 --- a/python-test/features/steps/local_agent.py +++ b/python-test/features/steps/local_agent.py @@ -59,7 +59,7 @@ def run_local_agent_container(context, status_port, **kwargs): env_vars.update(kwargs) assert_that(status_port, any_of(equal_to("available"), equal_to("unavailable")), "Unexpected value for port") availability = {"available": True, "unavailable": False} - agent_docker_image = configs.get('agent_docker_image', 'ns1labs/orb-agent') + agent_docker_image = configs.get('agent_docker_image', 'orbcommunity/orb-agent') image_tag = ':' + configs.get('agent_docker_tag', 'latest') agent_image = agent_docker_image + image_tag @@ -335,7 +335,7 @@ def check_logs_contain_log(logs, expected_log, event, start_time=0): def run_local_agent_from_terminal(command, verify_ssl, pktvisor_port, - include_otel_env_var="false", enable_otel="false"): + include_otel_env_var="false", enable_otel="true"): """ :param (str) command: 
docker command to provision an agent :param (bool) verify_ssl: False if orb address doesn't have a valid certificate. @@ -429,7 +429,7 @@ def run_agent_config_file(agent_name, overwrite_default=False, only_file=False, :param time_to_wait: seconds that threading must wait after run the agent :return: agent container id """ - agent_docker_image = configs.get('agent_docker_image', 'ns1labs/orb-agent') + agent_docker_image = configs.get('agent_docker_image', 'orbcommunity/orb-agent') agent_image = f"{agent_docker_image}:{configs.get('agent_docker_tag', 'latest')}" local_orb_path = configs.get("local_orb_path") if only_file is True: diff --git a/python-test/features/steps/metrics.py b/python-test/features/steps/metrics.py index f13879b6b..fae388bb0 100644 --- a/python-test/features/steps/metrics.py +++ b/python-test/features/steps/metrics.py @@ -36,7 +36,7 @@ def default_enabled_metric_groups_by_handler(handler): assert_that(handler, any_of("dns", "dns-v2", "net", "net-v2", "dhcp", "bgp", "pcap", "flow", "netprobe"), "Invalid handler") groups_default_enabled = { - "dns": ["cardinality", "counters", "dns_transaction", "top_qnames", "top_ports"], + "dns": ["cardinality", "counters", "dns_transaction", "top_qnames", "top_ports", "quantiles"], "dns-v2": ["cardinality", "counters", "top_qnames", "quantiles", "top_qtypes", "top_rcodes"], "net": ["cardinality", "counters", "top_geo", "top_ips"], "net-v2": ["cardinality", "counters", "top_geo", "top_ips", "quantiles"], @@ -60,7 +60,7 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa default_enabled_metric_groups = default_enabled_metric_groups_by_handler(handler) groups_enabled = groups_enabled.split(", ") - groups_enabled = groups_enabled + default_enabled_metric_groups + groups_enabled = list(set(groups_enabled + default_enabled_metric_groups)) groups_disabled = groups_disabled.split(", ") if all(metric_group in groups_disabled for metric_group in groups_enabled) or "all" in groups_disabled: if len(default_enabled_metric_groups) > 0: @@ -119,20 +119,30 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa ("all" in groups_enabled and "dns_transaction" not in groups_disabled): metric_groups.add("dns_xact_counts_timed_out") metric_groups.add("dns_xact_counts_total") - metric_groups.add("dns_xact_in_quantiles_us") - metric_groups.add("dns_xact_in_quantiles_us_sum") - metric_groups.add("dns_xact_in_quantiles_us_count") metric_groups.add("dns_xact_in_total") - metric_groups.add("dns_xact_out_quantiles_us") - metric_groups.add("dns_xact_out_quantiles_us_sum") - metric_groups.add("dns_xact_out_quantiles_us_count") + metric_groups.add("dns_xact_out_total") # todo find a way to test slow metrics # metric_groups.add("dns_xact_out_top_slow") # metric_groups.add("dns_xact_in_top_slow") - metric_groups.add("dns_xact_out_total") - metric_groups.add("dns_xact_ratio_quantiles") - metric_groups.add("dns_xact_ratio_quantiles_sum") - metric_groups.add("dns_xact_ratio_quantiles_count") + + if ("quantiles" in groups_enabled and "quantiles" not in groups_disabled) or \ + ("all" in groups_enabled and "quantiles" not in groups_disabled): + metric_groups.add("dns_xact_ratio_quantiles") + metric_groups.add("dns_xact_ratio_quantiles_sum") + metric_groups.add("dns_xact_ratio_quantiles_count") + metric_groups.add("dns_xact_out_quantiles_us") + metric_groups.add("dns_xact_out_quantiles_us_sum") + metric_groups.add("dns_xact_out_quantiles_us_count") + metric_groups.add("dns_xact_in_quantiles_us") + 
metric_groups.add("dns_xact_in_quantiles_us_sum") + metric_groups.add("dns_xact_in_quantiles_us_count") + + if ("histograms" in groups_enabled and "histograms" not in groups_disabled) or \ + ("all" in groups_enabled and "histograms" not in groups_disabled): + metric_groups.add("dns_xact_in_histogram_us_bucket") + metric_groups.add("dns_xact_in_histogram_us_count") + metric_groups.add("dns_xact_out_histogram_us_bucket") + metric_groups.add("dns_xact_out_histogram_us_count") if ("top_qnames" in groups_enabled and "top_qnames" not in groups_disabled) or \ ("all" in groups_enabled and "top_qnames" not in groups_disabled): @@ -155,9 +165,9 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa metric_groups = { "dns_observed_packets", "dns_deep_sampled_packets", - "dns_rates_events_sum", - "dns_rates_events_count", - "dns_rates_events" + "dns_rates_observed_pps_sum", + "dns_rates_observed_pps_count", + "dns_rates_observed_pps" } if ("cardinality" in groups_enabled and "cardinality" not in groups_disabled) or \ ("all" in groups_enabled and "cardinality" not in groups_disabled): @@ -225,6 +235,8 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa metric_groups.add("dns_xact_time_us_sum") metric_groups.add("dns_xact_time_us_count") metric_groups.add("dns_xact_time_us") + metric_groups.add("dns_xact_histogram_us_bucket") + metric_groups.add("dns_xact_histogram_us_count") # todo find a way to test slow metrics # metric_groups.add("dns_top_slow_xacts") elif isinstance(handler, str) and handler.lower() == "net": @@ -389,7 +401,20 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa if ("conversations" in groups_enabled and "conversations" not in groups_disabled) or \ ("all" in groups_enabled and "conversations" not in groups_disabled): metric_groups.add("flow_cardinality_conversations") - + if ("top_tos" in groups_enabled and "top_tos" not in groups_disabled) or \ + ("all" in groups_enabled and "top_tos" not in groups_disabled): + if ("by_bytes" in groups_enabled and "by_bytes" not in groups_disabled) or \ + ("all" in groups_enabled and "by_bytes" not in groups_disabled): + metric_groups.add("flow_top_in_dscp_bytes") + metric_groups.add("flow_top_out_dscp_bytes") + metric_groups.add("flow_top_in_ecn_bytes") + metric_groups.add("flow_top_out_ecn_bytes") + if ("by_packets" in groups_enabled and "by_packets" not in groups_disabled) or \ + ("all" in groups_enabled and "by_packets" not in groups_disabled): + metric_groups.add("flow_top_in_dscp_packets") + metric_groups.add("flow_top_out_dscp_packets") + metric_groups.add("flow_top_in_ecn_packets") + metric_groups.add("flow_top_out_ecn_packets") if ("counters" in groups_enabled and "counters" not in groups_disabled) or \ ("all" in groups_enabled and "counters" not in groups_disabled): metric_groups.add("flow_records_filtered") @@ -489,7 +514,11 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa if ("cardinality" in groups_enabled and "cardinality" not in groups_disabled) or \ ("all" in groups_enabled and "cardinality" not in groups_disabled): metric_groups.add("flow_cardinality_conversations") + if ("by_bytes" in groups_enabled and "by_bytes" not in groups_disabled) or \ + ("all" in groups_enabled and "by_bytes" not in groups_disabled): metric_groups.add("flow_top_conversations_bytes") + if ("by_packets" in groups_enabled and "by_packets" not in groups_disabled) or \ + ("all" in groups_enabled and "by_packets" not in groups_disabled): 
metric_groups.add("flow_top_conversations_packets") elif isinstance(handler, str) and handler.lower() == "netprobe": @@ -526,5 +555,4 @@ def expected_metrics_by_handlers_and_groups(handler, groups_enabled, groups_disa metric_groups.add("netprobe_response_min_us") else: raise f"{handler} is not a valid handler" - return metric_groups diff --git a/python-test/features/steps/test_config.py b/python-test/features/steps/test_config.py index 1b89e2f7f..47ceaed40 100644 --- a/python-test/features/steps/test_config.py +++ b/python-test/features/steps/test_config.py @@ -81,12 +81,12 @@ def _read_configs(): configs['is_credentials_registered'] = is_credentials_registered include_otel_env_var = configs.get("include_otel_env_var", "false").lower() configs["include_otel_env_var"] = include_otel_env_var - enable_otel = configs.get('enable_otel', 'false').lower() + enable_otel = configs.get('enable_otel', 'true').lower() assert_that(enable_otel, any_of(equal_to('true'), equal_to('false')), 'Invalid value to enable_otel parameter. A boolean value is expected.') configs['enable_otel'] = enable_otel - if include_otel_env_var == "false" and enable_otel == "true": - raise ValueError("'enable_otel' is enabled, but the variable is not being included in the commands because of " + if include_otel_env_var == "false" and enable_otel == "false": + raise ValueError("'enable_otel' is false, but the variable is not being included in the commands because of " "'include_otel_env_var' is false. Check your parameters.") return configs diff --git a/python-test/features/steps/users.py b/python-test/features/steps/users.py index 2ccc5c74b..7f70eb087 100644 --- a/python-test/features/steps/users.py +++ b/python-test/features/steps/users.py @@ -101,10 +101,14 @@ def authenticate(user_email, user_password, expected_status_code=201): json=json_request, headers=headers, verify=verify_ssl_bool) + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), - 'Authentication failed with status= ' + str(response.status_code) + str(response.json())) - - return response.json() + f"Authentication failed with status= {str(response.status_code)}. Response: {str(response_json)}" + ) + return response_json def register_account(user_email, user_password, company_name=None, user_full_name=None, expected_status_code=201): @@ -143,7 +147,12 @@ def get_account_information(token, expected_status_code=200): """ response = requests.get(orb_url + '/api/v1/users/profile', headers={'Authorization': f'Bearer {token}'}, verify=verify_ssl_bool) + + try: + response_json = response.json() + except ValueError: + response_json = response.text assert_that(response.status_code, equal_to(expected_status_code), f"Unexpected status code for get account data." f"Status Code = {response.status_code}." 
- f"Response = {str(response.json())}") - return response.json() + f"Response = {str(response_json)}") + return response_json diff --git a/python-test/test_config.ini.tpl b/python-test/test_config.ini.tpl index e09c40d7f..6875045dd 100644 --- a/python-test/test_config.ini.tpl +++ b/python-test/test_config.ini.tpl @@ -11,7 +11,7 @@ remote_prometheus_endpoint= # Optional fields: -# agent_docker_image=ns1labs/orb-agent +# agent_docker_image=orbcommunity/orb-agent # agent_docker_tag=latest # orb_agent_interface=mock # verify_ssl=True @@ -19,7 +19,7 @@ remote_prometheus_endpoint= diff --git a/sinker/message_handler.go b/sinker/message_handler.go index 32ac4ba01..c61d1d484 100644 --- a/sinker/message_handler.go +++ b/sinker/message_handler.go @@ -47,7 +47,7 @@ func (svc SinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, owner headers["Authorization"] = svc.encodeBase64(cfgRepo.User, cfgRepo.Password) result, writeErr := promClient.WriteTimeSeries(ctx, tsList, prometheus.WriteOptions{Headers: headers}) if err := error(writeErr); err != nil { - if cfgRepo.State != config.Error || cfgRepo.Msg != fmt.Sprint(err) { + if cfgRepo.Msg != fmt.Sprint(err) { cfgRepo.State = config.Error cfgRepo.Msg = fmt.Sprint(err) cfgRepo.LastRemoteWrite = time.Now() diff --git a/sinker/otel/bridgeservice/bridge.go b/sinker/otel/bridgeservice/bridge.go index f3cf27a73..43aa759af 100644 --- a/sinker/otel/bridgeservice/bridge.go +++ b/sinker/otel/bridgeservice/bridge.go @@ -5,6 +5,7 @@ import ( "sort" "time" + "github.com/go-kit/kit/metrics" fleetpb "github.com/orb-community/orb/fleet/pb" policiespb "github.com/orb-community/orb/policies/pb" "github.com/orb-community/orb/sinker/config" @@ -16,25 +17,40 @@ type BridgeService interface { GetDataSetsFromAgentGroups(ctx context.Context, mfOwnerId string, agentGroupIds []string) (map[string]string, error) NotifyActiveSink(ctx context.Context, mfOwnerId, sinkId, state, message string) error GetSinkIdsFromPolicyID(ctx context.Context, mfOwnerId string, policyID string) (map[string]string, error) + IncreamentMessageCounter(publisher, subtopic, channel, protocol string) } func NewBridgeService(logger *zap.Logger, sinkerCache config.ConfigRepo, policiesClient policiespb.PolicyServiceClient, - fleetClient fleetpb.FleetServiceClient) SinkerOtelBridgeService { + fleetClient fleetpb.FleetServiceClient, messageInputCounter metrics.Counter) SinkerOtelBridgeService { return SinkerOtelBridgeService{ - logger: logger, - sinkerCache: sinkerCache, - policiesClient: policiesClient, - fleetClient: fleetClient, + logger: logger, + sinkerCache: sinkerCache, + policiesClient: policiesClient, + fleetClient: fleetClient, + messageInputCounter: messageInputCounter, } } type SinkerOtelBridgeService struct { - logger *zap.Logger - sinkerCache config.ConfigRepo - policiesClient policiespb.PolicyServiceClient - fleetClient fleetpb.FleetServiceClient + logger *zap.Logger + sinkerCache config.ConfigRepo + policiesClient policiespb.PolicyServiceClient + fleetClient fleetpb.FleetServiceClient + messageInputCounter metrics.Counter +} + +// Implementar nova funcao +func (bs *SinkerOtelBridgeService) IncreamentMessageCounter(publisher, subtopic, channel, protocol string) { + labels := []string{ + "method", "handleMsgFromAgent", + "agent_id", publisher, + "subtopic", subtopic, + "channel", channel, + "protocol", protocol, + } + bs.messageInputCounter.With(labels...).Add(1) } func (bs *SinkerOtelBridgeService) NotifyActiveSink(ctx context.Context, mfOwnerId, sinkId, newState, message string) error { 
diff --git a/sinker/otel/orbreceiver/otlp.go b/sinker/otel/orbreceiver/otlp.go index edbf089a1..902b59f20 100644 --- a/sinker/otel/orbreceiver/otlp.go +++ b/sinker/otel/orbreceiver/otlp.go @@ -365,6 +365,8 @@ func (r *OrbReceiver) MessageInbound(msg messaging.Message) error { return } + r.sinkerService.IncreamentMessageCounter(msg.Publisher, msg.Subtopic, msg.Channel, msg.Protocol) + if mr.Metrics().ResourceMetrics().Len() == 0 || mr.Metrics().ResourceMetrics().At(0).ScopeMetrics().Len() == 0 { r.cfg.Logger.Info("No data information from metrics request") return diff --git a/sinker/redis/producer/streams.go b/sinker/redis/producer/streams.go index 7c81e7b76..39c14fe1c 100644 --- a/sinker/redis/producer/streams.go +++ b/sinker/redis/producer/streams.go @@ -10,7 +10,8 @@ import ( ) const ( - streamID = "orb.sinker" + streamID = "orb.sinker" + streamLen = 1000 ) var _ config.ConfigRepo = (*eventStore)(nil) @@ -38,6 +39,8 @@ func (e eventStore) DeployCollector(ctx context.Context, config config.SinkConfi recordToSink := &redis.XAddArgs{ Stream: streamID, Values: eventToSink.Encode(), + MaxLen: streamLen, + Approx: true, } err = e.client.XAdd(ctx, recordToSink).Err() if err != nil { @@ -67,6 +70,8 @@ func (e eventStore) Add(config config.SinkConfig) error { record := &redis.XAddArgs{ Stream: streamID, Values: event.Encode(), + MaxLen: streamLen, + Approx: true, } err = e.client.XAdd(context.Background(), record).Err() if err != nil { @@ -90,6 +95,8 @@ func (e eventStore) Remove(ownerID string, sinkID string) error { record := &redis.XAddArgs{ Stream: streamID, Values: event.Encode(), + MaxLen: streamLen, + Approx: true, } err = e.client.XAdd(context.Background(), record).Err() if err != nil { @@ -118,6 +125,8 @@ func (e eventStore) Edit(config config.SinkConfig) error { record := &redis.XAddArgs{ Stream: streamID, Values: event.Encode(), + MaxLen: streamLen, + Approx: true, } err = e.client.XAdd(context.Background(), record).Err() if err != nil { @@ -142,9 +151,10 @@ func (e eventStore) GetAllOwners() ([]string, error) { return e.sinkCache.GetAllOwners() } -func NewEventStoreMiddleware(repo config.ConfigRepo, client *redis.Client) config.ConfigRepo { +func NewEventStoreMiddleware(repo config.ConfigRepo, client *redis.Client, logger *zap.Logger) config.ConfigRepo { return eventStore{ sinkCache: repo, client: client, + logger: logger, } } diff --git a/sinker/redis/sinker.go b/sinker/redis/sinker.go index 1fd2c14f1..97c3a4d39 100644 --- a/sinker/redis/sinker.go +++ b/sinker/redis/sinker.go @@ -21,6 +21,7 @@ const ( keyPrefix = "sinker_key" activityPrefix = "sinker_activity" idPrefix = "orb.maestro" + streamLen = 1000 ) var _ sinkerconfig.ConfigRepo = (*sinkerCache)(nil) @@ -133,6 +134,8 @@ func (s *sinkerCache) DeployCollector(ctx context.Context, config sinkerconfig.S ID: config.SinkID, Stream: idPrefix, Values: event, + MaxLen: streamLen, + Approx: true, } if cmd := s.client.XAdd(ctx, &encodeEvent); cmd.Err() != nil { return cmd.Err() diff --git a/sinker/service.go b/sinker/service.go index 8a8db6717..ef05b69dc 100644 --- a/sinker/service.go +++ b/sinker/service.go @@ -95,7 +95,7 @@ func (svc SinkerService) Start() error { func (svc SinkerService) startOtel(ctx context.Context) error { if svc.otel { var err error - bridgeService := bridgeservice.NewBridgeService(svc.logger, svc.sinkerCache, svc.policiesClient, svc.fleetClient) + bridgeService := bridgeservice.NewBridgeService(svc.logger, svc.sinkerCache, svc.policiesClient, svc.fleetClient, svc.messageInputCounter) svc.otelCancelFunct, err = 
otel.StartOtelComponents(ctx, &bridgeService, svc.logger, svc.otelKafkaUrl, svc.pubSub) if err != nil { svc.logger.Error("error during StartOtelComponents", zap.Error(err)) diff --git a/sinks/api/http/endpoint.go b/sinks/api/http/endpoint.go index 155e4fd7d..8a1101a2e 100644 --- a/sinks/api/http/endpoint.go +++ b/sinks/api/http/endpoint.go @@ -15,11 +15,13 @@ import ( "github.com/orb-community/orb/pkg/types" "github.com/orb-community/orb/sinks" "github.com/orb-community/orb/sinks/backend" + "go.uber.org/zap" + "time" ) var restrictiveKeyPrefixes = []string{backend.ConfigFeatureTypePassword} -func omitSecretInformation(metadata types.Metadata) (restrictedMetadata types.Metadata) { +func omitSecretInformation(be backend.Backend, format string, metadata types.Metadata) (restrictedMetadata types.Metadata, configData string) { metadata.RestrictKeys(func(key string) bool { match := false for _, restrictiveKey := range restrictiveKeyPrefixes { @@ -30,33 +32,63 @@ func omitSecretInformation(metadata types.Metadata) (restrictedMetadata types.Me } return match }) - return metadata + var err error + if format != "" { + configData, err = be.ConfigToFormat(format, metadata) + if err != nil { + return metadata, "" + } + } + return metadata, configData } func addEndpoint(svc sinks.SinkService) endpoint.Endpoint { return func(ctx context.Context, request interface{}) (interface{}, error) { req := request.(addReq) if err := req.validate(); err != nil { + svc.GetLogger().Error("got error in validating request", zap.Error(err)) return nil, err } nID, err := types.NewIdentifier(req.Name) if err != nil { + svc.GetLogger().Error("got error in creating new identifier", zap.Error(err)) return nil, err } - + var config types.Metadata + reqBackend := backend.GetBackend(req.Backend) + if req.Format != "" { + config, err = reqBackend.ParseConfig(req.Format, req.ConfigData) + if err != nil { + svc.GetLogger().Error("got error in parsing configuration", zap.Error(err)) + return nil, err + } + } else { + if req.Config != nil { + config = req.Config + } else { + svc.GetLogger().Error("did not receive any valid configuration") + return nil, errors.ErrMalformedEntity + } + } sink := sinks.Sink{ Name: nID, Backend: req.Backend, - Config: req.Config, + Config: config, Description: &req.Description, Tags: req.Tags, + ConfigData: req.ConfigData, + Format: req.Format, + Created: time.Now(), } saved, err := svc.CreateSink(ctx, req.token, sink) if err != nil { + svc.GetLogger().Error("received error on creating sink") return nil, err } + omittedConfig, omittedConfigData := omitSecretInformation(reqBackend, saved.Format, saved.Config) + res := sinkRes{ ID: saved.ID, Name: saved.Name.String(), @@ -65,7 +97,9 @@ func addEndpoint(svc sinks.SinkService) endpoint.Endpoint { State: saved.State.String(), Error: saved.Error, Backend: saved.Backend, - Config: omitSecretInformation(saved.Config), + Config: omittedConfig, + ConfigData: omittedConfigData, + Format: saved.Format, TsCreated: saved.Created, created: true, } @@ -77,19 +111,44 @@ func addEndpoint(svc sinks.SinkService) endpoint.Endpoint { func updateSinkEndpoint(svc sinks.SinkService) endpoint.Endpoint { return func(ctx context.Context, request interface{}) (response interface{}, err error) { req := request.(updateSinkReq) - if err := req.validate(); err != nil { + if req.token == "" { + return nil, errors.ErrUnauthorizedAccess + } + currentSink, err := svc.ViewSink(ctx, req.token, req.id) + if err != nil { + svc.GetLogger().Error("could not find sink with id", 
zap.String("sinkID", req.id), zap.Error(err)) return nil, err } + sinkBackend := backend.GetBackend(currentSink.Backend) + if err := req.validate(sinkBackend); err != nil { + svc.GetLogger().Error("error validating request", zap.Error(err)) + return nil, err + } + var config types.Metadata + if req.Format != "" { + config, err = sinkBackend.ParseConfig(req.Format, req.ConfigData) + if err != nil { + svc.GetLogger().Error("got error in parsing configuration", zap.Error(err)) + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + } else { + if req.Config != nil { + config = req.Config + } + } sink := sinks.Sink{ ID: req.id, Tags: req.Tags, - Config: req.Config, + Config: config, + ConfigData: req.ConfigData, + Format: req.Format, Description: req.Description, } if req.Name != "" { nameID, err := types.NewIdentifier(req.Name) if err != nil { + svc.GetLogger().Error("error on getting new identifier", zap.Error(err)) return nil, errors.ErrMalformedEntity } sink.Name = nameID @@ -97,8 +156,10 @@ func updateSinkEndpoint(svc sinks.SinkService) endpoint.Endpoint { sinkEdited, err := svc.UpdateSink(ctx, req.token, sink) if err != nil { + svc.GetLogger().Error("error on updating sink", zap.Error(err)) return nil, err } + omittedConfig, omittedConfigData := omitSecretInformation(sinkBackend, sinkEdited.Format, sinkEdited.Config) res := sinkRes{ ID: sinkEdited.ID, Name: sinkEdited.Name.String(), @@ -107,7 +168,9 @@ func updateSinkEndpoint(svc sinks.SinkService) endpoint.Endpoint { State: sinkEdited.State.String(), Error: sinkEdited.Error, Backend: sinkEdited.Backend, - Config: omitSecretInformation(sinkEdited.Config), + Config: omittedConfig, + ConfigData: omittedConfigData, + Format: sinkEdited.Format, created: false, } return res, nil @@ -137,17 +200,20 @@ func listSinksEndpoint(svc sinks.SinkService) endpoint.Endpoint { }, Sinks: []sinkRes{}, } - for _, sink := range page.Sinks { + reqBackend := backend.GetBackend(sink.Backend) + omittedConfig, omittedConfigData := omitSecretInformation(reqBackend, sink.Format, sink.Config) view := sinkRes{ - ID: sink.ID, - Name: sink.Name.String(), - Tags: sink.Tags, - State: sink.State.String(), - Error: sink.Error, - Backend: sink.Backend, - Config: omitSecretInformation(sink.Config), - TsCreated: sink.Created, + ID: sink.ID, + Name: sink.Name.String(), + Tags: sink.Tags, + State: sink.State.String(), + Error: sink.Error, + Backend: sink.Backend, + Config: omittedConfig, + ConfigData: omittedConfigData, + Format: sink.Format, + TsCreated: sink.Created, } if sink.Description != nil { view.Description = *sink.Description @@ -216,7 +282,8 @@ func viewSinkEndpoint(svc sinks.SinkService) endpoint.Endpoint { if err != nil { return sink, err } - + reqBackend := backend.GetBackend(sink.Backend) + omittedConfig, omittedConfigData := omitSecretInformation(reqBackend, sink.Format, sink.Config) res := sinkRes{ ID: sink.ID, Name: sink.Name.String(), @@ -225,7 +292,9 @@ func viewSinkEndpoint(svc sinks.SinkService) endpoint.Endpoint { State: sink.State.String(), Error: sink.Error, Backend: sink.Backend, - Config: omitSecretInformation(sink.Config), + Config: omittedConfig, + ConfigData: omittedConfigData, + Format: sink.Format, TsCreated: sink.Created, } if sink.Description != nil { diff --git a/sinks/api/http/endpoint_test.go b/sinks/api/http/endpoint_test.go index 9b2c11f73..848cd739e 100644 --- a/sinks/api/http/endpoint_test.go +++ b/sinks/api/http/endpoint_test.go @@ -17,6 +17,8 @@ import ( "github.com/opentracing/opentracing-go/mocktracer" 
"github.com/orb-community/orb/pkg/types" "github.com/orb-community/orb/sinks" + "github.com/orb-community/orb/sinks/backend" + prometheusbackend "github.com/orb-community/orb/sinks/backend/prometheus" skmocks "github.com/orb-community/orb/sinks/mocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,8 +36,8 @@ const ( token = "token" invalidToken = "invalid" email = "user@example.com" - validJson = "{\n \"name\": \"my-prom-sink\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" - conflictValidJson = "{\n \"name\": \"conflict\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" + validJson = "{ \"name\": \"my-prom-sink\", \"backend\": \"prometheus\", \"config\": { \"remote_host\": \"https://orb.community/\", \"username\": \"dbuser\", \"password\": \"dbpassword\" }, \"description\": \"An example prometheus sink\", \"tags\": { \"cloud\": \"aws\" }, \"validate_only\": false}" + conflictValidJson = "{\n \"name\": \"conflict\",\n \"backend\": \"prometheus\",\n \"config\": {\n \"remote_host\": \"https://orb.community/\",\n \"username\": \"dbuser\"\n, \"password\": \"dbpass\"\n },\n \"description\": \"An example prometheus sink\",\n \"tags\": {\n \"cloud\": \"aws\"\n },\n \"validate_only\": false\n}" invalidJson = "{" ) @@ -46,7 +48,7 @@ var ( Name: nameID, Description: &description, Backend: "prometheus", - Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser"}, + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "dbuser", "password": "dbpass"}, Tags: map[string]string{"cloud": "aws"}, } invalidName = strings.Repeat("m", maxNameSize+1) @@ -81,16 +83,17 @@ func (tr testRequest) make() (*http.Response, error) { } func newService(tokens map[string]string) sinks.SinkService { + logger := zap.NewNop() auth := skmocks.NewAuthService(tokens) - sinkRepo := skmocks.NewSinkRepository() - var logger *zap.Logger + pwdSvc := sinks.NewPasswordService(logger, "_testing_string_") + sinkRepo := skmocks.NewSinkRepository(pwdSvc) config := mfsdk.Config{ ThingsURL: "localhost", } mfsdk := mfsdk.NewSDK(config) - pwdSvc := sinks.NewPasswordService(logger, "_testing_string_") + return sinks.NewSinkService(logger, auth, sinkRepo, mfsdk, pwdSvc) } @@ -240,7 +243,7 @@ func TestCreateSinks(t *testing.T) { body: strings.NewReader(tc.req), } res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("unexpected erro %s", err)) + assert.Nil(t, err, fmt.Sprintf("unexpected error %s", err)) assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", desc, tc.status, res.StatusCode)) }) } @@ -257,6 +260,7 @@ func TestUpdateSink(t *testing.T) { dataInvalidName := toJSON(updateSinkReq{ Name: invalidName, + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -264,6 +268,7 @@ func TestUpdateSink(t *testing.T) { dataInvalidRgxName := toJSON(updateSinkReq{ Name: "&*sink*&", + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -279,6 +284,7 @@ func TestUpdateSink(t *testing.T) { "update existing sink": { req: toJSON(updateSinkReq{ 
Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -298,6 +304,7 @@ func TestUpdateSink(t *testing.T) { "update sink with a invalid id": { req: toJSON(updateSinkReq{ Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -310,6 +317,7 @@ func TestUpdateSink(t *testing.T) { "update non-existing sink": { req: toJSON(updateSinkReq{ Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -322,6 +330,7 @@ func TestUpdateSink(t *testing.T) { "update sink with invalid user token": { req: toJSON(updateSinkReq{ Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -334,6 +343,7 @@ func TestUpdateSink(t *testing.T) { "update sink with empty user token": { req: toJSON(updateSinkReq{ Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -346,6 +356,7 @@ func TestUpdateSink(t *testing.T) { "update sink with invalid content type": { req: toJSON(updateSinkReq{ Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -358,6 +369,7 @@ func TestUpdateSink(t *testing.T) { "update sink without content type": { req: toJSON(updateSinkReq{ Name: sk.Name.String(), + Backend: "prometheus", Description: sk.Description, Config: sink.Config, Tags: sk.Tags, @@ -767,13 +779,15 @@ func TestViewSink(t *testing.T) { sk, err := service.CreateSink(context.Background(), token, sink) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - + sinkBE := backend.GetBackend("prometheus") + omitedConfig, _ := omitSecretInformation(sinkBE, sk.Format, sk.Config) + require.NoError(t, err, "error during omitting secrets") data := toJSON(sinkRes{ ID: sk.ID, Name: sk.Name.String(), Description: *sk.Description, Backend: sk.Backend, - Config: sk.Config, + Config: omitedConfig, Tags: sk.Tags, State: sk.State.String(), Error: sk.Error, @@ -997,19 +1011,22 @@ func TestValidateSink(t *testing.T) { } func TestOmitPasswords(t *testing.T) { + username := "387157" cases := map[string]struct { + backend backend.Backend inputMetadata types.Metadata expectedMetadata types.Metadata }{ "omit configuration with password": { - inputMetadata: types.Metadata{"user": 387157, "password": "s3cr3tp@ssw0rd", "url": "someUrl"}, - expectedMetadata: types.Metadata{"user": 387157, "password": "", "url": "someUrl"}, + backend: &prometheusbackend.Backend{}, + inputMetadata: types.Metadata{"username": &username, "password": "s3cr3tp@ssw0rd", "remote_host": "someUrl"}, + expectedMetadata: types.Metadata{"username": &username, "password": "", "remote_host": "someUrl"}, }, } for desc, tc := range cases { t.Run(desc, func(t *testing.T) { - metadata := omitSecretInformation(tc.inputMetadata) + metadata, _ := omitSecretInformation(tc.backend, "yaml", tc.inputMetadata) assert.Equal(t, tc.expectedMetadata, metadata) }) } diff --git a/sinks/api/http/logging.go b/sinks/api/http/logging.go index 7c3d1e324..e420ef25e 100644 --- a/sinks/api/http/logging.go +++ b/sinks/api/http/logging.go @@ -173,6 +173,10 @@ func (l loggingMiddleware) ValidateSink(ctx context.Context, token string, s sin return l.svc.ValidateSink(ctx, token, s) } +func (l loggingMiddleware) GetLogger() *zap.Logger { + return l.logger +} + func NewLoggingMiddleware(svc sinks.SinkService, logger *zap.Logger) sinks.SinkService { return 
&loggingMiddleware{logger, svc} } diff --git a/sinks/api/http/metrics.go b/sinks/api/http/metrics.go index 1ee6b1b45..59bb96538 100644 --- a/sinks/api/http/metrics.go +++ b/sinks/api/http/metrics.go @@ -11,6 +11,7 @@ import ( "github.com/orb-community/orb/pkg/errors" "github.com/orb-community/orb/sinks" "github.com/orb-community/orb/sinks/backend" + "go.uber.org/zap" "time" ) @@ -235,6 +236,10 @@ func (m metricsMiddleware) identify(token string) (string, error) { return res.GetId(), nil } +func (m metricsMiddleware) GetLogger() *zap.Logger { + return m.svc.GetLogger() +} + // MetricsMiddleware instruments core service by tracking request count and latency. func MetricsMiddleware(auth mainflux.AuthServiceClient, svc sinks.SinkService, counter metrics.Counter, latency metrics.Histogram) sinks.SinkService { return &metricsMiddleware{ diff --git a/sinks/api/http/openapi.yaml b/sinks/api/http/openapi.yaml index 36ffa2f26..35cd07b64 100644 --- a/sinks/api/http/openapi.yaml +++ b/sinks/api/http/openapi.yaml @@ -167,7 +167,31 @@ paths: description: Database can't process request. '500': $ref: "#/components/responses/ServiceErrorRes" - + /v2/sinks: + parameters: + - $ref: "#/components/parameters/Authorization" + post: + summary: 'Create a new Sink with YAML support' + operationId: createSinkExperimental + tags: + - sink + requestBody: + $ref: "#/components/requestBodies/SinkCreateReqV2" + responses: + '201': + $ref: "#/components/responses/SinkObjV2Res" + '400': + description: Failed due to malformed JSON. + '401': + description: Missing or invalid access token provided. + '409': + description: Entity already exist. + '415': + description: Missing or invalid content type. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceErrorRes" components: securitySchemes: bearerAuth: @@ -181,7 +205,16 @@ components: content: application/json: schema: - $ref: "#/components/schemas/SinkCreateReqSchema" + oneOf: + - $ref: "#/components/schemas/SinkCreateReqSchema" + - $ref: "#/components/schemas/SinkCreateReqV2Schema" + SinkCreateReqV2: + description: JSON-formatted document describing the new Sink configuration + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SinkCreateReqV2Schema" SinkUpdateReq: description: JSON-formatted document describing the updated Sink configuration required: true @@ -277,6 +310,12 @@ components: application/json: schema: $ref: "#/components/schemas/SinksObjSchema" + SinkObjV2Res: + description: Sink object + content: + application/json: + schema: + $ref: "#/components/schemas/SinksObjSchemaV2" SinksPageRes: description: Data retrieved. content: @@ -350,6 +389,39 @@ components: remote_host: my.prometheus-host.com username: dbuser description: Object representing backend specific configuration information + SinkCreateReqV2Schema: + type: object + required: + - name + - backend + - config + properties: + name: + type: string + description: A unique name label + example: my-prom-sink + description: + type: string + description: User description of this Sink + example: An example prometheus sink + tags: + type: object + description: User defined key/values for organization and searching + example: + cloud: aws + backend: + type: string + example: prometheus + description: The sink backend to use. Must match a backend from /features/sinks. Cannot change once created. 
+ format: + type: string + enum: + - yaml + - json + description: Format that the config will be passed on + config_data: + type: string + description: Configuration as String aligned with given format SinkPageSchema: type: object properties: @@ -443,6 +515,55 @@ components: type: string format: date-time description: Timestamp of creation + SinksObjSchemaV2: + type: object + required: + - id + properties: + id: + type: string + format: uuid + description: Unique identifier (UUID) + name: + type: string + description: A unique name label + example: my-prom-sink + description: + type: string + description: User description of this Sink + example: An example prometheus sink + tags: + type: object + description: User defined key/values for organization and searching + example: + cloud: aws + state: + readOnly: true + enum: + - active + - error + type: string + description: State of this Sink connection to the backend + error: + readOnly: true + type: string + description: Error message from Sink backend connection if the Sink is in error state + backend: + type: string + readOnly: true + example: prometheus + description: The sink backend to use. Must match a backend from /features/sinks. Cannot change once created. + config: + type: string + example: + remote_host: my.prometheus-host.com + username: dbuser + opentelemetry: enabled + description: YAML representing backend specific configuration information + ts_created: + type: string + format: date-time + description: Timestamp of creation SinkBackendResSchema: type: object properties: diff --git a/sinks/api/http/requests.go b/sinks/api/http/requests.go index 76b74c4c4..f9f5bfda0 100644 --- a/sinks/api/http/requests.go +++ b/sinks/api/http/requests.go @@ -12,6 +12,7 @@ import ( "github.com/orb-community/orb/pkg/errors" "github.com/orb-community/orb/pkg/types" "github.com/orb-community/orb/sinks" + "github.com/orb-community/orb/sinks/backend" ) const ( @@ -27,42 +28,49 @@ type addReq struct { Name string `json:"name,omitempty"` Backend string `json:"backend,omitempty"` Config types.Metadata `json:"config,omitempty"` + Format string `json:"format,omitempty"` + ConfigData string `json:"config_data,omitempty"` Description string `json:"description,omitempty"` Tags types.Tags `json:"tags,omitempty"` token string } -func (req addReq) validate() error { +func (req addReq) validate() (err error) { if req.token == "" { return errors.ErrUnauthorizedAccess } - keySize := 0 - if req.Config == nil { - return errors.ErrMalformedEntity - } else if !req.Config.IsApplicable(func(key string, value interface{}) bool { - if key != "" { - keySize++ - } - //currently, with only prometheus, 2 keys is enough, maybe change latter - if keySize >= 2 { - //minimal number of keys passed, valid config - return true + if req.Backend == "" || !backend.HaveBackend(req.Backend) { + return errors.Wrap(errors.ErrMalformedEntity, errors.New("backend not found")) + } + + reqBackend := backend.GetBackend(req.Backend) + if req.ConfigData == "" && req.Config == nil { + return errors.Wrap(errors.ErrMalformedEntity, errors.New("config not found")) + } + + var config types.Metadata + if req.Format != "" { + config, err = reqBackend.ParseConfig(req.Format, req.ConfigData) + if err != nil { + return errors.Wrap(errors.ErrMalformedEntity, errors.New("invalid config")) } - //still not get enough keys to create sink, check if there are more keys on map - return false - }) { - //not get enough keys to create sink, invalid config - return errors.ErrMalformedEntity + } else { + config = 
req.Config + } + + err = reqBackend.ValidateConfiguration(config) + if err != nil { + return errors.Wrap(errors.ErrMalformedEntity, errors.New("invalid config")) } if req.Name == "" { - return errors.ErrMalformedEntity + return errors.Wrap(errors.ErrMalformedEntity, errors.New("name not found")) } - _, err := types.NewIdentifier(req.Name) + _, err = types.NewIdentifier(req.Name) if err != nil { - return errors.Wrap(errors.ErrMalformedEntity, err) + return errors.Wrap(errors.ErrMalformedEntity, errors.New("identifier duplicated")) } return nil @@ -71,13 +79,16 @@ func (req addReq) validate() error { type updateSinkReq struct { Name string `json:"name,omitempty"` Config types.Metadata `json:"config,omitempty"` + Backend string `json:"backend,omitempty"` + Format string `json:"format,omitempty"` + ConfigData string `json:"config_data,omitempty"` Description *string `json:"description,omitempty"` Tags types.Tags `json:"tags,omitempty"` id string token string } -func (req updateSinkReq) validate() error { +func (req updateSinkReq) validate(sinkBackend backend.Backend) error { if req.token == "" { return errors.ErrUnauthorizedAccess } @@ -86,7 +97,24 @@ func (req updateSinkReq) validate() error { return errors.ErrMalformedEntity } - if req.Description == nil && req.Name == "" && len(req.Config) == 0 && req.Tags == nil { + if req.ConfigData != "" || req.Config != nil { + var config types.Metadata + var err error + if req.Format != "" { + config, err = sinkBackend.ParseConfig(req.Format, req.ConfigData) + if err != nil { + return errors.Wrap(errors.ErrMalformedEntity, err) + } + } else { + config = req.Config + } + err = sinkBackend.ValidateConfiguration(config) + if err != nil { + return errors.Wrap(errors.ErrMalformedEntity, err) + } + } + + if req.Description == nil && req.Name == "" && req.ConfigData == "" && len(req.Config) == 0 && req.Tags == nil { return errors.ErrMalformedEntity } diff --git a/sinks/api/http/requests_test.go b/sinks/api/http/requests_test.go new file mode 100644 index 000000000..9c6c82780 --- /dev/null +++ b/sinks/api/http/requests_test.go @@ -0,0 +1,134 @@ +package http + +import ( + "fmt" + "github.com/orb-community/orb/pkg/types" + "github.com/orb-community/orb/sinks/backend/prometheus" + "github.com/stretchr/testify/assert" + "testing" +) + +func Test_updateSinkReq_validate(t *testing.T) { + promBe := prometheus.Backend{} + aDescription := "a description worth reading" + type fields struct { + Name string + Config types.Metadata + Backend string + Format string + ConfigData string + Description *string + Tags types.Tags + id string + token string + } + tests := []struct { + name string + fields fields + wantErr assert.ErrorAssertionFunc + }{ + { + name: "full update no yaml", + fields: fields{ + Name: "new-name", + Config: map[string]interface{}{"username": "wile.e.coyote", "password": "C@rnivurousVulgar1s", "remote_host": "https://acme.com/prom/push"}, + Backend: "prometheus", + Description: &aDescription, + Tags: map[string]string{"cloud": "aws", "region": "us-east-1"}, + id: "1122", + token: "valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + { + name: "full update yaml", + fields: fields{ + Name: "new-name", + Backend: "prometheus", + Format: "yaml", + ConfigData: "remote_host: https://acme.com/prom/push\nusername: wile.e.coyote\npassword: \"@DesertL00kingForMeal\"", + Description: &aDescription, + Tags: map[string]string{"cloud": "aws", "region": "us-east-1"}, + id: "1122", + token: 
"valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + { + name: "only name update", + fields: fields{ + Name: "new-name", + id: "1122", + token: "valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + { + name: "description update", + fields: fields{ + Description: &aDescription, + id: "1122", + token: "valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + { + name: "json config update", + fields: fields{ + Config: map[string]interface{}{"username": "wile.e.coyote", "password": "C@rnivurousVulgar1s", "remote_host": "https://acme.com/prom/push"}, + id: "1122", + token: "valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + { + name: "yaml config update", + fields: fields{ + Format: "yaml", + ConfigData: "remote_host: https://acme.com/prom/push\nusername: wile.e.coyote\npassword: \"@DesertL00kingForMeal\"", + id: "1122", + token: "valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + { + name: "tags update", + fields: fields{ + Tags: map[string]string{"cloud": "aws", "region": "us-east-1"}, + id: "1122", + token: "valid-token", + }, + wantErr: func(t assert.TestingT, err error, i ...interface{}) bool { + return err != nil + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := updateSinkReq{ + Name: tt.fields.Name, + Config: tt.fields.Config, + Backend: tt.fields.Backend, + Format: tt.fields.Format, + ConfigData: tt.fields.ConfigData, + Description: tt.fields.Description, + Tags: tt.fields.Tags, + id: tt.fields.id, + token: tt.fields.token, + } + tt.wantErr(t, req.validate(&promBe), fmt.Sprintf("validate(%v)", promBe)) + }) + } +} diff --git a/sinks/api/http/responses.go b/sinks/api/http/responses.go index bb6361ae5..627f020ab 100644 --- a/sinks/api/http/responses.go +++ b/sinks/api/http/responses.go @@ -19,6 +19,8 @@ type sinkRes struct { Error string `json:"error,omitempty"` Backend string `json:"backend,omitempty"` Config types.Metadata `json:"config,omitempty"` + Format string `json:"format,omitempty"` + ConfigData string `json:"config_data,omitempty"` TsCreated time.Time `json:"ts_created,omitempty"` created bool } diff --git a/sinks/api/http/transport.go b/sinks/api/http/transport.go index e13e79279..3a364926c 100644 --- a/sinks/api/http/transport.go +++ b/sinks/api/http/transport.go @@ -40,7 +40,6 @@ func MakeHandler(tracer opentracing.Tracer, svcName string, svc sinks.SinkServic kithttp.ServerErrorEncoder(encodeError), } r := bone.New() - r.Post("/sinks", kithttp.NewServer( kitot.TraceServer(tracer, "create_sink")(addEndpoint(svc)), decodeAddRequest, diff --git a/sinks/backend/backend.go b/sinks/backend/backend.go index 9a9ba03e3..0683fe8f1 100644 --- a/sinks/backend/backend.go +++ b/sinks/backend/backend.go @@ -4,9 +4,14 @@ package backend +import "github.com/orb-community/orb/pkg/types" + type Backend interface { Metadata() interface{} CreateFeatureConfig() []ConfigFeature + ValidateConfiguration(config types.Metadata) error + ParseConfig(format string, config string) (types.Metadata, error) + ConfigToFormat(format string, metadata types.Metadata) (string, error) } const ConfigFeatureTypePassword = "password" diff --git a/sinks/backend/prometheus/configuration.go b/sinks/backend/prometheus/configuration.go new file 
mode 100644 index 000000000..96c58832b --- /dev/null +++ b/sinks/backend/prometheus/configuration.go @@ -0,0 +1,108 @@ +package prometheus + +import ( + "github.com/orb-community/orb/pkg/errors" + "github.com/orb-community/orb/pkg/types" + "github.com/orb-community/orb/sinks/backend" + "golang.org/x/exp/maps" + "gopkg.in/yaml.v3" + "net/url" +) + +func (p *Backend) ConfigToFormat(format string, metadata types.Metadata) (string, error) { + if format == "yaml" { + username := metadata[UsernameConfigFeature].(*string) + password := metadata[PasswordConfigFeature].(string) + parseUtil := configParseUtility{ + RemoteHost: metadata[RemoteHostURLConfigFeature].(string), + Username: username, + Password: &password, + } + config, err := yaml.Marshal(parseUtil) + if err != nil { + return "", err + } + return string(config), nil + } else { + return "", errors.New("unsupported format") + } +} + +func (p *Backend) ParseConfig(format string, config string) (configReturn types.Metadata, err error) { + if format == "yaml" { + configAsByte := []byte(config) + // Parse the YAML data into a Config struct + var configUtil configParseUtility + err = yaml.Unmarshal(configAsByte, &configUtil) + if err != nil { + return nil, errors.Wrap(errors.New("failed to parse config YAML"), err) + } + configReturn = make(types.Metadata) + // Check for Token Auth + configReturn[RemoteHostURLConfigFeature] = configUtil.RemoteHost + configReturn[UsernameConfigFeature] = configUtil.Username + configReturn[PasswordConfigFeature] = configUtil.Password + return + } else { + return nil, errors.New("unsupported format") + } +} + +func (p *Backend) ValidateConfiguration(config types.Metadata) error { + authType := BasicAuth + for _, key := range maps.Keys(config) { + if key == ApiTokenConfigFeature { + authType = TokenAuth + break + } + } + switch authType { + case BasicAuth: + _, userOk := config[UsernameConfigFeature] + _, passwordOk := config[PasswordConfigFeature] + if !userOk || !passwordOk { + return errors.New("basic authentication, must provide username and password fields") + } + case TokenAuth: + return errors.New("not implemented yet") + } + remoteUrl, remoteHostOk := config[RemoteHostURLConfigFeature] + if !remoteHostOk { + return errors.New("must send valid URL for Remote Write") + } + // Validate remote_host + _, err := url.ParseRequestURI(remoteUrl.(string)) + if err != nil { + return errors.New("must send valid URL for Remote Write") + } + return nil +} + +func (p *Backend) CreateFeatureConfig() []backend.ConfigFeature { + var configs []backend.ConfigFeature + + remoteHost := backend.ConfigFeature{ + Type: backend.ConfigFeatureTypeText, + Input: "text", + Title: "Remote Write URL", + Name: RemoteHostURLConfigFeature, + Required: true, + } + + userName := backend.ConfigFeature{ + Type: backend.ConfigFeatureTypeText, + Input: "text", + Title: "Username", + Name: UsernameConfigFeature, + Required: true, + } + password := backend.ConfigFeature{ + Type: backend.ConfigFeatureTypePassword, + Input: "text", + Title: "Password", + Name: PasswordConfigFeature, + Required: true, + } + configs = append(configs, remoteHost, userName, password) + return configs +} diff --git a/sinks/backend/prometheus/configuration_test.go b/sinks/backend/prometheus/configuration_test.go new file mode 100644 index 000000000..562c8eae9 --- /dev/null +++ b/sinks/backend/prometheus/configuration_test.go @@ -0,0 +1,150 @@ +package prometheus + +import ( + "github.com/orb-community/orb/pkg/types" + "reflect" + "testing" +) + +var ( + validConfiguration = 
map[string]interface{}{RemoteHostURLConfigFeature: "https://acme.com/prom/push", UsernameConfigFeature: "wile.e.coyote", PasswordConfigFeature: "@secr3t-passw0rd"} + validYaml = "remote_host: https://acme.com/prom/push\nusername: wile.e.coyote\npassword: \"@secr3t-passw0rd\"" +) + +func TestBackend_ValidateConfiguration(t *testing.T) { + type args struct { + config types.Metadata + } + + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "valid configuration", + args: args{ + config: validConfiguration, + }, + wantErr: false, + }, + { + name: "invalid host configuration", + args: args{ + config: map[string]interface{}{RemoteHostURLConfigFeature: "acme.com/prom/push", UsernameConfigFeature: "wile.e.coyote", PasswordConfigFeature: "@secr3t-passw0rd"}, + }, + wantErr: true, + }, + { + name: "missing host configuration", + args: args{ + config: map[string]interface{}{UsernameConfigFeature: "wile.e.coyote", PasswordConfigFeature: "@secr3t-passw0rd"}, + }, + wantErr: true, + }, + { + name: "missing username configuration", + args: args{ + config: map[string]interface{}{RemoteHostURLConfigFeature: "acme.com/prom/push", PasswordConfigFeature: "@secr3t-passw0rd"}, + }, + wantErr: true, + }, + { + name: "missing password configuration", + args: args{ + config: map[string]interface{}{RemoteHostURLConfigFeature: "acme.com/prom/push", UsernameConfigFeature: "wile.e.coyote"}, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Backend{} + if err := p.ValidateConfiguration(tt.args.config); (err != nil) != tt.wantErr { + t.Errorf("ValidateConfiguration() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestBackend_ParseConfig(t *testing.T) { + type args struct { + format string + config string + } + pass := "@secr3t-passw0rd" + user := "wile.e.coyote" + tests := []struct { + name string + args args + wantConfigReturn types.Metadata + wantErr bool + }{ + { + name: "valid parse", + args: args{ + format: "yaml", + config: validYaml, + }, + wantConfigReturn: map[string]interface{}{RemoteHostURLConfigFeature: "https://acme.com/prom/push", UsernameConfigFeature: &user, PasswordConfigFeature: &pass}, + wantErr: false, + }, + { + name: "invalid parse", + args: args{ + format: "yaml", + config: "remote_host: https://acme.com/prom/push\nusername: wile.e.coyote\npassword \"@secr3t-passw0rd\"", + }, + wantConfigReturn: map[string]interface{}{RemoteHostURLConfigFeature: "https://acme.com/prom/push", UsernameConfigFeature: &user, PasswordConfigFeature: &pass}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Backend{} + gotConfigReturn, err := p.ParseConfig(tt.args.format, tt.args.config) + if (err != nil) != tt.wantErr { + t.Errorf("ParseConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && !reflect.DeepEqual(gotConfigReturn, tt.wantConfigReturn) { + t.Errorf("ParseConfig() gotConfigReturn = %v, want %v", gotConfigReturn, tt.wantConfigReturn) + } + }) + } +} + +func TestBackend_CreateFeatureConfig(t *testing.T) { + tests := []struct { + name string + }{ + {name: "valid"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Backend{} + got := p.CreateFeatureConfig() + usernameOk := false + passwordOk := false + remoteHostOk := false + for _, feature := range got { + if feature.Name == UsernameConfigFeature { + usernameOk = true + continue + } + if feature.Name == PasswordConfigFeature { + passwordOk = true + 
continue + } + if feature.Name == RemoteHostURLConfigFeature { + remoteHostOk = true + } + } + if usernameOk && passwordOk && remoteHostOk { + return + } else { + t.Fail() + } + }) + } +} diff --git a/sinks/backend/prometheus/prometheus.go b/sinks/backend/prometheus/prometheus.go deleted file mode 100644 index 8c1ea7239..000000000 --- a/sinks/backend/prometheus/prometheus.go +++ /dev/null @@ -1,71 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -package prometheus - -import ( - "github.com/orb-community/orb/sinks/backend" - "io" -) - -var _ backend.Backend = (*prometheusBackend)(nil) - -type prometheusBackend struct { - apiHost string - apiPort uint64 - apiUser string - apiPassword string -} - -type SinkFeature struct { - Backend string `json:"backend"` - Description string `json:"description"` - Config []backend.ConfigFeature `json:"config"` -} - -func (p *prometheusBackend) Metadata() interface{} { - return SinkFeature{ - Backend: "prometheus", - Description: "Prometheus time series database sink", - Config: p.CreateFeatureConfig(), - } -} - -func (p *prometheusBackend) request(url string, payload interface{}, method string, body io.Reader, contentType string) error { - return nil -} - -func Register() bool { - backend.Register("prometheus", &prometheusBackend{}) - - return true -} - -func (p *prometheusBackend) CreateFeatureConfig() []backend.ConfigFeature { - var configs []backend.ConfigFeature - - remoteHost := backend.ConfigFeature{ - Type: backend.ConfigFeatureTypeText, - Input: "text", - Title: "Remote Write URL", - Name: "remote_host", - Required: true, - } - userName := backend.ConfigFeature{ - Type: backend.ConfigFeatureTypeText, - Input: "text", - Title: "Username", - Name: "username", - Required: true, - } - password := backend.ConfigFeature{ - Type: backend.ConfigFeatureTypePassword, - Input: "text", - Title: "Password", - Name: "password", - Required: true, - } - configs = append(configs, remoteHost, userName, password) - return configs -} diff --git a/sinks/backend/prometheus/type.go b/sinks/backend/prometheus/type.go new file mode 100644 index 000000000..96f837e2e --- /dev/null +++ b/sinks/backend/prometheus/type.go @@ -0,0 +1,60 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +package prometheus + +import ( + "github.com/orb-community/orb/sinks/backend" +) + +var _ backend.Backend = (*Backend)(nil) + +const ( + RemoteHostURLConfigFeature = "remote_host" + UsernameConfigFeature = "username" + PasswordConfigFeature = "password" + ApiTokenConfigFeature = "api_token" +) + +//type PrometheusConfigMetadata = types.Metadata + +type AuthType int + +const ( + BasicAuth AuthType = iota + TokenAuth +) + +type Backend struct { + apiHost string + apiPort uint64 + apiUser string + apiPassword string +} + +type configParseUtility struct { + RemoteHost string `yaml:"remote_host"` + Username *string `yaml:"username,omitempty"` + Password *string `yaml:"password,omitempty"` + APIToken *string `yaml:"api_token,omitempty"` +} + +type SinkFeature struct { + Backend string `json:"backend"` + Description string `json:"description"` + Config []backend.ConfigFeature `json:"config"` +} + +func (p *Backend) Metadata() interface{} { + return SinkFeature{ + Backend: "prometheus", + Description: "Prometheus time series database sink", + Config: p.CreateFeatureConfig(), + } +} + +func Register() bool { + backend.Register("prometheus", &Backend{}) + return true +} diff --git a/sinks/mocks/sinks.go b/sinks/mocks/sinks.go index 571c30e08..0f195ef65 100644 --- a/sinks/mocks/sinks.go +++ b/sinks/mocks/sinks.go @@ -21,9 +21,10 @@ var _ sinks.SinkRepository = (*sinkRepositoryMock)(nil) // Mock Repository type sinkRepositoryMock struct { - mu sync.Mutex - counter uint64 - sinksMock map[string]sinks.Sink + mu sync.Mutex + counter uint64 + sinksMock map[string]sinks.Sink + passwordService sinks.PasswordService } func (s *sinkRepositoryMock) SearchAllSinks(ctx context.Context, filter sinks.Filter) ([]sinks.Sink, error) { @@ -49,9 +50,10 @@ func (s *sinkRepositoryMock) RetrieveByOwnerAndId(ctx context.Context, ownerID s return sinks.Sink{}, sinks.ErrNotFound } -func NewSinkRepository() sinks.SinkRepository { +func NewSinkRepository(service sinks.PasswordService) sinks.SinkRepository { return &sinkRepositoryMock{ - sinksMock: make(map[string]sinks.Sink), + sinksMock: make(map[string]sinks.Sink), + passwordService: service, } } @@ -69,7 +71,6 @@ func (s *sinkRepositoryMock) Save(ctx context.Context, sink sinks.Sink) (string, ID, _ := uuid.NewV4() sink.ID = ID.String() s.sinksMock[sink.ID] = sink - return sink.ID, nil } diff --git a/sinks/postgres/database.go b/sinks/postgres/database.go index 31db91055..86b793a10 100644 --- a/sinks/postgres/database.go +++ b/sinks/postgres/database.go @@ -21,7 +21,7 @@ type database struct { db *sqlx.DB } -// Provides a database interface +// Database Provides a database interface type Database interface { NamedExecContext(context.Context, string, interface{}) (sql.Result, error) QueryRowxContext(context.Context, string, ...interface{}) *sqlx.Row diff --git a/sinks/postgres/init.go b/sinks/postgres/init.go index 86e41cd93..1b987b21d 100644 --- a/sinks/postgres/init.go +++ b/sinks/postgres/init.go @@ -64,6 +64,17 @@ func migrateDB(db *sqlx.DB) error { "DROP TABLE sinks", }, }, + { + Id: "sinks_2", + Up: []string{ + `ALTER TABLE sinks ADD COLUMN format TEXT ;`, + `ALTER TABLE sinks ADD COLUMN config_data TEXT ;`, + }, + Down: []string{ + `ALTER TABLE sinks DROP COLUMN format;`, + `ALTER TABLE sinks DROP COLUMN config_data;`, + }, + }, }, } diff --git a/sinks/postgres/sinks.go b/sinks/postgres/sinks.go index e25dfeb88..67ed295b9 100644 --- a/sinks/postgres/sinks.go +++ b/sinks/postgres/sinks.go @@ -80,8 +80,8 @@ func (s sinksRepository) SearchAllSinks(ctx 
context.Context, filter sinks.Filter } func (s sinksRepository) Save(ctx context.Context, sink sinks.Sink) (string, error) { - q := `INSERT INTO sinks (name, mf_owner_id, metadata, description, backend, tags, state, error) - VALUES (:name, :mf_owner_id, :metadata, :description, :backend, :tags, :state, :error) RETURNING id` + q := `INSERT INTO sinks (name, mf_owner_id, metadata, config_data, format, description, backend, tags, state, error) + VALUES (:name, :mf_owner_id, :metadata, :config_data, :format, :description, :backend, :tags, :state, :error) RETURNING id` if !sink.Name.IsValid() || sink.MFOwnerID == "" { return "", errors.ErrMalformedEntity @@ -117,7 +117,15 @@ func (s sinksRepository) Save(ctx context.Context, sink sinks.Sink) (string, err } func (s sinksRepository) Update(ctx context.Context, sink sinks.Sink) error { - q := `UPDATE sinks SET description = :description, tags = :tags, metadata = :metadata, name = :name WHERE mf_owner_id = :mf_owner_id AND id = :id;` + q := `UPDATE sinks + SET description = :description, + tags = :tags, + metadata = :metadata, + config_data = :config_data, + format = :format, + name = :name + WHERE mf_owner_id = :mf_owner_id + AND id = :id;` sinkDB, err := toDBSink(sink) if err != nil { @@ -163,7 +171,10 @@ func (s sinksRepository) RetrieveAllByOwnerID(ctx context.Context, owner string, } q := fmt.Sprintf(`SELECT id, name, mf_owner_id, description, tags, state, coalesce(error, '') as error, backend, metadata, ts_created - FROM sinks WHERE mf_owner_id = :mf_owner_id %s%s%s ORDER BY %s %s LIMIT :limit OFFSET :offset;`, tagsQuery, metadataQuery, nameQuery, orderQuery, dirQuery) + FROM sinks + WHERE mf_owner_id = :mf_owner_id %s%s%s + ORDER BY %s %s LIMIT :limit OFFSET :offset;`, + tagsQuery, metadataQuery, nameQuery, orderQuery, dirQuery) params := map[string]interface{}{ "mf_owner_id": owner, "limit": pm.Limit, @@ -180,12 +191,12 @@ func (s sinksRepository) RetrieveAllByOwnerID(ctx context.Context, owner string, var items []sinks.Sink for rows.Next() { - dbSink := dbSink{MFOwnerID: owner} - if err := rows.StructScan(&dbSink); err != nil { + d := dbSink{MFOwnerID: owner} + if err := rows.StructScan(&d); err != nil { return sinks.Page{}, errors.Wrap(errors.ErrSelectEntity, err) } - sink, err := toSink(dbSink) + sink, err := toSink(d) if err != nil { return sinks.Page{}, errors.Wrap(errors.ErrSelectEntity, err) } @@ -216,7 +227,7 @@ func (s sinksRepository) RetrieveAllByOwnerID(ctx context.Context, owner string, func (s sinksRepository) RetrieveById(ctx context.Context, id string) (sinks.Sink, error) { - q := `SELECT id, name, mf_owner_id, description, tags, backend, metadata, ts_created, state, coalesce(error, '') as error + q := `SELECT id, name, mf_owner_id, description, tags, backend, metadata, format, config_data, ts_created, state, coalesce(error, '') as error FROM sinks where id = $1` dba := dbSink{} @@ -234,7 +245,7 @@ func (s sinksRepository) RetrieveById(ctx context.Context, id string) (sinks.Sin func (s sinksRepository) RetrieveByOwnerAndId(ctx context.Context, ownerID string, id string) (sinks.Sink, error) { - q := `SELECT id, name, mf_owner_id, description, tags, backend, metadata, ts_created, state, coalesce(error, '') as error + q := `SELECT id, name, mf_owner_id, description, tags, backend, metadata, format, config_data, ts_created, state, coalesce(error, '') as error FROM sinks where id = $1 and mf_owner_id = $2` if ownerID == "" || id == "" { @@ -300,6 +311,8 @@ type dbSink struct { Name types.Identifier `db:"name"` MFOwnerID string 
`db:"mf_owner_id"` Metadata db.Metadata `db:"metadata"` + ConfigData *string `db:"config_data"` + Format *string `db:"format"` Backend string `db:"backend"` Description string `db:"description"` Created time.Time `db:"ts_created"` @@ -328,16 +341,27 @@ func toDBSink(sink sinks.Sink) (dbSink, error) { Name: sink.Name, MFOwnerID: uID.String(), Metadata: db.Metadata(sink.Config), + ConfigData: &sink.ConfigData, + Format: &sink.Format, Backend: sink.Backend, Description: description, + Created: sink.Created, + Tags: db.Tags(sink.Tags), State: sink.State, Error: sink.Error, - Tags: db.Tags(sink.Tags), }, nil } func toSink(dba dbSink) (sinks.Sink, error) { + configData := "" + format := "" + if dba.ConfigData != nil { + configData = *dba.ConfigData + } + if dba.Format != nil { + format = *dba.Format + } sink := sinks.Sink{ ID: dba.ID, Name: dba.Name, @@ -347,6 +371,8 @@ func toSink(dba dbSink) (sinks.Sink, error) { State: dba.State, Error: dba.Error, Config: types.Metadata(dba.Metadata), + ConfigData: configData, + Format: format, Created: dba.Created, Tags: types.Tags(dba.Tags), } diff --git a/sinks/redis/producer/streams.go b/sinks/redis/producer/streams.go index e24317efc..49c7f2aa2 100644 --- a/sinks/redis/producer/streams.go +++ b/sinks/redis/producer/streams.go @@ -59,6 +59,7 @@ func (es eventStore) CreateSink(ctx context.Context, token string, s sinks.Sink) record := &redis.XAddArgs{ Stream: streamID, MaxLen: streamLen, + Approx: true, Values: encode, } @@ -88,6 +89,7 @@ func (es eventStore) UpdateSink(ctx context.Context, token string, s sinks.Sink) record := &redis.XAddArgs{ Stream: streamID, MaxLen: streamLen, + Approx: true, Values: encode, } @@ -115,6 +117,10 @@ func (es eventStore) ViewSink(ctx context.Context, token string, key string) (_ return es.svc.ViewSink(ctx, token, key) } +func (es eventStore) GetLogger() *zap.Logger { + return es.logger +} + func (es eventStore) DeleteSink(ctx context.Context, token, id string) (err error) { sink, err := es.svc.ViewSink(ctx, token, id) if err != nil { @@ -138,6 +144,7 @@ func (es eventStore) DeleteSink(ctx context.Context, token, id string) (err erro record := &redis.XAddArgs{ Stream: streamID, MaxLen: streamLen, + Approx: true, Values: encode, } diff --git a/sinks/service.go b/sinks/service.go index 6c0c0b9c8..4ceef585d 100644 --- a/sinks/service.go +++ b/sinks/service.go @@ -56,6 +56,10 @@ func (svc sinkService) identify(token string) (string, error) { return res.GetId(), nil } +func (svc sinkService) GetLogger() *zap.Logger { + return svc.logger +} + func NewSinkService(logger *zap.Logger, auth mainflux.AuthServiceClient, sinkRepo SinkRepository, mfsdk mfsdk.SDK, services PasswordService) SinkService { prometheus.Register() diff --git a/sinks/sinks.go b/sinks/sinks.go index 196877a16..ca41ec72e 100644 --- a/sinks/sinks.go +++ b/sinks/sinks.go @@ -10,6 +10,7 @@ import ( "github.com/orb-community/orb/pkg/errors" "github.com/orb-community/orb/pkg/types" "github.com/orb-community/orb/sinks/backend" + "go.uber.org/zap" "time" ) @@ -86,7 +87,7 @@ func (s *State) Scan(value interface{}) error { } asString = string(asBytes) } - *s = stateRevMap[string(asString)] + *s = stateRevMap[asString] return nil } func (s State) Value() (driver.Value, error) { return s.String(), nil } @@ -98,6 +99,8 @@ type Sink struct { Description *string Backend string Config types.Metadata + Format string + ConfigData string Tags types.Tags State State Error string @@ -135,6 +138,8 @@ type SinkService interface { ValidateSink(ctx context.Context, token string, sink 
Sink) (Sink, error) // ChangeSinkStateInternal change the sink internal state from new/idle/active ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state State) error + // GetLogger gets service logger to log within gokit's packages + GetLogger() *zap.Logger } type SinkRepository interface { diff --git a/sinks/sinks_service.go b/sinks/sinks_service.go index cb6c6b638..3ec0072fe 100644 --- a/sinks/sinks_service.go +++ b/sinks/sinks_service.go @@ -13,6 +13,7 @@ import ( "github.com/orb-community/orb/pkg/errors" "github.com/orb-community/orb/pkg/types" "github.com/orb-community/orb/sinks/backend" + "go.uber.org/zap" "net/url" ) @@ -71,13 +72,32 @@ func (svc sinkService) encryptMetadata(sink Sink) (Sink, error) { sink.Config.FilterMap(func(key string) bool { return key == backend.ConfigFeatureTypePassword }, func(key string, value interface{}) (string, interface{}) { - newValue, err2 := svc.passwordService.EncodePassword(value.(string)) + var stringVal string + switch v := value.(type) { + case *string: + stringVal = *v + case string: + stringVal = v + } + newValue, err2 := svc.passwordService.EncodePassword(stringVal) if err2 != nil { err = err2 return key, value } return key, newValue }) + if sink.ConfigData != "" { + sinkBE := backend.GetBackend(sink.Backend) + if sinkBE == nil { + return sink, errors.New("backend cannot be nil") + } + sink.ConfigData, err = sinkBE.ConfigToFormat(sink.Format, sink.Config) + if err != nil { + svc.logger.Error("error on parsing encrypted config in data") + return sink, err + } + } + return sink, err } @@ -93,6 +113,17 @@ func (svc sinkService) decryptMetadata(sink Sink) (Sink, error) { } return key, newValue }) + if sink.ConfigData != "" { + sinkBE := backend.GetBackend(sink.Backend) + if sinkBE == nil { + return sink, errors.New("backend cannot be nil") + } + sink.ConfigData, err = sinkBE.ConfigToFormat(sink.Format, sink.Config) + if err != nil { + svc.logger.Error("error on parsing encrypted config in data") + return sink, err + } + } return sink, err } @@ -108,17 +139,33 @@ func (svc sinkService) UpdateSink(ctx context.Context, token string, sink Sink) return Sink{}, err } - if sink.Config == nil { + if sink.Config == nil && sink.ConfigData == "" { + // No config sent sink.Config = currentSink.Config - } else { - // Validate remote_host - _, err := url.ParseRequestURI(sink.Config["remote_host"].(string)) + // get the decrypted config, otherwise the password would be encrypted again + sink, err = svc.decryptMetadata(sink) if err != nil { - return Sink{}, errors.Wrap(ErrUpdateEntity, err) + return Sink{}, err } - // This will keep the previous tags - currentSink.Config.Merge(sink.Config) - sink.Config = currentSink.Config + } else { + if sink.ConfigData != "" { + sinkBE := backend.GetBackend(currentSink.Backend) + if sinkBE == nil { + return sink, errors.New("backend cannot be nil") + } + sink.Config, err = sinkBE.ParseConfig(sink.Format, sink.ConfigData) + if err != nil { + return Sink{}, err + } + if err := sinkBE.ValidateConfiguration(sink.Config); err != nil { + return Sink{}, err + } + } + //// add default values + defaultMetadata := make(types.Metadata, 1) + defaultMetadata["opentelemetry"] = "enabled" + sink.Config.Merge(defaultMetadata) + currentSink.Error = "" } if sink.Tags == nil { @@ -133,10 +180,10 @@ func (svc sinkService) UpdateSink(ctx context.Context, token string, sink Sink) sink.Name = currentSink.Name } - if sink.Backend != "" || sink.Error != "" { - return Sink{}, errors.ErrUpdateEntity - } 
sink.MFOwnerID = skOwnerID + if sink.Backend == "" && currentSink.Backend != "" { + sink.Backend = currentSink.Backend + } sink, err = svc.encryptMetadata(sink) if err != nil { return Sink{}, errors.Wrap(ErrUpdateEntity, err) @@ -219,6 +266,7 @@ func (svc sinkService) ListSinksInternal(ctx context.Context, filter Filter) (si func (svc sinkService) ListSinks(ctx context.Context, token string, pm PageMetadata) (Page, error) { res, err := svc.identify(token) if err != nil { + svc.GetLogger().Error("got error on identifying token", zap.Error(err)) return Page{}, err } diff --git a/sinks/sinks_service_test.go b/sinks/sinks_service_test.go index 7b6de807d..0654de8b6 100644 --- a/sinks/sinks_service_test.go +++ b/sinks/sinks_service_test.go @@ -48,17 +48,17 @@ var ( ) func newService(tokens map[string]string) sinks.SinkService { + logger := zap.NewNop() auth := thmocks.NewAuthService(tokens, make(map[string][]thmocks.MockSubjectSet)) - sinkRepo := skmocks.NewSinkRepository() - var logger *zap.Logger + pwdSvc := sinks.NewPasswordService(logger, "_testing_string_") + sinkRepo := skmocks.NewSinkRepository(pwdSvc) config := mfsdk.Config{ ThingsURL: "localhost", } - mfsdk := mfsdk.NewSDK(config) - pwdSvc := sinks.NewPasswordService(logger, "_testing_string_") - return sinks.NewSinkService(logger, auth, sinkRepo, mfsdk, pwdSvc) + newSDK := mfsdk.NewSDK(config) + return sinks.NewSinkService(logger, auth, sinkRepo, newSDK, pwdSvc) } func TestCreateSink(t *testing.T) { @@ -107,6 +107,216 @@ func TestCreateSink(t *testing.T) { } +func TestIdempotencyUpdateSink(t *testing.T) { + ctx := context.Background() + service := newService(map[string]string{token: email}) + jsonSinkName, err := types.NewIdentifier("initial-json-Sink") + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + yamlSinkName, err := types.NewIdentifier("initial-yaml-Sink") + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + aInitialDescription := "A initial description worthy reading" + initialJsonSink := sinks.Sink{ + Name: jsonSinkName, + Description: &aInitialDescription, + Backend: "prometheus", + State: sinks.Unknown, + Error: "", + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": "netops", "password": "w0w-orb-Rocks!"}, + Tags: map[string]string{"cloud": "aws"}, + } + initialUsername := "netops" + initialPassword := "w0w-orb-Rocks!" 
+ initialYamlSink := sinks.Sink{ + Name: yamlSinkName, + Description: &aInitialDescription, + Backend: "prometheus", + State: sinks.Unknown, + Error: "", + ConfigData: "remote_host: https://orb.community/\nusername: netops\npassword: w0w-orb-Rocks!", + Format: "yaml", + MFOwnerID: "OrbCommunity", + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": &initialUsername, "password": &initialPassword}, + Tags: map[string]string{"cloud": "aws"}, + } + jsonCreatedSink, err := service.CreateSink(ctx, token, initialJsonSink) + require.NoError(t, err, "failed to create entity") + require.NotEmptyf(t, jsonCreatedSink.ID, "id must not be empty") + yamlCreatedSink, err := service.CreateSink(ctx, token, initialYamlSink) + require.NoError(t, err, "failed to create entity") + initialJsonSink.ID = jsonCreatedSink.ID + initialYamlSink.ID = yamlCreatedSink.ID + var cases = map[string]struct { + name string + requestSink sinks.Sink + expected func(t *testing.T, value sinks.Sink, err error) + token string + }{ + "idempotency json update": { + requestSink: initialJsonSink, + expected: func(t *testing.T, value sinks.Sink, err error) { + require.NoError(t, err, "no error expected") + require.NotNilf(t, value.Description, "description is nil") + desc := *value.Description + require.Equal(t, desc, aInitialDescription, "description is not equal") + require.Equal(t, value.Name, jsonSinkName, "sink name is not equal") + tagVal, tagOk := value.Tags["cloud"] + require.True(t, tagOk) + require.Equal(t, "aws", tagVal) + require.Equalf(t, "https://orb.community/", value.Config["remote_host"], "remote host is not equal") + require.Equalf(t, "netops", value.Config["username"], "username is not equal") + }, + token: token, + }, + "idempotency yaml update": { + requestSink: initialYamlSink, + expected: func(t *testing.T, value sinks.Sink, err error) { + require.NoError(t, err, "no error expected") + require.NotNilf(t, value.Description, "description is nil") + desc := *value.Description + require.Equal(t, desc, aInitialDescription, "description is not equal") + require.Equal(t, value.Name, yamlSinkName, "sink name is not equal") + tagVal, tagOk := value.Tags["cloud"] + require.True(t, tagOk) + require.Equal(t, "aws", tagVal) + require.Equalf(t, "https://orb.community/", value.Config["remote_host"], "remote host is not equal") + actual := value.Config["username"].(*string) + require.Equalf(t, "netops", *actual, "username is not equal") + }, + token: token, + }, + } + for desc, tc := range cases { + t.Run(desc, func(t *testing.T) { + res, err := service.UpdateSink(ctx, tc.token, tc.requestSink) + tc.expected(t, res, err) + }) + } +} + +func TestPartialUpdateSink(t *testing.T) { + ctx := context.Background() + service := newService(map[string]string{token: email}) + jsonSinkName, err := types.NewIdentifier("initial-json-Sink") + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + yamlSinkName, err := types.NewIdentifier("initial-yaml-Sink") + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + //newSinkName, err := types.NewIdentifier("updated-Sink") + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + //aNewDescription := "A new description worthy reading" + aInitialDescription := "A initial description worthy reading" + initialJsonSink := sinks.Sink{ + Name: jsonSinkName, + Description: &aInitialDescription, + Backend: "prometheus", + State: sinks.Unknown, + Error: "", + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": 
"netops", "password": "w0w-orb-Rocks!"}, + Tags: map[string]string{"cloud": "aws"}, + } + initialUsername := "netops" + initialPassword := "w0w-orb-Rocks!" + initialYamlSink := sinks.Sink{ + Name: yamlSinkName, + Description: &aInitialDescription, + Backend: "prometheus", + State: sinks.Unknown, + Error: "", + ConfigData: "remote_host:https://orb.community/\nusername: netops\npassword: w0w-orb-Rocks!", + Format: "yaml", + MFOwnerID: "OrbCommunity", + Config: map[string]interface{}{"remote_host": "https://orb.community/", "username": &initialUsername, "password": &initialPassword}, + Tags: map[string]string{"cloud": "aws"}, + } + jsonCreatedSink, err := service.CreateSink(ctx, token, initialJsonSink) + require.NoError(t, err, "failed to create entity") + require.NotEmptyf(t, jsonCreatedSink.ID, "id must not be empty") + yamlCreatedSink, err := service.CreateSink(ctx, token, initialYamlSink) + require.NoError(t, err, "failed to create entity") + userHelper := "netops_admin" + initialJsonSink.ID = jsonCreatedSink.ID + initialYamlSink.ID = yamlCreatedSink.ID + var cases = map[string]struct { + name string + requestSink sinks.Sink + expected func(t *testing.T, value sinks.Sink, err error) + token string + }{ + // TODO this will fail locally because of password encryption, + // TODO we will revisit this whenever there is a update on password encryption + //"update only name": { + // requestSink: sinks.Sink{ + // ID: jsonCreatedSink.ID, + // Name: newSinkName, + // }, + // expected: func(t *testing.T, value sinks.Sink, err error) { + // require.NoError(t, err, "no error expected") + // require.Equal(t, value.Name, newSinkName, "sink name is not equal") + // }, + // token: token, + //}, + //"update only description": { + // requestSink: sinks.Sink{ + // ID: jsonCreatedSink.ID, + // Description: &aNewDescription, + // }, + // expected: func(t *testing.T, value sinks.Sink, err error) { + // require.NoError(t, err, "no error expected") + // require.NotNilf(t, value.Description, "description is nil") + // desc := *value.Description + // require.Equal(t, desc, aNewDescription, "description is not equal") + // }, + // token: token, + //}, "update only tags": { + // requestSink: sinks.Sink{ + // ID: jsonCreatedSink.ID, + // Tags: map[string]string{"cloud": "gcp", "from_aws": "true"}, + // }, + // expected: func(t *testing.T, value sinks.Sink, err error) { + // require.NoError(t, err, "no error expected") + // tagVal, tagOk := value.Tags["cloud"] + // tag2Val, tag2Ok := value.Tags["from_aws"] + // require.True(t, tagOk) + // require.Equal(t, "gcp", tagVal) + // require.True(t, tag2Ok) + // require.Equal(t, "true", tag2Val) + // }, + // token: token, + //}, + "update config json": { + requestSink: sinks.Sink{ + ID: jsonCreatedSink.ID, + Config: map[string]interface{}{"remote_host": "https://orb.community/prom/push", "username": "netops_admin", "password": "w0w-orb-Rocks!"}, + }, + expected: func(t *testing.T, value sinks.Sink, err error) { + require.NoError(t, err, "no error expected") + require.Equalf(t, "https://orb.community/prom/push", value.Config["remote_host"], "want %s, got %s", "https://orb.community/prom/push", value.Config["remote_host"]) + require.Equalf(t, "netops_admin", value.Config["username"], "want %s, got %s", "netops_admin", value.Config["username"]) + }, + token: token, + }, "update config yaml": { + requestSink: sinks.Sink{ + ID: yamlCreatedSink.ID, + Format: "yaml", + ConfigData: "remote_host: https://orb.community/prom/push\nusername: netops_admin\npassword: \"w0w-orb-Rocks!\"", + 
}, + expected: func(t *testing.T, value sinks.Sink, err error) { + require.NoError(t, err, "no error expected") + require.Equalf(t, "https://orb.community/prom/push", value.Config["remote_host"], "want %s, got %s", "https://orb.community/prom/push", value.Config["remote_host"]) + require.NotNilf(t, value.Config["username"], "description is nil") + desc := value.Config["username"] + require.Equal(t, &userHelper, desc, "description is not equal") + }, + token: token, + }, + } + for desc, tc := range cases { + t.Run(desc, func(t *testing.T) { + res, err := service.UpdateSink(ctx, tc.token, tc.requestSink) + tc.expected(t, res, err) + }) + } +} + func TestUpdateSink(t *testing.T) { service := newService(map[string]string{token: email}) sk, err := service.CreateSink(context.Background(), token, sink) @@ -194,11 +404,6 @@ func TestUpdateSink(t *testing.T) { token: token, err: sinks.ErrNotFound, }, - "update sink read only fields": { - incomingSink: sink, - token: token, - err: errors.ErrUpdateEntity, - }, "update existing sink - only updating config": { incomingSink: sinks.Sink{ ID: sinkTestConfigAttribute.ID, @@ -210,7 +415,7 @@ func TestUpdateSink(t *testing.T) { expectedSink: sinks.Sink{ Name: sinkTestConfigAttribute.Name, Config: types.Metadata{ - "opentelemetry": "enabled", "remote_host": "https://orb.community/", "username": "dbuser", + "opentelemetry": "enabled", "remote_host": "https://orb.community/", }, Description: sinkTestConfigAttribute.Description, Tags: sinkTestConfigAttribute.Tags, @@ -307,10 +512,10 @@ func TestUpdateSink(t *testing.T) { t.Run(desc, func(t *testing.T) { res, err := service.UpdateSink(context.Background(), tc.token, tc.incomingSink) if err == nil { - assert.Equal(t, tc.expectedSink.Config, res.Config, fmt.Sprintf("%s: expected %s got %s", desc, tc.expectedSink.Config, res.Config)) - assert.Equal(t, tc.expectedSink.Name.String(), res.Name.String(), fmt.Sprintf("%s: expected name %s got %s", desc, tc.expectedSink.Name.String(), res.Name.String())) - assert.Equal(t, *tc.expectedSink.Description, *res.Description, fmt.Sprintf("%s: expected description %s got %s", desc, *tc.expectedSink.Description, *res.Description)) - assert.Equal(t, tc.expectedSink.Tags, res.Tags, fmt.Sprintf("%s: expected tags %s got %s", desc, tc.expectedSink.Tags, res.Tags)) + assert.Equal(t, tc.expectedSink.Config, res.Config, "config not as expected") + assert.Equal(t, tc.expectedSink.Name.String(), res.Name.String(), "sink name not as expected") + assert.Equal(t, *tc.expectedSink.Description, *res.Description, "sink description not as expected") + assert.Equal(t, tc.expectedSink.Tags, res.Tags, "sink tags not as expected") } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %d got %d", desc, tc.err, err)) }) diff --git a/ui/docker/Dockerfile b/ui/docker/Dockerfile index 365b4901f..9adc7ca34 100644 --- a/ui/docker/Dockerfile +++ b/ui/docker/Dockerfile @@ -1,5 +1,5 @@ # Stage 0, based on orb-ui-module, to build and compile Angular -FROM ns1labs/orb-ui-modules as built-module +FROM orbcommunity/orb-ui-modules as built-module # ARG variables which direct the UI build # can be overwritten with --build-arg docker flag diff --git a/ui/scripts/setenv.ts b/ui/scripts/setenv.ts index 9386ea312..e0878f0e9 100644 --- a/ui/scripts/setenv.ts +++ b/ui/scripts/setenv.ts @@ -31,10 +31,20 @@ const enableMaintenace = () => { } }; +const enableGTAG = () => { + if (process.env.GTAGID) { + return ` + GTAGID: '${ process.env.GTAGID }', + `; + } else { + return ''; + } +}; + // we have access to 
our environment variables // in the process.env object thanks to dotenv const environmentFileContent = ` -export const environment = {${enablePS()}${enableMaintenace()}}; +export const environment = {${enablePS()}${enableMaintenace()}${enableGTAG()}}; `; // write the content to the respective file diff --git a/ui/src/app/@theme/components/footer/footer.component.html b/ui/src/app/@theme/components/footer/footer.component.html index 074539969..9d79adbf9 100644 --- a/ui/src/app/@theme/components/footer/footer.component.html +++ b/ui/src/app/@theme/components/footer/footer.component.html @@ -1,6 +1,6 @@

- Made with ♥ by NS1Labs 2021 + Made with ♥ by NetBox Labs 2021

{{disclaimer}} diff --git a/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html b/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html index d510079eb..1b6d3c497 100644 --- a/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html +++ b/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html @@ -34,7 +34,7 @@

{{ isEdit ? 'Edit Agent Policy' : 'Create Agent Policy'}}

+ for="name">Policy Name *
auto \\ -e ORB_OTEL_ENABLE=true \\ -ns1labs/orb-agent`; +orbcommunity/orb-agent`; } toggleIcon (target) { diff --git a/ui/src/app/pages/sinks/add/sink.add.component.html b/ui/src/app/pages/sinks/add/sink.add.component.html index 55c6ed239..d51c8d6b2 100644 --- a/ui/src/app/pages/sinks/add/sink.add.component.html +++ b/ui/src/app/pages/sinks/add/sink.add.component.html @@ -27,7 +27,7 @@

{{strings.sink[isEdit ? 'edit' : 'add']['header']}}

- + *
{{strings.sink[isEdit ? 'edit' : 'add']['header']}}
- +

{{firstFormGroup?.controls.name.value}}

diff --git a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts index dfa9fb41c..bb54f2056 100644 --- a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts +++ b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts @@ -51,7 +51,7 @@ export class AgentProvisioningComponent implements OnInit { -e ORB_CLOUD_MQTT_KEY="AGENT_KEY" \\ -e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\ -e ORB_OTEL_ENABLE=true \\ -ns1labs/orb-agent`; +orbcommunity/orb-agent`; this.command2show = `docker run -d --restart=always --net=host \\ -e ORB_CLOUD_ADDRESS=${ document.location.hostname } \\ @@ -60,7 +60,7 @@ ns1labs/orb-agent`; -e ORB_CLOUD_MQTT_KEY=AGENT_KEY \\ -e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\ -e ORB_OTEL_ENABLE=true \\ -ns1labs/orb-agent`; +orbcommunity/orb-agent`; } } diff --git a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html index 858da28ed..e525bfc35 100644 --- a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html +++ b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html @@ -40,7 +40,7 @@ diff --git a/ui/src/environments/environment.prod.beta.ts b/ui/src/environments/environment.prod.beta.ts index 36fd7baf1..c42d73f31 100644 --- a/ui/src/environments/environment.prod.beta.ts +++ b/ui/src/environments/environment.prod.beta.ts @@ -5,7 +5,7 @@ const {orbApi: {apiUrl, version, urlKeys, servicesUrls}} = defaultEnvironment; export const environment = { production: true, - GTAGID: 'G-387CGPZQF0', + GTAGID: '', ...defaultEnvironment, ...envVars, // ORB api --prod diff --git a/ui/src/environments/environment.prod.ts b/ui/src/environments/environment.prod.ts index 36fd7baf1..c42d73f31 100644 --- a/ui/src/environments/environment.prod.ts +++ b/ui/src/environments/environment.prod.ts @@ -5,7 +5,7 @@ const {orbApi: {apiUrl, version, urlKeys, servicesUrls}} = defaultEnvironment; export const environment = { production: true, - GTAGID: 'G-387CGPZQF0', + GTAGID: '', ...defaultEnvironment, ...envVars, // ORB api --prod diff --git a/ui/src/environments/environment.ts b/ui/src/environments/environment.ts index 234831ae4..f42983127 100644 --- a/ui/src/environments/environment.ts +++ b/ui/src/environments/environment.ts @@ -4,7 +4,7 @@ import { environment as envVars } from './environment.env'; export const environment = { production: false, appPrefix: '', - GTAGID: 'G-387CGPZQF0', + GTAGID: '', ...defaultEnvironment, ...envVars, };
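
The sinks changes above introduce a Format/ConfigData pair alongside the existing Config metadata: incoming ConfigData is parsed through the backend's ParseConfig and validated, and encryptMetadata re-serializes the (now encrypted) metadata back into ConfigData via ConfigToFormat. The standalone Go sketch below illustrates that round trip for the "yaml" format. It is not code from this patch: the parseConfig/configToFormat helpers, the use of gopkg.in/yaml.v3, and the "<encrypted>" placeholder are assumptions made for illustration; only the field names mirror the configParseUtility struct in sinks/backend/prometheus.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// prometheusConfig mirrors the yaml tags of configParseUtility in the patch.
type prometheusConfig struct {
	RemoteHost string  `yaml:"remote_host"`
	Username   *string `yaml:"username,omitempty"`
	Password   *string `yaml:"password,omitempty"`
	APIToken   *string `yaml:"api_token,omitempty"`
}

// parseConfig stands in for what a backend's ParseConfig("yaml", configData) could do:
// turn the raw ConfigData string into the metadata map the service works with.
func parseConfig(configData string) (map[string]interface{}, error) {
	var cfg prometheusConfig
	if err := yaml.Unmarshal([]byte(configData), &cfg); err != nil {
		return nil, err
	}
	metadata := map[string]interface{}{"remote_host": cfg.RemoteHost}
	if cfg.Username != nil {
		metadata["username"] = *cfg.Username
	}
	if cfg.Password != nil {
		metadata["password"] = *cfg.Password
	}
	return metadata, nil
}

// configToFormat stands in for ConfigToFormat("yaml", metadata): serialize the metadata
// (with the password already replaced by its encrypted form) back into ConfigData.
func configToFormat(metadata map[string]interface{}) (string, error) {
	out, err := yaml.Marshal(metadata)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	in := "remote_host: https://orb.community/\nusername: netops\npassword: w0w-orb-Rocks!"
	metadata, err := parseConfig(in)
	if err != nil {
		panic(err)
	}
	// In the service, encryptMetadata would swap the plain password for its encrypted value here.
	metadata["password"] = "<encrypted>"
	roundTripped, err := configToFormat(metadata)
	if err != nil {
		panic(err)
	}
	fmt.Println(roundTripped)
}

This is also why encryptMetadata gains a type switch over string and *string: YAML-parsed configs such as configParseUtility carry pointer fields, so password values can arrive as *string (the new tests exercise exactly that with &initialPassword).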
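
The Redis producer changes add Approx: true to each XAddArgs next to MaxLen. With go-redis, that makes XADD use the "~" (approximate) MAXLEN modifier, so the stream is trimmed opportunistically rather than exactly on every append, which is cheaper for Redis. A minimal sketch of the call shape follows; it is not code from this patch, the stream name, length, and values are illustrative, and it assumes github.com/go-redis/redis/v8 and a Redis instance on localhost.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Roughly equivalent to: XADD orb.sinks MAXLEN ~ 1000 * operation sink_create
	id, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "orb.sinks", // illustrative stream name
		MaxLen: 1000,        // illustrative cap; the patch uses its streamLen constant
		Approx: true,        // "~": trim approximately instead of enforcing the exact length
		Values: map[string]interface{}{"operation": "sink_create"},
	}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("added entry", id)
}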