diff --git a/.circleci/config.yml b/.circleci/config.yml
index 236b17fc58d0..f56dc26e19e6 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -14,41 +14,6 @@ commands:
command: |
echo 'export INSTANCE_ROLE="<< parameters.instance_role >>"' >> $BASH_ENV
echo 'export INSTANCE_WAS_CREATED="<< parameters.instance_created >>"' >> $BASH_ENV
- start-tunnel:
- description: "Opens an ssh tunnel to the demisto servers and wait until the tunnels are established"
- parameters:
- timeout:
- type: integer
- default: 10
- steps:
- - run:
- name: add ssh configurations
- shell: /bin/bash
- command: |
- if [ -z $INSTANCE_WAS_CREATED ];
- then
- echo "Skipping - instance was not created"
- exit 0
- fi
- # Modifying ssh config file
- echo "Host 10.0.*
- StrictHostKeyChecking no
- LogLevel ERROR
- ProxyJump content-build@content-build-lb.demisto.works # disable-secrets-detection
- Host content-build-lb.demisto.works
- Port 43567
- UserKnownHostsFile /dev/null
- StrictHostKeyChecking no
- LogLevel ERROR" >> ~/.ssh/config
- - run:
- name: Open SSH Tunnel
- command: |
- if [ -z $INSTANCE_WAS_CREATED ];
- then
- echo "Skipping - instance was not created"
- exit 0
- fi
- ./Tests/scripts/open_ssh_tunnel.sh
parameters:
artifact_build_num:
@@ -91,6 +56,31 @@ references:
NIGHTLY_PARAMETER: << pipeline.parameters.nightly >>
GCS_MARKET_BUCKET: << pipeline.parameters.gcs_market_bucket >>
+ install_neo4j: &install_neo4j
+ run:
+ name: Install Neo4j
+ command: |
+ # workaround for 402 Payment required issue: https://github.com/actions/runner-images/issues/1983
+ sudo rm -fv /etc/apt/sources.list.d/github_git-lfs.list
+ sudo rm -fv /etc/apt/sources.list.d/github_git-lfs.list.save
+ wget -O - https://debian.neo4j.com/neotechnology.gpg.key | sudo apt-key add -
+ echo 'deb https://debian.neo4j.com stable 4.4' | sudo tee /etc/apt/sources.list.d/neo4j.list
+ sudo apt-get update
+ apt list -a neo4j
+ sudo apt-get install neo4j
+ sudo chown -R circleci /var/log/neo4j
+ sudo chown -R circleci /var/lib/neo4j
+ sudo chown -R circleci /etc/neo4j
+ mkdir -p /var/lib/neo4j/plugins
+ wget -O /var/lib/neo4j/plugins/apoc-4.4.0.8-all.jar https://github.com/neo4j-contrib/neo4j-apoc-procedures/releases/download/4.4.0.8/apoc-4.4.0.8-all.jar
+ neo4j_conf_file="/etc/neo4j/neo4j.conf"
+ sudo echo "dbms.security.procedures.unrestricted=apoc.*" >> $neo4j_conf_file
+ sudo echo "dbms.security.procedures.allowlist=apoc.*" >> $neo4j_conf_file
+ apoc_conf_file="/etc/neo4j/apoc.conf"
+ sudo echo "apoc.export.file.enabled=true" > $apoc_conf_file
+ sudo echo "apoc.import.file.enabled=true" >> $apoc_conf_file
+ sudo echo "apoc.import.file.use_neo4j_config=true" >> $apoc_conf_file
+ neo4j-admin set-initial-password test
container_config: &container_config
docker:
@@ -112,19 +102,12 @@ references:
attach_workspace:
at: *workspace_root
- add_ssh_keys: &add_ssh_keys
- add_ssh_keys:
- fingerprints:
- - "02:df:a5:6a:53:9a:f5:5d:bd:a6:fc:b2:db:9b:c9:47" # disable-secrets-detection
- - "f5:25:6a:e5:ac:4b:84:fb:60:54:14:82:f1:e9:6c:f9" # disable-secrets-detection
-
prepare_environment: &prepare_environment
run:
name: Prepare Environment
when: always
command: |
poetry --version
-
# Check if CircleCI's config file and poetry files files are up to date
# if poetry isn't up-to-date, checkout from origin/master.
./Tests/scripts/is_file_up_to_date.sh .circleci/config.yml $CIRCLE_BRANCH
@@ -210,12 +193,19 @@ references:
exit 0
fi
+ neo4j start
+ # poll for neo4j status until available
+ while ! neo4j status; do sleep 1; done
+
./Tests/scripts/linters_runner.sh
./Tests/scripts/validate.sh
run_unit_testing_and_lint: &run_unit_testing_and_lint
run:
- name: Run Unit Testing and Lint
+ parameters:
+ dockerimageflag:
+ type: string
+ name: Run Unit Testing And Lint - Docker Image:<< parameters.dockerimageflag >>
when: always
no_output_timeout: 5h
command: |
@@ -233,7 +223,7 @@ references:
echo "vulture py2 version: $(python2 -m vulture --version 2>&1)"
echo "vulture py3 version: $(python3 -m vulture --version 2>&1)"
mkdir ./unit-tests
- demisto-sdk lint -p 8 -g -vvv --test-xml ./unit-tests --log-path ./artifacts --failure-report ./artifacts --coverage-report $ARTIFACTS_FOLDER/coverage_report
+ demisto-sdk lint -p 8 -g -vvv --test-xml ./unit-tests --log-path ./artifacts --failure-report ./artifacts --coverage-report $ARTIFACTS_FOLDER/coverage_report --docker-image << parameters.dockerimageflag >>
generate_coverage_reports: &generate_coverage_reports
run:
@@ -267,32 +257,6 @@ references:
./Tests/scripts/sdk_pylint_check.sh
fi
- create_id_set: &create_id_set
- run:
- name: Create ID Set
- when: always
- command: |
- demisto-sdk create-id-set -o ./Tests/id_set.json --fail-duplicates
- cp ./Tests/id_set.json $CIRCLE_ARTIFACTS
-
- merge_public_and_private_id_sets: &merge_public_and_private_id_sets
- run:
- name: Merge public and private ID sets
- when: always
- command: |
- if [[ $CIRCLE_BRANCH =~ pull/[0-9]+ ]]; then
- echo "Skipping, Should not run on contributor's branch."
- exit 0
- fi
-
- # Download private ID set
- gsutil cp gs://marketplace-dist/content/private_id_set.json $CIRCLE_ARTIFACTS/unified_id_set.json
- echo "successfully downloaded private ID set"
-
- # Merge public and private ID sets
- demisto-sdk merge-id-sets -i1 ./Tests/id_set.json -i2 $CIRCLE_ARTIFACTS/unified_id_set.json -o $CIRCLE_ARTIFACTS/unified_id_set.json
- echo "successfully merged public and private ID sets"
-
get_contribution_pack: &get_contribution_pack
when:
condition: << pipeline.parameters.contrib_branch >>
@@ -323,6 +287,10 @@ references:
context: nightly_env
requires:
- Setup Environment
+ matrix:
+ parameters:
+ dockerimageflag: [ "native:ga", "native:maintenance", "native:dev", "from-yml" ]
+ name: Run Unit Testing And Lint - Docker Image:<< matrix.dockerimageflag >>
- Run Validations:
requires:
- Setup Environment
@@ -343,19 +311,21 @@ jobs:
- node_modules
key: virtualenv-venv-{{ checksum "pyproject.toml" }}-{{ checksum "poetry.lock" }}-{{ checksum "package-lock.json" }}
- *get_contribution_pack
- - *add_ssh_keys
- *persist_to_workspace
Run Unit Testing And Lint:
<<: *container_config
resource_class: medium
<<: *environment
+ parameters:
+ dockerimageflag:
+ type: string
steps:
- *attach_workspace
- *remote_docker
- *restore_cache
- - *add_ssh_keys
- *prepare_environment
+ - *install_neo4j
- *infrastructure_testing
- *run_unit_testing_and_lint
- *generate_coverage_reports
@@ -365,16 +335,14 @@ jobs:
Run Validations:
<<: *container_config
- resource_class: medium
+ resource_class: large
<<: *environment
steps:
- *attach_workspace
- *restore_cache
- - *add_ssh_keys
- *prepare_environment
+ - *install_neo4j
- *secrets
- - *create_id_set
- - *merge_public_and_private_id_sets
- *validate_files_and_yaml
- run:
name: Spell Checks
@@ -417,6 +385,10 @@ workflows:
- Run Unit Testing And Lint:
requires:
- Setup Environment
+ matrix:
+ parameters:
+ dockerimageflag: [ "native:ga", "native:maintenance", "native:dev", "from-yml" ]
+ name: Run Unit Testing And Lint - Docker Image:<< matrix.dockerimageflag >>
- Run Validations:
requires:
- Setup Environment
@@ -437,5 +409,4 @@ workflows:
# will initiate when using the trigger script.
when: << pipeline.parameters.nightly >>
jobs:
- *nightly_jobs
-
+ *nightly_jobs
\ No newline at end of file
diff --git a/.circleci/gitlab-ci-env-variables.sh b/.circleci/gitlab-ci-env-variables.sh
index 486b8e6bf642..3e50fd3fc09f 100644
--- a/.circleci/gitlab-ci-env-variables.sh
+++ b/.circleci/gitlab-ci-env-variables.sh
@@ -2,6 +2,7 @@ echo 'export CI_BUILD_ID="$CIRCLE_BUILD_NUM"' >> $BASH_ENV
echo 'export CI_PIPELINE_ID="$CIRCLE_WORKFLOW_ID"' >> $BASH_ENV
echo 'export CI_COMMIT_BRANCH="$CIRCLE_BRANCH"' >> $BASH_ENV
echo 'export ARTIFACTS_FOLDER=/home/circleci/project/artifacts' >> $BASH_ENV
+echo 'export PIPELINE_JOBS_FOLDER=/home/circleci/project/pipeline_jobs_folder' >> $BASH_ENV
echo 'export CI_COMMIT_SHA="$CIRCLE_SHA1"' >> $BASH_ENV
echo 'export CI_JOB_URL="$CIRCLE_BUILD_URL"' >> $BASH_ENV
echo 'export CI_JOB_NAME="$CIRCLE_JOB"' >> $BASH_ENV
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 83c053992f60..dd7296a3a6fa 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,34 +1,35 @@
FROM python:3.10-slim-bullseye
-ADD createCerts.sh .
-RUN apt-get update && apt-get install dos2unix -y \
- && dos2unix /createCerts.sh \
- && chmod +x /createCerts.sh \
- && /createCerts.sh /usr/local/share/ca-certificates/certs.crt \
- && update-ca-certificates \
- && apt-get install python2 -y
-
ENV USERNAME demisto
ENV HOME /home/$USERNAME
ENV NODE_EXTRA_CA_CERTS /usr/local/share/ca-certificates/certs.crt
ENV PATH $PATH:$HOME/.local/bin:/root/.local/bin:/usr/local/share/nvm/current/bin
-# This is a workaround for VSCode devcontainer features in self signed certificate
-
-ADD https://raw.githubusercontent.com/devcontainers/features/main/src/common-utils/install.sh /install-common.sh
-ADD https://raw.githubusercontent.com/devcontainers/features/main/src/git/install.sh /install-git.sh
-ADD https://raw.githubusercontent.com/devcontainers/features/main/src/docker-in-docker/install.sh /install-dind.sh
-ADD https://raw.githubusercontent.com/devcontainers/features/main/src/node/install.sh /install-node.sh
+ADD createCerts.sh .
+RUN apt-get update && apt-get install dos2unix git python2 curl -y \
+ && dos2unix /createCerts.sh \
+ && chmod +x /createCerts.sh \
+ && /createCerts.sh $NODE_EXTRA_CA_CERTS \
+ && update-ca-certificates \
+ && rm -rf /features \
+ && git clone https://github.com/devcontainers/features.git /features \
+ && cd /features \
+ # locking to the latest master commit in this repo to prevent breaking changes
+    # We should update this commit hash from time to time to pick up upstream fixes.
+ && git checkout fbdc4556d519512736a8e2abfb3e03fcb2c9e0c7
-RUN UID="1000" GID="1000" bash install-common.sh
-RUN VERSION="os-provided" bash install-git.sh
-RUN VERSION="latest" bash install-dind.sh
-RUN VERSION="lts" bash install-node.sh
+# This is a workaround for VSCode devcontainer features in self signed certificate
+RUN UID="1000" GID="1000" bash /features/src/common-utils/install.sh
+RUN VERSION="os-provided" bash /features/src/git/install.sh
+RUN VERSION="latest" bash /features/src/docker-in-docker/install.sh
+RUN VERSION="lts" bash /features/src/node/install.sh
+RUN bash /features/src/sshd/install.sh
+RUN bash /features/src/github-cli/install.sh
# install poetry, configure certificate for git and tools for oh my zsh
-RUN curl -sSL https://install.python-poetry.org | python3 - \
- && python -m pip install --user pipx \
+RUN python -m pip install --user pipx \
&& python -m pipx ensurepath \
+ && pipx install poetry \
&& git clone https://github.com/zsh-users/zsh-syntax-highlighting.git $HOME/.zsh/zsh-syntax-highlighting \
&& echo "source ~/.zsh/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh" >> $HOME/.zshrc \
&& git clone https://github.com/zsh-users/zsh-autosuggestions $HOME/.zsh/zsh-autosuggestions \
diff --git a/.devcontainer/createCerts.sh b/.devcontainer/createCerts.sh
index c32b37b8026a..abba6854039d 100644
--- a/.devcontainer/createCerts.sh
+++ b/.devcontainer/createCerts.sh
@@ -5,6 +5,13 @@
# It tries to connect to a server (for instance, github), show the local certificates and save it to a file.
# We connect to a random server and not paloaltonetworks.com to get external certificates.
+# if command fails
+
+if curl -I https://github.com > /dev/null; then
+ echo "No need to update certificate"
+ exit
+fi
+
CONNECT_SERVER="github.com:443"
FILE=$1
@@ -21,3 +28,6 @@ if [ ! -f "$FILE" ]; then
exit
fi
+git config --system http.sslCAInfo $FILE
+
+
diff --git a/.devcontainer/createCommand.sh b/.devcontainer/createCommand.sh
index 1b2b7fb85a48..8ffa5865cf35 100755
--- a/.devcontainer/createCommand.sh
+++ b/.devcontainer/createCommand.sh
@@ -6,22 +6,19 @@ echo "Fixing permissions"
sudo chown demisto .venv
sudo chown demisto node_modules
+sudo chown demisto /workspaces
sudo chown -R demisto $HOME
-sudo chown -R demisto /workspaces
-
-echo "Setting up git certificate"
-
-sudo git config --system http.sslCAInfo /usr/local/share/ca-certificates/certs.crt
echo "Setting up VSCode paths"
cp .devcontainer/settings.json .vscode/settings.json
touch CommonServerUserPython.py
+touch DemistoClassApiModule.py
path=$(printf '%s:' Packs/ApiModules/Scripts/*)
rm -f .env
echo "PYTHONPATH=""$path"":$PYTHONPATH" >> .env
echo "MYPYPATH=""$path"":$MYPYPATH" >> .env
echo "Setting up content dependencies"
-
-NO_HOOKS=1 .hooks/bootstrap
\ No newline at end of file
+poetry install
+npm install
\ No newline at end of file
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 8706277e04bc..4944b295eb84 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -1,3 +1,4 @@
+// Development container for Content. Poetry hash 97b4446a28329c24352fcb41ca8570403da0ae3f7434121ab73b1ff83fdec395
{
"name": "XSOAR Content",
"build": {
@@ -14,52 +15,11 @@
"source=node-modules,target=${containerWorkspaceFolder}/node_modules,type=volume",
"source=dind-var-lib-docker,target=/var/lib/docker,type=volume"
],
+ "containerUser": "demisto",
"remoteUser": "demisto",
- "settings": {
- "terminal.integrated.defaultProfile.linux": "zsh",
- "terminal.integrated.profiles.linux": {
- "zsh": {
- "path": "/bin/zsh"
- }
- },
- "cSpell.words": [
- "demisto",
- "xsoar",
- "xsiam",
- "fromversion",
- "toversion",
- "marketplacev",
- "ciac",
- "whois",
- ]
- },
- "extensions": [
- "cortexxsoarext.xsoar",
- "ms-python.python",
- "ms-vscode.PowerShell",
- "ms-toolsai.jupyter",
- "timonwong.shellcheck",
- "GitHub.vscode-pull-request-github",
- "eamodio.gitlens",
- "ms-azuretools.vscode-docker",
- "ms-vscode-remote.remote-containers",
- "streetsidesoftware.code-spell-checker",
- "njpwerner.autodocstring",
- "VisualStudioExptTeam.vscodeintellicode",
- "yzhang.markdown-all-in-one",
- "shd101wyy.markdown-preview-enhanced",
- "Gruntfuggly.todo-tree",
- "redhat.vscode-yaml",
- "PKief.material-icon-theme",
- "mikestead.dotenv",
- "KevinRose.vsc-python-indent",
- "rangav.vscode-thunder-client",
- "ms-python.black-formatter",
- "LittleFoxTeam.vscode-python-test-adapter"
- ],
"remoteEnv": {
"LOCAL_WORKSPACE_PATH": "${localWorkspaceFolder}",
- "PATH": "${containerWorkspaceFolder}/.venv/bin:${containerEnv:PATH}",
+ "PATH": "${containerEnv:PATH}:${containerWorkspaceFolder}/.venv/bin",
"CONTENT": "${containerWorkspaceFolder}",
"DEMISTO_SDK_CONTENT_PATH": "${containerWorkspaceFolder}",
"PYTHONPATH": "${containerWorkspaceFolder}:${containerWorkspaceFolder}/Packs/Base/Scripts/CommonServerPython/:${containerWorkspaceFolder}/Packs/Base/Scripts/CommonServerUserPython/:${containerWorkspaceFolder}/Tests/demistomock/:${containerEnv:PYTHONPATH}",
@@ -70,26 +30,79 @@
"DEMISTO_VERIFY_SSL": "${localEnv:DEMISTO_VERIFY_SSL}",
"DEMISTO_API_KEY": "${localEnv:DEMISTO_API_KEY}"
},
- // this is commented out until VSCode will fix self signed certificate issues
- // "features": {
- // "ghcr.io/devcontainers/features/docker-in-docker:1": {
- // "version": "latest"
- // },
- // "ghcr.io/devcontainers/features/git:1": {
- // "version": "os-provided"
- // },
- // "ghcr.io/devcontainers/features/powershell:1": {
- // "version": "latest"
- // },
- // "ghcr.io/devcontainers/features/common-utils:1": {
- // "version": "latest"
- // },
- // "ghcr.io/devcontainers/features/node:1": {
- // "version": "latest"
- // }
- // },
- // "overrideFeatureInstallOrder": [
- // "ghcr.io/devcontainers/features/common-utils:1"
- // ],
- "onCreateCommand": "dos2unix .devcontainer/createCommand.sh && chmod +x .devcontainer/createCommand.sh && bash .devcontainer/createCommand.sh",
+ "customizations": {
+ "vscode": {
+ "settings": {
+ "terminal.integrated.defaultProfile.linux": "zsh",
+ "terminal.integrated.profiles.linux": {
+ "zsh": {
+ "path": "/bin/zsh"
+ }
+ },
+ "cSpell.words": [
+ "demisto",
+ "xsoar",
+ "xsiam",
+ "fromversion",
+ "toversion",
+ "marketplacev",
+ "ciac",
+ "whois",
+ ]
+ },
+ "extensions": [
+ "cortexxsoarext.xsoar",
+ "ms-python.python",
+ "ms-vscode.PowerShell",
+ "ms-toolsai.jupyter",
+ "timonwong.shellcheck",
+ "GitHub.vscode-pull-request-github",
+ "eamodio.gitlens",
+ "ms-azuretools.vscode-docker",
+ "ms-vscode-remote.remote-containers",
+ "streetsidesoftware.code-spell-checker",
+ "njpwerner.autodocstring",
+ "VisualStudioExptTeam.vscodeintellicode",
+ "yzhang.markdown-all-in-one",
+ "shd101wyy.markdown-preview-enhanced",
+ "Gruntfuggly.todo-tree",
+ "redhat.vscode-yaml",
+ "PKief.material-icon-theme",
+ "mikestead.dotenv",
+ "KevinRose.vsc-python-indent",
+ "rangav.vscode-thunder-client",
+ "ms-python.black-formatter",
+ "LittleFoxTeam.vscode-python-test-adapter"
+ ]
+ },
+ "codespaces": {
+ "repositories": {
+ "demisto/demisto-sdk": {
+ "permissions": "write-all"
+ }
+ }
+ }
+ },
+// this is commented out until VSCode will fix self signed certificate issues
+// "features": {
+// "ghcr.io/devcontainers/features/docker-in-docker:1": {
+// "version": "latest"
+// },
+// "ghcr.io/devcontainers/features/git:1": {
+// "version": "os-provided"
+// },
+// "ghcr.io/devcontainers/features/powershell:1": {
+// "version": "latest"
+// },
+// "ghcr.io/devcontainers/features/common-utils:1": {
+// "version": "latest"
+// },
+// "ghcr.io/devcontainers/features/node:1": {
+// "version": "latest"
+// }
+// },
+// "overrideFeatureInstallOrder": [
+// "ghcr.io/devcontainers/features/common-utils:1"
+// ],
+"onCreateCommand": "dos2unix -n .devcontainer/createCommand.sh .devcontainer/createCommand_unix.sh && chmod +x .devcontainer/createCommand_unix.sh && bash .devcontainer/createCommand_unix.sh"
}
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 50a4b3e9b2a9..02c12ae9037b 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -7,7 +7,7 @@
/Tests/Marketplace/core_packs_mpv2_list.json @yaakovpraisler @bakatzir @GuyLibermanPA @demisto/content-leaders
# Docker native image
-/Tests/native_image_config.json @GuyAfik @JudahSchwartz
+/Tests/docker_native_image_config.json @GuyAfik @JudahSchwartz
# Marketplace & Upload-Flow
/Tests/scripts/create_artifacts_graph/create_artifacts.py @ilaner
@@ -39,7 +39,7 @@
# Important Integrations
/Packs/Jira/Integrations/JiraV2/* @demisto/content-leaders
-/Packs/ServiceNow/Integrations/ServiceNowv2/* @demisto/content-leaders
+/Packs/ServiceNow/Integrations/ServiceNowv2/* @demisto/content-leaders @Shellyber
/Packs/EWS/Integrations/EWSv2/* @demisto/content-leaders
/Packs/ArcSightESM/Integrations/ArcSightESMv2/* @demisto/content-leaders
/Packs/QRadar/Integrations/QRadar_v3/* @tkatzir
@@ -66,13 +66,13 @@
.gitlab/* @esharf
.gitlab-ci.yml @esharf
.gitlab/ci/on-push.yml @daryakoval
-/Tests/scripts/wait_in_line_for_xsiam_env.sh @daryakoval
+/Tests/scripts/wait_in_line_for_cloud_env.sh @daryakoval
.gitlab/ci/staging.yml @ilaner
-/Tests/scripts/uninstall_packs_and_reset_bucket_xsiam.sh @daryakoval
+/Tests/scripts/uninstall_packs_and_reset_bucket_cloud.sh @daryakoval
/Tests/Marketplace/search_and_uninstall_pack.py @daryakoval
/Tests/scripts/install_content_and_test_integrations.sh @daryakoval
/Tests/configure_and_test_integration_instances.py @daryakoval
-/Tests/scripts/print_xsiam_machine_details.sh @daryakoval
+/Tests/scripts/print_cloud_machine_details.sh @daryakoval
/Tests/scripts/run_tests.sh @daryakoval
# SDK Related
diff --git a/.github/workflows/auto-merge-dockers.yml b/.github/workflows/auto-merge-dockers.yml
new file mode 100644
index 000000000000..2cabb1cf6d3a
--- /dev/null
+++ b/.github/workflows/auto-merge-dockers.yml
@@ -0,0 +1,41 @@
+name: Docker Update Auto Merge
+on:
+ pull_request:
+ types: [opened, edited, synchronize, assigned]
+
+
+permissions:
+ pull-requests: write
+ contents: write
+
+jobs:
+ auto_approve_and_merge:
+ runs-on: ubuntu-latest
+ if: startsWith(github.head_ref, 'demisto/') && github.repository == 'demisto/content' && contains(github.event.pull_request.labels.*.name, 'docker-image-auto-update')
+ steps:
+ - name: Print out context
+ run: |
+ echo "$GITHUB_CONTEXT"
+ env:
+ GITHUB_CONTEXT: ${{ toJSON(github) }}
+ - name: Approve and auto-merge for docker update PRs
+ env:
+ PR_URL: ${{github.event.pull_request.html_url}}
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ echo "Approving the pull request"
+ gh pr review --approve "$PR_URL"
+          echo "Waiting till all the checks are done"
+ gh pr checks --required --interval 60 --watch "$PR_URL"
+ echo "Merging the pull request"
+ gh pr merge --squash "$PR_URL"
+ - name: Post to a Slack channel
+ if: ${{ failure() }}
+ id: slack
+ uses: slackapi/slack-github-action@v1.23.0
+ with:
+ channel-id: 'auto-merge-docker-action'
+ slack-message: "GitHub build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}"
+ env:
+ SLACK_BOT_TOKEN: ${{ secrets.CORTEX_XSOAR_SLACK_TOKEN }}
+ CONTENTBOT_GH_ADMIN_TOKEN: ${{ secrets.CONTENTBOT_GH_ADMIN_TOKEN }}
diff --git a/.github/workflows/check-devcontainer.yml b/.github/workflows/check-devcontainer.yml
new file mode 100644
index 000000000000..bf433a56baac
--- /dev/null
+++ b/.github/workflows/check-devcontainer.yml
@@ -0,0 +1,43 @@
+name: Check Devcontainer
+on:
+ pull_request:
+ paths:
+ - .devcontainer/**
+ - pyproject.toml
+ - poetry.lock
+ - .github/workflows/check-devcontainer.yml
+ push:
+ branches:
+ - master
+ paths:
+ - .devcontainer/**
+ - pyproject.toml
+ - poetry.lock
+ - .github/workflows/check-devcontainer.yml
+jobs:
+ Build-Devcontainer:
+ permissions:
+ contents: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ - name: Update .devcontainer.json description
+ run: |
+ # replace the first line of .devcontainer/devcontainer.json with the new description
+ POETRY_LOCK_HASH=${{ hashFiles('poetry.lock') }}
+ sed -i "1s/^.*$/\/\/ Development container for Content. Poetry hash $POETRY_LOCK_HASH/" .devcontainer/devcontainer.json
+ - name: commit file
+ if: github.ref == 'refs/heads/master'
+ uses: stefanzweifel/git-auto-commit-action@v4
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ commit_message: Update .devcontainer.json name
+ - name: Run demisto-sdk in devcontainer
+ uses: devcontainers/ci@v0.2
+ with:
+ push: never
+ runCmd: demisto-sdk --version
diff --git a/.github/workflows/close_jira_issue_by_pr_merge.yml b/.github/workflows/close_jira_issue_by_pr_merge.yml
index 8dba218eb383..19ed663ac879 100644
--- a/.github/workflows/close_jira_issue_by_pr_merge.yml
+++ b/.github/workflows/close_jira_issue_by_pr_merge.yml
@@ -32,11 +32,10 @@ jobs:
PR_TITLE: ${{ github.event.pull_request.title }}
PR_LINK: ${{ github.event.pull_request.html_url }}
PR_BODY: ${{ github.event.pull_request.body }}
- IS_MERGED: True
USERNAME: ${{ secrets.LINK_PR_TO_JIRA_ISSUE_USER }}
PASSWORD: ${{ secrets.LINK_PR_TO_JIRA_ISSUE_PASSWORD }}
INSTANCE_URL: ${{ secrets.ENGINE_URL }}
run: |
echo "Checking for related Jira issues to PR: $PR_NUMBER"
cd Utils/github_workflow_scripts/jira_integration_scripts
- poetry run python ./link_pr_to_jira_issue.py --url "$INSTANCE_URL" --pr_num "$PR_NUMBER" --pr_link "$PR_LINK" --pr_title "$PR_TITLE" --pr_body "$PR_BODY" --is_merged $IS_MERGED --username $USERNAME --password $PASSWORD
+ poetry run python ./link_pr_to_jira_issue.py --url "$INSTANCE_URL" --pr_num "$PR_NUMBER" --pr_link "$PR_LINK" --pr_title "$PR_TITLE" --pr_body "$PR_BODY" --is_merged --username $USERNAME --password $PASSWORD
diff --git a/.github/workflows/link_edited_pr_to_jira_issue.yml b/.github/workflows/link_edited_pr_to_jira_issue.yml
index 29c8678fc8fd..83453b251840 100644
--- a/.github/workflows/link_edited_pr_to_jira_issue.yml
+++ b/.github/workflows/link_edited_pr_to_jira_issue.yml
@@ -16,7 +16,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v3
with:
- python-version: '3.9'
+ python-version: '3.10'
- name: Setup Poetry
uses: Gr1N/setup-poetry@v7
- uses: actions/cache@v2
@@ -32,12 +32,11 @@ jobs:
PR_TITLE: ${{ github.event.pull_request.title }}
PR_LINK: ${{ github.event.pull_request.html_url }}
PR_BODY: ${{ github.event.pull_request.body }}
- IS_MERGED: false
USERNAME: ${{ secrets.LINK_PR_TO_JIRA_ISSUE_USER }}
PASSWORD: ${{ secrets.LINK_PR_TO_JIRA_ISSUE_PASSWORD }}
INSTANCE_URL: ${{ secrets.ENGINE_URL }}
run: |
echo "Checking for related Jira issues to PR: $PR_NUMBER"
cd Utils/github_workflow_scripts/jira_integration_scripts
- echo --pr_num $PR_NUMBER --pr_link $PR_LINK --pr_title $PR_TITLE --pr_body $PR_BODY --is_merged $IS_MERGED
- poetry run python ./link_pr_to_jira_issue.py --url "$INSTANCE_URL" --pr_num "$PR_NUMBER" --pr_link "$PR_LINK" --pr_title "$PR_TITLE" --pr_body "$PR_BODY" --is_merged $IS_MERGED --username $USERNAME --password $PASSWORD
+ echo --pr_num $PR_NUMBER --pr_link $PR_LINK --pr_title $PR_TITLE --pr_body $PR_BODY --no-is_merged
+ poetry run python ./link_pr_to_jira_issue.py --url "$INSTANCE_URL" --pr_num "$PR_NUMBER" --pr_link "$PR_LINK" --pr_title "$PR_TITLE" --pr_body "$PR_BODY" --no-is_merged --username $USERNAME --password $PASSWORD
diff --git a/.github/workflows/review-release-notes.yml b/.github/workflows/review-release-notes.yml
index bc98b16d01c0..2d8b20605f36 100644
--- a/.github/workflows/review-release-notes.yml
+++ b/.github/workflows/review-release-notes.yml
@@ -1,10 +1,9 @@
name: Review Release Notes
on: pull_request
-
jobs:
release_notes_review:
runs-on: ubuntu-latest
- if: github.repository == 'demisto/content' && github.event.pull_request.head.repo.fork == false
+ if: github.repository == 'demisto/content'
steps:
- name: Checkout
uses: actions/checkout@v3
diff --git a/.github/workflows/run-secrets-detection.yml b/.github/workflows/run-secrets-detection.yml
index b3b8301a1386..73cd353a243d 100644
--- a/.github/workflows/run-secrets-detection.yml
+++ b/.github/workflows/run-secrets-detection.yml
@@ -23,15 +23,17 @@ jobs:
BRANCH_NAME: ${{ github.head_ref }}
USERNAME: ${{ secrets.SECRET_CHECK_USER }}
PASSWORD: ${{ secrets.SECRET_CHECK_PASS }}
+ GOLD_SERVER_URL: ${{ secrets.GOLD_SERVER_URL }}
run: |
echo "Run secrets detection for PR: $PR_NUMBER on branch: $BRANCH_NAME"
- investigation_id=$(poetry run Utils/github_workflow_scripts/run_secrets_detection.py --pr_number $PR_NUMBER --branch_name $BRANCH_NAME --username $USERNAME --password $PASSWORD)
+ investigation_id=$(poetry run Utils/github_workflow_scripts/run_secrets_detection.py --pr_number $PR_NUMBER --branch_name $BRANCH_NAME --username $USERNAME --password $PASSWORD --gold_server_url $GOLD_SERVER_URL)
echo "INVESTIGATION_ID=$investigation_id" >> $GITHUB_ENV
- name: Wait For Playbook To Finish
env:
GOLD_API_KEY: ${{ secrets.SECRETS_GOLD_API_KEY }}
+ GOLD_SERVER_URL: ${{ secrets.GOLD_SERVER_URL }}
run: |
echo "Invastigation id is: $INVESTIGATION_ID "
- poetry run python ./Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py -i $INVESTIGATION_ID -k $GOLD_API_KEY
+ poetry run python ./Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py -i $INVESTIGATION_ID -k $GOLD_API_KEY --gold_server_url $GOLD_SERVER_URL
diff --git a/.github/workflows/trigger-contribution-build.yml b/.github/workflows/trigger-contribution-build.yml
index 72be250f516d..cb9535c253e2 100644
--- a/.github/workflows/trigger-contribution-build.yml
+++ b/.github/workflows/trigger-contribution-build.yml
@@ -26,6 +26,7 @@ jobs:
CONTRIB_BRANCH: ${{ github.event.pull_request.head.label }}
USERNAME: ${{ secrets.TRIGGER_BUILD_USER }}
PASSWORD: ${{ secrets.TRIGGER_BUILD_PASSWORD }}
+ GOLD_SERVER_URL: ${{ secrets.GOLD_SERVER_URL }}
run: |
echo "Trigger contribution build for PR: $PR_NUMBER with base branch: $BASE_BRANCH contrib branch: $CONTRIB_BRANCH"
- poetry run python ./Utils/github_workflow_scripts/trigger_contribution_build.py --pr_number $PR_NUMBER --base_branch $BASE_BRANCH --contrib_branch $CONTRIB_BRANCH --username $USERNAME --password $PASSWORD
+ poetry run python ./Utils/github_workflow_scripts/trigger_contribution_build.py --pr_number $PR_NUMBER --base_branch $BASE_BRANCH --contrib_branch $CONTRIB_BRANCH --username $USERNAME --password $PASSWORD --gold_server_url $GOLD_SERVER_URL
diff --git a/.gitignore b/.gitignore
index ab930b635da4..5563291dff4d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,8 +39,7 @@ Scripts/**/__init__.py
Integrations/**/__init__.py
Packs/**/__init__.py
requirements.txt
-Tests/scripts/infrastructure_tests/collect_tests.log
-Tests/scripts/collect_tests.log
+**/collect_tests.log
# Environment and reports
.demisto-sdk-conf
@@ -56,6 +55,7 @@ Packs/**/report_pytest.json
# devcontainer folders
.devcontainer/certs.crt
+.devcontainer/createCommand_unix.sh
**/.devcontainer
!/.devcontainer
diff --git a/.gitlab/ci/.gitlab-ci.yml b/.gitlab/ci/.gitlab-ci.yml
index 0c8ebecf67cc..34cade6ddbd9 100644
--- a/.gitlab/ci/.gitlab-ci.yml
+++ b/.gitlab/ci/.gitlab-ci.yml
@@ -1,18 +1,19 @@
default:
- image: docker-io.art.code.pan.run/devdemisto/gitlab-content-ci:1.0.0.41253
+ image: docker-io.art.code.pan.run/devdemisto/gitlab-content-ci:1.0.0.41284
artifacts:
expire_in: 30 days
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
stages:
- unittests-and-validations
- - create-instances
+ - prepare-testing-bucket
- run-instances
- - stage-compare
- upload-to-marketplace
+ - are-jobs-realy-done
- fan-in # concentrate pipeline artifacts to single job before triggering child slack pipeline
@@ -21,10 +22,13 @@ variables:
GCS_MARKET_BUCKET: "marketplace-dist"
GCS_MARKET_V2_BUCKET: "marketplace-v2-dist"
GCS_MARKET_XPANSE_BUCKET: "xpanse-dist"
+ GCS_MARKET_BUCKET_DEV: "marketplace-dist-dev"
+ GCS_MARKET_V2_BUCKET_DEV: "marketplace-v2-dist-dev"
STORAGE_BASE_PATH: ""
SLACK_CHANNEL: "dmst-build-test"
DEMISTO_README_VALIDATION: "true"
ARTIFACTS_FOLDER: "/builds/xsoar/content/artifacts"
+ PIPELINE_JOBS_FOLDER: "/builds/xsoar/content/pipeline_jobs_folder"
ARTIFACTS_FOLDER_XSOAR: "/builds/xsoar/content/artifacts/xsoar"
ARTIFACTS_FOLDER_MPV2: "/builds/xsoar/content/artifacts/marketplacev2"
ARTIFACTS_FOLDER_XPANSE: "/builds/xsoar/content/artifacts/xpanse"
@@ -42,6 +46,7 @@ variables:
DEMISTO_CONNECTION_POOL_MAXSIZE: "180" # see this issue for more info https://github.com/demisto/etc/issues/36886
SDK_REF: "master" # The default sdk branch to use
OVERRIDE_ALL_PACKS: "false"
+ TEST_UPLOAD: "true"
include:
@@ -51,4 +56,3 @@ include:
- local: .gitlab/ci/instance-test.yml
- local: .gitlab/ci/sdk-nightly.yml
- local: .gitlab/ci/miscellaneous.yml
- - local: .gitlab/ci/staging.yml
diff --git a/.gitlab/ci/bucket-upload.yml b/.gitlab/ci/bucket-upload.yml
index 8ad79018ce3c..eaeb55d6dbcd 100644
--- a/.gitlab/ci/bucket-upload.yml
+++ b/.gitlab/ci/bucket-upload.yml
@@ -22,6 +22,7 @@
LEADER_NAMES=$(echo "${LEADER_NAMES}" "content-bot" "svc -xsoar-gitlab-mirror" "${USERS_ALLOWED_TRIGGER_UPLOAD}" )
if [[ -z "$GITLAB_USER_NAME" ]] || [[ -z "`echo $LEADER_NAMES | grep -w "$GITLAB_USER_NAME"`" ]]; then
echo -e "User '$GITLAB_USER_NAME' is not allowed to trigger this build, only one of:\n${LEADER_NAMES}"
+ job-done
exit 1
else
echo "User '${GITLAB_USER_NAME}' is allowed to upload packs / force upload packs."
@@ -29,6 +30,22 @@
fi
- section_end "Check User Permissions to Upload Packs"
+.upload_content_graph: &upload_content_graph
+ - |
+ if [[ $TEST_UPLOAD == "false" ]]; then
+ section_start "Upload content graph CSVs to GCP" --collapsed
+ # gsutil cp $ARTIFACTS_FOLDER/content_graph/$MARKETPLACE_VERSION.zip "gs://$GCS_MARKET_BUCKET_DEV/content_graph/$MARKETPLACE_VERSION.zip"
+ section_end "Upload content graph CSVs to GCP"
+ fi
+
+.upload_dependencies_file: &upload_dependencies_file
+ - |
+ if [[ $TEST_UPLOAD == "false" ]]; then
+ section_start "Upload packs_dependencies.json to GCP" --collapsed
+ gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY" > auth.out 2>&1
+ gsutil cp $ARTIFACTS_FOLDER/packs_dependencies.json "gs://xsoar-ci-artifacts/content-cache-docs/$MARKETPLACE_VERSION/packs_dependencies.json"
+ section_end "Upload packs_dependencies.json to GCP"
+ fi
run-validations-upload-flow:
extends:
@@ -43,10 +60,19 @@ run-unittests-and-lint-upload-flow:
- .run-unittests-and-lint
- .bucket-upload-rule
+jobs-done-check-upload-flow:
+ extends:
+ - .jobs-done-check
+ - .bucket-upload-rule
+ needs: ['run-unittests-and-lint-upload-flow', 'run-validations-upload-flow', 'mpv2-prepare-testing-bucket-upload-flow', 'upload-id-set-bucket', 'xpanse-prepare-testing-bucket-upload-flow', 'xsoar-prepare-testing-bucket-upload-flow', 'install-packs-in-server6_5', 'install-packs-in-server6_6', 'install-packs-in-server6_8', 'install-packs-in-server-master', 'install-packs-in-xsiam-ga', 'sync-buckets-between-projects', 'upload-packs-to-marketplace', 'upload-packs-to-marketplace-v2', 'upload-packs-to-xpanse-marketplace']
+ tags:
+ - gke
+
+
-create-instances-upload-flow:
+xsoar-prepare-testing-bucket-upload-flow:
extends:
- - create-instances
+ - xsoar-prepare-testing-bucket
variables:
IFRA_ENV_TYPE: "Bucket-Upload"
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
@@ -56,9 +82,10 @@ create-instances-upload-flow:
- if: '$BUCKET_UPLOAD == "true"'
- if: '$FORCE_BUCKET_UPLOAD == "true"'
-prepare-testing-bucket-mpv2-upload-flow:
+
+mpv2-prepare-testing-bucket-upload-flow:
extends:
- - prepare-testing-bucket-mpv2
+ - mpv2-prepare-testing-bucket
variables:
IFRA_ENV_TYPE: "Bucket-Upload"
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
@@ -69,9 +96,9 @@ prepare-testing-bucket-mpv2-upload-flow:
- if: '$FORCE_BUCKET_UPLOAD == "true"'
-prepare-testing-bucket-xpanse-upload-flow:
+xpanse-prepare-testing-bucket-upload-flow:
extends:
- - prepare-testing-bucket-xpanse
+ - xpanse-prepare-testing-bucket
variables:
IFRA_ENV_TYPE: "Bucket-Upload"
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XPANSE}"
@@ -85,12 +112,13 @@ prepare-testing-bucket-xpanse-upload-flow:
.install_packs_in_xsoar_server:
tags:
- gke
- needs: ["create-instances-upload-flow"]
+ needs: ["xsoar-prepare-testing-bucket-upload-flow"]
stage: run-instances
artifacts:
expire_in: 48 hrs
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
variables:
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
@@ -102,10 +130,11 @@ prepare-testing-bucket-xpanse-upload-flow:
- .bucket-upload-rule
- .default-job-settings
script:
- - '[[ "create instances done" = $(cat "$ARTIFACTS_FOLDER/create_instances_done.txt" 2> /dev/null) ]] || (echo "this is a known issue with GitLab pipline please mention @esharf in your PR" && exit 1)'
- EXIT_CODE=0
- section_start "Download configuration"
- ./Tests/scripts/download_demisto_conf.sh >> $ARTIFACTS_FOLDER/logs/installations.log
+ - SECRET_CONF_PATH=$(cat secret_conf_path)
+ - python3 ./Tests/scripts/add_secrets_file_to_build.py -sa "$GSM_SERVICE_ACCOUNT" -sf "$SECRET_CONF_PATH" -u "$DEMISTO_USERNAME" -p "$DEMISTO_PASSWORD" -gpid "$GSM_PROJECT_ID"
- section_end "Download configuration"
- !reference [.open-ssh-tunnel]
- section_start "Get Instance Variables"
@@ -121,6 +150,7 @@ prepare-testing-bucket-xpanse-upload-flow:
- section_start "Destroy instances"
- python3 ./Tests/scripts/destroy_instances.py $ARTIFACTS_FOLDER $ARTIFACTS_FOLDER/env_results.json "$INSTANCE_ROLE" "$TIME_TO_LIVE" || EXIT_CODE=$?
- section_end "Destroy instances"
+ - job-done
- exit "$EXIT_CODE"
@@ -149,40 +179,42 @@ install-packs-in-server-master:
.install-mpv2-packs-on-xsiam-instances:
tags:
- gke
- needs: ["prepare-testing-bucket-mpv2-upload-flow"]
+ needs: ["mpv2-prepare-testing-bucket-upload-flow"]
stage: run-instances
artifacts:
expire_in: 48 hrs
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
variables:
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
ENV_RESULTS_PATH: "${ARTIFACTS_FOLDER_MPV2}/env_results.json"
INSTANCE_CREATED: "true"
TIME_TO_LIVE: ""
+ GCS_LOCKS_PATH: "content-locks/locks-xsiam-ga"
extends:
- .bucket-upload-rule
- .default-job-settings
script:
- - '[[ "create instances done" = $(cat "$ARTIFACTS_FOLDER/create_instances_done.txt" 2> /dev/null) ]] || (echo "this is a known issue with GitLab pipline please mention @esharf in your PR" && exit 1)'
- EXIT_CODE=0
- section_start "Download configuration"
- ./Tests/scripts/download_demisto_conf.sh >> $ARTIFACTS_FOLDER/logs/installations.log
+ - SECRET_CONF_PATH=$(cat secret_conf_path)
+ - python3 ./Tests/scripts/add_secrets_file_to_build.py -sa "$GSM_SERVICE_ACCOUNT" -sf "$SECRET_CONF_PATH" -u "$DEMISTO_USERNAME" -p "$DEMISTO_PASSWORD" -gpid "$GSM_PROJECT_ID"
- section_end "Download configuration"
-
- - section_start "Lock XSIAM Machine"
+ - section_start "Lock Machine"
- echo "Authenticating GCP"
- gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY" > auth.out 2>&1
- echo "Auth done successfully"
- - ./Tests/scripts/wait_in_line_for_xsiam_env.sh
- - source XSIAMEnvVariables
- - echo "XSIAM chosen_machine_id is $XSIAM_CHOSEN_MACHINE_ID"
- - section_end "Lock XSIAM Machine"
+ - ./Tests/scripts/wait_in_line_for_cloud_env.sh
+ - source CloudEnvVariables
+ - echo "CLOUD chosen_machine_id is $CLOUD_CHOSEN_MACHINE_ID"
+ - section_end "Lock Machine"
- - section_start "Clean XSIAM Machine"
- - ./Tests/scripts/uninstall_packs_and_reset_bucket_xsiam.sh
- - section_end "Clean XSIAM Machine"
+ - section_start "Clean Machine"
+ - ./Tests/scripts/uninstall_packs_and_reset_bucket_cloud.sh
+ - section_end "Clean Machine"
- section_start "Get Instance Variables"
- echo INSTANCE_ROLE="$INSTANCE_ROLE"
@@ -193,27 +225,32 @@ install-packs-in-server-master:
- ./Tests/Marketplace/install_packs.sh "$INSTANCE_ROLE" || EXIT_CODE=$?
- section_end "Install Packs"
+ - job-done
- exit "$EXIT_CODE"
after_script:
- echo "Job finished, removing lock file"
- gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY" > auth.out 2>&1
- - gsutil rm "gs://xsoar-ci-artifacts/content-locks-xsiam/*-lock-$CI_JOB_ID"
+ - gsutil rm "gs://xsoar-ci-artifacts/content-locks-xsiam/machines_locks/*-lock-$CI_JOB_ID"
- echo "Finished removing lock file"
#install-packs-in-xsiam-dev:
# extends: .install-mpv2-packs-on-xsiam-instances
# variables:
-# INSTANCE_ROLE: "XSIAM Master"
+# INSTANCE_ROLE: "XSIAM"
# GCS_QUEUE_FILE: "queue-master"
# TEST_MACHINES_LIST: "test-machines-master"
install-packs-in-xsiam-ga:
extends: .install-mpv2-packs-on-xsiam-instances
variables:
- INSTANCE_ROLE: "XSIAM 1.2"
+ INSTANCE_ROLE: "XSIAM"
GCS_QUEUE_FILE: "queue-ga"
TEST_MACHINES_LIST: "test-machines-ga"
+ GCS_SOURCE_BUCKET: "$GCS_PRODUCTION_V2_BUCKET"
+ GCS_MACHINES_BUCKET: "marketplace-v2-dist-dev/upload-flow/builds-xsiam"
+ CLOUD_SERVERS_FILE: "xsiam_servers_path"
+ CLOUD_API_KEYS: $XSIAM_API_KEYS
upload-packs-to-marketplace:
@@ -225,8 +262,10 @@ upload-packs-to-marketplace:
expire_in: 48 hrs
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
variables:
+ MARKETPLACE_VERSION: "xsoar"
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
ENV_RESULTS_PATH: "${ARTIFACTS_FOLDER_XSOAR}/env_results.json"
INSTANCE_ROLE: "Server Master"
@@ -274,8 +313,10 @@ upload-packs-to-marketplace:
echo "successfully activated google cloud service account"
gsutil -m cp -r $PACKS_SRC $ZIP_FOLDER
echo "successfully downloaded index.zip"
- gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
- section_end "Download packs from GCP"
+ - *upload_content_graph
+ - *upload_dependencies_file
+ - gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
- section_start "Zip Content Packs From GCS"
- python3 ./Tests/Marketplace/zip_packs.py -z $ZIP_FOLDER -a $ARTIFACTS_FOLDER -s $GCS_MARKET_KEY
@@ -289,19 +330,21 @@ upload-packs-to-marketplace:
echo "Skipping Premium Packs Validation"
fi
- section_end "Validate Premium Packs"
-
+ - job-done
upload-packs-to-marketplace-v2:
tags:
- gke
- needs: ["run-validations-upload-flow", "run-unittests-and-lint-upload-flow", "prepare-testing-bucket-mpv2-upload-flow", "install-packs-in-xsiam-ga"] # "install-packs-in-xsiam-dev"
+ needs: ["run-validations-upload-flow", "run-unittests-and-lint-upload-flow", "mpv2-prepare-testing-bucket-upload-flow", "install-packs-in-xsiam-ga"] # "install-packs-in-xsiam-dev"
stage: upload-to-marketplace
artifacts:
expire_in: 48 hrs
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
variables:
+ MARKETPLACE_VERSION: "marketplacev2"
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
INSTANCE_ROLE: "Server Master"
INSTANCE_CREATED: "true"
@@ -340,25 +383,28 @@ upload-packs-to-marketplace-v2:
echo "successfully activated google cloud service account"
gsutil -m cp -r $PACKS_SRC $ZIP_FOLDER
echo "successfully downloaded index.zip"
- gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
- section_end "Download packs from GCP"
-
+ - *upload_content_graph
+ - *upload_dependencies_file
+ - gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
- section_start "Zip Content Packs From GCS"
- python3 ./Tests/Marketplace/zip_packs.py -z $ZIP_FOLDER -a $ARTIFACTS_FOLDER -s $GCS_MARKET_KEY
- section_end "Zip Content Packs From GCS"
-
+ - job-done
upload-packs-to-xpanse-marketplace:
tags:
- gke
- needs: ["run-validations-upload-flow", "run-unittests-and-lint-upload-flow", "prepare-testing-bucket-xpanse-upload-flow"] # "install-packs-in-xpanse-dev"
+ needs: ["run-validations-upload-flow", "run-unittests-and-lint-upload-flow", "xpanse-prepare-testing-bucket-upload-flow"] # "install-packs-in-xpanse-dev"
stage: upload-to-marketplace
artifacts:
expire_in: 48 hrs
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
variables:
+ MARKETPLACE_VERSION: "xpanse"
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XPANSE}"
INSTANCE_ROLE: "Server Master"
INSTANCE_CREATED: "true"
@@ -397,20 +443,21 @@ upload-packs-to-xpanse-marketplace:
echo "successfully activated google cloud service account"
gsutil -m cp -r $PACKS_SRC $ZIP_FOLDER
echo "successfully downloaded index.zip"
- gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
- section_end "Download packs from GCP"
+ - *upload_content_graph
+ - *upload_dependencies_file
+ - gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
- section_start "Zip Content Packs From GCS"
- python3 ./Tests/Marketplace/zip_packs.py -z $ZIP_FOLDER -a $ARTIFACTS_FOLDER -s $GCS_MARKET_KEY
- section_end "Zip Content Packs From GCS"
-
-
+ - job-done
force-pack-upload:
tags:
- gke
stage: upload-to-marketplace
- needs: ["create-instances-upload-flow"]
+ needs: ["xsoar-prepare-testing-bucket-upload-flow"]
rules:
- if: '$FORCE_BUCKET_UPLOAD == "true"'
extends:
@@ -456,3 +503,56 @@ slack-notify-bucket-upload:
extends:
- .trigger-slack-notification
- .bucket-upload-rule-always
+
+
+upload-id-set-bucket:
+ tags:
+ - gke
+ stage: prepare-testing-bucket
+ extends:
+ - .bucket-upload-rule
+ - .default-job-settings
+ script:
+ # This is needed because we still use id_set.json in other repos
+ - !reference [.create-id-set-xsoar]
+ - gcloud auth activate-service-account --key-file="$GCS_MARKET_KEY"
+ - gsutil cp $ARTIFACTS_FOLDER/id_set.json "gs://$GCS_MARKET_BUCKET/content/id_set.json"
+ - job-done
+
+
+sync-buckets-between-projects:
+ # syncs buckets from oproxy-dev project to xdr-xsoar-content-dev-01 project
+ tags:
+ - gke
+ extends:
+ - .bucket-upload-rule
+ stage: upload-to-marketplace
+ needs: ["upload-packs-to-marketplace", "upload-packs-to-marketplace-v2", "upload-packs-to-xpanse-marketplace"]
+ when: always
+ script:
+ - |
+ if [[ -z "$GCS_XSOAR_CONTENT_DEV_KEY" ]] || [[ -z "$GCS_XSOAR_CONTENT_PROD_KEY" ]]; then
+ echo "GCS_XSOAR_CONTENT_DEV_KEY or GCS_XSOAR_CONTENT_PROD_KEY not set, cannot perform sync"
+ job-done
+ exit 1
+ else
+ gcloud auth activate-service-account --key-file="$GCS_XSOAR_CONTENT_DEV_KEY"
+
+ echo "Syncing gs://marketplace-xsoar-dev"
+ gsutil -m rsync -r gs://marketplace-dist gs://marketplace-xsoar-dev
+ echo "Syncing gs://marketplace-xsiam-dev"
+ gsutil -m rsync -r gs://marketplace-v2-dist gs://marketplace-xsiam-dev
+ echo "Syncing gs://marketplace-xpanse-dev"
+ gsutil -m rsync -r gs://xpanse-dist gs://marketplace-xpanse-dev
+
+ gcloud auth activate-service-account --key-file="$GCS_XSOAR_CONTENT_PROD_KEY"
+
+ echo "Syncing gs://marketplace-xsoar-prod-us"
+ gsutil -m rsync -r gs://marketplace-dist gs://marketplace-xsoar-prod-us
+ echo "Syncing gs://marketplace-xsiam-prod-us"
+ gsutil -m rsync -r gs://marketplace-v2-dist gs://marketplace-xsiam-prod-us
+ echo "Syncing gs://marketplace-xpanse-prod-us"
+ gsutil -m rsync -r gs://xpanse-dist gs://marketplace-xpanse-prod-us
+
+ echo "Bucket sync completed"
+ fi
diff --git a/.gitlab/ci/global.yml b/.gitlab/ci/global.yml
index bf69eda2dd9e..bacf3648926a 100644
--- a/.gitlab/ci/global.yml
+++ b/.gitlab/ci/global.yml
@@ -54,7 +54,9 @@
.download-demisto-conf:
- section_start "Download content-test-conf" --collapsed
- - ./Tests/scripts/download_demisto_conf.sh >> $ARTIFACTS_FOLDER/logs/download_demisto_conf.log
+ - ./Tests/scripts/download_demisto_conf.sh | tee --append $ARTIFACTS_FOLDER/logs/download_demisto_conf.log
+ - SECRET_CONF_PATH=$(cat secret_conf_path)
+ - python3 ./Tests/scripts/add_secrets_file_to_build.py -sa "$GSM_SERVICE_ACCOUNT" -sf "$SECRET_CONF_PATH" -u "$DEMISTO_USERNAME" -p "$DEMISTO_PASSWORD" -gpid "$GSM_PROJECT_ID"
- section_end "Download content-test-conf"
.open-ssh-tunnel:
@@ -140,9 +142,17 @@
- nvm use default
- echo "Installing Node Modules" | tee --append $ARTIFACTS_FOLDER/logs/installations.log
- npm ci --cache .npm --prefer-offline | tee --append $ARTIFACTS_FOLDER/logs/installations.log
+ - npm list --json
- npm link jsdoc-to-markdown@5.0.3 | tee --append $ARTIFACTS_FOLDER/logs/installations.log # disable-secrets-detection
- section_end "Installing node modules"
+.get_last_upload_commit: &get_last_upload_commit
+ - section_start "Getting last bucket upload commit"
+ - gcloud auth activate-service-account --key-file="$GCS_MARKET_KEY" > auth.out 2>&1
+ - gsutil cp "gs://$GCS_MARKET_BUCKET/content/packs/index.json" "$ARTIFACTS_FOLDER/previous_index.json"
+ - export LAST_UPLOAD_COMMIT=$(cat $ARTIFACTS_FOLDER/previous_index.json | jq -r ".\"commit\"")
+ - section_end "Getting last bucket upload commit"
+
.default-before-script:
before_script:
- *setup-network-certs
@@ -155,9 +165,12 @@
- *install_node_modules
- *install_venv
- *get_contribution_pack
+ - *get_last_upload_commit
- *install_ssh_keys
- section_start "Build Parameters"
- - set | grep -E "^NIGHTLY=|^INSTANCE_TESTS=|^SERVER_BRANCH_NAME=|^ARTIFACT_BUILD_NUM=|^DEMISTO_SDK_NIGHTLY=|^TIME_TO_LIVE=|^CONTRIB_BRANCH=|^FORCE_PACK_UPLOAD=|^PACKS_TO_UPLOAD=|^BUCKET_UPLOAD=|^STORAGE_BASE_PATH=|^OVERRIDE_ALL_PACKS=|^GCS_MARKET_BUCKET=|^GCS_MARKET_V2_BUCKET=|^GCS_MARKET_XPANSE_BUCKET=|^SLACK_CHANNEL=|^NVM_DIR=|^NODE_VERSION=|^PATH=|^ARTIFACTS_FOLDER=|^ENV_RESULTS_PATH="
+ - set | grep -E "^NIGHTLY=|^INSTANCE_TESTS=|^SERVER_BRANCH_NAME=|^ARTIFACT_BUILD_NUM=|^DEMISTO_SDK_NIGHTLY=|^TIME_TO_LIVE=|^CONTRIB_BRANCH=|^FORCE_PACK_UPLOAD=|^PACKS_TO_UPLOAD=|^BUCKET_UPLOAD=|^STORAGE_BASE_PATH=|^OVERRIDE_ALL_PACKS=|^GCS_MARKET_BUCKET=|^GCS_MARKET_V2_BUCKET=|^GCS_MARKET_XPANSE_BUCKET=|^SLACK_CHANNEL=|^NVM_DIR=|^NODE_VERSION=|^PATH=|^ARTIFACTS_FOLDER=|^ENV_RESULTS_PATH=|^LAST_UPLOAD_COMMIT="
+ - neo4j-admin set-initial-password test
+ - neo4j start
- python --version
- python2 --version
- python3 --version
@@ -197,6 +210,7 @@
paths:
- /builds/xsoar/content/unit-tests
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
services:
- name: docker.art.code.pan.run/build-tools--image-dind:20.10.12-dind
@@ -239,19 +253,16 @@
mkdir ./unit-tests
if [ -n "$SHOULD_LINT_ALL" ]; then
echo -e "----------\nLinting all because:\n${SHOULD_LINT_ALL}\n----------"
- demisto-sdk lint -vvv -p 10 -a --test-xml ./unit-tests --log-path $ARTIFACTS_FOLDER --failure-report $ARTIFACTS_FOLDER --coverage-report $ARTIFACTS_FOLDER/coverage_report -dt 120 --time-measurements-dir $ARTIFACTS_FOLDER
+ demisto-sdk lint -vvv -p 10 -a --test-xml ./unit-tests --log-path $ARTIFACTS_FOLDER --failure-report $ARTIFACTS_FOLDER --coverage-report $ARTIFACTS_FOLDER/coverage_report -dt 120 --time-measurements-dir $ARTIFACTS_FOLDER --docker-image all
else
if [[ -n $BUCKET_UPLOAD ]]; then
- gcloud auth activate-service-account --key-file="$GCS_MARKET_KEY" > auth.out 2>&1
- gsutil cp "gs://$GCS_MARKET_BUCKET/content/packs/index.json" "$ARTIFACTS_FOLDER/previous_index.json"
- export COMMIT_HASH_COMPARE_TO=$(cat $ARTIFACTS_FOLDER/previous_index.json | jq -r ".\"commit\"")
- demisto-sdk lint -vvv -p 8 -g --no-mypy --prev-ver $COMMIT_HASH_COMPARE_TO -v --test-xml ./unit-tests --log-path $ARTIFACTS_FOLDER --failure-report $ARTIFACTS_FOLDER --coverage-report $ARTIFACTS_FOLDER/coverage_report -idp $ARTIFACTS_FOLDER/id_set.json -cdam
+ demisto-sdk lint -vvv -p 8 -g --no-mypy --prev-ver $LAST_UPLOAD_COMMIT -v --test-xml ./unit-tests --log-path $ARTIFACTS_FOLDER --failure-report $ARTIFACTS_FOLDER --coverage-report $ARTIFACTS_FOLDER/coverage_report -cdam
else
echo "Skipping, Should run on circleCi."
fi
fi
if [[ -f $ARTIFACTS_FOLDER/coverage_report/.coverage ]]; then
- if [[ "$CI_PIPELINE_SOURCE" == "schedule" || -n "${NIGHTLY}" || -n "${BUCKET_UPLOAD}" || -n "${DEMISTO_SDK_NIGHTLY}" ]]; then
+ if [[ "$CI_PIPELINE_SOURCE" == "schedule" || -n "$SHOULD_LINT_ALL" || -n "${NIGHTLY}" || -n "${BUCKET_UPLOAD}" || -n "${DEMISTO_SDK_NIGHTLY}" ]]; then
demisto-sdk coverage-analyze -i $ARTIFACTS_FOLDER/coverage_report/.coverage --report-dir $ARTIFACTS_FOLDER/coverage_report --report-type all --allowed-coverage-degradation-percentage 100
if [[ -n "${NIGHTLY}" && "$CI_COMMIT_BRANCH" == "master" ]]; then
python3 Utils/upload_code_coverage_report.py --service_account $GCS_MARKET_KEY --source_file_name $ARTIFACTS_FOLDER/coverage_report/coverage.json --minimal_file_name $ARTIFACTS_FOLDER/coverage_report/coverage-min.json
@@ -262,6 +273,7 @@
fi
fi
- section_end "Run Unit Testing and Lint"
+ - job-done
.run-validations:
stage: unittests-and-validations
@@ -274,32 +286,12 @@
expire_in: 30 days
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
script:
- section_start "Look For Secrets"
- demisto-sdk secrets --post-commit --ignore-entropy
- section_end "Look For Secrets"
- - section_start "Create id set"
- - *create-id-set
- - section_end "Create id set"
- - section_start "Merge public and private id sets"
- - |
- if [[ $CI_COMMIT_BRANCH =~ pull/[0-9]+ ]]; then
- echo "Skipping, Should not run on contributor's branch."
- else
- gcloud auth activate-service-account --key-file="$GCS_MARKET_KEY" >> $ARTIFACTS_FOLDER/logs/auth.out
- echo "successfully activated google cloud service account"
-
- echo "Download private ID set"
- gsutil cp "gs://marketplace-dist/content/private_id_set.json" $ARTIFACTS_FOLDER/unified_id_set.json
- echo "successfully downloaded private ID set"
- gcloud auth revoke $GCS_ARTIFACTS_ACCOUNT_NAME
-
- echo "Merge public and private ID sets"
- demisto-sdk merge-id-sets -i1 ./Tests/id_set.json -i2 $ARTIFACTS_FOLDER/unified_id_set.json -o $ARTIFACTS_FOLDER/unified_id_set.json
- echo "successfully merged public and private ID sets"
- fi
- - section_end "Merge public and private id sets"
- section_start "Copy Tests To Artifact Folder"
- cp "./Tests/conf.json" "$ARTIFACTS_FOLDER/conf.json"
- section_end "Copy Tests To Artifact Folder"
@@ -327,6 +319,7 @@
echo "No such branch in content-test-conf: $UNDERSCORE_CI_BRANCH"
else
echo "ERROR: Found a branch with the same name in contest-test-conf conf.json - $UNDERSCORE_CI_BRANCH.\n Merge it in order to merge the current branch into content repo."
+ job-done
exit 1
fi
}
@@ -348,3 +341,9 @@
- python3 Tests/Marketplace/validate_landing_page_sections.py -i $UNZIP_PATH
- section_end "Validate landingPageSections.json"
+ - job-done
+
+.jobs-done-check:
+ stage: are-jobs-realy-done
+ script:
+ - python3 Tests/scripts/check_jobs_done.py --job-done-files $PIPELINE_JOBS_FOLDER
diff --git a/.gitlab/ci/instance-test.yml b/.gitlab/ci/instance-test.yml
index 4242940277a4..82dbc7d1b7c5 100644
--- a/.gitlab/ci/instance-test.yml
+++ b/.gitlab/ci/instance-test.yml
@@ -8,10 +8,10 @@ test_instances:
extends:
- .default-job-settings
- .instance-test-rule
- needs: [ "create-instances" ]
+ needs: [ "xsoar-prepare-testing-bucket" ]
stage: run-instances
dependencies:
- - create-instances
+ - xsoar-prepare-testing-bucket
script:
- EXIT_CODE=0
- !reference [.download-demisto-conf]
diff --git a/.gitlab/ci/on-push.yml b/.gitlab/ci/on-push.yml
index 1a6c33f55167..a97980e70085 100644
--- a/.gitlab/ci/on-push.yml
+++ b/.gitlab/ci/on-push.yml
@@ -1,8 +1,14 @@
+# This rule prevents the build from running on docker-update branches (for non-nightly packs)
+.filter-non-nightly-docker-updates-rule:
+ rules:
+ - if: '$CI_COMMIT_BRANCH =~ /^demisto\// && $CI_COMMIT_BRANCH !~ /^demisto\/.*-nightly$/'
+ when: never
+
.push-rule:
rules:
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
- if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
-
trigger-private-build:
tags:
- gke
@@ -11,18 +17,25 @@ trigger-private-build:
extends:
- .default-job-settings
rules:
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
- if: '$CI_COMMIT_BRANCH =~ /pull\/[0-9]+/'
when: never
- if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- if: '$NIGHTLY'
script:
- echo "====Trigger Private Build===="
- - python3 Utils/trigger_private_build.py --github-token $GITHUB_TOKEN
+ - |
+ if [ 'true' = $(./Tests/scripts/check_if_branch_exist.sh -t $GITHUB_TOKEN --repo demisto/content-private -b $CI_COMMIT_BRANCH) ]; then
+ PRIVATE_BRANCH_NAME=$CI_COMMIT_BRANCH
+ else
+ PRIVATE_BRANCH_NAME='master'
+ fi
+ - python3 Utils/trigger_private_build.py --github-token $GITHUB_TOKEN --private-branch-name $PRIVATE_BRANCH_NAME
- sleep 60
- python3 Utils/get_private_build_status.py --github-token $GITHUB_TOKEN
+ - job-done
timeout: 2 hours
-
.create-release-notes-and-common-docs:
- section_start "Create Release Notes and Common Server Documentation" --collapsed
- echo "Creating Release Notes and Content Descriptor"
@@ -31,6 +44,24 @@ trigger-private-build:
- ./Documentation/commonServerDocs.sh
- section_end "Create Release Notes and Common Server Documentation"
+stop-running-pipelines:
+ tags:
+ - gke
+ stage: unittests-and-validations
+ needs: []
+ extends:
+ - .default-job-settings
+ variables:
+ master_branch_name: master
+ rules:
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push)$/ && $CI_COMMIT_BRANCH != $master_branch_name'
+ script:
+    - section_start "Stop running pipelines on current branch"
+ - Utils/gitlab/stop_running_pipelines.sh $CI_COMMIT_BRANCH $CI_PIPELINE_ID
+    - section_end "Stop running pipelines on current branch"
+
+
# runs in circle for the on-push flow (because we need to run it there for contributors anyways)
run-unittests-and-lint:
extends:
@@ -57,7 +88,7 @@ validate-content-conf:
extends:
- .default-job-settings
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push)$/'
script:
- section_start "Validate content-test-conf Branch Merged"
- |
@@ -70,189 +101,127 @@ validate-content-conf:
if [ "$RESP" != "[]" ]; then
echo "Found a branch with the same name in contest-test-conf- $UNDERSCORE_CI_BRANCH."
echo "Merge it in order to merge the current branch into content repo."
+ job-done
exit 1
fi
echo "No branch with the name *$UNDERSCORE_CI_BRANCH* were found in contest-test-conf repo."
fi
- section_end "Validate content-tesgt-conf Branch Merged"
+ - job-done
-create-instances:
+.generic-prepare-testing-bucket:
tags:
- gke
extends:
- .default-job-settings
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- - if: '$NIGHTLY'
- - if: '$INSTANCE_TESTS'
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: "$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/"
+ - if: "$NIGHTLY"
cache:
policy: pull-push
variables:
- ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
KUBERNETES_CPU_REQUEST: 2000m
needs: []
- stage: create-instances
+ stage: prepare-testing-bucket
script:
- !reference [.download-demisto-conf]
- - !reference [.create-id-set-xsoar]
- !reference [.create-release-notes-and-common-docs]
+ - section_start "Create or update content graph" --collapsed
+
+ - echo "Updating the content graph"
+ - mkdir $ARTIFACTS_FOLDER/content_graph
+ - demisto-sdk create-content-graph --marketplace $MARKETPLACE_VERSION -o $ARTIFACTS_FOLDER/content_graph
+ - echo "Successfully updated content graph"
+
+ - section_end "Create or update content graph"
+
- section_start "Create Content Artifacts and Update Conf" --collapsed
- - demisto-sdk create-content-artifacts -a $ARTIFACTS_FOLDER --cpus 8 --content_version $CONTENT_VERSION --marketplace "xsoar" -fbi -idp ./Tests/id_set.json >> $ARTIFACTS_FOLDER/logs/create_content_artifacts.log
+ - export DEMISTO_SDK_MARKETPLACE=$MARKETPLACE_VERSION # This is done because the demisto-sdk uses this environment variable.
+ - |
+ if [[ $MARKETPLACE_VERSION == "xsoar" ]];
+ then
+ echo "Starting to create artifacts with zip for XSOAR."
+ python Tests/scripts/create_artifacts_graph/create_artifacts.py --marketplace "xsoar" --artifacts-output $ARTIFACTS_FOLDER/content_packs --dependencies-output $ARTIFACTS_FOLDER/packs_dependencies.json --bucket-upload "$BUCKET_UPLOAD"
+ else
+ echo "Starting to create artifacts without zip."
+ python Tests/scripts/create_artifacts_graph/create_artifacts.py --marketplace "$MARKETPLACE_VERSION" --artifacts-output $ARTIFACTS_FOLDER/content_packs --dependencies-output $ARTIFACTS_FOLDER/packs_dependencies.json --no-zip --bucket-upload "$BUCKET_UPLOAD"
+ fi
+
- gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY"
- - successful_feature_branch_build=$(gsutil ls "gs://xsoar-ci-artifacts/content/$FEATURE_BRANCH_NAME/*" | tail -n 1 | grep -o -E "content/$FEATURE_BRANCH_NAME/[0-9]*")
- - echo $successful_feature_branch_build
- - python3 Utils/merge_content_new_zip.py -f $FEATURE_BRANCH_NAME -b $successful_feature_branch_build
- - zip -j $ARTIFACTS_FOLDER/uploadable_packs.zip $ARTIFACTS_FOLDER/uploadable_packs/* >> $ARTIFACTS_FOLDER/logs/zipping_uploadable_packs.log || echo "failed to zip the uploadable packs, ignoring the failure"
- - rm -rf $ARTIFACTS_FOLDER/uploadable_packs
+
- cp "./Tests/conf.json" "$ARTIFACTS_FOLDER/conf.json"
- section_end "Create Content Artifacts and Update Conf"
- - section_start "Collect Tests"
+ - section_start "Replace Cortex XSOAR" --collapsed
+ - |
+ if [[ $MARKETPLACE_VERSION != "xsoar" ]];
+ then
+ echo "Replace Cortex XSOAR for non-xsoar build."
+ pushd "$ARTIFACTS_FOLDER"
+ find content_packs -type f -not \( -path "*/ReleaseNotes/*" \) -exec sed -i -e 's/Cortex XSOAR/'"$PRODUCTNAME"'/gI' {} \;
+ pushd content_packs; zip -r ../content_packs.zip * 1> /dev/null; popd
+ rm -rf content_packs
+ popd
+ fi
+ - section_end "Replace Cortex XSOAR"
+ - section_start "Collect Tests" --collapsed
- |
if [ -n "${INSTANCE_TESTS}" ]; then
echo "Skipping - not running in INSTANCE_TESTS build"
else
[ -n "${NIGHTLY}" ] && IS_NIGHTLY=true || IS_NIGHTLY=false
- python3 ./Tests/scripts/collect_tests/collect_tests.py -n $IS_NIGHTLY --marketplace "xsoar" --service_account $GCS_MARKET_KEY
+ python3 ./Tests/scripts/collect_tests/collect_tests.py -n $IS_NIGHTLY --marketplace "$MARKETPLACE_VERSION" --service_account $GCS_MARKET_KEY --graph true --override_all_packs $OVERRIDE_ALL_PACKS
fi
- section_end "Collect Tests"
-
- - section_start "Calculate Packs Dependencies" --collapsed
- - demisto-sdk find-dependencies -idp ./Tests/id_set.json --output-path $ARTIFACTS_FOLDER/packs_dependencies.json --all-packs-dependencies
- - section_end "Calculate Packs Dependencies"
- section_start "Prepare Content Packs for Testing"
- - ./Tests/scripts/prepare_content_packs_for_testing.sh "$GCS_MARKET_BUCKET" "$STORAGE_BASE_PATH"
+ - ./Tests/scripts/prepare_content_packs_for_testing.sh "$MARKETPLACE_BUCKET" "$STORAGE_BASE_PATH" "$MARKETPLACE_VERSION"
- section_end "Prepare Content Packs for Testing"
- - section_start "Create Instances"
+ - section_start "Create Instances for XSOAR"
- |
- [ -n "${TIME_TO_LIVE}" ] && TTL=${TIME_TO_LIVE} || TTL=300
- python3 ./Tests/scripts/awsinstancetool/aws_instance_tool.py -envType "$IFRA_ENV_TYPE" -timetolive $TTL -outfile "$ARTIFACTS_FOLDER/env_results.json"
- - section_end "Create Instances"
+ if [[ ${MARKETPLACE_VERSION} = "xsoar" ]]; then
+ echo "Creating Instances, only for XSOAR."
+ [ -n "${TIME_TO_LIVE}" ] && TTL=${TIME_TO_LIVE} || TTL=300
+ python3 ./Tests/scripts/awsinstancetool/aws_instance_tool.py -envType "$IFRA_ENV_TYPE" -timetolive $TTL -outfile "$ARTIFACTS_FOLDER/env_results.json"
+ fi
+ - section_end "Create Instances for XSOAR"
- section_start "Upload Artifacts to GCP" --collapsed
- ./Tests/scripts/upload_artifacts.sh
- section_end "Upload Artifacts to GCP"
- echo "create instances done" > "$ARTIFACTS_FOLDER/create_instances_done.txt"
+ - job-done
-prepare-testing-bucket-mpv2:
- tags:
- - gke
- extends:
- - .default-job-settings
- rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- - if: '$NIGHTLY'
- cache:
- policy: pull-push
- variables:
- ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
- KUBERNETES_CPU_REQUEST: 2000m
- needs: []
- stage: create-instances
- script:
- - !reference [.download-demisto-conf]
- - !reference [.create-id-set-mp-v2]
- - !reference [.create-release-notes-and-common-docs]
-
- - section_start "Create Content Artifacts and Update Conf" --collapsed
- - demisto-sdk create-content-artifacts -a $ARTIFACTS_FOLDER --cpus 8 --content_version $CONTENT_VERSION --marketplace "marketplacev2" --no-zip -fbi -idp ./Tests/id_set.json >> $ARTIFACTS_FOLDER/logs/create_content_artifacts.log
- - zip -j $ARTIFACTS_FOLDER/uploadable_packs_mpv2.zip $ARTIFACTS_FOLDER/uploadable_packs/* >> $ARTIFACTS_FOLDER/logs/zipping_uploadable_packs.log || echo "failed to zip the uploadable packs, ignoring the failure"
- - rm -rf $ARTIFACTS_FOLDER/uploadable_packs
- - cp "./Tests/conf.json" "$ARTIFACTS_FOLDER/conf.json"
- - section_end "Create Content Artifacts and Update Conf"
-
- - section_start "Calculate Packs Dependencies" --collapsed
- - demisto-sdk find-dependencies -idp Tests/id_set.json --all-packs-dependencies -o $ARTIFACTS_FOLDER/packs_dependencies.json
- - section_end "Calculate Packs Dependencies"
-
- - section_start "Replace Cortex XSOAR" --collapsed
- - pushd "$ARTIFACTS_FOLDER"
- - find content_packs -type f -not \( -path "*/ReleaseNotes/*" \) -exec sed -i -e 's/Cortex XSOAR/'"$PRODUCT_NAME"'/gI' {} \;
- - pushd content_packs; zip -r ../content_packs.zip *; popd
- - pushd all_content; zip -r ../all_content.zip *; popd
- - pushd content_new; zip -r ../content_new.zip *; popd
- - pushd content_test; zip -r ../content_test.zip *; popd
- - rm -rf content_packs all_content content_new content_test
- - popd
- - section_end "Replace Cortex XSOAR"
- - section_start "Collect Tests and Content Packs"
- - |
- [ -n "${NIGHTLY}" ] && IS_NIGHTLY=true || IS_NIGHTLY=false
- python3 ./Tests/scripts/collect_tests/collect_tests.py -n $IS_NIGHTLY --marketplace "marketplacev2" --service_account $GCS_MARKET_KEY
- - section_end "Collect Tests and Content Packs"
-
- - section_start "Prepare Content Packs for Testing"
- - ./Tests/scripts/prepare_content_packs_for_testing.sh "$GCS_MARKET_V2_BUCKET" "$STORAGE_BASE_PATH" "marketplacev2"
- - section_end "Prepare Content Packs for Testing"
-
- - section_start "Upload Artifacts to GCP" --collapsed
- - ./Tests/scripts/upload_artifacts.sh
- - section_end "Upload Artifacts to GCP"
- - echo "create instances done" > "$ARTIFACTS_FOLDER/create_instances_done.txt"
-
-prepare-testing-bucket-xpanse:
- tags:
- - gke
+xsoar-prepare-testing-bucket:
+ variables:
+ ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
+ MARKETPLACE_VERSION: "xsoar"
+ MARKETPLACE_BUCKET: "$GCS_MARKET_BUCKET"
extends:
- - .default-job-settings
- rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- - if: '$NIGHTLY'
- cache:
- policy: pull-push
+ - .generic-prepare-testing-bucket
+
+xpanse-prepare-testing-bucket:
variables:
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XPANSE}"
- KUBERNETES_CPU_REQUEST: 2000m
- needs: []
- stage: create-instances
- script:
- - !reference [.download-demisto-conf]
- - !reference [.create-id-set-xpanse]
- - !reference [.create-release-notes-and-common-docs]
-
- - section_start "Create Content Artifacts and Update Conf" --collapsed
- - demisto-sdk create-content-artifacts -a $ARTIFACTS_FOLDER --cpus 8 --content_version $CONTENT_VERSION --marketplace "xpanse" --no-zip -fbi -idp ./Tests/id_set.json >> $ARTIFACTS_FOLDER/logs/create_content_artifacts.log
- - zip -j $ARTIFACTS_FOLDER/uploadable_packs_xpanse.zip $ARTIFACTS_FOLDER/uploadable_packs/* >> $ARTIFACTS_FOLDER/logs/zipping_uploadable_packs.log || echo "failed to zip the uploadable packs, ignoring the failure"
- - rm -rf $ARTIFACTS_FOLDER/uploadable_packs
- - cp "./Tests/conf.json" "$ARTIFACTS_FOLDER/conf.json"
- - section_end "Create Content Artifacts and Update Conf"
-
- - section_start "Calculate Packs Dependencies" --collapsed
- - demisto-sdk find-dependencies -idp Tests/id_set.json --all-packs-dependencies -o $ARTIFACTS_FOLDER/packs_dependencies.json
- - section_end "Calculate Packs Dependencies"
-
- - section_start "Replace Cortex XSOAR" --collapsed
- - pushd "$ARTIFACTS_FOLDER"
- - export PRODUCT_NAME_XPANSE="Cortex XPANSE"
- - find content_packs -type f -not \( -path "*/ReleaseNotes/*" \) -exec sed -i -e 's/Cortex XSOAR/'"$PRODUCT_NAME_XPANSE"'/gI' {} \;
- - pushd content_packs; zip -r ../content_packs.zip *; popd
- - pushd all_content; zip -r ../all_content.zip *; popd
- - pushd content_new; zip -r ../content_new.zip *; popd
- - pushd content_test; zip -r ../content_test.zip *; popd
- - rm -rf content_packs all_content content_new content_test
- - popd
- - section_end "Replace Cortex XSOAR"
-
- - section_start "Collect Tests and Content Packs"
- - |
- [ -n "${NIGHTLY}" ] && IS_NIGHTLY=true || IS_NIGHTLY=false
- python3 ./Tests/scripts/collect_tests/collect_tests.py -n $IS_NIGHTLY --marketplace "xpanse" --service_account $GCS_MARKET_KEY
- - section_end "Collect Tests and Content Packs"
-
- - section_start "Prepare Content Packs for Testing"
- - ./Tests/scripts/prepare_content_packs_for_testing.sh "$GCS_MARKET_XPANSE_BUCKET" "$STORAGE_BASE_PATH" "xpanse"
- - section_end "Prepare Content Packs for Testing"
-
- - section_start "Upload Artifacts to GCP" --collapsed
- - ./Tests/scripts/upload_artifacts.sh
- - section_end "Upload Artifacts to GCP"
- - echo "create instances done" > "$ARTIFACTS_FOLDER/create_instances_done.txt"
+ MARKETPLACE_VERSION: "xpanse"
+ PRODUCTNAME: "Cortex XPANSE"
+ MARKETPLACE_BUCKET: "$GCS_MARKET_XPANSE_BUCKET"
+ extends:
+ - .generic-prepare-testing-bucket
+mpv2-prepare-testing-bucket:
+ variables:
+ ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
+ MARKETPLACE_VERSION: "marketplacev2"
+ MARKETPLACE_BUCKET: "$GCS_MARKET_V2_BUCKET"
+ PRODUCTNAME: "Cortex XSIAM"
+ extends:
+ - .generic-prepare-testing-bucket
.test_content_on_xsoar_server_instances_base:
tags:
@@ -264,17 +233,19 @@ prepare-testing-bucket-xpanse:
ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
ENV_RESULTS_PATH: "${ARTIFACTS_FOLDER_XSOAR}/env_results.json"
SERVER_TYPE: "XSOAR"
- needs: ["create-instances"]
stage: run-instances
+ needs:
+ - job: xsoar-prepare-testing-bucket
+ optional: true
script:
- - '[[ "create instances done" = $(cat "$ARTIFACTS_FOLDER/create_instances_done.txt" 2> /dev/null) ]] || (echo "this is a known issue with GitLab pipline please mention @esharf in your PR" && exit 1)'
- EXIT_CODE=0
- !reference [.download-demisto-conf]
- export TEMP=$(cat $ARTIFACTS_FOLDER/filter_envs.json | jq ".\"$INSTANCE_ROLE\"")
-# If instance was not created
+ # If instance was not created
- |
if [[ "$TEMP" != "true" && -z "${NIGHTLY}" ]]; then
echo "Instance with role $INSTANCE_ROLE was not created"
+ job-done
exit 0
fi
- !reference [.open-ssh-tunnel]
@@ -286,8 +257,8 @@ prepare-testing-bucket-xpanse:
- ./Tests/scripts/install_content_and_test_integrations.sh "$INSTANCE_ROLE" "$SERVER_TYPE"|| EXIT_CODE=$?
- cp -f $ARTIFACTS_FOLDER/conf.json Tests/conf.json
- - echo Going to sleep for 60 seconds to allow server finish indexing
- - sleep 60
+ - echo Going to sleep for 15 minutes to allow server finish indexing
+ - sleep 900
- ./Tests/scripts/run_tests.sh "$INSTANCE_ROLE" || EXIT_CODE=$?
- |
@@ -308,40 +279,43 @@ prepare-testing-bucket-xpanse:
TIME_TO_LIVE=300
fi
python3 ./Tests/scripts/destroy_instances.py $ARTIFACTS_FOLDER $ARTIFACTS_FOLDER/env_results.json "$INSTANCE_ROLE" "$TIME_TO_LIVE" || EXIT_CODE=$?
+ - job-done
- exit $EXIT_CODE
-
xsoar_server_6_5:
extends: .test_content_on_xsoar_server_instances_base
- # No need to trigger in case of release branch
+ # No need to trigger in case of release branch or docker update branches (non-nightly packs)
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
variables:
INSTANCE_ROLE: "Server 6.5"
-
xsoar_server_6_6:
extends: .test_content_on_xsoar_server_instances_base
- # No need to trigger in case of release branch
+ # No need to trigger in case of release branch or docker update branches (non-nightly packs)
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
variables:
INSTANCE_ROLE: "Server 6.6"
xsoar_server_6_8:
extends: .test_content_on_xsoar_server_instances_base
- # No need to trigger in case of release branch
+ # No need to trigger in case of release branch or docker update branches (non-nightly packs)
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
variables:
INSTANCE_ROLE: "Server 6.8"
xsoar_server_master:
extends:
- .test_content_on_xsoar_server_instances_base
- # No need to trigger in case of release branch
+ # No need to trigger in case of release branch or docker update branches (non-nightly packs)
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- if: '$NIGHTLY'
when: always
variables:
@@ -358,6 +332,46 @@ fan-in-nightly:
script:
- echo "fan in"
+jobs-done-check-nightly:
+ extends:
+ - .jobs-done-check
+ needs: ['run-unittests-and-lint', 'run-validations', 'trigger-private-build', 'mpv2-prepare-testing-bucket', 'xpanse-prepare-testing-bucket', 'xsoar-prepare-testing-bucket', 'xsiam_server_ga', 'xsoar_server_master']
+ tags:
+ - gke
+ rules:
+ - if: '$NIGHTLY'
+ when: always
+
+jobs-done-check-on-push:
+ extends:
+ - .push-rule
+ - .jobs-done-check
+ needs:
+ - job: run-unittests-and-lint
+ optional: true
+ - job: trigger-private-build
+ optional: true
+ - job: validate-content-conf
+ optional: true
+ - job: mpv2-prepare-testing-bucket
+ optional: true
+ - job: xpanse-prepare-testing-bucket
+ optional: true
+ - job: xsoar-prepare-testing-bucket
+ optional: true
+ - job: xsiam_server_ga
+ optional: true
+ - job: xsoar_server_6_5
+ optional: true
+ - job: xsoar_server_6_6
+ optional: true
+ - job: xsoar_server_6_8
+ optional: true
+ - job: xsoar_server_master
+ optional: true
+ tags:
+ - gke
+
slack-notify-nightly-build:
extends:
@@ -375,7 +389,7 @@ slack-notify-nightly-build:
SLACK_JOB: 'true'
-.test_content_on_xsiam_server_instances_base:
+.test_content_on_cloud_server_instances_base:
tags:
- gke
- us-west1
@@ -383,42 +397,44 @@ slack-notify-nightly-build:
- .default-job-settings
- .push-rule
variables:
- ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
- GCS_MARKET_BUCKET: "${GCS_MARKET_V2_BUCKET}"
- ENV_RESULTS_PATH: "${ARTIFACTS_FOLDER_MPV2}/env_results.json"
- SERVER_TYPE: "XSIAM"
- needs: ["prepare-testing-bucket-mpv2"]
+ EXTRACT_PRIVATE_TESTDATA: "true"
stage: run-instances
script:
- - '[[ "create instances done" = $(cat "$ARTIFACTS_FOLDER/create_instances_done.txt" 2> /dev/null) ]] || (echo "this is a known issue with GitLab pipline please mention @esharf in your PR" && exit 1)'
- EXIT_CODE=0
- !reference [.download-demisto-conf]
-
- section_start "Are there tests to run?" --collapsed
- |
if ! [[ -s $ARTIFACTS_FOLDER/content_packs_to_install.txt || -s $ARTIFACTS_FOLDER/filter_file.txt ]]; then
# The files are empty.
- echo "Not running XSIAM instance flow, no tests to run were found."
+ echo "Not running the instance flow, no tests to run were found."
+ job-done
exit $EXIT_CODE
fi
- section_end "Are there tests to run?"
- - section_start "Lock XSIAM Machine" --collapsed
+ - section_start "Lock Machine" --collapsed
- cp "$ARTIFACTS_FOLDER/filter_file.txt" "./artifacts/filter_file.txt"
- echo "Authenticating GCP"
- gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY" > auth.out 2>&1
- echo "Auth done successfully"
- - ./Tests/scripts/wait_in_line_for_xsiam_env.sh
- - source XSIAMEnvVariables
- - echo "XSIAM chosen_machine_id is $XSIAM_CHOSEN_MACHINE_ID"
- - section_end "Lock XSIAM Machine"
+ - ./Tests/scripts/wait_in_line_for_cloud_env.sh
+ - source CloudEnvVariables
+ - echo "CLOUD chosen_machine_id is $CLOUD_CHOSEN_MACHINE_ID"
+ - section_end "Lock Machine"
- - section_start "Clean XSIAM Machine" --collapsed
- - ./Tests/scripts/uninstall_packs_and_reset_bucket_xsiam.sh
- - section_end "Clean XSIAM Machine"
+ - section_start "Clean Machine" --collapsed
+ - ./Tests/scripts/uninstall_packs_and_reset_bucket_cloud.sh
+ - section_end "Clean Machine"
- section_start "Install Packs and run Test-Module"
- ./Tests/scripts/install_content_and_test_integrations.sh "$INSTANCE_ROLE" "$SERVER_TYPE"|| EXIT_CODE=$?
+ - |
+ if [[ $EXIT_CODE != 0 ]]
+ then
+ echo "Failed to install packs. Exiting"
+ job-done
+ exit $EXIT_CODE
+ fi
- section_end "Install Packs and run Test-Module"
- section_start "Run Tests"
@@ -434,28 +450,37 @@ slack-notify-nightly-build:
fi
- section_end "Run Tests"
- - section_start "XSIAM Machine information"
- - ./Tests/scripts/print_xsiam_machine_details.sh
- - section_end "XSIAM Machine information"
+ - section_start "Test Modeling Rules"
+ - |
+ if [[ -s "$ARTIFACTS_FOLDER/modeling_rules_to_test.txt" ]]; then
+ ./Tests/scripts/test_modeling_rules.sh || EXIT_CODE=$?
+ else
+ echo "No modeling rules were marked for testing during test collection"
+ fi
+ - section_end "Test Modeling Rules"
+
+ - section_start "Cloud Machine information"
+ - ./Tests/scripts/print_cloud_machine_details.sh
+ - section_end "Cloud Machine information"
- section_start "After script" --collapsed
- |
- if ! [ -z "$XSIAM_CHOSEN_MACHINE_ID" ]
+ if ! [ -z "$CLOUD_CHOSEN_MACHINE_ID" ]
then
echo "Job finished, removing lock file"
gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY" > auth.out 2>&1
- gsutil rm "gs://xsoar-ci-artifacts/content-locks-xsiam/*-lock-$CI_JOB_ID"
+ gsutil rm "gs://xsoar-ci-artifacts/$GCS_LOCKS_PATH/machines_locks/*-lock-$CI_JOB_ID"
echo "Finished removing lock file"
fi
- section_end "After script"
+ - job-done
- exit $EXIT_CODE
-
#xsiam_server_dev:
# extends:
# - .test_content_on_xsiam_server_instances_base
# rules:
-# - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+# - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
# - if: '$NIGHTLY'
# when: always
# variables:
@@ -466,12 +491,96 @@ slack-notify-nightly-build:
xsiam_server_ga:
extends:
- - .test_content_on_xsiam_server_instances_base
+ - .test_content_on_cloud_server_instances_base
rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/ && $CI_COMMIT_BRANCH !~ /^[0-9]{2}\.[0-9]{1,2}\.[0-9]$/'
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- if: '$NIGHTLY'
when: always
variables:
- INSTANCE_ROLE: "XSIAM 1.2"
+ INSTANCE_ROLE: "XSIAM"
GCS_QUEUE_FILE: "queue-ga"
- TEST_MACHINES_LIST: "test-machines-ga"
\ No newline at end of file
+ TEST_MACHINES_LIST: "test-machines-ga"
+ GCS_LOCKS_PATH: "content-locks/locks-xsiam-ga"
+ CLOUD_SERVERS_FILE: "xsiam_servers_path"
+ CLOUD_API_KEYS: $XSIAM_API_KEYS
+ ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
+ GCS_MARKET_BUCKET: "${GCS_MARKET_V2_BUCKET}"
+ ENV_RESULTS_PATH: "${ARTIFACTS_FOLDER_MPV2}/env_results.json"
+ GCS_SOURCE_BUCKET: "$GCS_PRODUCTION_V2_BUCKET"
+ GCS_MACHINES_BUCKET: "marketplace-v2-dist-dev/upload-flow/builds-xsiam"
+ SERVER_TYPE: "XSIAM"
+ MARKETPLACE_NAME: "marketplacev2"
+ needs:
+ - job: mpv2-prepare-testing-bucket
+ optional: true
+
+#xsoar_ng_server_ga:
+# extends:
+# - .test_content_on_cloud_server_instances_base
+# rules:
+# - !reference [.filter-non-nightly-docker-updates-rule, rules]
+# - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
+# when: always
+# variables:
+# INSTANCE_ROLE: "XSIAM"
+# GCS_QUEUE_FILE: "queue-ga"
+# TEST_MACHINES_LIST: "test-machines-ga"
+# GCS_LOCKS_PATH: "content-locks/locks-xsoar-ng"
+# CLOUD_SERVERS_FILE: "xsoar_ng_servers_path"
+# CLOUD_API_KEYS: $XSOAR_NG_API_KEYS
+# GCS_SOURCE_BUCKET: "${GCS_PRODUCTION_BUCKET}"
+# ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
+# ENV_RESULTS_PATH: "${ARTIFACTS_FOLDER_XSOAR}/env_results.json"
+# GCS_MACHINES_BUCKET: "marketplace-dist-dev/upload-flow/builds-xsoar-ng"
+# SERVER_TYPE: "XSIAM"
+# MARKETPLACE_NAME: "xsoar"
+# needs:
+# - job: xsoar-prepare-testing-bucket
+# optional: true
+# allow_failure: true
+
+
+test-upload-flow:
+ tags:
+ - gke
+ extends:
+ - .default-job-settings
+ rules:
+ - !reference [.filter-non-nightly-docker-updates-rule, rules]
+ - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
+ variables:
+ ALL_BUCKETS: "$GCS_MARKET_BUCKET_DEV,$GCS_MARKET_V2_BUCKET_DEV"
+ stage: unittests-and-validations
+ allow_failure: true
+ script:
+ - |
+ - section_start "Checks Whether to Trigger a Test Upload"
+ - SHOULD_SKIP_TEST_UPLOAD=$(./Utils/should_trigger_test_upload.sh)
+ - if [ -z "$SHOULD_SKIP_TEST_UPLOAD" ]; then
+ - echo "No upload-flow related files were modified, skipping upload test"
+ - exit 0
+ - fi
+ - echo "Found modified files that should be tested in upload-flow"
+ - section_end "Checks Whether to Trigger a Test Upload"
+
+ - section_start "Create Testing Branch"
+ - export BRANCH="${CI_COMMIT_BRANCH}-upload_test_branch-$(date +%s)"
+ - python3 ./Utils/test_upload_flow/create_test_branch.py -tb $BRANCH -a $ARTIFACTS_FOLDER -g $GITLAB_PUSH_TOKEN
+ - echo $BRANCH
+ - section_end "Create Testing Branch"
+
+ - section_start "Trigger Test Upload Flow On Testing Branch"
+ - export pipeline_id=$(./Utils/trigger_test_upload_flow.sh -ct $GITLAB_SVC_USER_TOKEN -g -b $BRANCH | jq .id)
+ - echo "Successful triggered test upload - https://code.pan.run/xsoar/content/-/pipelines/$pipeline_id" # disable-secrets-detection
+ - section_end "Trigger Test Upload Flow On Testing Branch"
+
+ - section_start "Wait For Upload To Finish"
+ - python3 ./Utils/test_upload_flow/wait_for_upload.py -p $pipeline_id -g $GITLAB_API_TOKEN
+ - section_end "Wait For Upload To Finish"
+
+ - section_start "Verify Created Testing Bucket"
+ - current_storage_base_path="upload-flow/builds/$BRANCH/$pipeline_id/content/packs"
+ - python3 ./Utils/test_upload_flow/verify_bucket.py -a $ARTIFACTS_FOLDER -s $GCS_MARKET_KEY -sb $current_storage_base_path -b $ALL_BUCKETS
+ - section_end "Verify Created Testing Bucket"
+
diff --git a/.gitlab/ci/sdk-nightly.yml b/.gitlab/ci/sdk-nightly.yml
index f19d9c962d54..6918c798856f 100644
--- a/.gitlab/ci/sdk-nightly.yml
+++ b/.gitlab/ci/sdk-nightly.yml
@@ -81,9 +81,10 @@ demisto_sdk_nightly:check_idset_dependent_commands:
- section_start "Calculate Packs Dependencies" --collapsed
- demisto-sdk find-dependencies -idp $ARTIFACTS_FOLDER/id_set.json --output-path $ARTIFACTS_FOLDER/packs_dependencies.json --all-packs-dependencies
- section_end "Calculate Packs Dependencies"
+ - job-done
-demisto-sdk-nightly:create-instance:
+demisto-sdk-nightly:xsoar-prepare-testing-bucket:
tags:
- gke
extends:
@@ -95,12 +96,13 @@ demisto-sdk-nightly:create-instance:
cache:
policy: pull-push
needs: []
- stage: create-instances
+ stage: prepare-testing-bucket
script:
- !reference [.download-demisto-conf]
- |
[ -n "${TIME_TO_LIVE}" ] && TTL=${TIME_TO_LIVE} || TTL=300
python3 ./Tests/scripts/awsinstancetool/aws_instance_tool.py -envType "$IFRA_ENV_TYPE" -timetolive $TTL -outfile "$ARTIFACTS_FOLDER/env_results.json"
+ - job-done
demisto-sdk-nightly:run-commands-against-instance:
@@ -154,6 +156,7 @@ demisto-sdk-nightly:run-commands-against-instance:
- section_start "Destroy instances"
- python3 ./Tests/scripts/destroy_instances.py $ARTIFACTS_FOLDER $ARTIFACTS_FOLDER/env_results.json "$INSTANCE_ROLE" "$TIME_TO_LIVE"
- section_end "Destroy instances"
+ - job-done
demisto-sdk-nightly:run-end-to-end-tests:
@@ -170,7 +173,7 @@ demisto-sdk-nightly:run-end-to-end-tests:
DOCKER_DRIVER: overlay2
DOCKER_TLS_CERTDIR: ""
INSTANCE_ROLE: "Server Master"
- needs: ["demisto-sdk-nightly:create-instance"]
+ needs: ["demisto-sdk-nightly:xsoar-prepare-testing-bucket"]
stage: run-instances
script:
- !reference [.open-ssh-tunnel]
@@ -182,6 +185,7 @@ demisto-sdk-nightly:run-end-to-end-tests:
export DEMISTO_BASE_URL="https://localhost:$TUNNEL_PORT"
echo "Server URL: $DEMISTO_BASE_URL"
python3 -m pytest $ARTIFACTS_FOLDER/demisto-sdk/tests_end_to_end/ || EXIT_CODE=$?
+ - job-done
- exit $EXIT_CODE
demisto-sdk-nightly:fan-in:
diff --git a/.gitlab/ci/slack-notify.yml b/.gitlab/ci/slack-notify.yml
index 0a15ecaeaac8..77c2b947795f 100644
--- a/.gitlab/ci/slack-notify.yml
+++ b/.gitlab/ci/slack-notify.yml
@@ -5,6 +5,7 @@ default:
expire_in: 30 days
paths:
- /builds/xsoar/content/artifacts/*
+ - /builds/xsoar/content/pipeline_jobs_folder/*
when: always
stages:
diff --git a/.gitlab/ci/staging.yml b/.gitlab/ci/staging.yml
index 68b8b60831a1..dc74a0e46b09 100644
--- a/.gitlab/ci/staging.yml
+++ b/.gitlab/ci/staging.yml
@@ -1,129 +1 @@
-.generic-graph-prepare-testing-bucket:
- tags:
- - gke
- extends:
- - .default-job-settings
- rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- - if: '$NIGHTLY'
- cache:
- policy: pull
- variables:
- KUBERNETES_CPU_REQUEST: 2000m
- needs: []
- stage: create-instances
- allow_failure: true
- script:
- - pip3 uninstall -y demisto-sdk
- - pip3 install git+https://github.com/demisto/demisto-sdk@master
-
- - !reference [.download-demisto-conf]
- - !reference [.create-release-notes-and-common-docs]
-
- - section_start "Create content graph" --collapsed
- - demisto-sdk create-content-graph -o $ARTIFACTS_FOLDER/content-graph.dump
- - section_end "Create content graph"
-
- - section_start "Create Content Artifacts and Update Conf" --collapsed
- - |
- if [[ $MARKETPLACE_VERSION == "xsoar" ]];
- then
- echo "Starting to create artifacts with zip for XSOAR."
- python Tests/scripts/create_artifacts_graph/create_artifacts.py --marketplace "xsoar" --artifacts-output $ARTIFACTS_FOLDER/content_packs --dependencies-output $ARTIFACTS_FOLDER/packs_dependencies.json
- else
- echo "Starting to create artifacts without zip."
- python Tests/scripts/create_artifacts_graph/create_artifacts.py --marketplace "$MARKETPLACE_VERSION" --artifacts-output $ARTIFACTS_FOLDER/content_packs --dependencies-output $ARTIFACTS_FOLDER/packs_dependencies.json --no-zip
- fi
-
- - gcloud auth activate-service-account --key-file="$GCS_ARTIFACTS_KEY"
-
- - cp "./Tests/conf.json" "$ARTIFACTS_FOLDER/conf.json"
- - section_end "Create Content Artifacts and Update Conf"
-
- - section_start "Replace Cortex XSOAR" --collapsed
- - |
- if [[ $MARKETPLACE_VERSION != "xsoar" ]];
- then
- echo "Replace Cortex XSOAR for non-xsoar build."
- pushd "$ARTIFACTS_FOLDER"
- find content_packs -type f -not \( -path "*/ReleaseNotes/*" \) -exec sed -i -e 's/Cortex XSOAR/'"$PRODUCT_NAME"'/gI' {} \;
- pushd content_packs; zip -r ../content_packs.zip * 1> /dev/null; popd
- rm -rf content_packs
- popd
- fi
- - section_end "Replace Cortex XSOAR"
-
- - section_start "Collect Tests" --collapsed
- - |
- if [ -n "${INSTANCE_TESTS}" ]; then
- echo "Skipping - not running in INSTANCE_TESTS build"
- else
- [ -n "${NIGHTLY}" ] && IS_NIGHTLY=true || IS_NIGHTLY=false
- python3 ./Tests/scripts/collect_tests/collect_tests.py -n $IS_NIGHTLY --marketplace "$MARKETPLACE_VERSION" --service_account $GCS_MARKET_KEY --graph true
- fi
- - cp $ARTIFACTS_FOLDER/content_packs_to_install.txt $ARTIFACTS_FOLDER/content_packs_to_install-graph.txt
- - section_end "Collect Tests"
-
- - section_start "Prepare Content Packs for Testing" --collapsed
- - ./Tests/scripts/prepare_content_packs_for_testing.sh "$MARKETPLACE_BUCKET" "$STORAGE_BASE_PATH" "$MARKETPLACE_VERSION"
- - section_end "Prepare Content Packs for Testing"
-
- - section_start "Create Instances for XSOAR"
- - |
- if [ -n "${INSTANCE_TESTS}" ]; then
- echo "Creating Instances, only for XSOAR."
- [ -n "${TIME_TO_LIVE}" ] && TTL=${TIME_TO_LIVE} || TTL=300
- python3 ./Tests/scripts/awsinstancetool/aws_instance_tool.py -envType "$IFRA_ENV_TYPE" -timetolive $TTL -outfile "$ARTIFACTS_FOLDER/env_results.json"
- fi
- - section_end "Create Instances for XSOAR"
-
- - section_start "Upload Artifacts to GCP" --collapsed
- - ./Tests/scripts/upload_artifacts.sh
- - section_end "Upload Artifacts to GCP"
-
-
-graph-prepare-testing-bucket-xsoar:
- variables:
- ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XSOAR}"
- MARKETPLACE_VERSION: "xsoar"
- MARKETPLACE_BUCKET: "$GCS_MARKET_BUCKET"
- extends:
- - .generic-graph-prepare-testing-bucket
-
-
-graph-prepare-testing-bucket-xpanse:
- variables:
- ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_XPANSE}"
- MARKETPLACE_VERSION: "xpanse"
- PRODUCT_NAME: "Cortex XPANSE"
- MARKETPLACE_BUCKET: "$GCS_MARKET_XPANSE_BUCKET"
- extends:
- - .generic-graph-prepare-testing-bucket
-
-
-graph-prepare-testing-bucket-mpv2:
- variables:
- ARTIFACTS_FOLDER: "${ARTIFACTS_FOLDER_MPV2}"
- MARKETPLACE_VERSION: "marketplacev2"
- MARKETPLACE_BUCKET: "$GCS_MARKET_V2_BUCKET"
- extends:
- - .generic-graph-prepare-testing-bucket
-
-compare_zips:
- needs: ["prepare-testing-bucket-mpv2", "graph-prepare-testing-bucket-mpv2", "create-instances", "graph-prepare-testing-bucket-xsoar", "graph-prepare-testing-bucket-xpanse", "prepare-testing-bucket-xpanse"]
- stage: stage-compare
- rules:
- - if: '$CI_PIPELINE_SOURCE =~ /^(push|contrib)$/'
- - if: '$NIGHTLY'
- extends:
- - .default-job-settings
- cache:
- policy: pull
- allow_failure: true
- script:
- - echo "compare between graph and id_set"
- - python3 ./Tests/scripts/utils/compare_staging_graph.py --artifacts $ARTIFACTS_FOLDER_XSOAR --marketplace xsoar --output-path $ARTIFACTS_FOLDER_XSOAR/report -s "$SLACK_TOKEN"
- - python3 ./Tests/scripts/utils/compare_staging_graph.py --artifacts $ARTIFACTS_FOLDER_MPV2 --marketplace marketplacev2 --output-path $ARTIFACTS_FOLDER_MPV2/report -s "$SLACK_TOKEN"
- - python3 ./Tests/scripts/utils/compare_staging_graph.py --artifacts $ARTIFACTS_FOLDER_XPANSE --marketplace xpanse --output-path $ARTIFACTS_FOLDER_XPANSE/report -s "$SLACK_TOKEN"
-
-
+# this file is reserved for staging jobs
\ No newline at end of file
diff --git a/.gitlab/helper_functions.sh b/.gitlab/helper_functions.sh
index f708006cb17f..825b38dcea6d 100644
--- a/.gitlab/helper_functions.sh
+++ b/.gitlab/helper_functions.sh
@@ -59,3 +59,8 @@ section_end() {
end="$(echo "$end" | sed -e "s/the_time/$end_time/" -e "s/section_id/$section_id/")"
echo -e "$end"
}
+
+job-done() {
+ mkdir -p "${PIPELINE_JOBS_FOLDER}"
+ echo "done" > ${PIPELINE_JOBS_FOLDER}/${CI_JOB_NAME}.txt
+}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4d2c4a534aea..ca0a7138eb22 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v2.3.0
+ rev: v4.4.0
hooks:
- id: check-yaml
exclude: .gitlab/ci/|.circleci/
@@ -9,3 +9,9 @@ repos:
- id: check-ast
- id: check-toml
- id: check-xml
+ - id: check-merge-conflict
+
+- repo: https://github.com/abravalheri/validate-pyproject
+ rev: v0.10.1
+ hooks:
+ - id: validate-pyproject
diff --git a/Images/campaign-canvas.png b/Images/campaign-canvas.png
new file mode 100644
index 000000000000..cf3a589a9b32
Binary files /dev/null and b/Images/campaign-canvas.png differ
diff --git a/Images/campaign-overview.png b/Images/campaign-overview.png
new file mode 100644
index 000000000000..229d9475ec76
Binary files /dev/null and b/Images/campaign-overview.png differ
diff --git a/LICENSE b/LICENSE
index 2a9c6f4f2610..51ab66c153f7 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,5 @@
The MIT License (MIT)
-Copyright (c) 2016 Demisto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Packs/AHA/Integrations/AHA/AHA.py b/Packs/AHA/Integrations/AHA/AHA.py
index 0595f667e793..1f1fdaf05b9d 100644
--- a/Packs/AHA/Integrations/AHA/AHA.py
+++ b/Packs/AHA/Integrations/AHA/AHA.py
@@ -3,6 +3,7 @@
import requests
from typing import Dict
+from enum import Enum
# Disable insecure warnings
@@ -11,9 +12,42 @@
''' CONSTANTS '''
REPLACE = 'replace'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
-URL_SUFFIX_PATTERN = f'/products/{REPLACE}/features/'
+URL_SUFFIX_PATTERN = f'/products/{REPLACE}/'
EDIT_FIELDS = ['id', 'reference_num', 'name', 'description', 'workflow_status', 'created_at']
DEFAULT_FIELDS = ['reference_num', 'name', 'id', 'created_at']
+FEATURE_FIELDS = ['ideas']
+
+''' AHA ENUM'''
+
+
+class AHA_TYPE(Enum):
+ IDEAS = 1
+ FEATURES = 2
+
+ def get_url_suffix(self) -> str:
+ if (self == AHA_TYPE.IDEAS):
+ return 'ideas/'
+ else:
+ return 'features/'
+
+ def get_type_plural(self) -> str:
+ if (self == AHA_TYPE.IDEAS):
+ return 'ideas'
+ else:
+ return 'features'
+
+ def get_type_singular(self) -> str:
+ if (self == AHA_TYPE.IDEAS):
+ return 'idea'
+ else:
+ return 'feature'
+
+ def get_type_for_outputs(self) -> str:
+ if (self == AHA_TYPE.IDEAS):
+ return 'Idea'
+ else:
+ return 'Feature'
+
''' CLIENT CLASS '''
@@ -31,20 +65,22 @@ def __init__(self,
self.url = url
self._headers['Content-Type'] = 'application/json'
- def get_features(self,
- feature_name: str,
- fields: str,
- from_date: str,
- page: str,
- per_page: str) -> Dict:
+ def get(self,
+ aha_type: AHA_TYPE,
+ name: str,
+ fields: str,
+ from_date: str,
+ page: str,
+ per_page: str) -> Dict:
"""
- Retrieves a list of features from AHA
+ Retrieves a list of features/ideas from AHA
Args:
- feature_name: str if given it will fetch the feature specified. if not, it will fetch all features.
- fields: str optional feature fields to retrive from the service.
- from_date: str format: YYYY-MM-DD get features created after from_date.
+ aha_type: determine what to get ideas or features using AHA_TYPE Enum.
+ name: str if given it will fetch the feature/idea specified. if not, it will fetch all features/ideas.
+ fields: str optional feature/idea fields to retrieve from the service.
+ from_date: str format: YYYY-MM-DD get features/ideas created after from_date.
page: str pagination specify the number of the page.
- per_page: str pagination specify the maximum number of features per page.
+ per_page: str pagination specify the maximum number of features/ideas per page.
"""
headers = self._headers
params = {
@@ -54,27 +90,28 @@ def get_features(self,
'per_page': per_page,
}
return self._http_request(method='GET',
- url_suffix=f'{self.url}{feature_name}',
+ url_suffix=f'{self.url}{aha_type.get_url_suffix()}{name}',
headers=headers, params=params, resp_type='json')
- def edit_feature(self, feature_name: str, fields: Dict) -> Dict:
+ def edit(self, aha_object_name: str, aha_type: AHA_TYPE, fields: Dict) -> Dict:
"""
- Updates fields in a feature from AHA
+ Updates fields in a feature/idea from AHA
Args:
- feature_name: str feature to update
+ aha_object_name: str idea to update
+ aha_type: determine what to edit ideas or features using AHA_TYPE Enum.
fields: Dict fields to update
"""
- payload = extract_payload(fields=fields)
- demisto.debug(f'Edit feature payload: {payload}')
+ payload = build_edit_idea_req_payload() if aha_type == AHA_TYPE.IDEAS else build_edit_feature_req_payload(fields=fields)
+ demisto.debug(f'Edit {aha_type.get_type_singular()} payload: {payload}')
fields = ','.join(EDIT_FIELDS)
- return self._http_request(method='PUT', url_suffix=f'{self.url}{feature_name}?fields={fields}',
- resp_type='json', json_data=payload)
+ url_suffix = f'{self.url}{aha_type.get_url_suffix()}{aha_object_name}?fields={fields}'
+ return self._http_request(method='PUT', url_suffix=url_suffix, resp_type='json', json_data=payload)
''' HELPER FUNCTIONS'''
-def extract_payload(fields: Dict):
+def build_edit_feature_req_payload(fields: Dict):
payload: Dict = {'feature': {}}
for field in fields:
feature = payload.get('feature', {})
@@ -86,24 +123,40 @@ def extract_payload(fields: Dict):
return payload
-def parse_features(features: dict, fields: List) -> List:
+def build_edit_idea_req_payload():
+ payload: Dict = {'idea': {}}
+ idea = payload.get('idea', {})
+ idea['workflow_status'] = "Shipped"
+ return payload
+
+
+def extract_ideas_from_feature(ideas: List) -> List:
+ ret_list: list[str] = []
+ for idea in ideas:
+ ret_list.append(idea.get('reference_num'))
+ return ret_list
+
+
+def parse_multiple_objects(aha_objects: dict, fields: List) -> List:
res_list = []
- for res in features:
- curr = parse_feature(res, fields=fields)
+ for res in aha_objects:
+ curr = parse_single_object(res, fields=fields)
res_list.extend(curr)
demisto.debug(f'Parsed response fields: {res_list}')
return res_list
-def parse_feature(feature: dict, fields: List = DEFAULT_FIELDS) -> List:
+def parse_single_object(aha_object: dict, fields: List = DEFAULT_FIELDS) -> List:
ret_dict = {}
for curr in fields:
if curr == 'description':
- ret_dict[curr] = feature.get(curr, {}).get('body')
+ ret_dict[curr] = aha_object.get(curr, {}).get('body')
elif curr == 'workflow_status':
- ret_dict[curr] = feature.get(curr, {}).get('name')
+ ret_dict[curr] = aha_object.get(curr, {}).get('name')
+ elif curr == 'ideas':
+ ret_dict[curr] = extract_ideas_from_feature(aha_object.get(curr, {}))
else:
- ret_dict[curr] = feature.get(curr, '')
+ ret_dict[curr] = aha_object.get(curr, '')
return [ret_dict]
@@ -115,7 +168,7 @@ def test_module(client: Client) -> str:
message: str = ''
try:
- result = client.get_features('', '', '2020-01-01', page='1', per_page='1')
+ result = client.get(AHA_TYPE.FEATURES, '', '', '2020-01-01', page='1', per_page='1')
if result:
message = 'ok'
except DemistoException as e:
@@ -126,26 +179,30 @@ def test_module(client: Client) -> str:
return message
-def get_features(client: Client,
- from_date: str,
- feature_name: str = '',
- fields: List = [],
- page: str = '1',
- per_page: str = '30') -> CommandResults:
+def get_command(client: Client,
+ aha_type: AHA_TYPE,
+ from_date: str,
+ aha_object_name: str = '',
+ fields: str = '',
+ page: str = '1',
+ per_page: str = '30') -> CommandResults:
message: List = []
- req_fields = ','.join(DEFAULT_FIELDS + fields)
- response = client.get_features(feature_name=feature_name, fields=req_fields,
- from_date=from_date, page=page, per_page=per_page)
+ fields_list: List = DEFAULT_FIELDS + argToList(fields)
+ if aha_type == AHA_TYPE.FEATURES:
+ fields_list.extend(FEATURE_FIELDS)
+ req_fields = ','.join(fields_list)
+ response = client.get(aha_type=aha_type, name=aha_object_name, fields=req_fields,
+ from_date=from_date, page=page, per_page=per_page)
if response:
- if 'features' in response:
- message = parse_features(response['features'], DEFAULT_FIELDS + fields)
+ if aha_type.get_type_plural() in response:
+ message = parse_multiple_objects(response[aha_type.get_type_plural()], fields_list)
else:
- message = parse_feature(response['feature'], DEFAULT_FIELDS + fields)
- human_readable = tableToMarkdown('Aha! get features',
+ message = parse_single_object(response[aha_type.get_type_singular()], fields_list)
+ human_readable = tableToMarkdown(f'Aha! get {aha_type.get_type_plural()}',
message,
removeNull=True)
return CommandResults(
- outputs_prefix='AHA.Feature',
+ outputs_prefix=f'AHA.{aha_type.get_type_for_outputs()}',
outputs_key_field='id',
outputs=message,
raw_response=response,
@@ -153,18 +210,20 @@ def get_features(client: Client,
)
-def edit_feature(client: Client,
- feature_name: str,
- fields: Dict) -> CommandResults:
+def edit_command(client: Client,
+ aha_type: AHA_TYPE,
+ aha_object_name: str,
+ fields: str = '{}') -> CommandResults:
message: List = []
- response = client.edit_feature(feature_name=feature_name, fields=fields)
+ fieldsDict = json.loads(fields)
+ response = client.edit(aha_object_name=aha_object_name, aha_type=aha_type, fields=fieldsDict)
if response:
- message = parse_feature(response['feature'], fields=EDIT_FIELDS)
- human_readable = tableToMarkdown('Aha! edit feature',
+ message = parse_single_object(response[aha_type.get_type_singular()], fields=EDIT_FIELDS)
+ human_readable = tableToMarkdown(f'Aha! edit {aha_type.get_type_singular()}',
message,
removeNull=True)
return CommandResults(
- outputs_prefix='AHA.Feature',
+ outputs_prefix=f'AHA.{aha_type.get_type_for_outputs()}',
outputs_key_field='id',
outputs=message,
readable_output=human_readable,
@@ -200,18 +259,20 @@ def main() -> None:
result = test_module(client)
return_results(result)
elif command == 'aha-get-features':
- from_date = args.get('from_date', '2020-01-01')
- feature_name = args.get('feature_name', '')
- fields = argToList(args.get('fields', ''))
- page = args.get('page', '1')
- per_page = args.get('per_page', '30')
- command_result = get_features(client, from_date=from_date, feature_name=feature_name, fields=fields, page=page,
- per_page=per_page)
+ command_result = get_command(client, aha_type=AHA_TYPE.FEATURES,
+ aha_object_name=args.pop('feature_name', ''), **args)
return_results(command_result)
elif command == 'aha-edit-feature':
- feature_name = args.get('feature_name', '')
- edit_fields = json.loads(args.get('fields', {}))
- command_result = edit_feature(client, feature_name=feature_name, fields=edit_fields)
+ command_result = edit_command(client, aha_type=AHA_TYPE.FEATURES,
+ aha_object_name=args.pop('feature_name', ''), **args)
+ return_results(command_result)
+ elif command == 'aha-get-ideas':
+ command_result = get_command(client=client, aha_type=AHA_TYPE.IDEAS,
+ aha_object_name=args.pop('idea_name', ''), **args)
+ return_results(command_result)
+ elif command == 'aha-edit-idea':
+ command_result = edit_command(client, aha_type=AHA_TYPE.IDEAS,
+ aha_object_name=args.pop('idea_name', ''), **args)
return_results(command_result)
else:
raise NotImplementedError(f'{command} command is not implemented.')
diff --git a/Packs/AHA/Integrations/AHA/AHA.yml b/Packs/AHA/Integrations/AHA/AHA.yml
index bd86159546ec..91f90b7d3b3a 100644
--- a/Packs/AHA/Integrations/AHA/AHA.yml
+++ b/Packs/AHA/Integrations/AHA/AHA.yml
@@ -86,16 +86,16 @@ script:
description: The feature creation date.
type: Date
- name: aha-edit-feature
- description: "You can edit any of the following fields in a feature: Name, Status and Description."
+ description: "You can edit the following fields in a feature: Name and Status."
deprecated: false
arguments:
- name: feature_name
- description: The name of the features to edit.
+ description: The name of the feature to edit.
required: true
isArray: false
defaultValue: ""
- name: fields
- description: 'Fields in JSON format to edit in a feature. Possible fields are name, description and status. Status should match Aha values under workflow_status. Example:" {"name": "name", "description": "desc", "status" : "Closed"}'
+    description: 'Fields in JSON format to edit in a feature. Possible fields are name and status. Status should match Aha values under workflow_status. Example: {"name": "name", "status": "Closed"}'
required: true
isArray: false
defaultValue: ""
@@ -109,20 +109,96 @@ script:
- contextPath: AHA.Feature.reference_num
description: The feature reference number.
type: String
- - contextPath: AHA.Feature.workflow_status
- description: The feature status description.
- type: String
- contextPath: AHA.Feature.description
description: The feature description.
type: String
+ - contextPath: AHA.Feature.workflow_status
+ description: The feature status description.
+ type: String
- contextPath: AHA.Feature.created_at
description: The feature creation date.
type: Date
+ - name: aha-get-ideas
+ description: Lists all ideas from service, unless a specific idea is specified.
+ deprecated: false
+ arguments:
+ - name: from_date
+ description: Show ideas created after this date.
+ required: false
+ isArray: false
+ defaultValue: "2020-01-01"
+ - name: idea_name
+ description: The name of a specific idea to retrieve.
+ required: false
+ isArray: false
+ defaultValue: ""
+ - name: fields
+ description: A comma-separated list of fields to include in the Aha! service response.
+ required: false
+ isArray: false
+ defaultValue: "name,reference_num,id,created_at"
+ - name: page
+ description: The specific results page to retrieve.
+ required: false
+ isArray: false
+ defaultValue: "1"
+ - name: per_page
+ description: The maximum number of results per page.
+ required: false
+ isArray: false
+ defaultValue: "30"
+ outputs:
+ - contextPath: AHA.Idea.id
+ description: The idea ID.
+ type: UUID
+ - contextPath: AHA.Idea.name
+ description: The idea name.
+ type: String
+ - contextPath: AHA.Idea.reference_num
+ description: The idea reference number.
+ type: String
+ - contextPath: AHA.Idea.workflow_status
+ description: The idea status description.
+ type: String
+ - contextPath: AHA.Idea.description
+ description: The idea description.
+ type: String
+ - contextPath: AHA.Idea.created_at
+ description: The idea creation date.
+ type: Date
+ - name: aha-edit-idea
+  description: "Edit an idea's status to Shipped."
+ deprecated: false
+ arguments:
+ - name: idea_name
+ description: The name of the idea to edit.
+ required: true
+ isArray: false
+ defaultValue: ""
+ outputs:
+ - contextPath: AHA.Idea.id
+ description: The idea ID.
+ type: UUID
+ - contextPath: AHA.Idea.name
+ description: The idea name.
+ type: String
+ - contextPath: AHA.Idea.reference_num
+ description: The idea reference number.
+ type: String
+ - contextPath: AHA.Idea.workflow_status
+ description: The idea status description.
+ type: String
+ - contextPath: AHA.Idea.description
+ description: The idea description.
+ type: String
+ - contextPath: AHA.Idea.created_at
+ description: The idea creation date.
+ type: Date
isfetch: false
runonce: false
script: "-"
type: python
subtype: python3
- dockerimage: demisto/python3:3.10.8.37753
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
fromversion: 6.5.0
diff --git a/Packs/AHA/Integrations/AHA/AHA_test.py b/Packs/AHA/Integrations/AHA/AHA_test.py
index f1337a5d478d..009c3b6e5e01 100644
--- a/Packs/AHA/Integrations/AHA/AHA_test.py
+++ b/Packs/AHA/Integrations/AHA/AHA_test.py
@@ -1,6 +1,8 @@
import demistomock as demisto # noqa: F401
+import pytest
from CommonServerPython import * # noqa: F401
-from AHA import Client, get_features, edit_feature
+from AHA import Client, get_command, edit_command
+from AHA import AHA_TYPE
import io
@@ -47,7 +49,7 @@ def test_main(mocker):
'api_key': {'password': 'test_api'},
}
)
- mocker.patch('AHA.Client.get_features', return_value={'name': 'test'})
+ mocker.patch('AHA.Client.get', return_value={'name': 'test'})
mocker.patch.object(
demisto, 'command',
return_value='test-module'
@@ -58,6 +60,36 @@ def test_main(mocker):
assert demisto.results.call_args[0][0] == 'ok'
+def test_notImplementedCommand(mocker):
+ """
+ Given:
+ - All return values from helper functions are valid
+ When:
+ - Calling main function with invalid command
+ Then:
+ - Return sys.exit(0)
+ """
+ from AHA import main
+
+ mocker.patch.object(
+ demisto, 'params', return_value={
+ 'url': 'example.com',
+ 'project_name': 'DEMO',
+ 'api_key': {'password': 'test_api'},
+ }
+ )
+ mocker.patch('AHA.Client.get', return_value={'name': 'test'})
+ mocker.patch.object(
+ demisto, 'command',
+ return_value='tests-module'
+ )
+ mocker.patch.object(demisto, 'results')
+ with pytest.raises(SystemExit) as pytest_wrapped_e:
+ main()
+ assert pytest_wrapped_e.type == SystemExit
+ assert pytest_wrapped_e.value.code == 0
+
+
def test_Module(mocker):
"""
Given:
@@ -81,23 +113,40 @@ def test_getFeatures(mocker):
- Asserts get a list of expected length with all features.
"""
client = mock_client(mocker, util_load_json('test_data/get_all_features.json'))
- results = get_features(client=client, from_date='2022-01-01')
+ results = get_command(client=client, aha_type=AHA_TYPE.FEATURES, from_date='2022-01-01')
assert len(results.outputs) == 3
+ assert len(results.outputs[0].get('ideas')) == 1
+ assert results.outputs[0].get('ideas')[0] == 'DEMO-I-299'
-def test_getFeaturesFromDate(mocker):
+def test_getIdeas(mocker):
+ """
+ When:
+ - Requesting all ideas
+ Then:
+ - Asserts get a list of expected length with all ideas.
+ """
+ client = mock_client(mocker, util_load_json('test_data/get_all_ideas.json'))
+ results = get_command(client=client, aha_type=AHA_TYPE.IDEAS, from_date='2022-01-01')
+ assert len(results.outputs) == 4
+
+
+@pytest.mark.parametrize('file_path, aha_type, from_date',
+ [('test_data/empty_feature_result.json', AHA_TYPE.FEATURES, '3000-01-01'),
+ ('test_data/empty_idea_result.json', AHA_TYPE.IDEAS, '3000-01-01')])
+def test_getFeaturesFromDate(mocker, file_path, aha_type, from_date):
"""
When:
- Requesting all features with created date of the future
Then:
    - Return an empty list
"""
- client = mock_client(mocker, util_load_json('test_data/empty_feature_result.json'))
- results = get_features(client=client, from_date='3000-01-01')
+ client = mock_client(mocker, util_load_json(file_path))
+ results = get_command(client=client, aha_type=aha_type, from_date=from_date)
assert len(results.outputs) == 0
-def test_getSpecificFeature(mocker):
+def test_getAFeature(mocker):
"""
When:
- Requesting a specific feature
@@ -105,21 +154,34 @@ def test_getSpecificFeature(mocker):
- Returns the requested feature
"""
client = mock_client(mocker, util_load_json('test_data/get_specific_feature.json'))
- result = get_features(client=client, from_date='2020-01-01', feature_name='DEMO-10')
+ result = get_command(client=client, aha_type=AHA_TYPE.FEATURES, from_date='2020-01-01', aha_object_name='DEMO-10')
assert len(result.outputs) == 1
assert result.outputs[0]['reference_num'] == 'DEMO-10'
+def test_getAnIdea(mocker):
+ """
+ When:
+ - Requesting a specific idea
+ Then:
+ - Returns the requested idea
+ """
+ client = mock_client(mocker, util_load_json('test_data/get_specific_idea.json'))
+ result = get_command(client=client, aha_type=AHA_TYPE.IDEAS, from_date='2020-01-01', aha_object_name='DEMO-I-2895')
+ assert len(result.outputs) == 1
+ assert result.outputs[0]['reference_num'] == 'DEMO-I-2895'
+
+
def test_editFeatureField(mocker):
"""
When:
- - Requesting to update fields in a feautre.
+ - Requesting to update fields in a feature.
Then:
- Return the feature with updated fields.
"""
client = mock_client(mocker, util_load_json('test_data/update_feature_fields.json'))
- result = edit_feature(client=client, feature_name='DEMO-10', fields={'name': 'DEMO-10', 'description': 'new description',
- 'status': 'Closed'})
+ result = edit_command(client=client, aha_type=AHA_TYPE.FEATURES, aha_object_name='DEMO-10',
+ fields='{"name": "DEMO-10", "description": "new description", "status": "Closed"}')
assert len(result.outputs) == 1
output = result.outputs[0]
assert output.get('name') == 'Demo-10'
@@ -127,18 +189,35 @@ def test_editFeatureField(mocker):
assert output.get('workflow_status') == 'Closed'
+def test_editIdeaStatus(mocker):
+ """
+ When:
+ - Requesting to update status in an idea.
+ Then:
+ - Return the idea with an updated field.
+ """
+ client = mock_client(mocker, util_load_json('test_data/update_idea_status.json'))
+ result = edit_command(client=client, aha_type=AHA_TYPE.IDEAS, aha_object_name='DEMO-I-2895', fields='{}')
+ assert len(result.outputs) == 1
+ output = result.outputs[0]
+ assert output.get('name') == '[Test] Mirroring'
+ assert output.get('description') == 'Aha Jira Mirroring'
+ assert output.get('workflow_status') == 'Shipped'
+
+
def test_editSpecificFeatureField(mocker):
"""
When:
- - Requesting to update a specific field in a feautre.
+ - Requesting to update a specific field in a feature.
Then:
- Return the feature with only the specific field updated.
"""
- new_description = 'change just description'
+ new_name = 'change just name'
client = mock_client(mocker, util_load_json('test_data/update_feature_field.json'))
- result = edit_feature(client=client, feature_name='DEMO-10', fields={'description': new_description})
+ result = edit_command(client=client, aha_type=AHA_TYPE.FEATURES, aha_object_name='DEMO-10',
+ fields=f'{{"description": "{new_name}"}}')
assert len(result.outputs) == 1
output = result.outputs[0]
- assert output.get('name') == 'Demo-10'
- assert output.get('description') == new_description
+ assert output.get('name') == new_name
+ assert output.get('description') == 'description'
assert output.get('workflow_status') == 'Closed'
diff --git a/Packs/AHA/Integrations/AHA/README.md b/Packs/AHA/Integrations/AHA/README.md
index 5ad0c7cd8b82..33e6566376d6 100644
--- a/Packs/AHA/Integrations/AHA/README.md
+++ b/Packs/AHA/Integrations/AHA/README.md
@@ -1,5 +1,6 @@
Use the Aha! integration to list and manage Cortex XSOAR features from Aha.
-This integration was integrated and tested with API version September 30, 2022 release of Aha.
+This integration was integrated and tested with API version December 02, 2022 release of Aha.
+
## Configure Aha on Cortex XSOAR
1. Navigate to **Settings** > **Integrations** > **Servers & Services**.
@@ -9,8 +10,8 @@ This integration was integrated and tested with API version September 30, 2022 r
| **Parameter** | **Description** | **Required** |
| --- | --- | --- |
| Server URL | | True |
- | Project Name | Check the Aha\! project name in the URL. Replace the <PROJECT_NAME> placeholder in the following : example.com.aha.io/products/<PROJECT_NAME>/features | True |
- | Api Key | API Key to access the service REST API, | True |
+ | Project Name | Check the Aha\! project name in the URL. Replace the <PROJECT_NAME> placeholder in the following : example.com.aha.io/products/<PROJECT_NAME>/features. | True |
+ | Api Key | API Key to access the service REST API. | True |
| Trust any certificate (not secure) | | False |
| Use system proxy settings | | False |
@@ -30,7 +31,7 @@ Lists all features from service, unless a specific feature is specified.
| **Argument Name** | **Description** | **Required** |
| --- | --- | --- |
-| from_date | Show features created after that date. Default is 2020-01-01. | Optional |
+| from_date | Show features created after this date. Default is 2020-01-01. | Optional |
| feature_name | The name of a specific feature to retrieve. | Optional |
| fields | A comma-separated list of fields to include in the Aha! service response. Default is name,reference_num,id,created_at. | Optional |
| page | The specific results page to retrieve. Default is 1. | Optional |
@@ -44,7 +45,7 @@ Lists all features from service, unless a specific feature is specified.
| AHA.Feature.id | UUID | The feature ID. |
| AHA.Feature.name | String | The feature name. |
| AHA.Feature.reference_num | String | The feature reference number. |
-| AHA.Feature.workflow_status | String | The feature workflow status. |
+| AHA.Feature.workflow_status | String | The feature status description. |
| AHA.Feature.description | String | The feature description. |
| AHA.Feature.created_at | Date | The feature creation date. |
@@ -55,7 +56,7 @@ Lists all features from service, unless a specific feature is specified.
### aha-edit-feature
***
-Edit any of the following fields in a feature: Name, Status and Description.
+You can edit the following fields in a feature: Name and Status.
#### Base Command
@@ -65,8 +66,8 @@ Edit any of the following fields in a feature: Name, Status and Description.
| **Argument Name** | **Description** | **Required** |
| --- | --- | --- |
-| feature_name | The name of the features to edit. | Required |
-| fields | Fields in JSON format to edit in a feature. Possible fields are name, description and status. Status should match Aha values under workflow_status. Example:" {"name": "name", "description": "desc", "status" : "Closed"}. | Required |
+| feature_name | The name of the feature to edit. | Required |
+| fields | Fields in JSON format to edit in a feature. Possible fields are name and status. Status should match Aha values under workflow_status. Example: {"name": "name", "status": "Closed"}. | Required |
#### Context Output
@@ -76,9 +77,73 @@ Edit any of the following fields in a feature: Name, Status and Description.
| AHA.Feature.id | UUID | The feature ID. |
| AHA.Feature.name | String | The feature name. |
| AHA.Feature.reference_num | String | The feature reference number. |
-| AHA.Feature.workflow_status | String | The feature workflow status. |
+| AHA.Feature.workflow_status | String | The feature status description. |
| AHA.Feature.description | String | The feature description. |
| AHA.Feature.created_at | Date | The feature creation date. |
#### Command example
-```!aha-edit-feature feature_name=DEMO-10 fields=`{"name":"the_new_name", "description":"the_new_desc", "status":"Closed"}```
+```!aha-edit-feature feature_name=DEMO-10 fields=`{"name":"the_new_name", "status":"Closed"}```
+
+### aha-get-ideas
+***
+Lists all ideas from service, unless a specific idea is specified.
+
+
+#### Base Command
+
+`aha-get-ideas`
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| from_date | Show ideas created after this date. Default is 2020-01-01. | Optional |
+| idea_name | The name of a specific idea to retrieve. | Optional |
+| fields | A comma-separated list of fields to include in the Aha! service response. Default is name,reference_num,id,created_at. | Optional |
+| page | The specific results page to retrieve. Default is 1. | Optional |
+| per_page | The maximum number of results per page. Default is 30. | Optional |
+
+
+#### Context Output
+
+| **Path** | **Type** | **Description** |
+| --- | --- | --- |
+| AHA.Idea.id | UUID | The idea ID. |
+| AHA.Idea.name | String | The idea name. |
+| AHA.Idea.reference_num | String | The idea reference number. |
+| AHA.Idea.workflow_status | String | The idea status description. |
+| AHA.Idea.description | String | The idea description. |
+| AHA.Idea.created_at | Date | The idea creation date. |
+
+#### Command example
+```!aha-get-ideas```
+```!aha-get-ideas idea_name=DEMO-I-2895```
+```!aha-get-ideas idea_name=DEMO-I-2895 fields=workflow_status```
+
+### aha-edit-idea
+***
+Edit an idea's status to Shipped.
+
+
+#### Base Command
+
+`aha-edit-idea`
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| idea_name | The name of the idea to edit. | Required |
+
+
+#### Context Output
+
+| **Path** | **Type** | **Description** |
+| --- | --- | --- |
+| AHA.Idea.id | UUID | The idea ID. |
+| AHA.Idea.name | String | The idea name. |
+| AHA.Idea.reference_num | String | The idea reference number. |
+| AHA.Idea.workflow_status | String | The idea status description. |
+| AHA.Idea.description | String | The idea description. |
+| AHA.Idea.created_at | Date | The idea creation date. |
+
+#### Command example
+```!aha-edit-idea idea_name=DEMO-I-2895```
diff --git a/Packs/AHA/Integrations/AHA/test_data/empty_idea_result.json b/Packs/AHA/Integrations/AHA/test_data/empty_idea_result.json
new file mode 100644
index 000000000000..6bcb18d31949
--- /dev/null
+++ b/Packs/AHA/Integrations/AHA/test_data/empty_idea_result.json
@@ -0,0 +1,8 @@
+{
+ "ideas": [],
+ "pagination": {
+ "total_records": 0,
+ "total_pages": 0,
+ "current_page": 1
+ }
+}
\ No newline at end of file
diff --git a/Packs/AHA/Integrations/AHA/test_data/get_all_features.json b/Packs/AHA/Integrations/AHA/test_data/get_all_features.json
index f7c920dc09e8..af7bc7dcf19b 100644
--- a/Packs/AHA/Integrations/AHA/test_data/get_all_features.json
+++ b/Packs/AHA/Integrations/AHA/test_data/get_all_features.json
@@ -23,7 +23,20 @@
"description": {
"id": "7142047390469716564",
"body": "
Italy is a major cycling destination from folks from around the world. As part of the Giro coverage also highlight routes and places of interest for casual travelers.
Many cities want to make their roads safer and more convenient for cyclists, but they have a problem. They have very little data on where people ride and what influences their choices. We have tons of data that we can make anonymous and sell to these cities.
BucketName:\s+(.*?)\<\/li\>
+ unpack_matches: {}
+ - operator: uniq
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 450,
+ "y": 790
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+view: |-
+ {
+ "linkLabelsPosition": {},
+ "paper": {
+ "dimensions": {
+ "height": 975,
+ "width": 380,
+ "x": 450,
+ "y": 50
+ }
+ }
+ }
+inputs:
+- key: RemoteHostname
+ value:
+ complex:
+ root: alert
+ accessor: hostname
+ required: true
+ description: 'Remote hostname in an incident/alert. '
+ playbookInputQuery:
+outputs:
+- contextPath: S3BucketName
+ description: This is the bucket name extracted from HTTP response body.
+ type: unknown
+tests:
+- No tests (auto formatted)
+fromversion: 6.5.0
diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Unclaimed_S3_Bucket_Validation_README.md b/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Unclaimed_S3_Bucket_Validation_README.md
new file mode 100644
index 000000000000..4f316d0b4721
--- /dev/null
+++ b/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Unclaimed_S3_Bucket_Validation_README.md
@@ -0,0 +1,35 @@
+The playbook sends an HTTP GET request to the hostname and validates whether bucket information is missing.
+
+## Dependencies
+This playbook uses the following sub-playbooks, integrations, and scripts.
+
+### Sub-playbooks
+This playbook does not use any sub-playbooks.
+
+### Integrations
+This playbook does not use any integrations.
+
+### Scripts
+* Set
+* http
+
+### Commands
+This playbook does not use any commands.
+
+## Playbook Inputs
+---
+
+| **Name** | **Description** | **Default Value** | **Required** |
+| --- | --- | --- | --- |
+| RemoteHostname | Remote hostname in an incident/alert. | alert.hostname | Required |
+
+## Playbook Outputs
+---
+
+| **Path** | **Description** | **Type** |
+| --- | --- | --- |
+| S3BucketName | This is the bucket name extracted from HTTP response body. | unknown |
+
+## Playbook Image
+---
+![AWS - Unclaimed S3 Bucket Validation](../doc_files/AWS_-_Unclaimed_S3_Bucket_Validation.png)
\ No newline at end of file
diff --git a/Packs/AWS-Enrichment-Remediation/README.md b/Packs/AWS-Enrichment-Remediation/README.md
index ba648d3a33b2..38943f67d6df 100644
--- a/Packs/AWS-Enrichment-Remediation/README.md
+++ b/Packs/AWS-Enrichment-Remediation/README.md
@@ -3,8 +3,10 @@
The pack contains AWS playbooks that conduct enrichment and/or remediation and can use multiple other AWS content packs:
- Enrichment: Give an IP address, see if there is a EC2 instance associated and if so pull information on the security group associated and IAM information for the user that created that security group.
- Remediation: Give the information collected from enrichment, replace the security group with a "quarantine" security group until vulnerabilities are resolved.
+- Unclaimed S3 Bucket Validation: The playbook sends an HTTP GET request to the domain and validates the missing bucket information.
+- Unclaimed S3 Bucket Remediation: The playbook will create the unclaimed S3 bucket.
-There are multiple AWS content packs for multiple AWS products (EC2, IAM, Route53, etc). The intent was that users can install and use only the packs they need. However, if an AWS playbook uses multiple pack integrations (such as EC2 and IAM), the integrations can't reside in one of the current packs because they include content from multiple pack integrations. This pack was created as a place to put AWS playbooks that use AWS integrations from multiple packs with a focus on enrichment and remediation.
+There are multiple AWS content packs for multiple AWS products (EC2, IAM, Route53, S3, etc). The intent was that users can install and use only the packs they need. However, if an AWS playbook uses multiple pack integrations (such as EC2, S3 and IAM), the integrations can't reside in one of the current packs because they include content from multiple pack integrations. This pack was created as a place to put AWS playbooks that use AWS integrations from multiple packs with a focus on enrichment and remediation.
### Playbooks
@@ -13,6 +15,8 @@ This content pack includes the following playbooks:
- AWS - Enrichment
- AWS - Security Group Remediation
- Cloud Response - AWS
+- AWS - Unclaimed S3 Bucket Validation
+- AWS - Unclaimed S3 Bucket Remediation
#### AWS - Enrichment
AWS - Enrichment playbook reports EC2 and IAM information given an IP address of an EC2 instance.
@@ -24,3 +28,12 @@ AWS - Security Group Remediation playbook replaces current security groups assoc
![AWS - Security Group Remediation](https://raw.githubusercontent.com/demisto/content/master/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Security_Group_Remediation.png)
+#### AWS - Unclaimed S3 Bucket Validation
+AWS - Unclaimed S3 Bucket Validation playbook validates the unclaimed S3 bucket details.
+
+![AWS - Unclaimed S3 Bucket Validation](https://raw.githubusercontent.com/demisto/content/f0d7512f35321b195b59e8e677fbe7b2e1319c74/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Validation.png)
+
+#### AWS - Unclaimed S3 Bucket Remediation
+AWS - Unclaimed S3 Bucket Remediation playbook creates the unclaimed S3 bucket so that other actors can't claim the bucket.
+
+![AWS - Unclaimed S3 Bucket Remediation](https://raw.githubusercontent.com/demisto/content/7f3e223a86964eda5689a9c3e1f22511021c5f40/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Remediation.png)
\ No newline at end of file
diff --git a/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_0.md b/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_0.md
new file mode 100644
index 000000000000..6618154bb754
--- /dev/null
+++ b/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_0.md
@@ -0,0 +1,6 @@
+
+#### Playbooks
+##### New: AWS - Unclaimed S3 Bucket Remediation
+- The playbook will create the unclaimed S3 bucket. (Available from Cortex XSOAR 6.5.0).
+##### New: AWS - Unclaimed S3 Bucket Validation
+- The playbook sends an HTTP GET request to the domain and validates the missing bucket information. (Available from Cortex XSOAR 6.5.0).
diff --git a/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Remediation.png b/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Remediation.png
new file mode 100644
index 000000000000..572fe18b8bb8
Binary files /dev/null and b/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Remediation.png differ
diff --git a/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Validation.png b/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Validation.png
new file mode 100644
index 000000000000..76241d918766
Binary files /dev/null and b/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Unclaimed_S3_Bucket_Validation.png differ
diff --git a/Packs/AWS-Enrichment-Remediation/pack_metadata.json b/Packs/AWS-Enrichment-Remediation/pack_metadata.json
index 1c32b94b5357..d74b2623bb67 100644
--- a/Packs/AWS-Enrichment-Remediation/pack_metadata.json
+++ b/Packs/AWS-Enrichment-Remediation/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS Enrichment and Remediation",
"description": "Playbooks using multiple AWS content packs for enrichment and remediation purposes",
"support": "xsoar",
- "currentVersion": "1.0.2",
+ "currentVersion": "1.1.0",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -14,7 +14,8 @@
"keywords": [],
"marketplaces": [
"xsoar",
- "marketplacev2"
+ "marketplacev2",
+ "xpanse"
],
"dependencies": {
"AWS-IAM": {
@@ -28,11 +29,16 @@
"AWS-EC2": {
"mandatory": true,
"display_name": "AWS - EC2"
+ },
+ "AWS-S3": {
+ "mandatory": true,
+ "display_name": "AWS - S3"
}
},
"displayedImages": [
"AWS-IAM",
"CommonScripts",
- "AWS-EC2"
+ "AWS-EC2",
+ "AWS-S3"
]
}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/.pack-ignore b/Packs/AWS-GuardDuty/.pack-ignore
index b59fc9dd2f26..995a7cc6e4f6 100644
--- a/Packs/AWS-GuardDuty/.pack-ignore
+++ b/Packs/AWS-GuardDuty/.pack-ignore
@@ -9,4 +9,7 @@ ebsVolumesMalwareProtection
findingFrequency
Eks
Ebs
-Ecs
\ No newline at end of file
+Ecs
+returnRawResponse
+aws-gd-get-findings
+AWS
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_-_Classifier.json b/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_-_Classifier.json
new file mode 100644
index 000000000000..4b4085e7241f
--- /dev/null
+++ b/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_-_Classifier.json
@@ -0,0 +1,28 @@
+{
+ "description": "Classifies AWSGuardDuty incidents.",
+ "feed": false,
+ "id": "AWSGuardDuty - Classifier",
+ "keyTypeMap": {
+ "AccessKey": "AWS Guard Duty IAM Finding",
+ "Container": "AWS Guard Duty Malware Protection Finding",
+ "ECSCluster": "AWS Guard Duty Malware Protection Finding",
+ "EKSCluster": "AWS Guard Duty Kubernetes Finding",
+ "Instance": "AWS Guard Duty EC2 Finding",
+ "S3Bucket": "AWS Guard Duty S3 Finding"
+ },
+ "name": "AWSGuardDuty - Classifier",
+ "propagationLabels": [
+ "all"
+ ],
+ "transformer": {
+ "complex": {
+ "accessor": "ResourceType",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "type": "classification",
+ "version": -1,
+ "fromVersion": "6.5.0"
+}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_-_Incoming_Mapper.json b/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_-_Incoming_Mapper.json
new file mode 100644
index 000000000000..b647aa9cda2e
--- /dev/null
+++ b/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_-_Incoming_Mapper.json
@@ -0,0 +1,841 @@
+{
+ "description": "Maps incoming AWSGuardDuty incidents fields.",
+ "feed": false,
+ "id": "AWSGuardDuty - Incoming Mapper",
+ "mapping": {
+ "AWS Guard Duty EC2 Finding": {
+ "dontMapEventToLabels": false,
+ "internalMapping": {
+ "AWS Arn": {
+ "simple": "Arn"
+ },
+ "AWS GuardDuty Confidence Score": {
+ "simple": "Confidence"
+ },
+ "AWS GuardDuty Iam Instance Profile": {
+ "complex": {
+ "accessor": "IamInstanceProfile",
+ "filters": [],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Instance Details": {
+ "complex": {
+ "filters": [
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.AvailabilityZone"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.imageDescription"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.ImageId"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceId"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceState"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceType"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.OutpostArn"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.LaunchTime"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.Platform"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.ProductCodes"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.Tags"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ]
+ ],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Network Interface": {
+ "complex": {
+ "accessor": "NetworkInterfaces",
+ "filters": [],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Partition": {
+ "simple": "Partition"
+ },
+ "AWS GuardDuty Resource Type": {
+ "simple": "Resource.ResourceType"
+ },
+ "AWS GuardDuty Schema Version": {
+ "simple": "SchemaVersion"
+ },
+ "AWS GuardDuty Service": {
+ "complex": {
+ "filters": [],
+ "root": "Service",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Type": {
+ "simple": "Type"
+ },
+ "Account ID": {
+ "simple": "AccountId"
+ },
+ "Alert ID": {
+ "simple": "Id"
+ },
+ "Description": {
+ "simple": "Description"
+ },
+ "Last Update Time": {
+ "simple": "UpdatedAt"
+ },
+ "Region": {
+ "simple": "Region"
+ },
+ "Title": {
+ "simple": "Title"
+ },
+ "occurred": {
+ "simple": "CreatedAt"
+ },
+ "severity": {
+ "simple": "Severity"
+ }
+ }
+ },
+ "AWS Guard Duty IAM Finding": {
+ "dontMapEventToLabels": false,
+ "internalMapping": {
+ "AWS Arn": {
+ "simple": "Arn"
+ },
+ "AWS GuardDuty Access Key Details": {
+ "complex": {
+ "accessor": "AccessKeyDetails",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Confidence Score": {
+ "simple": "Confidence"
+ },
+ "AWS GuardDuty Iam Instance Profile": {
+ "complex": {
+ "accessor": "IamInstanceProfile",
+ "filters": [],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Instance Details": {
+ "complex": {
+ "filters": [
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.AvailabilityZone"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.imageDescription"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.ImageId"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceId"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceState"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceType"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.OutpostArn"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.LaunchTime"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.Platform"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.ProductCodes"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.Tags"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ]
+ ],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Network Interface": {
+ "complex": {
+ "accessor": "NetworkInterfaces",
+ "filters": [],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Partition": {
+ "simple": "Partition"
+ },
+ "AWS GuardDuty Resource Type": {
+ "simple": "Resource.ResourceType"
+ },
+ "AWS GuardDuty Schema Version": {
+ "simple": "SchemaVersion"
+ },
+ "AWS GuardDuty Service": {
+ "complex": {
+ "filters": [],
+ "root": "Service",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Type": {
+ "simple": "Type"
+ },
+ "Account ID": {
+ "simple": "AccountId"
+ },
+ "Alert ID": {
+ "simple": "Id"
+ },
+ "Description": {
+ "simple": "Description"
+ },
+ "Last Update Time": {
+ "simple": "UpdatedAt"
+ },
+ "Region": {
+ "simple": "Region"
+ },
+ "Title": {
+ "simple": "Title"
+ },
+ "occurred": {
+ "simple": "CreatedAt"
+ },
+ "severity": {
+ "simple": "Severity"
+ }
+ }
+ },
+ "AWS Guard Duty Kubernetes Finding": {
+ "dontMapEventToLabels": false,
+ "internalMapping": {
+ "AWS Arn": {
+ "simple": "Arn"
+ },
+ "AWS GuardDuty Confidence Score": {
+ "simple": "Confidence"
+ },
+ "AWS GuardDuty Eks Cluster Details": {
+ "complex": {
+ "accessor": "EksClusterDetails",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Kubernetes User Details": {
+ "complex": {
+ "accessor": "KubernetesUserDetails",
+ "filters": [],
+ "root": "Resource.KubernetesDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Kubernetes Workload Details": {
+ "complex": {
+ "accessor": "KubernetesWorkloadDetails",
+ "filters": [],
+ "root": "Resource.KubernetesDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Partition": {
+ "simple": "Partition"
+ },
+ "AWS GuardDuty Resource Type": {
+ "simple": "Resource.ResourceType"
+ },
+ "AWS GuardDuty Schema Version": {
+ "simple": "SchemaVersion"
+ },
+ "AWS GuardDuty Service": {
+ "complex": {
+ "filters": [],
+ "root": "Service",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Type": {
+ "simple": "Type"
+ },
+ "Account ID": {
+ "simple": "AccountId"
+ },
+ "Alert ID": {
+ "simple": "Id"
+ },
+ "Description": {
+ "simple": "Description"
+ },
+ "Last Update Time": {
+ "simple": "UpdatedAt"
+ },
+ "Region": {
+ "simple": "Region"
+ },
+ "Title": {
+ "simple": "Title"
+ },
+ "occurred": {
+ "simple": "CreatedAt"
+ },
+ "severity": {
+ "simple": "Severity"
+ }
+ }
+ },
+ "AWS Guard Duty Malware Protection Finding": {
+ "dontMapEventToLabels": false,
+ "internalMapping": {
+ "AWS Arn": {
+ "simple": "Arn"
+ },
+ "AWS GuardDuty Confidence Score": {
+ "simple": "Confidence"
+ },
+ "AWS GuardDuty Container Details": {
+ "complex": {
+ "accessor": "ContainerDetails",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Ebs Volume Details": {
+ "complex": {
+ "accessor": "EbsVolumeDetails",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Ecs Cluster Details": {
+ "complex": {
+ "accessor": "EcsClusterDetails",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Partition": {
+ "simple": "Partition"
+ },
+ "AWS GuardDuty Resource Type": {
+ "simple": "Resource.ResourceType"
+ },
+ "AWS GuardDuty Schema Version": {
+ "simple": "SchemaVersion"
+ },
+ "AWS GuardDuty Service": {
+ "complex": {
+ "filters": [],
+ "root": "Service",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Type": {
+ "simple": "Type"
+ },
+ "Account ID": {
+ "simple": "AccountId"
+ },
+ "Alert ID": {
+ "simple": "Id"
+ },
+ "Description": {
+ "simple": "Description"
+ },
+ "Last Update Time": {
+ "simple": "UpdatedAt"
+ },
+ "Region": {
+ "simple": "Region"
+ },
+ "Title": {
+ "simple": "Title"
+ },
+ "occurred": {
+ "simple": "CreatedAt"
+ },
+ "severity": {
+ "simple": "Severity"
+ }
+ }
+ },
+ "AWS Guard Duty S3 Finding": {
+ "dontMapEventToLabels": false,
+ "internalMapping": {
+ "AWS Arn": {
+ "simple": "Arn"
+ },
+ "AWS GuardDuty Access Key Details": {
+ "complex": {
+ "accessor": "AccessKeyDetails",
+ "filters": [],
+ "root": "Resource",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Confidence Score": {
+ "simple": "Confidence"
+ },
+ "AWS GuardDuty Iam Instance Profile": {
+ "complex": {
+ "accessor": "IamInstanceProfile",
+ "filters": [],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Instance Details": {
+ "complex": {
+ "filters": [
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.AvailabilityZone"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.imageDescription"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.ImageId"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceId"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceState"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.InstanceType"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.OutpostArn"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.LaunchTime"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.Platform"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.ProductCodes"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ],
+ [
+ {
+ "left": {
+ "isContext": true,
+ "value": {
+ "simple": "Resource.InstanceDetails.Tags"
+ }
+ },
+ "operator": "isNotEmpty"
+ }
+ ]
+ ],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Network Interface": {
+ "complex": {
+ "accessor": "NetworkInterfaces",
+ "filters": [],
+ "root": "Resource.InstanceDetails",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Partition": {
+ "simple": "Partition"
+ },
+ "AWS GuardDuty Resource Type": {
+ "simple": "Resource.ResourceType"
+ },
+ "AWS GuardDuty S3 Bucket Details": {
+ "complex": {
+ "filters": [],
+ "root": "AWS GuardDuty S3 Bucket Details",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Schema Version": {
+ "simple": "SchemaVersion"
+ },
+ "AWS GuardDuty Service": {
+ "complex": {
+ "filters": [],
+ "root": "Service",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Type": {
+ "simple": "Type"
+ },
+ "Account ID": {
+ "simple": "AccountId"
+ },
+ "Alert ID": {
+ "simple": "Id"
+ },
+ "Description": {
+ "simple": "Description"
+ },
+ "Last Update Time": {
+ "simple": "UpdatedAt"
+ },
+ "Region": {
+ "simple": "Region"
+ },
+ "Title": {
+ "simple": "Title"
+ },
+ "occurred": {
+ "simple": "CreatedAt"
+ },
+ "severity": {
+ "simple": "Severity"
+ }
+ }
+ },
+ "dbot_classification_incident_type_all": {
+ "dontMapEventToLabels": true,
+ "internalMapping": {
+ "AWS Arn": {
+ "simple": "Arn"
+ },
+ "AWS GuardDuty Confidence Score": {
+ "simple": "Confidence"
+ },
+ "AWS GuardDuty Partition": {
+ "simple": "Partition"
+ },
+ "AWS GuardDuty Resource Type": {
+ "simple": "Resource.ResourceType"
+ },
+ "AWS GuardDuty Schema Version": {
+ "simple": "SchemaVersion"
+ },
+ "AWS GuardDuty Service": {
+ "complex": {
+ "filters": [],
+ "root": "Service",
+ "transformers": []
+ }
+ },
+ "AWS GuardDuty Type": {
+ "simple": "Type"
+ },
+ "Account ID": {
+ "simple": "AccountId"
+ },
+ "Alert ID": {
+ "simple": "Id"
+ },
+ "Description": {
+ "simple": "Description"
+ },
+ "Last Update Time": {
+ "simple": "UpdatedAt"
+ },
+ "Region": {
+ "simple": "Region"
+ },
+ "Title": {
+ "simple": "Title"
+ },
+ "occurred": {
+ "simple": "CreatedAt"
+ },
+ "severity": {
+ "simple": "Severity"
+ }
+ }
+ }
+ },
+ "name": "AWSGuardDuty - Incoming Mapper",
+ "type": "mapping-incoming",
+ "version": -1,
+ "fromVersion": "6.5.0"
+}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_classifier.json b/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_classifier.json
deleted file mode 100644
index ba2f6dbde208..000000000000
--- a/Packs/AWS-GuardDuty/Classifiers/classifier-AWSGuardDuty_classifier.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
- "description": "",
- "feed": false,
- "id": "classifier-AWSGuardDuty_classifier.json",
- "keyTypeMap": {
- "AccessKey": "AWS Guard Duty IAM Finding",
- "Container": "AWS Guard Duty Malware Protection Finding",
- "ECSCluster": "AWS Guard Duty Malware Protection Finding",
- "EKSCluster": "AWS Guard Duty Kubernetes Finding",
- "Instance": "AWS Guard Duty EC2 Finding",
- "S3Bucket": "AWS Guard Duty S3 Finding"
- },
- "name": "classifier-AWSGuardDuty_classifier.json",
- "propagationLabels": [
- "all"
- ],
- "transformer": {
- "complex": {
- "accessor": "ResourceType",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "type": "classification",
- "version": -1,
- "fromVersion": "6.5.0"
-}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Classifiers/classifier-mapper-incoming-AWSGuardDuty.json b/Packs/AWS-GuardDuty/Classifiers/classifier-mapper-incoming-AWSGuardDuty.json
deleted file mode 100644
index 35e3621a0a99..000000000000
--- a/Packs/AWS-GuardDuty/Classifiers/classifier-mapper-incoming-AWSGuardDuty.json
+++ /dev/null
@@ -1,841 +0,0 @@
-{
- "description": "",
- "feed": false,
- "id": "classifier-mapper-incoming-AWSGuardDuty.json",
- "mapping": {
- "AWS Guard Duty EC2 Finding": {
- "dontMapEventToLabels": false,
- "internalMapping": {
- "AWS Arn": {
- "simple": "Arn"
- },
- "AWS GuardDuty Confidence Score": {
- "simple": "Confidence"
- },
- "AWS GuardDuty Iam Instance Profile": {
- "complex": {
- "accessor": "IamInstanceProfile",
- "filters": [],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Instance Details": {
- "complex": {
- "filters": [
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.AvailabilityZone"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.imageDescription"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.ImageId"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceId"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceState"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceType"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.OutpostArn"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.LaunchTime"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.Platform"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.ProductCodes"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.Tags"
- }
- },
- "operator": "isNotEmpty"
- }
- ]
- ],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Network Interface": {
- "complex": {
- "accessor": "NetworkInterfaces",
- "filters": [],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Partition": {
- "simple": "Partition"
- },
- "AWS GuardDuty Resource Type": {
- "simple": "Resource.ResourceType"
- },
- "AWS GuardDuty Schema Version": {
- "simple": "SchemaVersion"
- },
- "AWS GuardDuty Service": {
- "complex": {
- "filters": [],
- "root": "Service",
- "transformers": []
- }
- },
- "AWS GuardDuty Type": {
- "simple": "Type"
- },
- "Account ID": {
- "simple": "AccountId"
- },
- "Alert ID": {
- "simple": "Id"
- },
- "Description": {
- "simple": "Description"
- },
- "Last Update Time": {
- "simple": "UpdatedAt"
- },
- "Region": {
- "simple": "Region"
- },
- "Title": {
- "simple": "Title"
- },
- "occurred": {
- "simple": "CreatedAt"
- },
- "severity": {
- "simple": "Severity"
- }
- }
- },
- "AWS Guard Duty IAM Finding": {
- "dontMapEventToLabels": false,
- "internalMapping": {
- "AWS Arn": {
- "simple": "Arn"
- },
- "AWS GuardDuty Access Key Details": {
- "complex": {
- "accessor": "AccessKeyDetails",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "AWS GuardDuty Confidence Score": {
- "simple": "Confidence"
- },
- "AWS GuardDuty Iam Instance Profile": {
- "complex": {
- "accessor": "IamInstanceProfile",
- "filters": [],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Instance Details": {
- "complex": {
- "filters": [
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.AvailabilityZone"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.imageDescription"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.ImageId"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceId"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceState"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceType"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.OutpostArn"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.LaunchTime"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.Platform"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.ProductCodes"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.Tags"
- }
- },
- "operator": "isNotEmpty"
- }
- ]
- ],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Network Interface": {
- "complex": {
- "accessor": "NetworkInterfaces",
- "filters": [],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Partition": {
- "simple": "Partition"
- },
- "AWS GuardDuty Resource Type": {
- "simple": "Resource.ResourceType"
- },
- "AWS GuardDuty Schema Version": {
- "simple": "SchemaVersion"
- },
- "AWS GuardDuty Service": {
- "complex": {
- "filters": [],
- "root": "Service",
- "transformers": []
- }
- },
- "AWS GuardDuty Type": {
- "simple": "Type"
- },
- "Account ID": {
- "simple": "AccountId"
- },
- "Alert ID": {
- "simple": "Id"
- },
- "Description": {
- "simple": "Description"
- },
- "Last Update Time": {
- "simple": "UpdatedAt"
- },
- "Region": {
- "simple": "Region"
- },
- "Title": {
- "simple": "Title"
- },
- "occurred": {
- "simple": "CreatedAt"
- },
- "severity": {
- "simple": "Severity"
- }
- }
- },
- "AWS Guard Duty Kubernetes Finding": {
- "dontMapEventToLabels": false,
- "internalMapping": {
- "AWS Arn": {
- "simple": "Arn"
- },
- "AWS GuardDuty Confidence Score": {
- "simple": "Confidence"
- },
- "AWS GuardDuty Eks Cluster Details": {
- "complex": {
- "accessor": "EksClusterDetails",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "AWS GuardDuty Kubernetes User Details": {
- "complex": {
- "accessor": "KubernetesUserDetails",
- "filters": [],
- "root": "Resource.KubernetesDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Kubernetes Workload Details": {
- "complex": {
- "accessor": "KubernetesWorkloadDetails",
- "filters": [],
- "root": "Resource.KubernetesDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Partition": {
- "simple": "Partition"
- },
- "AWS GuardDuty Resource Type": {
- "simple": "Resource.ResourceType"
- },
- "AWS GuardDuty Schema Version": {
- "simple": "SchemaVersion"
- },
- "AWS GuardDuty Service": {
- "complex": {
- "filters": [],
- "root": "Service",
- "transformers": []
- }
- },
- "AWS GuardDuty Type": {
- "simple": "Type"
- },
- "Account ID": {
- "simple": "AccountId"
- },
- "Alert ID": {
- "simple": "Id"
- },
- "Description": {
- "simple": "Description"
- },
- "Last Update Time": {
- "simple": "UpdatedAt"
- },
- "Region": {
- "simple": "Region"
- },
- "Title": {
- "simple": "Title"
- },
- "occurred": {
- "simple": "CreatedAt"
- },
- "severity": {
- "simple": "Severity"
- }
- }
- },
- "AWS Guard Duty Malware Protection Finding": {
- "dontMapEventToLabels": false,
- "internalMapping": {
- "AWS Arn": {
- "simple": "Arn"
- },
- "AWS GuardDuty Confidence Score": {
- "simple": "Confidence"
- },
- "AWS GuardDuty Container Details": {
- "complex": {
- "accessor": "ContainerDetails",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "AWS GuardDuty Ebs Volume Details": {
- "complex": {
- "accessor": "EbsVolumeDetails",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "AWS GuardDuty Ecs Cluster Details": {
- "complex": {
- "accessor": "EcsClusterDetails",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "AWS GuardDuty Partition": {
- "simple": "Partition"
- },
- "AWS GuardDuty Resource Type": {
- "simple": "Resource.ResourceType"
- },
- "AWS GuardDuty Schema Version": {
- "simple": "SchemaVersion"
- },
- "AWS GuardDuty Service": {
- "complex": {
- "filters": [],
- "root": "Service",
- "transformers": []
- }
- },
- "AWS GuardDuty Type": {
- "simple": "Type"
- },
- "Account ID": {
- "simple": "AccountId"
- },
- "Alert ID": {
- "simple": "Id"
- },
- "Description": {
- "simple": "Description"
- },
- "Last Update Time": {
- "simple": "UpdatedAt"
- },
- "Region": {
- "simple": "Region"
- },
- "Title": {
- "simple": "Title"
- },
- "occurred": {
- "simple": "CreatedAt"
- },
- "severity": {
- "simple": "Severity"
- }
- }
- },
- "AWS Guard Duty S3 Finding": {
- "dontMapEventToLabels": false,
- "internalMapping": {
- "AWS Arn": {
- "simple": "Arn"
- },
- "AWS GuardDuty Access Key Details": {
- "complex": {
- "accessor": "AccessKeyDetails",
- "filters": [],
- "root": "Resource",
- "transformers": []
- }
- },
- "AWS GuardDuty Confidence Score": {
- "simple": "Confidence"
- },
- "AWS GuardDuty Iam Instance Profile": {
- "complex": {
- "accessor": "IamInstanceProfile",
- "filters": [],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Instance Details": {
- "complex": {
- "filters": [
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.AvailabilityZone"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.imageDescription"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.ImageId"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceId"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceState"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.InstanceType"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.OutpostArn"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.LaunchTime"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.Platform"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.ProductCodes"
- }
- },
- "operator": "isNotEmpty"
- }
- ],
- [
- {
- "left": {
- "isContext": true,
- "value": {
- "simple": "Resource.InstanceDetails.Tags"
- }
- },
- "operator": "isNotEmpty"
- }
- ]
- ],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Network Interface": {
- "complex": {
- "accessor": "NetworkInterfaces",
- "filters": [],
- "root": "Resource.InstanceDetails",
- "transformers": []
- }
- },
- "AWS GuardDuty Partition": {
- "simple": "Partition"
- },
- "AWS GuardDuty Resource Type": {
- "simple": "Resource.ResourceType"
- },
- "AWS GuardDuty S3 Bucket Details": {
- "complex": {
- "filters": [],
- "root": "AWS GuardDuty S3 Bucket Details",
- "transformers": []
- }
- },
- "AWS GuardDuty Schema Version": {
- "simple": "SchemaVersion"
- },
- "AWS GuardDuty Service": {
- "complex": {
- "filters": [],
- "root": "Service",
- "transformers": []
- }
- },
- "AWS GuardDuty Type": {
- "simple": "Type"
- },
- "Account ID": {
- "simple": "AccountId"
- },
- "Alert ID": {
- "simple": "Id"
- },
- "Description": {
- "simple": "Description"
- },
- "Last Update Time": {
- "simple": "UpdatedAt"
- },
- "Region": {
- "simple": "Region"
- },
- "Title": {
- "simple": "Title"
- },
- "occurred": {
- "simple": "CreatedAt"
- },
- "severity": {
- "simple": "Severity"
- }
- }
- },
- "dbot_classification_incident_type_all": {
- "dontMapEventToLabels": true,
- "internalMapping": {
- "AWS Arn": {
- "simple": "Arn"
- },
- "AWS GuardDuty Confidence Score": {
- "simple": "Confidence"
- },
- "AWS GuardDuty Partition": {
- "simple": "Partition"
- },
- "AWS GuardDuty Resource Type": {
- "simple": "Resource.ResourceType"
- },
- "AWS GuardDuty Schema Version": {
- "simple": "SchemaVersion"
- },
- "AWS GuardDuty Service": {
- "complex": {
- "filters": [],
- "root": "Service",
- "transformers": []
- }
- },
- "AWS GuardDuty Type": {
- "simple": "Type"
- },
- "Account ID": {
- "simple": "AccountId"
- },
- "Alert ID": {
- "simple": "Id"
- },
- "Description": {
- "simple": "Description"
- },
- "Last Update Time": {
- "simple": "UpdatedAt"
- },
- "Region": {
- "simple": "Region"
- },
- "Title": {
- "simple": "Title"
- },
- "occurred": {
- "simple": "CreatedAt"
- },
- "severity": {
- "simple": "Severity"
- }
- }
- }
- },
- "name": "classifier-mapper-incoming-AWSGuardDuty.json",
- "type": "mapping-incoming",
- "version": -1,
- "fromVersion": "6.5.0"
-}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.py b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.py
index a49166dbbe75..fe5daf70b6ef 100644
--- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.py
+++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.py
@@ -477,7 +477,9 @@ def parse_finding(finding: dict) -> Dict:
return parsed_finding
-def get_findings(client: boto3.client, args: dict) -> CommandResults:
+def get_findings(client: boto3.client, args: dict) -> dict:
+ return_raw_response = argToBoolean(args.get('returnRawResponse', 'false'))
+
response = client.get_findings(
DetectorId=args.get('detectorId'),
FindingIds=argToList(args.get('findingIds')))
@@ -492,11 +494,15 @@ def get_findings(client: boto3.client, args: dict) -> CommandResults:
headers = ['Id', 'Title', 'Description', 'Type', 'ResourceType', 'CreatedAt', 'AccountId', 'Arn']
readable_output = tableToMarkdown('AWS GuardDuty Findings', data, removeNull=True, headers=headers) \
if data else 'No result were found'
- return CommandResults(readable_output=readable_output,
- raw_response=raw,
- outputs=data,
- outputs_prefix='AWS.GuardDuty.Findings',
- outputs_key_field='Id')
+
+ return {
+ 'ContentsFormat': formats['json'],
+ 'Type': entryTypes['note'],
+ 'Contents': raw if raw else data,
+ 'ReadableContentsFormat': formats['markdown'],
+ 'HumanReadable': readable_output,
+ 'EntryContext': {"AWS.GuardDuty.Findings(val.FindingId === obj.Id)": raw if return_raw_response else data}
+ }
def parse_incident_from_finding(finding: dict):
diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml
index 2e47270706f8..d4870f9f93c3 100644
--- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml
+++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml
@@ -1127,6 +1127,14 @@ script:
name: roleSessionDuration
required: false
secret: false
+ - name: returnRawResponse
+ description: Select 'true' to save all fields from the response to the context. Otherwise, complex fields will be stored in JSON format. Default value is False.
+ required: false
+ auto: PREDEFINED
+ predefined:
+ - 'true'
+ - 'false'
+ defaultValue: false
deprecated: false
description: Describes Amazon GuardDuty findings specified by finding IDs.
execution: false
@@ -1497,7 +1505,7 @@ script:
- contextPath: AWS.GuardDuty.Members.UpdatedAt
description: The time a member was last updated.
type: string
- dockerimage: demisto/boto3py3:1.0.0.38849
+ dockerimage: demisto/boto3py3:1.0.0.48955
feed: false
isfetch: true
longRunning: false
@@ -1509,5 +1517,5 @@ script:
tests:
- No tests
fromversion: 5.0.0
-defaultmapperin: AWSGuardDuty Mapper (incoming)
-defaultclassifier: AWSGuardDuty classifier
+defaultmapperin: AWSGuardDuty - Incoming Mapper
+defaultclassifier: AWSGuardDuty - Classifier
diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty_test.py b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty_test.py
index 85668f80492a..c5672a64e20e 100644
--- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty_test.py
+++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty_test.py
@@ -338,7 +338,7 @@ def test_get_findings(mocker):
get_findings_mock.assert_called_with(DetectorId='some_id',
FindingIds=['finding_id1', 'finding_id2'])
- assert command_results.outputs == EXPECTED_FINDING_OUTPUTS
+ assert command_results.get('EntryContext') == EXPECTED_FINDING_OUTPUTS
class MockedPaginator:
diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/README.md b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/README.md
index 9fd2e599ae6d..d4777db7ca92 100644
--- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/README.md
+++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/README.md
@@ -649,15 +649,15 @@ Action: _guardduty:GetFindings_
`aws-gd-get-findings`
#### Input
-| **Argument Name** | **Description** | **Required** |
-| --- | --- | --- |
-| detectorId | The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve. | Required |
-| findingIds | IDs of the findings that you want to retrieve. | Required |
-| region | The AWS Region, if not specified the default region will be used. | Optional |
-| roleArn | The Amazon Resource Name (ARN) of the role to assume. | Optional |
-| roleSessionName | An identifier for the assumed role session. | Optional |
-| roleSessionDuration | The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role. | Optional |
-
+| **Argument Name** | **Description** | **Required** |
+|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+| detectorId | The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve. | Required |
+| findingIds | IDs of the findings that you want to retrieve. | Required |
+| region | The AWS Region, if not specified the default region will be used. | Optional |
+| roleArn | The Amazon Resource Name (ARN) of the role to assume. | Optional |
+| roleSessionName | An identifier for the assumed role session. | Optional |
+| roleSessionDuration | The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role. | Optional |
+| returnRawResponse | Select 'true' to save all fields from the response to the context. Otherwise, complex fields will be stored in JSON format. Default value is False. | Optional |
#### Context Output
diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/test_data/get_findings_expected_outputs.py b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/test_data/get_findings_expected_outputs.py
index 57a00689759e..24d4a36abe04 100644
--- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/test_data/get_findings_expected_outputs.py
+++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/test_data/get_findings_expected_outputs.py
@@ -1,4 +1,4 @@
-EXPECTED_FINDING_OUTPUTS = [{'AccountId': 'string',
+FINDING_OUTPUTS = [{'AccountId': 'string',
'Arn': 'string',
'Confidence': 123.0,
'CreatedAt': '2022-11-08T14:24:52.908Z',
@@ -352,3 +352,5 @@
'Title': 'title',
'Type': 'string',
'UpdatedAt': '2022-09-07T13:48:00.814Z'}]
+
+EXPECTED_FINDING_OUTPUTS = {'AWS.GuardDuty.Findings(val.FindingId === obj.Id)': FINDING_OUTPUTS}
diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml b/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml
index d3a6d9dc7c95..fddb23ab6148 100644
--- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml
+++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml
@@ -112,7 +112,7 @@ script:
description: Manual command used to fetch events and display them.
execution: false
name: aws-gd-get-events
- dockerimage: demisto/boto3py3:1.0.0.37755
+ dockerimage: demisto/boto3py3:1.0.0.48376
isfetchevents: true
runonce: false
subtype: python3
diff --git a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_EC2_Finding.json b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_EC2_Finding.json
index cf8cfb8a5dcf..ee0e8f8b90a1 100644
--- a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_EC2_Finding.json
+++ b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_EC2_Finding.json
@@ -1386,5 +1386,6 @@
"system": false,
"version": -1,
"fromVersion": "6.5.0",
- "description": ""
+ "description": "",
+ "marketplaces": ["xsoar"]
}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_IAM_Finding.json b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_IAM_Finding.json
index a6ba6c297254..7a883bbd653a 100644
--- a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_IAM_Finding.json
+++ b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_IAM_Finding.json
@@ -1393,5 +1393,6 @@
"system": false,
"version": -1,
"fromVersion": "6.5.0",
- "description": ""
+ "description": "",
+ "marketplaces": ["xsoar"]
}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Kubernetes_Finding.json b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Kubernetes_Finding.json
index e3de25c7e0bf..9231e39d5a76 100644
--- a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Kubernetes_Finding.json
+++ b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Kubernetes_Finding.json
@@ -1384,5 +1384,6 @@
"system": false,
"version": -1,
"fromVersion": "6.5.0",
- "description": ""
+ "description": "",
+ "marketplaces": ["xsoar"]
}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Malware_Protection_Finding.json b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Malware_Protection_Finding.json
index 9813d3d4da12..4659f48fc98f 100644
--- a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Malware_Protection_Finding.json
+++ b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_Malware_Protection_Finding.json
@@ -1388,5 +1388,6 @@
"system": false,
"version": -1,
"fromVersion": "6.5.0",
- "description": ""
+ "description": "",
+ "marketplaces": ["xsoar"]
}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_S3_Finding.json b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_S3_Finding.json
index 7d59d66dd8af..9b7df4caa656 100644
--- a/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_S3_Finding.json
+++ b/Packs/AWS-GuardDuty/Layouts/layoutscontainer-AWS_Guard_Duty_S3_Finding.json
@@ -1402,5 +1402,6 @@
"system": false,
"version": -1,
"fromVersion": "6.5.0",
- "description": ""
+ "description": "",
+ "marketplaces": ["xsoar"]
}
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/ModelingRules/AWSGuardDutyModelingRules_1_3/AWSGuardDutyModelingRules_1_3.xif b/Packs/AWS-GuardDuty/ModelingRules/AWSGuardDutyModelingRules_1_3/AWSGuardDutyModelingRules_1_3.xif
index 0236bdc24fbd..738262997072 100644
--- a/Packs/AWS-GuardDuty/ModelingRules/AWSGuardDutyModelingRules_1_3/AWSGuardDutyModelingRules_1_3.xif
+++ b/Packs/AWS-GuardDuty/ModelingRules/AWSGuardDutyModelingRules_1_3/AWSGuardDutyModelingRules_1_3.xif
@@ -1,22 +1,20 @@
[MODEL: dataset="aws_guardduty_raw"]
-alter
+alter targetIP1 = json_extract_scalar(Service, "$.Action.NetworkConnectionAction.RemoteIpDetails.IpAddressV4"),
+ targetIP2 = json_extract_scalar(Service, "$.Action.KubernetesApiCallAction.RemoteIpDetails.IpAddressV4"),
+ username1 = trim(json_extract_scalar(Resource,"$.AccessKeyDetails.UserName"), "\""),
+ username2 = json_extract_scalar(Resource, "$.KubernetesDetails.KubernetesUserDetails.Username"),
+ userType = json_extract_scalar(Resource, "$.AccessKeyDetails.UserType")
+| alter
xdm.alert.category = json_extract_scalar(Resource, "$.ResourceType"),
xdm.alert.subcategory = Type,
xdm.alert.description = Description,
xdm.event.outcome_reason = Title,
xdm.alert.severity = to_string(Severity),
xdm.target.host.hostname = json_extract_scalar(Resource, "$.EksClusterDetails.Name"),
- xdm.source.user.user_type = json_extract_scalar(Resource, "$.AccessKeyDetails.UserType"),
+ xdm.source.user.user_type = if(userType in("Root","IAMUser","Role","FederatedUser","AWSAccount"),XDM_CONST.USER_TYPE_REGULAR , userType in("Directory","AWSService") ,XDM_CONST.USER_TYPE_SERVICE_ACCOUNT,userType in("AssumedRole") ,XDM_CONST.USER_TYPE_MACHINE_ACCOUNT ,to_string(userType)),
xdm.source.user.employee_id = json_extract_scalar(Resource, "$.AccessKeyDetails.PrincipalId"),
xdm.target.process.name = json_extract_scalar(Service, "$.ServiceName"),
xdm.source.host.ipv4_addresses = arraycreate(coalesce(json_extract_scalar(Service, "$.Action.AwsApiCallAction.RemoteIpDetails.IpAddressV4"), "")),
- xdm.source.ipv4 = json_extract_scalar(Service, "$.Action.NetworkConnectionAction.LocalIpDetails.IpAddressV4")
- | alter targetIP1 = json_extract_scalar(Service, "$.Action.NetworkConnectionAction.RemoteIpDetails.IpAddressV4"),
- targetIP2 = json_extract_scalar(Service, "$.Action.KubernetesApiCallAction.RemoteIpDetails.IpAddressV4")
- | alter
- xdm.target.ipv4 = coalesce(targetIP1, targetIP2)
- // UseNameExtraction
- | alter
- username1 = trim(json_extract_scalar(Resource,"$.AccessKeyDetails.UserName"), "\""),
- username2 = json_extract_scalar(Resource, "$.KubernetesDetails.KubernetesUserDetails.Username")
- | alter xdm.source.user.username = coalesce(username1, username2 );
\ No newline at end of file
+ xdm.source.ipv4 = json_extract_scalar(Service, "$.Action.NetworkConnectionAction.LocalIpDetails.IpAddressV4"),
+ xdm.target.ipv4 = coalesce(targetIP1, targetIP2),
+ xdm.source.user.username = coalesce(username1, username2 );
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_10.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_10.md
new file mode 100644
index 000000000000..f969bb8a9c21
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_10.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### AWS - GuardDuty
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.43720*.
+- Added the *returnRawResponse* argument to the ***aws-gd-get-findings*** command.
\ No newline at end of file
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_11.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_11.md
new file mode 100644
index 000000000000..c55746fe6c8b
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_11.md
@@ -0,0 +1,12 @@
+
+#### Layouts
+##### AWS Guard Duty Malware Protection Finding
+- This item is no longer supported in XSIAM.
+##### AWS Guard Duty Kubernetes Finding
+- This item is no longer supported in XSIAM.
+##### AWS Guard Duty S3 Finding
+- This item is no longer supported in XSIAM.
+##### AWS Guard Duty IAM Finding
+- This item is no longer supported in XSIAM.
+##### AWS Guard Duty EC2 Finding
+- This item is no longer supported in XSIAM.
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_12.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_12.md
new file mode 100644
index 000000000000..0e9f42c6c5d9
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_12.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### AWS - GuardDuty
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.48376*.
+##### AWS - GuardDuty Event Collector
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.48376*.
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_13.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_13.md
new file mode 100644
index 000000000000..228ffaaec028
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_13.md
@@ -0,0 +1,4 @@
+
+#### Modeling Rules
+##### AWSGuardDuty Modeling Rule
+- Updated the modeling rules.
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_14.json b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_14.json
new file mode 100644
index 000000000000..867b2f2f34c3
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_14.json
@@ -0,0 +1 @@
+{"breakingChanges":true,"breakingChangesNotes":"The names of the integration's default mapper and classifier were changed."}
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_14.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_14.md
new file mode 100644
index 000000000000..9c6b8e863451
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_14.md
@@ -0,0 +1,15 @@
+
+#### Integrations
+##### AWS - GuardDuty
+- **Breaking changes**: Changed the default classifier name to *AWSGuardDuty - Classifier* and the default mapper name to *AWSGuardDuty - Incoming Mapper*.
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.48955*.
+
+#### Mappers
+##### New: AWSGuardDuty - Incoming Mapper
+- Maps incoming AWSGuardDuty incidents fields.
+
+
+#### Classifiers
+##### New: AWSGuardDuty - Classifier
+- Classifies AWSGuardDuty incidents.
+
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_7.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_7.md
new file mode 100644
index 000000000000..b9e787430d69
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_7.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### AWS - GuardDuty
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.41464*.
+##### AWS - GuardDuty Event Collector
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.41464*.
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_8.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_8.md
new file mode 100644
index 000000000000..3164d22a2e60
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_8.md
@@ -0,0 +1,6 @@
+#### Integrations
+##### AWS - GuardDuty Event Collector
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
+
+##### AWS - GuardDuty
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_9.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_9.md
new file mode 100644
index 000000000000..532f3aa1bc2b
--- /dev/null
+++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_9.md
@@ -0,0 +1,7 @@
+
+#### Integrations
+##### AWS - GuardDuty Event Collector
+- Fixed a typo in **AWSApiModule**.
+
+##### AWS - GuardDuty
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-GuardDuty/pack_metadata.json b/Packs/AWS-GuardDuty/pack_metadata.json
index f7e0defb1a65..741dba2130c1 100644
--- a/Packs/AWS-GuardDuty/pack_metadata.json
+++ b/Packs/AWS-GuardDuty/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - GuardDuty",
"description": "Amazon Web Services Guard Duty Service (gd)",
"support": "xsoar",
- "currentVersion": "1.3.6",
+ "currentVersion": "1.3.14",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -10,9 +10,7 @@
"categories": [
"Cloud Services"
],
- "tags": [
- "marketplacev2:Data Source"
- ],
+ "tags": [],
"itemPrefix": [
"AWS",
"AWS GuardDuty"
diff --git a/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml b/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml
index dc21e8c76d20..e6d719e5ac1a 100644
--- a/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml
+++ b/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml
@@ -1,4 +1,7 @@
category: IT Services
+sectionOrder:
+- Connect
+- Collect
commonfields:
id: AWS - IAM
version: -1
@@ -7,10 +10,12 @@ configuration:
name: roleArn
required: false
type: 0
+ section: Connect
- display: Role Session Name
name: roleSessionName
required: false
type: 0
+ section: Connect
- display: AWS Default Region
name: defaultRegion
options:
@@ -34,51 +39,60 @@ configuration:
- us-gov-west-1
required: false
type: 15
+ section: Collect
+ advanced: true
- display: Role Session Duration
name: sessionDuration
required: false
type: 0
+ section: Connect
+ advanced: true
- display: Access Key
name: credentials
required: false
type: 9
displaypassword: Secret Key
+ section: Connect
- display: Access Key
name: access_key
required: false
type: 0
hidden: true
+ section: Connect
- display: Secret Key
name: secret_key
required: false
type: 4
hidden: true
-- additionalinfo: The time in seconds till a timeout exception is reached. You can
- specify just the read timeout (for example 60) or also the connect timeout followed
- after a comma (for example 60,10). If a connect timeout is not specified, a default
- of 10 second will be used.
+ section: Connect
+- additionalinfo: The time in seconds till a timeout exception is reached. You can specify just the read timeout (for example 60) or also the connect timeout followed after a comma (for example 60,10). If a connect timeout is not specified, a default of 10 seconds will be used.
defaultvalue: 60,10
display: Timeout
name: timeout
required: false
type: 0
-- additionalinfo: 'The maximum number of retry attempts when connection or throttling
- errors are encountered. Set to 0 to disable retries. The default value is 5 and
- the limit is 10. Note: Increasing the number of retries will increase the execution
- time.'
+ section: Connect
+ advanced: true
+- additionalinfo: 'The maximum number of retry attempts when connection or throttling errors are encountered. Set to 0 to disable retries. The default value is 5 and the limit is 10. Note: Increasing the number of retries will increase the execution time.'
defaultvalue: '5'
display: Retries
name: retries
required: false
type: 0
+ section: Connect
+ advanced: true
- display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
+ section: Connect
+ advanced: true
- display: Use system proxy settings
name: proxy
required: false
type: 8
+ section: Connect
+ advanced: true
description: Amazon Web Services Identity and Access Management (IAM)
display: AWS - Identity and Access Management
name: AWS - IAM
@@ -92,8 +106,7 @@ script:
required: true
secret: false
- default: false
- description: The path for the user name. This parameter is optional. If it is
- not included, it defaults to a slash (/).
+ description: The path for the user name. This parameter is optional. If it is not included, it defaults to a slash (/).
isArray: false
name: path
required: false
@@ -111,9 +124,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -158,16 +169,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Retrieves information about the specified IAM user, including the
- user's creation date, path, unique ID, and ARN.
+ description: Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN.
execution: false
name: aws-iam-get-user
outputs:
@@ -187,8 +195,7 @@ script:
description: The path to the user.
type: string
- contextPath: AWS.IAM.Users.PasswordLastUsed
- description: The date and time, when the user's password was last used to sign
- in to an AWS website.
+ description: The date and time when the user's password was last used to sign in to an AWS website.
type: date
- arguments:
- default: false
@@ -204,9 +211,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -242,15 +247,13 @@ script:
required: true
secret: false
- default: false
- description: New name for the user. Include this parameter only if you're changing
- the user's name.
+ description: New name for the user. Include this parameter only if you're changing the user's name.
isArray: false
name: newUserName
required: false
secret: false
- default: false
- description: New path for the IAM user. Include this parameter only if you're
- changing the user's path.
+ description: New path for the IAM user. Include this parameter only if you're changing the user's path.
isArray: false
name: newPath
required: false
@@ -268,9 +271,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -299,16 +300,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Deletes the specified IAM user. The user must not belong to any groups
- or have any access keys, signing certificates, or attached policies.
+ description: Deletes the specified IAM user. The user must not belong to any groups or have any access keys, signing certificates, or attached policies.
execution: false
name: aws-iam-delete-user
- arguments:
@@ -326,8 +324,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Allows this new password to be used only once by requiring the
- specified IAM user to set a new password on next sign-in.
+ description: Allows this new password to be used only once by requiring the specified IAM user to set a new password on next sign-in.
isArray: false
name: passwordResetRequired
predefined:
@@ -348,9 +345,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -361,8 +356,7 @@ script:
name: aws-iam-update-login-profile
- arguments:
- default: false
- description: The name of the group to create. Do not include the path in this
- value.
+ description: The name of the group to create. Do not include the path in this value.
isArray: false
name: groupName
required: false
@@ -386,9 +380,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -427,9 +419,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -474,9 +464,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -527,9 +515,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -558,16 +544,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Creates a new AWS secret access key and corresponding AWS access
- key ID for the specified user. The default status for new keys is Active .
+ description: Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.
execution: false
name: aws-iam-create-access-key
outputs:
@@ -578,8 +561,7 @@ script:
description: The secret key used to sign requests.
type: string
- contextPath: AWS.IAM.Users.AccessKeys.Status
- description: The status of the access key. Active means that the key is valid
- for API calls, while Inactive means it is not.
+ description: The status of the access key. Active means that the key is valid for API calls, while Inactive means it is not.
type: string
- contextPath: AWS.IAM.Users.AccessKeys.CreateDate
description: The date when the access key was created.
@@ -599,9 +581,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: The status you want to assign to the secret access key. Active
- means that the key can be used for API calls to AWS, while Inactive means
- that the key cannot be used.
+ description: The status you want to assign to the secret access key. Active means that the key can be used for API calls to AWS, while Inactive means that the key cannot be used.
isArray: false
name: status
predefined:
@@ -622,17 +602,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Changes the status of the specified access key from Active to Inactive,
- or vice versa. This operation can be used to disable a user's key as part of
- a key rotation workflow.
+ description: Changes the status of the specified access key from Active to Inactive, or vice versa. This operation can be used to disable a user's key as part of a key rotation workflow.
execution: false
name: aws-iam-update-access-key
- arguments:
@@ -655,16 +631,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Returns information about the access key IDs associated with the
- specified IAM user.
+ description: Returns information about the access key IDs associated with the specified IAM user.
execution: false
name: aws-iam-list-access-keys-for-user
outputs:
@@ -672,8 +645,7 @@ script:
description: The ID for this access key.
type: string
- contextPath: AWS.IAM.Users.AccessKeys.Status
- description: The status of the access key. Active means the key is valid for
- API calls; Inactive means it is not.
+ description: The status of the access key. Active means the key is valid for API calls; Inactive means it is not.
type: string
- contextPath: AWS.IAM.Users.AccessKeys.CreateDate
description: The date when the access key was created.
@@ -685,9 +657,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: All
- description: The scope to use for filtering the results. To list only AWS managed
- policies, set Scope to AWS. To list only the customer managed policies in
- your AWS account, set Scope to Local.
+ description: The scope to use for filtering the results. To list only AWS managed policies, set Scope to AWS. To list only the customer managed policies in your AWS account, set Scope to Local.
isArray: false
name: scope
predefined:
@@ -699,10 +669,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'False'
- description: A flag to filter the results to only the attached policies. When
- OnlyAttached is true , the returned list contains only the policies that are
- attached to an IAM user, group, or role. When OnlyAttached is false , or when
- the parameter is not included, all policies are returned.
+ description: A flag to filter the results to only the attached policies. When OnlyAttached is true, the returned list contains only the policies that are attached to an IAM user, group, or role. When OnlyAttached is false, or when the parameter is not included, all policies are returned.
isArray: false
name: onlyAttached
predefined:
@@ -723,16 +690,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Lists all the managed policies that are available in your AWS account,
- including your own customer-defined managed policies and all AWS managed policies.
+ description: Lists all the managed policies that are available in your AWS account, including your own customer-defined managed policies and all AWS managed policies.
execution: false
name: aws-iam-list-policies
outputs:
@@ -749,12 +713,10 @@ script:
description: The path to the policy.
type: string
- contextPath: AWS.IAM.Policies.DefaultVersionId
- description: The identifier for the version of the policy that is set as the
- default version.
+ description: The identifier for the version of the policy that is set as the default version.
type: string
- contextPath: AWS.IAM.Policies.IsAttachable
- description: Specifies whether the policy can be attached to an IAM user, group,
- or role.
+ description: Specifies whether the policy can be attached to an IAM user, group, or role.
type: string
- contextPath: AWS.IAM.Policies.CreateDate
description: when the policy was created.
@@ -763,8 +725,7 @@ script:
description: when the policy was last updated.
type: date
- contextPath: AWS.IAM.Policies.AttachmentCount
- description: The number of entities (users, groups, and roles) that the policy
- is attached to.
+ description: The number of entities (users, groups, and roles) that the policy is attached to.
type: number
- arguments:
- default: false
@@ -780,9 +741,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -814,9 +773,7 @@ script:
description: A description of the role that you provide.
type: string
- contextPath: AWS.IAM.Roles.MaxSessionDuration
- description: The maximum session duration (in seconds) for the specified role.
- Anyone who uses the AWS CLI or API to assume the role can specify the duration
- using the optional DurationSeconds API parameter or duration-seconds CLI parameter.
+ description: The maximum session duration (in seconds) for the specified role. Anyone who uses the AWS CLI or API to assume the role can specify the duration using the optional DurationSeconds API parameter or duration-seconds CLI parameter.
type: number
- arguments:
- auto: PREDEFINED
@@ -831,8 +788,7 @@ script:
required: true
secret: false
- default: false
- description: The name (friendly name, not ARN) of the IAM Entity to attach the
- policy to.
+ description: The name (friendly name, not ARN) of the IAM Entity to attach the policy to.
isArray: false
name: entityName
required: false
@@ -860,8 +816,7 @@ script:
required: true
secret: false
- default: false
- description: The name (friendly name, not ARN) of the IAM Entity to detach the
- policy from.
+ description: The name (friendly name, not ARN) of the IAM Entity to detach the policy from.
isArray: false
name: entityName
required: false
@@ -885,9 +840,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -916,16 +869,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Deletes the password for the specified IAM user, which terminates
- the user's ability to access AWS services through the AWS Management Console.
+ description: Deletes the password for the specified IAM user, which terminates the user's ability to access AWS services through the AWS Management Console.
execution: false
name: aws-iam-delete-login-profile
- arguments:
@@ -948,16 +898,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Deletes the specified IAM group. The group must not contain any users
- or have any attached policies.
+ description: Deletes the specified IAM group. The group must not contain any users or have any attached policies.
execution: false
name: aws-iam-delete-group
- arguments:
@@ -986,9 +933,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -999,8 +944,7 @@ script:
name: aws-iam-remove-user-from-group
- arguments:
- default: false
- description: The name of the IAM user to create a password for. The user must
- already exist.
+ description: The name of the IAM user to create a password for. The user must already exist.
isArray: false
name: userName
required: true
@@ -1013,8 +957,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Specifies whether the user is required to set a new password on
- next sign-in.
+ description: Specifies whether the user is required to set a new password on next sign-in.
isArray: false
name: passwordResetRequired
predefined:
@@ -1035,16 +978,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Creates a password for the specified user, giving the user the ability
- to access AWS services through the AWS Management Console.
+ description: Creates a password for the specified user, giving the user the ability to access AWS services through the AWS Management Console.
execution: false
name: aws-iam-create-login-profile
- arguments:
@@ -1055,8 +995,7 @@ script:
required: true
secret: false
- default: false
- description: The access key ID for the access key ID and secret access key you
- want to delete.
+ description: The access key ID for the access key ID and secret access key you want to delete.
isArray: false
name: AccessKeyId
required: true
@@ -1074,9 +1013,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1111,9 +1048,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1158,16 +1093,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Deletes the specified instance profile. The instance profile must
- not have an associated role.
+ description: Deletes the specified instance profile. The instance profile must not have an associated role.
execution: false
name: aws-iam-delete-instance-profile
- arguments:
@@ -1184,9 +1116,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1261,18 +1191,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Adds the specified IAM role to the specified instance profile. An
- instance profile can contain only one role, and this limit cannot be increased.
- You can remove the existing role and then add a different role to an instance
- profile.
+ description: Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role, and this limit cannot be increased. You can remove the existing role and then add a different role to an instance profile.
execution: false
name: aws-iam-add-role-to-instance-profile
outputs:
@@ -1341,9 +1266,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1412,16 +1335,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Lists the instance profiles that have the specified associated IAM
- role.
+ description: Lists the instance profiles that have the specified associated IAM role.
execution: false
name: aws-iam-list-instance-profiles-for-role
outputs:
@@ -1484,9 +1404,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1555,9 +1473,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1611,9 +1527,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1630,8 +1544,7 @@ script:
required: true
secret: false
- default: false
- description: The trust relationship policy document that grants an entity permission
- to assume the role.
+ description: The trust relationship policy document that grants an entity permission to assume the role.
isArray: false
name: assumeRolePolicyDocument
required: true
@@ -1649,10 +1562,7 @@ script:
required: false
secret: false
- default: false
- description: The maximum session duration (in seconds) that you want to set
- for the specified role. If you do not specify a value for this setting, the
- default maximum of one hour is applied. This setting can have a value from
- 1 hour to 12 hours.
+ description: The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.
isArray: false
name: maxSessionDuration
required: false
@@ -1670,9 +1580,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -1714,8 +1622,7 @@ script:
required: true
secret: false
- default: false
- description: The JSON policy document that you want to use as the content for
- the new policy.
+ description: The JSON policy document that you want to use as the content for the new policy.
isArray: false
name: policyDocument
required: true
@@ -1745,17 +1652,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Creates a new managed policy for your AWS account. This operation
- creates a policy version with a version identifier of v1 and sets v1 as the
- policy's default version.
+ description: Creates a new managed policy for your AWS account. This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version.
execution: true
name: aws-iam-create-policy
outputs:
@@ -1766,38 +1669,31 @@ script:
description: The stable and unique string identifying the policy.
type: string
- contextPath: AWS.IAM.Policies.Arn
- description: The Amazon Resource Name (ARN). ARNs are unique identifiers for
- AWS resources.
+ description: The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
type: string
- contextPath: AWS.IAM.Policies.Path
description: The path to the policy.
type: string
- contextPath: AWS.IAM.Policies.DefaultVersionId
- description: The identifier for the version of the policy that is set as the
- default version.
+ description: The identifier for the version of the policy that is set as the default version.
type: string
- contextPath: AWS.IAM.Policies.AttachmentCount
- description: The number of entities (users, groups, and roles) that the policy
- is attached to.
+ description: The number of entities (users, groups, and roles) that the policy is attached to.
type: number
- contextPath: AWS.IAM.Policies.PermissionsBoundaryUsageCount
- description: The number of entities (users and roles) for which the policy is
- used to set the permissions boundary.
+ description: The number of entities (users and roles) for which the policy is used to set the permissions boundary.
type: number
- contextPath: AWS.IAM.Policies.IsAttachable
- description: Specifies whether the policy can be attached to an IAM user, group,
- or role.
+ description: Specifies whether the policy can be attached to an IAM user, group, or role.
type: boolean
- contextPath: AWS.IAM.Policies.Description
description: A friendly description of the policy.
type: string
- contextPath: AWS.IAM.Policies.CreateDate
- description: The date and time, in ISO 8601 date-time format , when the policy
- was created.
+ description: The date and time, in ISO 8601 date-time format , when the policy was created.
type: date
- contextPath: AWS.IAM.Policies.UpdateDate
- description: The date and time, in ISO 8601 date-time format , when the policy
- was last updated.
+ description: The date and time, in ISO 8601 date-time format , when the policy was last updated.
type: date
- arguments:
- default: false
@@ -1819,30 +1715,24 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Deletes the specified managed policy. Before you can delete a managed
- policy, you must first detach the policy from all users, groups, and roles that
- it is attached to. In addition you must delete all the policy's versions.
+ description: Deletes the specified managed policy. Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to. In addition you must delete all the policy's versions.
execution: true
name: aws-iam-delete-policy
- arguments:
- default: false
- description: The Amazon Resource Name (ARN) of the IAM policy to which you want
- to add a new version.
+ description: The Amazon Resource Name (ARN) of the IAM policy to which you want to add a new version.
isArray: false
name: policyArn
required: true
secret: false
- default: false
- description: The JSON policy document that you want to use as the content for
- this new version of the policy.
+ description: The JSON policy document that you want to use as the content for this new version of the policy.
isArray: false
name: policyDocument
required: true
@@ -1870,21 +1760,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Creates a new version of the specified managed policy. To update
- a managed policy, you create a new policy version. A managed policy can have
- up to five versions. If the policy has five versions, you must delete an existing
- version using DeletePolicyVersion before you create a new version. Optionally,
- you can set the new version as the policy's default version. The default version
- is the version that is in effect for the IAM users, groups, and roles to which
- the policy is attached.
+ description: Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version. Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.
execution: true
name: aws-iam-create-policy-version
outputs:
@@ -1898,13 +1780,11 @@ script:
description: The identifier for the policy version.
type: string
- contextPath: AWS.IAM.Policies.Versions.CreateDate
- description: The date and time, in ISO 8601 date-time format , when the policy
- version was created.
+ description: The date and time, in ISO 8601 date-time format , when the policy version was created.
type: string
- arguments:
- default: false
- description: The Amazon Resource Name (ARN) of the IAM policy from which you
- want to delete a version.
+ description: The Amazon Resource Name (ARN) of the IAM policy from which you want to delete a version.
isArray: false
name: policyArn
required: true
@@ -1928,31 +1808,24 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Deletes the specified version from the specified managed policy. You
- cannot delete the default version from a policy using this API. To delete the
- default version from a policy, use DeletePolicy . To find out which version
- of a policy is marked as the default version, use ListPolicyVersions .
+ description: Deletes the specified version from the specified managed policy. You cannot delete the default version from a policy using this API. To delete the default version from a policy, use DeletePolicy . To find out which version of a policy is marked as the default version, use ListPolicyVersions .
execution: true
name: aws-iam-delete-policy-version
- arguments:
- default: false
- description: The Amazon Resource Name (ARN) of the IAM policy for which you
- want the versions.
+ description: The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.
isArray: false
name: policyArn
required: true
secret: false
deprecated: false
- description: Lists information about the versions of the specified managed policy,
- including the version that is currently set as the policy's default version.
+ description: Lists information about the versions of the specified managed policy, including the version that is currently set as the policy's default version.
execution: false
name: aws-iam-list-policy-versions
outputs:
@@ -1963,17 +1836,14 @@ script:
description: The identifier for the policy version.
type: string
- contextPath: AWS.IAM.Policies.Versions.IsDefaultVersion
- description: Specifies whether the policy version is set as the policy's default
- version.
+ description: Specifies whether the policy version is set as the policy's default version.
type: boolean
- contextPath: AWS.IAM.Policies.Versions.CreateDate
- description: The date and time, in ISO 8601 date-time format , when the policy
- version was created.
+ description: The date and time, in ISO 8601 date-time format , when the policy version was created.
type: date
- arguments:
- default: false
- description: The Amazon Resource Name (ARN) of the managed policy that you want
- information about.
+ description: The Amazon Resource Name (ARN) of the managed policy that you want information about.
isArray: false
name: policyArn
required: true
@@ -1997,16 +1867,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Retrieves information about the specified version of the specified
- managed policy, including the policy document.
+ description: Retrieves information about the specified version of the specified managed policy, including the policy document.
execution: false
name: aws-iam-get-policy-version
outputs:
@@ -2017,17 +1884,14 @@ script:
description: The identifier for the policy version.
type: Unknown
- contextPath: AWS.IAM.Policies.Versions.IsDefaultVersion
- description: Specifies whether the policy version is set as the policy's default
- version.
+ description: Specifies whether the policy version is set as the policy's default version.
type: Unknown
- contextPath: AWS.IAM.Policies.Versions.CreateDate
- description: The date and time, in ISO 8601 date-time format , when the policy
- version was created.
+ description: The date and time, in ISO 8601 date-time format , when the policy version was created.
type: Unknown
- arguments:
- default: false
- description: The Amazon Resource Name (ARN) of the IAM policy whose default
- version you want to set.
+ description: The Amazon Resource Name (ARN) of the IAM policy whose default version you want to set.
isArray: false
name: policyArn
required: true
@@ -2051,17 +1915,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Sets the specified version of the specified policy as the policy's
- default (operative) version. This operation affects all users, groups, and
- roles that the policy is attached to.
+ description: Sets the specified version of the specified policy as the policy's default (operative) version. This operation affects all users, groups, and roles that the policy is attached to.
execution: true
name: aws-iam-set-default-policy-version
- arguments:
@@ -2084,9 +1944,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -2115,9 +1973,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -2145,8 +2001,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Specifies whether IAM user passwords must contain at least one
- of the non-alphanumeric characters. Can be "True" or "False".
+ description: Specifies whether IAM user passwords must contain at least one of the non-alphanumeric characters. Can be "True" or "False".
isArray: false
name: requireSymbols
predefined:
@@ -2156,8 +2011,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Specifies whether IAM user passwords must contain at least one
- numeric character (0 to 9). Can be "True" or "False".
+ description: Specifies whether IAM user passwords must contain at least one numeric character (0 to 9). Can be "True" or "False".
isArray: false
name: requireNumbers
predefined:
@@ -2167,9 +2021,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Specifies whether IAM user passwords must contain at least one
- uppercase character from the ISO basic Latin alphabet (A to Z). Can be "True"
- or "False".
+ description: Specifies whether IAM user passwords must contain at least one uppercase character from the ISO basic Latin alphabet (A to Z). Can be "True" or "False".
isArray: false
name: requireUppercaseCharacters
predefined:
@@ -2179,9 +2031,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Specifies whether IAM user passwords must contain at least one
- lowercase character from the ISO basic Latin alphabet (a to z). Can be "True"
- or "False".
+ description: Specifies whether IAM user passwords must contain at least one lowercase character from the ISO basic Latin alphabet (a to z). Can be "True" or "False".
isArray: false
name: requireLowercaseCharacters
predefined:
@@ -2191,8 +2041,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Allows all IAM users in your account to use the AWS Management
- Console to change their own passwords. Can be "True" or "False".
+ description: Allows all IAM users in your account to use the AWS Management Console to change their own passwords. Can be "True" or "False".
isArray: false
name: allowUsersToChangePassword
predefined:
@@ -2209,8 +2058,7 @@ script:
required: false
secret: false
- default: false
- description: Specifies the number of previous passwords that IAM users are prevented
- from reusing.
+ description: Specifies the number of previous passwords that IAM users are prevented from reusing.
isArray: false
name: passwordReusePrevention
predefined:
@@ -2219,8 +2067,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Prevents IAM users from setting a new password after their password
- has expired. Can be "True" or "False".
+ description: Prevents IAM users from setting a new password after their password has expired. Can be "True" or "False".
isArray: false
name: hardExpiry
predefined:
@@ -2240,8 +2087,7 @@ script:
required: true
secret: false
deprecated: false
- description: Lists the names of the inline policies that are embedded in the specified
- IAM role.
+ description: Lists the names of the inline policies that are embedded in the specified IAM role.
execution: false
name: aws-iam-list-role-policies
outputs:
@@ -2262,8 +2108,7 @@ script:
required: true
secret: false
deprecated: false
- description: Retrieves the specified inline policy document that is embedded with
- the specified IAM role.
+ description: Retrieves the specified inline policy document that is embedded with the specified IAM role.
execution: false
name: aws-iam-get-role-policy
outputs:
@@ -2272,16 +2117,13 @@ script:
type: string
- arguments:
- default: false
- description: The Amazon Resource Name (ARN) of the managed policy that you want
- information about.
+ description: The Amazon Resource Name (ARN) of the managed policy that you want information about.
isArray: false
name: policyArn
required: true
secret: false
deprecated: false
- description: Retrieves information about the specified managed policy, including
- the policy's default version and the total number of IAM users, groups, and
- roles to which the policy is attached.
+ description: Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached.
execution: false
name: aws-iam-get-policy
outputs:
@@ -2292,8 +2134,7 @@ script:
description: The stable and unique string identifying the policy.
type: string
- contextPath: AWS.IAM.Policy.Arn
- description: The Amazon Resource Name (ARN). ARNs are unique identifiers for
- Amazon Web Services resources.
+ description: The Amazon Resource Name (ARN). ARNs are unique identifiers for Amazon Web Services resources.
type: string
- contextPath: AWS.IAM.Policy.Path
description: The path to the policy.
@@ -2303,8 +2144,7 @@ script:
type: string
- arguments:
- default: false
- description: The name (friendly name, not ARN) of the user to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the user to list attached policies for.
isArray: false
name: userName
required: true
@@ -2317,8 +2157,7 @@ script:
required: false
secret: false
- default: false
- description: Page number you would like to view. Each page contains page_size
- values. Must be used along with page_size.
+ description: Page number you would like to view. Each page contains page_size values. Must be used along with page_size.
isArray: false
name: page
required: false
@@ -2330,21 +2169,18 @@ script:
required: false
secret: false
- default: false
- description: Starting item of the next page to view. Retrieved from a previous run of the command
- (InlinePoliciesMarker).
+ description: Starting item of the next page to view. Retrieved from a previous run of the command (InlinePoliciesMarker).
isArray: false
name: marker
required: false
secret: false
deprecated: false
- description: Lists the names of the inline policies embedded in the specified
- IAM user.
+ description: Lists the names of the inline policies embedded in the specified IAM user.
execution: false
name: aws-iam-list-user-policies
outputs:
- contextPath: AWS.IAM.UserPolicies.UserName
- description: The name (friendly name, not ARN) of the user to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the user to list attached policies for.
type: String
- contextPath: AWS.IAM.UserPolicies.PolicyName
description: The name of the policy.
@@ -2354,8 +2190,7 @@ script:
type: String
- arguments:
- default: false
- description: The name (friendly name, not ARN) of the user to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the user to list attached policies for.
isArray: false
name: userName
required: true
@@ -2368,8 +2203,7 @@ script:
required: false
secret: false
- default: false
- description: Page number you would like to view. Each page contains page_size
- values. Must be used along with page_size.
+ description: Page number you would like to view. Each page contains page_size values. Must be used along with page_size.
isArray: false
name: page
required: false
@@ -2381,21 +2215,18 @@ script:
required: false
secret: false
- default: false
- description: Starting item of the next page to view. Can be retrieved from context
- (AttachedPoliciesMarker).
+ description: Starting item of the next page to view. Can be retrieved from context (AttachedPoliciesMarker).
isArray: false
name: marker
required: false
secret: false
deprecated: false
- description: Lists all managed policies that are attached to the specified IAM
- user.
+ description: Lists all managed policies that are attached to the specified IAM user.
execution: false
name: aws-iam-list-attached-user-policies
outputs:
- contextPath: AWS.IAM.AttachedUserPolicies.UserName
- description: The name (friendly name, not ARN) of the user to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the user to list attached policies for.
type: String
- contextPath: AWS.IAM.AttachedUserPolicies.PolicyName
description: The name of the attached policy
@@ -2408,8 +2239,7 @@ script:
type: String
- arguments:
- default: false
- description: The name (friendly name, not ARN) of the group to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the group to list attached policies for.
isArray: false
name: groupName
required: true
@@ -2422,8 +2252,7 @@ script:
required: false
secret: false
- default: false
- description: Page number you would like to view. Each page contains page_size
- values. Must be used along with page_size.
+ description: Page number you would like to view. Each page contains page_size values. Must be used along with page_size.
isArray: false
name: page
required: false
@@ -2435,21 +2264,18 @@ script:
required: false
secret: false
- default: false
- description: Starting item of the next page to view. Can be retrieved from context
- (AttachedPoliciesMarker).
+ description: Starting item of the next page to view. Can be retrieved from context (AttachedPoliciesMarker).
isArray: false
name: marker
required: false
secret: false
deprecated: false
- description: Lists all managed policies that are attached to the specified IAM
- group.
+ description: Lists all managed policies that are attached to the specified IAM group.
execution: false
name: aws-iam-list-attached-group-policies
outputs:
- contextPath: AWS.IAM.AttachedGroupPolicies.GroupName
- description: The name (friendly name, not ARN) of the group to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the group to list attached policies for.
type: String
- contextPath: AWS.IAM.AttachedGroupPolicies.PolicyName
description: The name of the attached policy
@@ -2462,8 +2288,7 @@ script:
type: String
- arguments:
- default: false
- description: The name (friendly name, not ARN) of the user to list attached
- policies for.
+ description: The name (friendly name, not ARN) of the user to list attached policies for.
isArray: false
name: userName
required: true
@@ -2477,10 +2302,9 @@ script:
description: The date when the password for the user was created.
type: Date
- contextPath: AWS.IAM.Users.LoginProfile.PasswordResetRequired
- description: Specifies whether the user is required to set a new password on
- next sign-in.
+ description: Specifies whether the user is required to set a new password on next sign-in.
type: Boolean
- dockerimage: demisto/boto3py3:1.0.0.41082
+ dockerimage: demisto/boto3py3:1.0.0.48904
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_18.md b/Packs/AWS-IAM/ReleaseNotes/1_1_18.md
new file mode 100644
index 000000000000..350a8e718e0b
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_18.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - Identity and Access Management
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_19.md b/Packs/AWS-IAM/ReleaseNotes/1_1_19.md
new file mode 100644
index 000000000000..c7beef88e8c8
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_19.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - Identity and Access Management
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_20.md b/Packs/AWS-IAM/ReleaseNotes/1_1_20.md
new file mode 100644
index 000000000000..ab305ebea213
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_20.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - Identity and Access Management
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.43484*.
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_21.md b/Packs/AWS-IAM/ReleaseNotes/1_1_21.md
new file mode 100644
index 000000000000..95827ea61a20
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_21.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### AWS - Identity and Access Management
+- Added support for sections infrastructure.
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.43720*.
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_22.md b/Packs/AWS-IAM/ReleaseNotes/1_1_22.md
new file mode 100644
index 000000000000..e4256acd8fff
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_22.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - Identity and Access Management
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.46675*.
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_23.md b/Packs/AWS-IAM/ReleaseNotes/1_1_23.md
new file mode 100644
index 000000000000..6aa2f53a0df2
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_23.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - Identity and Access Management
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.48904*.
diff --git a/Packs/AWS-IAM/pack_metadata.json b/Packs/AWS-IAM/pack_metadata.json
index d9be426b1f67..285c8eed3edd 100644
--- a/Packs/AWS-IAM/pack_metadata.json
+++ b/Packs/AWS-IAM/pack_metadata.json
@@ -3,7 +3,7 @@
"description": "Amazon Web Services Identity and Access Management (IAM)",
"support": "xsoar",
"author": "Cortex XSOAR",
- "currentVersion": "1.1.17",
+ "currentVersion": "1.1.23",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
"created": "2020-04-14T00:00:00Z",
diff --git a/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml b/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml
index d3a8bcf9b466..4f4bf67f60c1 100644
--- a/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml
+++ b/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml
@@ -1,4 +1,4 @@
-category: Identity and Access Management
+category: Authentication & Identity Management
commonfields:
id: AWS-ILM
version: -1
@@ -374,7 +374,7 @@ script:
type: Unknown
description: Permanently removes a group.
execution: true
- dockerimage: demisto/python3:3.10.9.40422
+ dockerimage: demisto/python3:3.10.10.48392
isfetch: false
longRunning: false
longRunningPort: false
diff --git a/Packs/AWS-ILM/ReleaseNotes/1_0_10.md b/Packs/AWS-ILM/ReleaseNotes/1_0_10.md
new file mode 100644
index 000000000000..2721258b173f
--- /dev/null
+++ b/Packs/AWS-ILM/ReleaseNotes/1_0_10.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - IAM (user lifecycle management)
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AWS-ILM/ReleaseNotes/1_0_11.md b/Packs/AWS-ILM/ReleaseNotes/1_0_11.md
new file mode 100644
index 000000000000..3ed4faf76c88
--- /dev/null
+++ b/Packs/AWS-ILM/ReleaseNotes/1_0_11.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+
+##### AWS - IAM (user lifecycle management)
+Updated the pack category to *Authentication & Identity Management*.
\ No newline at end of file
diff --git a/Packs/AWS-ILM/ReleaseNotes/1_0_7.md b/Packs/AWS-ILM/ReleaseNotes/1_0_7.md
new file mode 100644
index 000000000000..aff5cb5dbda5
--- /dev/null
+++ b/Packs/AWS-ILM/ReleaseNotes/1_0_7.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### AWS - IAM (user lifecycle management)
+- Updated the IAMApiModule.
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AWS-ILM/ReleaseNotes/1_0_8.md b/Packs/AWS-ILM/ReleaseNotes/1_0_8.md
new file mode 100644
index 000000000000..6555c676be78
--- /dev/null
+++ b/Packs/AWS-ILM/ReleaseNotes/1_0_8.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - IAM (user lifecycle management)
+- Updated the Docker image to: *demisto/python3:3.10.9.45313*.
diff --git a/Packs/AWS-ILM/ReleaseNotes/1_0_9.md b/Packs/AWS-ILM/ReleaseNotes/1_0_9.md
new file mode 100644
index 000000000000..63e827656f1b
--- /dev/null
+++ b/Packs/AWS-ILM/ReleaseNotes/1_0_9.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - IAM (user lifecycle management)
+- Updated the Docker image to: *demisto/python3:3.10.10.47713*.
diff --git a/Packs/AWS-ILM/pack_metadata.json b/Packs/AWS-ILM/pack_metadata.json
index da73ef752a2e..a445471d82e0 100644
--- a/Packs/AWS-ILM/pack_metadata.json
+++ b/Packs/AWS-ILM/pack_metadata.json
@@ -2,12 +2,12 @@
"name": "AWS-ILM",
"description": "IAM Integration for AWS-ILM. This pack handles user account auto-provisioning",
"support": "xsoar",
- "currentVersion": "1.0.6",
+ "currentVersion": "1.0.11",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
"categories": [
- "Authentication"
+ "Authentication & Identity Management"
],
"tags": [],
"useCases": [],
diff --git a/Packs/AWS-Lambda/Integrations/AWS-Lambda/AWS-Lambda.yml b/Packs/AWS-Lambda/Integrations/AWS-Lambda/AWS-Lambda.yml
index 84008e22148b..b157d67191b6 100644
--- a/Packs/AWS-Lambda/Integrations/AWS-Lambda/AWS-Lambda.yml
+++ b/Packs/AWS-Lambda/Integrations/AWS-Lambda/AWS-Lambda.yml
@@ -618,7 +618,7 @@ script:
- contextPath: AWS.Lambda.AccountUsage. FunctionCount
description: The number of Lambda functions.
type: number
- dockerimage: demisto/boto3py3:1.0.0.41271
+ dockerimage: demisto/boto3py3:1.0.0.45936
isfetch: false
runonce: false
subtype: python3
diff --git a/Packs/AWS-Lambda/ReleaseNotes/1_2_30.md b/Packs/AWS-Lambda/ReleaseNotes/1_2_30.md
new file mode 100644
index 000000000000..1741035308c2
--- /dev/null
+++ b/Packs/AWS-Lambda/ReleaseNotes/1_2_30.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - Lambda
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-Lambda/ReleaseNotes/1_2_31.md b/Packs/AWS-Lambda/ReleaseNotes/1_2_31.md
new file mode 100644
index 000000000000..5cec64f0cafe
--- /dev/null
+++ b/Packs/AWS-Lambda/ReleaseNotes/1_2_31.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - Lambda
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-Lambda/ReleaseNotes/1_2_32.md b/Packs/AWS-Lambda/ReleaseNotes/1_2_32.md
new file mode 100644
index 000000000000..b9b909e1787b
--- /dev/null
+++ b/Packs/AWS-Lambda/ReleaseNotes/1_2_32.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - Lambda
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.43484*.
diff --git a/Packs/AWS-Lambda/ReleaseNotes/1_2_33.md b/Packs/AWS-Lambda/ReleaseNotes/1_2_33.md
new file mode 100644
index 000000000000..450f4f421049
--- /dev/null
+++ b/Packs/AWS-Lambda/ReleaseNotes/1_2_33.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - Lambda
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.45936*.
diff --git a/Packs/AWS-Lambda/pack_metadata.json b/Packs/AWS-Lambda/pack_metadata.json
index 0b575f833f7e..c0e035488ff9 100644
--- a/Packs/AWS-Lambda/pack_metadata.json
+++ b/Packs/AWS-Lambda/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - Lambda",
"description": "Amazon Web Services Serverless Compute service (lambda)",
"support": "xsoar",
- "currentVersion": "1.2.29",
+ "currentVersion": "1.2.33",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AWS-NetworkFirewall/Integrations/AWS-NetworkFirewall/AWS-NetworkFirewall-config.json b/Packs/AWS-NetworkFirewall/Integrations/AWS-NetworkFirewall/AWS-NetworkFirewall-config.json
deleted file mode 100755
index 969569b6f4b5..000000000000
--- a/Packs/AWS-NetworkFirewall/Integrations/AWS-NetworkFirewall/AWS-NetworkFirewall-config.json
+++ /dev/null
@@ -1,89 +0,0 @@
-{
- "aws-network-firewall-associate-firewall-policy": {
- "context_extraction_string": "AWS-NetworkFirewall.AssociationResults.FirewallPolicy(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-associate-subnets": {
- "context_extraction_string": "AWS-NetworkFirewall.AssociationResults.Subnets(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-create-firewall": {
- "context_extraction_string": "AWS-NetworkFirewall.Firewall(val.Firewall.FirewallArn === obj.Firewall.FirewallArn)': response"
- },
- "aws-network-firewall-create-firewall-policy": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallPolicy(val.FirewallPolicyResponse.FirewallPolicyArn === obj.FirewallPolicyResponse.FirewallPolicyArn)': response"
- },
- "aws-network-firewall-create-rule-group": {
- "context_extraction_string": "AWS-NetworkFirewall.RuleGroup(val.RuleGroupResponse.RuleGroupArn === obj.RuleGroupResponse.RuleGroupArn)': response"
- },
- "aws-network-firewall-delete-firewall": {
- "context_extraction_string": "AWS-NetworkFirewall.Firewall(val.Firewall.FirewallArn === obj.Firewall.FirewallArn)': response"
- },
- "aws-network-firewall-delete-firewall-policy": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallPolicy(val.FirewallPolicyResponse.FirewallPolicyArn === obj.FirewallPolicyResponse.FirewallPolicyArn)': response"
- },
- "aws-network-firewall-delete-resource-policy": {
- "context_extraction_string": null
- },
- "aws-network-firewall-delete-rule-group": {
- "context_extraction_string": "AWS-NetworkFirewall.RuleGroup(val.RuleGroupResponse.RuleGroupArn === obj.RuleGroupResponse.RuleGroupArn)': response"
- },
- "aws-network-firewall-describe-firewall": {
- "context_extraction_string": "AWS-NetworkFirewall.Firewall(val.Firewall.FirewallArn === obj.Firewall.FirewallArn)': response"
- },
- "aws-network-firewall-describe-firewall-policy": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallPolicy(val.FirewallPolicyResponse.FirewallPolicyArn === obj.FirewallPolicyResponse.FirewallPolicyArn)': response"
- },
- "aws-network-firewall-describe-logging-configuration": {
- "context_extraction_string": "AWS-NetworkFirewall.Logging(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-describe-resource-policy": {
- "context_extraction_string": null
- },
- "aws-network-firewall-describe-rule-group": {
- "context_extraction_string": "AWS-NetworkFirewall.RuleGroup(val.RuleGroupResponse.RuleGroupArn === obj.RuleGroupResponse.RuleGroupArn)': response"
- },
- "aws-network-firewall-disassociate-subnets": {
- "context_extraction_string": "AWS-NetworkFirewall.AssociationResults.Subnets(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-list-firewall-policies": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallPolicies(val.Arn === obj.Arn)': response.get('FirewallPolicies')"
- },
- "aws-network-firewall-list-firewalls": {
- "context_extraction_string": "AWS-NetworkFirewall.Firewalls(val.FirewallArn === obj.FirewallArn)': response.get('Firewalls')"
- },
- "aws-network-firewall-list-rule-groups": {
- "context_extraction_string": "AWS-NetworkFirewall.RuleGroups(val.Arn === obj.Arn)': response.get('RuleGroups')"
- },
- "aws-network-firewall-list-tags-for-resource": {
- "context_extraction_string": null
- },
- "aws-network-firewall-put-resource-policy": {
- "context_extraction_string": null
- },
- "aws-network-firewall-tag-resource": {
- "context_extraction_string": null
- },
- "aws-network-firewall-untag-resource": {
- "context_extraction_string": null
- },
- "aws-network-firewall-update-firewall-delete-protection": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallAttributes(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-update-firewall-description": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallAttributes(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-update-firewall-policy": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallPolicy(val.FirewallPolicyResponse.FirewallPolicyArn === obj.FirewallPolicyResponse.FirewallPolicyArn)': response"
- },
- "aws-network-firewall-update-firewall-policy-change-protection": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallAttributes(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-update-logging-configuration": {
- "context_extraction_string": "AWS-NetworkFirewall.Logging(val.FirewallArn === obj.FirewallArn)': response"
- },
- "aws-network-firewall-update-rule-group": {
- "context_extraction_string": "AWS-NetworkFirewall.RuleGroup(val.RuleGroupResponse.RuleGroupArn === obj.RuleGroupResponse.RuleGroupArn)': response"
- },
- "aws-network-firewall-update-subnet-change-protection": {
- "context_extraction_string": "AWS-NetworkFirewall.FirewallAttributes(val.FirewallArn === obj.FirewallArn)': response"
- }
-}
diff --git a/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml b/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml
index cfd2684dfeb2..7a44882a9cef 100644
--- a/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml
+++ b/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml
@@ -1,4 +1,7 @@
category: IT Services
+sectionOrder:
+- Connect
+- Collect
commonfields:
id: AWS - S3
version: -1
@@ -7,10 +10,12 @@ configuration:
name: roleArn
required: false
type: 0
+ section: Connect
- display: Role Session Name
name: roleSessionName
required: false
type: 0
+ section: Connect
- display: AWS Default Region
name: defaultRegion
options:
@@ -34,50 +39,60 @@ configuration:
- us-gov-west-1
required: false
type: 15
+ section: Connect
+ advanced: true
- display: Role Session Duration
name: sessionDuration
required: false
type: 0
+ section: Connect
+ advanced: true
- display: Access Key
name: credentials
required: false
type: 9
displaypassword: Secret Key
+ section: Connect
- display: Access Key
name: access_key
required: false
type: 0
hidden: true
+ section: Connect
- display: Secret Key
name: secret_key
required: false
type: 4
hidden: true
+ section: Connect
- display: Timeout
name: timeout
- additionalinfo: The time in seconds till a timeout exception is reached. You can
- specify just the read timeout (for example 60) or also the connect timeout followed
- after a comma (for example 60,10). If a connect timeout is not specified, a default
- of 10 second will be used.
+ additionalinfo: The time in seconds until a timeout exception is reached. You can specify just the read timeout (for example 60) or also the connect timeout followed after a comma (for example 60,10). If a connect timeout is not specified, a default of 10 seconds will be used.
defaultvalue: 60,10
required: false
type: 0
+ section: Connect
+ advanced: true
- display: Retries
name: retries
defaultvalue: 5
- additionalinfo: "The maximum number of retry attempts when connection or throttling errors
- are encountered. Set to 0 to disable retries. The default value is 5 and the limit is 10.
- Note: Increasing the number of retries will increase the execution time."
+ additionalinfo: "The maximum number of retry attempts when connection or throttling errors are encountered. Set to 0 to disable retries. The default value is 5 and the limit is 10. Note: Increasing the number of retries will increase the execution time."
required: false
type: 0
+ section: Connect
+ advanced: true
- display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
+ section: Connect
+ advanced: true
- display: Use system proxy settings
name: proxy
required: false
type: 8
+ section: Connect
+ advanced: true
description: Amazon Web Services Simple Storage Service (S3)
display: AWS - S3
name: AWS - S3
@@ -103,15 +118,13 @@ script:
required: false
secret: false
- default: false
- description: Specifies the region where the bucket will be created. If you don't
- specify a region, the bucket will be created in US Standard.
+ description: Specifies the region where the bucket will be created. If you don't specify a region, the bucket will be created in US Standard.
isArray: false
name: locationConstraint
required: false
secret: false
- default: false
- description: Allows grantee the read, write, read ACP, and write ACP permissions
- on the bucket.
+ description: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.
isArray: false
name: grantFullControl
required: false
@@ -129,8 +142,7 @@ script:
required: false
secret: false
- default: false
- description: Allows grantee to create, overwrite, and delete any object in the
- bucket.
+ description: Allows grantee to create, overwrite, and delete any object in the bucket.
isArray: false
name: grantWrite
required: false
@@ -160,9 +172,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -204,9 +214,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -235,9 +243,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -279,9 +285,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -344,9 +348,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -387,9 +389,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -436,9 +436,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -472,8 +470,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Set this parameter to true to confirm that you want to remove your
- permissions to change this bucket policy in the future.
+ description: Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.
isArray: false
name: confirmRemoveSelfBucketAccess
predefined:
@@ -500,16 +497,13 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
secret: false
deprecated: false
- description: Replaces a policy on a bucket. If the bucket already has a policy,
- the one in this request completely replaces it.
+ description: Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.
execution: false
name: aws-s3-put-bucket-policy
- arguments:
@@ -576,9 +570,7 @@ script:
required: false
secret: false
- default: false
- description: The duration, in seconds, of the role session. The value can range
- from 900 seconds (15 minutes) up to the maximum session duration setting for
- the role.
+ description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
isArray: false
name: roleSessionDuration
required: false
@@ -589,8 +581,7 @@ script:
name: aws-s3-upload-file
- arguments:
- default: false
- description: The name of the Amazon S3 bucket whose PublicAccessBlock configuration
- you want to retrieve.
+ description: The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to retrieve.
isArray: false
name: bucket
required: true
@@ -601,25 +592,20 @@ script:
name: aws-s3-get-public-access-block
outputs:
- contextPath: AWS.S3.Buckets.BucketName.PublicAccessBlockConfiguration.BlockPublicAcls
- description: Specifies whether Amazon S3 should block public access control
- lists (ACLs) for this bucket and objects in this bucket.
+ description: Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket.
type: Boolean
- contextPath: AWS.S3.Buckets.BucketName.PublicAccessBlockConfiguration.IgnorePublicAcls
- description: Specifies whether Amazon S3 should ignore public ACLs for this
- bucket and objects in this bucket.
+ description: Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket.
type: Boolean
- contextPath: AWS.S3.Buckets.BucketName.PublicAccessBlockConfiguration.BlockPublicPolicy
- description: Specifies whether Amazon S3 should block public bucket policies
- for this bucket.
+ description: Specifies whether Amazon S3 should block public bucket policies for this bucket.
type: Boolean
- contextPath: AWS.S3.Buckets.BucketName.PublicAccessBlockConfiguration.RestrictPublicBuckets
- description: Specifies whether Amazon S3 should restrict public bucket policies
- for this bucket.
+ description: Specifies whether Amazon S3 should restrict public bucket policies for this bucket.
type: Boolean
- arguments:
- default: false
- description: The name of the Amazon S3 bucket whose PublicAccessBlock configuration
- you want to retrieve.
+ description: The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to retrieve.
isArray: false
name: bucket
required: true
@@ -652,7 +638,7 @@ script:
description: Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.
execution: false
name: aws-s3-put-public-access-block
- dockerimage: demisto/boto3py3:1.0.0.41271
+ dockerimage: demisto/boto3py3:1.0.0.45868
isfetch: false
runonce: false
script: ''
diff --git a/Packs/AWS-S3/ReleaseNotes/1_2_11.md b/Packs/AWS-S3/ReleaseNotes/1_2_11.md
new file mode 100644
index 000000000000..069ec9fff4cb
--- /dev/null
+++ b/Packs/AWS-S3/ReleaseNotes/1_2_11.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - S3
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-S3/ReleaseNotes/1_2_12.md b/Packs/AWS-S3/ReleaseNotes/1_2_12.md
new file mode 100644
index 000000000000..912fbf49e695
--- /dev/null
+++ b/Packs/AWS-S3/ReleaseNotes/1_2_12.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - S3
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-S3/ReleaseNotes/1_2_13.md b/Packs/AWS-S3/ReleaseNotes/1_2_13.md
new file mode 100644
index 000000000000..b61e46d08b71
--- /dev/null
+++ b/Packs/AWS-S3/ReleaseNotes/1_2_13.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### AWS - S3
+- Added support for sections infrastructure.
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.45868*.
diff --git a/Packs/AWS-S3/TestPlaybooks/playbook-AWS_-_S3_Test_Playbook.yml b/Packs/AWS-S3/TestPlaybooks/playbook-AWS_-_S3_Test_Playbook.yml
index 15649177bf8a..d549e329d442 100644
--- a/Packs/AWS-S3/TestPlaybooks/playbook-AWS_-_S3_Test_Playbook.yml
+++ b/Packs/AWS-S3/TestPlaybooks/playbook-AWS_-_S3_Test_Playbook.yml
@@ -85,7 +85,7 @@ tasks:
- "6"
scriptarguments:
bucket:
- simple: demisto-test12344
+ simple: demisto-test1234
entryID:
simple: ${File.EntryID}
key:
@@ -124,7 +124,7 @@ tasks:
- "5"
scriptarguments:
bucket:
- simple: demisto-test12344
+ simple: demisto-test1234
key:
simple: ${File.Name}
separatecontext: false
@@ -161,7 +161,7 @@ tasks:
- "9"
scriptarguments:
bucket:
- simple: demisto-test12344
+ simple: demisto-test1234
separatecontext: false
continueonerrortype: ""
view: |-
@@ -196,7 +196,7 @@ tasks:
- "4"
scriptarguments:
bucket:
- simple: demisto-test12344
+ simple: demisto-test1234
separatecontext: false
continueonerrortype: ""
view: |-
@@ -304,7 +304,7 @@ tasks:
brand: AWS - S3
scriptarguments:
bucket:
- simple: demisto-test12344
+ simple: demisto-test1234
separatecontext: false
continueonerrortype: ""
view: |-
diff --git a/Packs/AWS-S3/pack_metadata.json b/Packs/AWS-S3/pack_metadata.json
index be800c14f096..88fa4c1bc481 100644
--- a/Packs/AWS-S3/pack_metadata.json
+++ b/Packs/AWS-S3/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - S3",
"description": "Amazon Web Services Simple Storage Service (S3)",
"support": "xsoar",
- "currentVersion": "1.2.10",
+ "currentVersion": "1.2.13",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -15,6 +15,7 @@
"keywords": [],
"marketplaces": [
"xsoar",
- "marketplacev2"
+ "marketplacev2",
+ "xpanse"
]
}
\ No newline at end of file
diff --git a/Packs/AWS-SNS/ReleaseNotes/1_0_3.md b/Packs/AWS-SNS/ReleaseNotes/1_0_3.md
new file mode 100644
index 000000000000..d255423ddc58
--- /dev/null
+++ b/Packs/AWS-SNS/ReleaseNotes/1_0_3.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - SNS
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-SNS/ReleaseNotes/1_0_4.md b/Packs/AWS-SNS/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..f9bdf06d7ed3
--- /dev/null
+++ b/Packs/AWS-SNS/ReleaseNotes/1_0_4.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - SNS
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-SNS/pack_metadata.json b/Packs/AWS-SNS/pack_metadata.json
index b40c92aa36c9..1842bb3d407a 100644
--- a/Packs/AWS-SNS/pack_metadata.json
+++ b/Packs/AWS-SNS/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - SNS",
"description": "This is the integration content pack which can create or delete topic/subscription on AWS Simple Notification System and send the message via SNS as well.",
"support": "community",
- "currentVersion": "1.0.2",
+ "currentVersion": "1.0.4",
"author": "Jie Liau",
"url": "",
"email": "",
diff --git a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.py b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.py
index 256514ef325f..c4087bae5256 100644
--- a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.py
+++ b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.py
@@ -44,7 +44,7 @@ def list_queues(args, client):
if args.get('queueNamePrefix') is not None:
kwargs.update({'QueueNamePrefix': args.get('queueNamePrefix')})
response = client.list_queues(**kwargs)
- for queue in response['QueueUrls']:
+ for queue in response.get('QueueUrls', []):
data.append({'QueueUrl': queue})
ec = {'AWS.SQS.Queues': data}
diff --git a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml
index 383f9033a114..bafbc79bbc44 100644
--- a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml
+++ b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml
@@ -266,7 +266,7 @@ script:
- name: roleSessionDuration
description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role.
description: Deletes the messages in a queue specified by the QueueURL parameter.
- dockerimage: demisto/boto3py3:1.0.0.41271
+ dockerimage: demisto/boto3py3:1.0.0.48955
isfetch: true
runonce: false
tests:
diff --git a/Packs/AWS-SQS/ReleaseNotes/1_2_18.md b/Packs/AWS-SQS/ReleaseNotes/1_2_18.md
new file mode 100644
index 000000000000..9d8495ef73bf
--- /dev/null
+++ b/Packs/AWS-SQS/ReleaseNotes/1_2_18.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - SQS
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-SQS/ReleaseNotes/1_2_19.md b/Packs/AWS-SQS/ReleaseNotes/1_2_19.md
new file mode 100644
index 000000000000..165d55722488
--- /dev/null
+++ b/Packs/AWS-SQS/ReleaseNotes/1_2_19.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - SQS
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-SQS/ReleaseNotes/1_2_20.md b/Packs/AWS-SQS/ReleaseNotes/1_2_20.md
new file mode 100644
index 000000000000..10ac3422f154
--- /dev/null
+++ b/Packs/AWS-SQS/ReleaseNotes/1_2_20.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AWS - SQS
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.45936*.
diff --git a/Packs/AWS-SQS/ReleaseNotes/1_2_21.md b/Packs/AWS-SQS/ReleaseNotes/1_2_21.md
new file mode 100644
index 000000000000..9553cfef60c4
--- /dev/null
+++ b/Packs/AWS-SQS/ReleaseNotes/1_2_21.md
@@ -0,0 +1,7 @@
+
+#### Integrations
+
+##### AWS - SQS
+
+- Fixed an issue where **aws-sqs-list-queues** returned a vague exception when no SQS queue was available.
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.48955*.
\ No newline at end of file
diff --git a/Packs/AWS-SQS/pack_metadata.json b/Packs/AWS-SQS/pack_metadata.json
index 92845f6992ae..523d2e0ff2e5 100644
--- a/Packs/AWS-SQS/pack_metadata.json
+++ b/Packs/AWS-SQS/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - SQS",
"description": "Amazon Web Services Simple Queuing Service (SQS)",
"support": "xsoar",
- "currentVersion": "1.2.17",
+ "currentVersion": "1.2.21",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -10,11 +10,11 @@
"categories": [
"Cloud Services"
],
- "tags": [
- "marketplacev2:Data Source"
- ],
+ "tags": [],
"useCases": [],
- "keywords": [],
+ "keywords": [
+ "Amazon"
+ ],
"marketplaces": [
"xsoar",
"marketplacev2"
diff --git a/Packs/AWS-SecurityHub/ReleaseNotes/1_1_28.md b/Packs/AWS-SecurityHub/ReleaseNotes/1_1_28.md
new file mode 100644
index 000000000000..e0cc64b6f9ab
--- /dev/null
+++ b/Packs/AWS-SecurityHub/ReleaseNotes/1_1_28.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - Security Hub
+- Fixed an issue where the **region** argument was not being taken into account in some commands.
diff --git a/Packs/AWS-SecurityHub/ReleaseNotes/1_1_29.md b/Packs/AWS-SecurityHub/ReleaseNotes/1_1_29.md
new file mode 100644
index 000000000000..cbe25ac0bd9d
--- /dev/null
+++ b/Packs/AWS-SecurityHub/ReleaseNotes/1_1_29.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AWS - Security Hub
+- Fixed a typo in **AWSApiModule**.
diff --git a/Packs/AWS-SecurityHub/pack_metadata.json b/Packs/AWS-SecurityHub/pack_metadata.json
index 01368e9688c8..8eecb5499a0e 100644
--- a/Packs/AWS-SecurityHub/pack_metadata.json
+++ b/Packs/AWS-SecurityHub/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - Security Hub",
"description": "Amazon Web Services Security Hub Service .",
"support": "xsoar",
- "currentVersion": "1.1.27",
+ "currentVersion": "1.1.29",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -10,11 +10,11 @@
"categories": [
"Cloud Services"
],
- "tags": [
- "marketplacev2:Data Source"
- ],
+ "tags": [],
"useCases": [],
- "keywords": [],
+ "keywords": [
+ "Amazon"
+ ],
"marketplaces": [
"xsoar",
"marketplacev2"
diff --git a/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml b/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml
index 98b352fb8f0f..32aa79a8b953 100644
--- a/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml
+++ b/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml
@@ -6236,7 +6236,7 @@ script:
for items in the table.
- contextPath: AWS-DynamoDB.TimeToLiveSpecification
description: Represents the output of an UpdateTimeToLive operation.
- dockerimage: demisto/boto3py3:1.0.0.41082
+ dockerimage: demisto/boto3py3:1.0.0.41926
runonce: false
script: ''
subtype: python3
diff --git a/Packs/AWS_DynamoDB/ReleaseNotes/1_0_31.md b/Packs/AWS_DynamoDB/ReleaseNotes/1_0_31.md
new file mode 100644
index 000000000000..d40204950569
--- /dev/null
+++ b/Packs/AWS_DynamoDB/ReleaseNotes/1_0_31.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Amazon DynamoDB
+- Updated the Docker image to: *demisto/boto3py3:1.0.0.41926*.
diff --git a/Packs/AWS_DynamoDB/pack_metadata.json b/Packs/AWS_DynamoDB/pack_metadata.json
index c45c987d1d47..3eeacd1d9d23 100644
--- a/Packs/AWS_DynamoDB/pack_metadata.json
+++ b/Packs/AWS_DynamoDB/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Amazon DynamoDB",
"description": "Amazon DynamoDB Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance degradation, and use the AWS Management Console to monitor resource utilization and performance metrics. DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an AWS region, providing built-in high availability and data durability. ",
"support": "xsoar",
- "currentVersion": "1.0.30",
+ "currentVersion": "1.0.31",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.py b/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.py
index 1671e058f924..124713bf4da8 100644
--- a/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.py
+++ b/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.py
@@ -1,9 +1,9 @@
import demistomock as demisto
from CommonServerPython import *
-import requests
+import urllib3
-requests.packages.urllib3.disable_warnings()
+urllib3.disable_warnings()
class Client(BaseClient):
diff --git a/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.yml b/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.yml
index 50ee7c9f8957..525c10bea3d7 100644
--- a/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.yml
+++ b/Packs/AbnormalSecurity/Integrations/AbnormalSecurity/AbnormalSecurity.yml
@@ -576,7 +576,7 @@ script:
required: false
description: Get the latest threat intel feed.
name: abnormal-security-get-latest-threat-intel-feed
- dockerimage: demisto/python3:3.10.5.31928
+ dockerimage: demisto/python3:3.10.10.48392
isfetch: false
script: ''
subtype: python3
diff --git a/Packs/AbnormalSecurity/Integrations/AbnormalSecurityEventCollector/AbnormalSecurityEventCollector.yml b/Packs/AbnormalSecurity/Integrations/AbnormalSecurityEventCollector/AbnormalSecurityEventCollector.yml
index 0b3588b142a9..ade9e515aab8 100644
--- a/Packs/AbnormalSecurity/Integrations/AbnormalSecurityEventCollector/AbnormalSecurityEventCollector.yml
+++ b/Packs/AbnormalSecurity/Integrations/AbnormalSecurityEventCollector/AbnormalSecurityEventCollector.yml
@@ -41,7 +41,7 @@ script:
- 'True'
- 'False'
required: true
- dockerimage: demisto/python3:3.10.7.33922
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/AbnormalSecurity/ModelingRules/AbnormalSecurityEventCollector_1_3/AbnormalSecurityEventCollector_1_3.xif b/Packs/AbnormalSecurity/ModelingRules/AbnormalSecurityEventCollector_1_3/AbnormalSecurityEventCollector_1_3.xif
index e2a056630ebf..4bab3a23dda0 100644
--- a/Packs/AbnormalSecurity/ModelingRules/AbnormalSecurityEventCollector_1_3/AbnormalSecurityEventCollector_1_3.xif
+++ b/Packs/AbnormalSecurity/ModelingRules/AbnormalSecurityEventCollector_1_3/AbnormalSecurityEventCollector_1_3.xif
@@ -7,7 +7,7 @@ alter
senderIpAddress = coalesce(arraycreate(senderIpAddress))
| alter
xdm.event.id = threatId,
- xdm.event.outcome = remediationStatus,
+ xdm.event.outcome = if(remediationStatus = "Auto-Remediated", XDM_CONST.OUTCOME_SUCCESS, remediationStatus = "Post Remediated", XDM_CONST.OUTCOME_SUCCESS, remediationStatus = "Remediated", XDM_CONST.OUTCOME_SUCCESS, remediationStatus = "No Action Done", XDM_CONST.OUTCOME_FAILED, remediationStatus = "Would Remediate", XDM_CONST.OUTCOME_PARTIAL, remediationStatus = "Remediation Attempted", XDM_CONST.OUTCOME_PARTIAL, remediationStatus = null, null, to_string(remediationStatus)),
xdm.email.recipients = toAddresses,
xdm.email.attachment.filename = attachmentNames,
xdm.email.subject = subject,
@@ -18,6 +18,5 @@ alter
xdm.alert.category = attackType,
xdm.alert.name = attackStrategy,
xdm.alert.description = summaryInsights,
- xdm.observer.product = abxPortalUrl,
xdm.observer.unique_identifier = to_string(abxMessageId),
xdm.target.host.ipv4_addresses = senderIpAddress;
\ No newline at end of file
diff --git a/Packs/AbnormalSecurity/ReleaseNotes/2_0_3.md b/Packs/AbnormalSecurity/ReleaseNotes/2_0_3.md
new file mode 100644
index 000000000000..3dd931793022
--- /dev/null
+++ b/Packs/AbnormalSecurity/ReleaseNotes/2_0_3.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Abnormal Security
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AbnormalSecurity/ReleaseNotes/2_0_4.md b/Packs/AbnormalSecurity/ReleaseNotes/2_0_4.md
new file mode 100644
index 000000000000..8c44b1bcb5ff
--- /dev/null
+++ b/Packs/AbnormalSecurity/ReleaseNotes/2_0_4.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Abnormal Security Event Collector
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AbnormalSecurity/ReleaseNotes/2_0_5.md b/Packs/AbnormalSecurity/ReleaseNotes/2_0_5.md
new file mode 100644
index 000000000000..d11711c3e08f
--- /dev/null
+++ b/Packs/AbnormalSecurity/ReleaseNotes/2_0_5.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Abnormal Security
+- Updated the Docker image to: *demisto/python3:3.10.9.45313*.
diff --git a/Packs/AbnormalSecurity/ReleaseNotes/2_0_6.md b/Packs/AbnormalSecurity/ReleaseNotes/2_0_6.md
new file mode 100644
index 000000000000..c8267c31df51
--- /dev/null
+++ b/Packs/AbnormalSecurity/ReleaseNotes/2_0_6.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Abnormal Security
+- Updated the Docker image to: *demisto/python3:3.10.10.47713*.
diff --git a/Packs/AbnormalSecurity/ReleaseNotes/2_0_7.md b/Packs/AbnormalSecurity/ReleaseNotes/2_0_7.md
new file mode 100644
index 000000000000..e1f5a19da019
--- /dev/null
+++ b/Packs/AbnormalSecurity/ReleaseNotes/2_0_7.md
@@ -0,0 +1,4 @@
+
+#### Modeling Rules
+##### Abnormal Security Event Collector
+- Updated Modeling Rules.
diff --git a/Packs/AbnormalSecurity/ReleaseNotes/2_0_8.md b/Packs/AbnormalSecurity/ReleaseNotes/2_0_8.md
new file mode 100644
index 000000000000..5901ceb6c00b
--- /dev/null
+++ b/Packs/AbnormalSecurity/ReleaseNotes/2_0_8.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### Abnormal Security
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
+##### Abnormal Security Event Collector
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AbnormalSecurity/pack_metadata.json b/Packs/AbnormalSecurity/pack_metadata.json
index 1f1f7fb3d740..4158939d5853 100644
--- a/Packs/AbnormalSecurity/pack_metadata.json
+++ b/Packs/AbnormalSecurity/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Abnormal Security",
"description": "Abnormal Security detects and protects against the whole spectrum of email attacks",
"support": "partner",
- "currentVersion": "2.0.2",
+ "currentVersion": "2.0.8",
"author": "Abnormal Security",
"url": "",
"email": "support@abnormalsecurity.com",
diff --git a/Packs/Absolute/Integrations/Absolute/Absolute.py b/Packs/Absolute/Integrations/Absolute/Absolute.py
index 16fc6a38a227..0dba39d7fa1d 100644
--- a/Packs/Absolute/Integrations/Absolute/Absolute.py
+++ b/Packs/Absolute/Integrations/Absolute/Absolute.py
@@ -91,14 +91,14 @@
]
DEVICE_GET_LOCATION_COMMAND_RETURN_FIELDS = [
- "geoData.location.coordinates",
- "geoData.geoAddress.city",
- "geoData.geoAddress.state",
- "geoData.geoAddress.countryCode",
- "geoData.geoAddress.country",
- "geoData.locationTechnology",
- "geoData.accuracy",
- "geoData.lastUpdate",
+ "geoData.location.point.coordinates",
+ "geoData.location.geoAddress.city",
+ "geoData.location.geoAddress.state",
+ "geoData.location.geoAddress.countryCode",
+ "geoData.location.geoAddress.country",
+ "geoData.location.locationTechnology",
+ "geoData.location.accuracy",
+ "geoData.location.lastUpdateDateTimeUtc",
]
@@ -685,14 +685,23 @@ def create_filter_query_from_args_helper(args, arg_name, source_name, query):
return query
-def create_filter_query_from_args(args: dict, change_device_name_to_system=False):
+def create_filter_query_from_args(args: dict, change_device_name_to_system=False, change_device_id=False):
+ """
+
+ Args:
+ args: args given from the user.
+ change_device_name_to_system: True if to filter by "systemName" parameter and False to filter by "deviceName".
+ change_device_id: True if to filter by "id" parameter and False to filter by "deviceUid".
+
+ Returns: filter query to send to the API.
+
+ """
custom_filter = args.get('filter')
if custom_filter:
return f"$filter={custom_filter}"
query = ""
query = create_filter_query_from_args_helper(args, 'account_uids', "accountUid", query)
- query = create_filter_query_from_args_helper(args, 'device_ids', "deviceUid", query)
query = create_filter_query_from_args_helper(args, 'app_names', "appName", query)
query = create_filter_query_from_args_helper(args, 'app_publishers', "appPublisher", query)
query = create_filter_query_from_args_helper(args, 'user_names', "userName", query)
@@ -707,6 +716,12 @@ def create_filter_query_from_args(args: dict, change_device_name_to_system=False
else:
query = add_list_to_filter_string("deviceName", device_names, query)
+ device_ids = remove_duplicates_from_list_arg(args, 'device_ids')
+ if device_ids and change_device_id:
+ query = add_list_to_filter_string("id", device_ids, query)
+ else:
+ query = add_list_to_filter_string("deviceUid", device_ids, query)
+
if args.get('agent_status'):
agent_status = ABSOLUTE_AGET_STATUS[args.get('agent_status')] # type: ignore
query = add_value_to_filter_string("agentStatus", agent_status, query)
@@ -775,7 +790,7 @@ def parse_geo_location_outputs(response):
parsed_device['Coordinates'] = geo_data.get('point', {}).get('coordinates')
parsed_device['LocationTechnology'] = geo_data.get('locationTechnology')
parsed_device['Accuracy'] = geo_data.get('accuracy')
- parsed_device['LastUpdate'] = geo_data.get('lastUpdate')
+ parsed_device['LastUpdate'] = geo_data.get('lastUpdateDateTimeUtc')
parsed_device['City'] = geo_data.get('geoAddress', {}).get('city')
parsed_device['State'] = geo_data.get('geoAddress', {}).get('state')
parsed_device['CountryCode'] = geo_data.get('geoAddress', {}).get('countryCode')
@@ -838,7 +853,7 @@ def get_device_command(args, client) -> CommandResults:
raise_demisto_exception(
"at least one of the commands args (device_ids, device_names, local_ips, public_ips must be provided.")
- query_string = create_filter_query_from_args(args, change_device_name_to_system=True)
+ query_string = create_filter_query_from_args(args, change_device_name_to_system=True, change_device_id=True)
custom_fields_to_return = remove_duplicates_from_list_arg(args, 'fields')
if custom_fields_to_return:
custom_fields_to_return.extend(DEVICE_GET_COMMAND_RETURN_FIELDS)
@@ -860,7 +875,7 @@ def get_device_command(args, client) -> CommandResults:
def get_device_location_command(args, client) -> CommandResults:
- query_string = create_filter_query_from_args(args)
+ query_string = create_filter_query_from_args(args, change_device_id=True)
query_string = parse_return_fields(",".join(DEVICE_GET_LOCATION_COMMAND_RETURN_FIELDS), query_string)
res = client.api_request_absolute('GET', '/v2/reporting/devices', query_string=query_string)
diff --git a/Packs/Absolute/Integrations/Absolute/Absolute.yml b/Packs/Absolute/Integrations/Absolute/Absolute.yml
index 041375e6c654..c5c3803491f8 100644
--- a/Packs/Absolute/Integrations/Absolute/Absolute.yml
+++ b/Packs/Absolute/Integrations/Absolute/Absolute.yml
@@ -115,10 +115,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: 'The type of Freeze. You cannot freeze a device that has been reported stolen in the Absolute console.
- - OnDemand: Freezes a device on its next connection to the Absolute Monitoring Center, which is typically within 15 minutes. This applies for all supported operating systems.
- - Scheduled: Freezes a device on its next connection to the Absolute Monitoring Center on or after a specified date and time. This applies to Windows and Mac devices. The scheduled freeze date is specified in the scheduled_freeze_date argument. Scheduled Freeze requests are only supported on Windows and Mac devices with an active Absolute agent that is regularly connecting to the Absolute Monitoring Center.
- - Offline: Freezes a device if it has been offline for a specified period of time. Applies to Windows and Mac devices. Offline period is specified in the offline_time_seconds argument. Offline freeze is not available if your Absolute account has been migrated to Offline Freeze Rules. For more information, see the console Help.'
+ description: 'The type of Freeze. You cannot freeze a device that has been reported stolen in the Absolute console. - OnDemand: Freezes a device on its next connection to the Absolute Monitoring Center, which is typically within 15 minutes. This applies for all supported operating systems. - Scheduled: Freezes a device on its next connection to the Absolute Monitoring Center on or after a specified date and time. This applies to Windows and Mac devices. The scheduled freeze date is specified in the scheduled_freeze_date argument. Scheduled Freeze requests are only supported on Windows and Mac devices with an active Absolute agent that is regularly connecting to the Absolute Monitoring Center. - Offline: Freezes a device if it has been offline for a specified period of time. Applies to Windows and Mac devices. Offline period is specified in the offline_time_seconds argument. Offline freeze is not available if your Absolute account has been migrated to Offline Freeze Rules. For more information, see the console Help.'
isArray: false
name: device_freeze_type
predefined:
@@ -142,10 +139,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: 'The type of passcode to unfreeze a device.
- - UserDefined: Manually set the passcode in passcode. You must specify the passcode argument.
- - RandomForEach: A unique passcode is randomly generated for each device. You must specify the passcode_length argument.
- - RandomForAll: A passcode is randomly generated and is the same for all devices. You must specify the passcode_length argument.'
+ description: 'The type of passcode to unfreeze a device. - UserDefined: Manually set the passcode in passcode. You must specify the passcode argument. - RandomForEach: A unique passcode is randomly generated for each device. You must specify the passcode_length argument. - RandomForAll: A passcode is randomly generated and is the same for all devices. You must specify the passcode_length argument.'
isArray: false
name: passcode_type
predefined:
@@ -563,7 +557,7 @@ script:
secret: false
- default: false
defaultValue: '50'
- description: Maximum number of results to return.
+ description: Maximum number of results to return.
isArray: false
name: limit
required: false
@@ -681,7 +675,7 @@ script:
secret: false
- default: false
defaultValue: '50'
- description: Maximum number of results to return.
+ description: Maximum number of results to return.
isArray: false
name: limit
required: false
@@ -922,7 +916,7 @@ script:
script: '-'
type: python
subtype: python3
- dockerimage: demisto/python3:3.10.8.39276
+ dockerimage: demisto/python3:3.10.10.48392
fromversion: 6.0.0
tests:
- Absolute_TestPlaybook
diff --git a/Packs/Absolute/Integrations/Absolute/test_data/device_location_get.json b/Packs/Absolute/Integrations/Absolute/test_data/device_location_get.json
index 10fc7ceb21ce..0c5a37fc98dd 100644
--- a/Packs/Absolute/Integrations/Absolute/test_data/device_location_get.json
+++ b/Packs/Absolute/Integrations/Absolute/test_data/device_location_get.json
@@ -20,7 +20,7 @@
},
"locationTechnology": "gps",
"accuracy": 10,
- "lastUpdate": 1605747972853
+ "lastUpdateDateTimeUtc": 1605747972853
}
}
},
@@ -45,7 +45,7 @@
},
"locationTechnology": "gps",
"accuracy": 15,
- "lastUpdate": 1605747972853
+ "lastUpdateDateTimeUtc": 1605747972853
}
}
}
diff --git a/Packs/Absolute/ReleaseNotes/1_0_12.md b/Packs/Absolute/ReleaseNotes/1_0_12.md
new file mode 100644
index 000000000000..c7a4c7ae71ec
--- /dev/null
+++ b/Packs/Absolute/ReleaseNotes/1_0_12.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Absolute
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/Absolute/ReleaseNotes/1_0_13.md b/Packs/Absolute/ReleaseNotes/1_0_13.md
new file mode 100644
index 000000000000..08c0aa1fc7aa
--- /dev/null
+++ b/Packs/Absolute/ReleaseNotes/1_0_13.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Absolute
+- Updated the Docker image to: *demisto/python3:3.10.9.42476*.
diff --git a/Packs/Absolute/ReleaseNotes/1_0_14.md b/Packs/Absolute/ReleaseNotes/1_0_14.md
new file mode 100644
index 000000000000..171dab7b0a5b
--- /dev/null
+++ b/Packs/Absolute/ReleaseNotes/1_0_14.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Absolute
+- Updated the Docker image to: *demisto/python3:3.10.9.46032*.
diff --git a/Packs/Absolute/ReleaseNotes/1_0_15.md b/Packs/Absolute/ReleaseNotes/1_0_15.md
new file mode 100644
index 000000000000..8d543c8ba898
--- /dev/null
+++ b/Packs/Absolute/ReleaseNotes/1_0_15.md
@@ -0,0 +1,4 @@
+#### Integrations
+##### Absolute
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
+- Fixed an issue where **absolute-device-location-get** did not select the correct geoData information from the API.
diff --git a/Packs/Absolute/pack_metadata.json b/Packs/Absolute/pack_metadata.json
index fbe788057caa..08c4ec2eec4a 100644
--- a/Packs/Absolute/pack_metadata.json
+++ b/Packs/Absolute/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Absolute",
"description": "Absolute is an adaptive endpoint security solution that delivers device security, data security and asset management of endpoints",
"support": "xsoar",
- "currentVersion": "1.0.11",
+ "currentVersion": "1.0.15",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AbuseDB/.pack-ignore b/Packs/AbuseDB/.pack-ignore
index 963a95ee62c2..fa9d27f7cc90 100644
--- a/Packs/AbuseDB/.pack-ignore
+++ b/Packs/AbuseDB/.pack-ignore
@@ -1,5 +1,2 @@
-[file:AbuseDB.yml]
-ignore=IN145
-
[known_words]
AbuseIPDB
diff --git a/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.py b/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.py
index 148e6cd8f345..8f92ab70c70b 100644
--- a/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.py
+++ b/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.py
@@ -4,18 +4,18 @@
''' IMPORTS '''
import csv
import os
-
+import urllib3
import requests
# disable insecure warnings
-requests.packages.urllib3.disable_warnings()
+urllib3.disable_warnings()
''' GLOBALS '''
VERBOSE = True
SERVER = demisto.params().get('server')
if not SERVER.endswith('/'):
SERVER += '/'
-API_KEY = demisto.params().get('apikey')
+API_KEY = demisto.params().get('credentials', {}).get('password') or demisto.params().get('apikey')
MAX_AGE = demisto.params().get('days')
THRESHOLD = demisto.params().get('threshold')
INSECURE = demisto.params().get('insecure')
diff --git a/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.yml b/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.yml
index 2401bb80f4d4..c26ab53b8c1b 100644
--- a/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.yml
+++ b/Packs/AbuseDB/Integrations/AbuseDB/AbuseDB.yml
@@ -1,4 +1,7 @@
category: Data Enrichment & Threat Intelligence
+sectionOrder:
+- Connect
+- Collect
commonfields:
id: AbuseIPDB
version: -1
@@ -8,14 +11,25 @@ configuration:
name: server
required: true
type: 0
+ section: Connect
+- name: credentials
+ required: false
+ type: 9
+ displaypassword: API Key (v2)
+ hiddenusername: true
+ section: Connect
- display: API Key (v2)
name: apikey
- required: true
+ required: false
type: 4
-- additionalinfo: Reliability of the source providing the intelligence data.
- defaultvalue: C - Fairly reliable
+ hidden: true
+ section: Connect
+- defaultvalue: 'C - Fairly reliable'
display: Source Reliability
name: integrationReliability
+ required: true
+ type: 15
+ additionalinfo: Reliability of the source providing the intelligence data.
options:
- A+ - 3rd party enrichment
- A - Completely reliable
@@ -24,33 +38,40 @@ configuration:
- D - Not usually reliable
- E - Unreliable
- F - Reliability cannot be judged
- required: true
- type: 15
+ section: Collect
- defaultvalue: '80'
display: Minimum score threshold
name: threshold
required: false
type: 0
-- defaultvalue: '30'
- display: Maximum reports age (in days)
+ section: Collect
+ advanced: true
+- display: Maximum reports age (in days)
name: days
required: false
type: 0
+ defaultvalue: '30'
+ section: Collect
+ advanced: true
- display: Disregard quota errors
name: disregard_quota
required: false
type: 8
+ section: Collect
+ advanced: true
- display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
+ section: Connect
+ advanced: true
- display: Use system proxy settings
name: proxy
required: false
type: 8
-description: Central repository to report and identify IP addresses that have been
- associated with malicious activity online. Check the Detailed Information section
- for more information on how to configure the integration.
+ section: Connect
+ advanced: true
+description: Central repository to report and identify IP addresses that have been associated with malicious activity online. Check the Detailed Information section for more information on how to configure the integration.
display: AbuseIPDB
name: AbuseIPDB
script:
@@ -66,15 +87,13 @@ script:
name: days
- auto: PREDEFINED
defaultValue: 'true'
- description: The length of the report. "true" returns the full report, "false"
- does not return reported categories. Default is "true".
+ description: The length of the report. "true" returns the full report, "false" does not return reported categories. Default is "true".
name: verbose
predefined:
- 'true'
- 'false'
- defaultValue: '80'
- description: The minimum score from AbuseIPDB to consider whether the IP address
- is malicious (must be greater than 20). Default is 80.
+ description: The minimum score from AbuseIPDB to consider whether the IP address is malicious (must be greater than 20). Default is 80.
name: threshold
description: Checks the specified IP address against the AbuseIP database.
name: ip
@@ -150,8 +169,7 @@ script:
description: The maximum number of IPs to check. Default is 40.
name: limit
- defaultValue: '80'
- description: The minimum score from AbuseIPDB to consider whether the IP address
- is malicious (must be greater than 20). Default is 80.
+ description: The minimum score from AbuseIPDB to consider whether the IP address is malicious (must be greater than 20). Default is 80.
name: threshold
description: Queries a block of IP addresses to check against the database.
name: abuseipdb-check-cidr-block
@@ -231,8 +249,7 @@ script:
name: limit
- auto: PREDEFINED
defaultValue: 'false'
- description: Whether to save a list of IPs on a block list in the Context Data in
- Demisto. Default is false.
+ description: Whether to save a list of IPs on a block list in the Context Data in Demisto. Default is false.
name: saveToContext
predefined:
- 'true'
@@ -257,7 +274,7 @@ script:
script: ''
subtype: python3
type: python
- dockerimage: demisto/python3:3.10.7.33922
+ dockerimage: demisto/python3:3.10.10.48392
fromversion: 5.0.0
tests:
- AbuseIPDB Test
diff --git a/Packs/AbuseDB/ReleaseNotes/1_0_16.md b/Packs/AbuseDB/ReleaseNotes/1_0_16.md
new file mode 100644
index 000000000000..3e3e99726ead
--- /dev/null
+++ b/Packs/AbuseDB/ReleaseNotes/1_0_16.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AbuseIPDB
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AbuseDB/ReleaseNotes/1_0_17.md b/Packs/AbuseDB/ReleaseNotes/1_0_17.md
new file mode 100644
index 000000000000..8b24677164d2
--- /dev/null
+++ b/Packs/AbuseDB/ReleaseNotes/1_0_17.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### AbuseIPDB
+- Added the *API Key* integration parameter to support credentials fetching object.
+- Updated the Docker image to: *demisto/python3:3.10.9.42476*.
diff --git a/Packs/AbuseDB/ReleaseNotes/1_0_18.md b/Packs/AbuseDB/ReleaseNotes/1_0_18.md
new file mode 100644
index 000000000000..c518ba72b58f
--- /dev/null
+++ b/Packs/AbuseDB/ReleaseNotes/1_0_18.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### AbuseIPDB
+- Note: Organized the integrations' parameters by sections. Relevant for XSIAM and XSOAR 8.1 and above.
+- Updated the Docker image to: *demisto/python3:3.10.9.44472*.
diff --git a/Packs/AbuseDB/ReleaseNotes/1_0_19.md b/Packs/AbuseDB/ReleaseNotes/1_0_19.md
new file mode 100644
index 000000000000..efb6fb069f10
--- /dev/null
+++ b/Packs/AbuseDB/ReleaseNotes/1_0_19.md
@@ -0,0 +1,4 @@
+
+#### Scripts
+##### AbuseIPDBPopulateIndicators
+- Updated the Docker image to: *demisto/python3:3.10.9.46807*.
\ No newline at end of file
diff --git a/Packs/AbuseDB/ReleaseNotes/1_0_20.md b/Packs/AbuseDB/ReleaseNotes/1_0_20.md
new file mode 100644
index 000000000000..6b2a3506d5a0
--- /dev/null
+++ b/Packs/AbuseDB/ReleaseNotes/1_0_20.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AbuseIPDB
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AbuseDB/Scripts/AbuseIPDBPopulateIndicators/AbuseIPDBPopulateIndicators.yml b/Packs/AbuseDB/Scripts/AbuseIPDBPopulateIndicators/AbuseIPDBPopulateIndicators.yml
index 954a1883a8ff..1a2cfae2db47 100644
--- a/Packs/AbuseDB/Scripts/AbuseIPDBPopulateIndicators/AbuseIPDBPopulateIndicators.yml
+++ b/Packs/AbuseDB/Scripts/AbuseIPDBPopulateIndicators/AbuseIPDBPopulateIndicators.yml
@@ -29,4 +29,4 @@ runas: DBotWeakRole
tests:
- AbuseIPDB PopulateIndicators Test
fromversion: 5.0.0
-dockerimage: demisto/python3:3.10.7.33922
+dockerimage: demisto/python3:3.10.9.46807
diff --git a/Packs/AbuseDB/pack_metadata.json b/Packs/AbuseDB/pack_metadata.json
index 65bbf5c9691b..fb10be83b98c 100644
--- a/Packs/AbuseDB/pack_metadata.json
+++ b/Packs/AbuseDB/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AbuseIPDB",
"description": "Central repository to report and identify IP addresses that have been associated with malicious activity online. Check the Detailed Information section for more information on how to configure the integration.",
"support": "xsoar",
- "currentVersion": "1.0.15",
+ "currentVersion": "1.0.20",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.py b/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.py
index 0ad767f528fe..050e13272980 100644
--- a/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.py
+++ b/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.py
@@ -1,8 +1,9 @@
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
+import urllib3
# Disable insecure warnings
-requests.packages.urllib3.disable_warnings()
+urllib3.disable_warnings()
''' CONSTANTS '''
OK_HTTP_CODES = (200, 201)
diff --git a/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.yml b/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.yml
index 94beba35a938..e149133ac271 100644
--- a/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.yml
+++ b/Packs/AcalvioShadowplex/Integrations/acalvioapp/acalvioapp.yml
@@ -192,7 +192,7 @@ script:
- contextPath: Acalvio.UnmuteDeceptionEndpoint.DateTime
description: Date including Time
type: String
- dockerimage: demisto/python3:3.10.5.31928
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/AcalvioShadowplex/ReleaseNotes/1_0_8.md b/Packs/AcalvioShadowplex/ReleaseNotes/1_0_8.md
new file mode 100644
index 000000000000..f9b9988983c8
--- /dev/null
+++ b/Packs/AcalvioShadowplex/ReleaseNotes/1_0_8.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Acalvio ShadowPlex
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AcalvioShadowplex/ReleaseNotes/1_0_9.md b/Packs/AcalvioShadowplex/ReleaseNotes/1_0_9.md
new file mode 100644
index 000000000000..1bd34c1e691d
--- /dev/null
+++ b/Packs/AcalvioShadowplex/ReleaseNotes/1_0_9.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Acalvio ShadowPlex
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AcalvioShadowplex/pack_metadata.json b/Packs/AcalvioShadowplex/pack_metadata.json
index 5513adc19684..1247c10701c3 100644
--- a/Packs/AcalvioShadowplex/pack_metadata.json
+++ b/Packs/AcalvioShadowplex/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Acalvio ShadowPlex",
"description": "Acalvio ShadowPlex Autonomous Deception Solution",
"support": "partner",
- "currentVersion": "1.0.7",
+ "currentVersion": "1.0.9",
"author": "Acalvio Technologies",
"url": "https://www.acalvio.com",
"email": "support@acalvio.com",
diff --git a/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml b/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml
index 11ec81040be5..8a9d2fad14ed 100644
--- a/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml
+++ b/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml
@@ -404,7 +404,7 @@ script:
description: The actual score.
type: String
- dockerimage: demisto/python3:3.10.8.39276
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.py b/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.py
index 1f88389cd5f7..74f59c384f23 100644
--- a/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.py
+++ b/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.py
@@ -1,11 +1,11 @@
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
-import requests
+import urllib3
import traceback
from typing import List
# Disable insecure warnings
-requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
+urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
diff --git a/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.yml b/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.yml
index 7aacd055ec0e..67de097c23c7 100644
--- a/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.yml
+++ b/Packs/AccentureCTI/Integrations/ACTIVulnerabilityQuery/ACTIVulnerabilityQuery.yml
@@ -88,7 +88,7 @@ script:
script: '-'
type: python
subtype: python3
- dockerimage: demisto/python3:3.10.5.31928
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
longRunning: false
longRunningPort: false
diff --git a/Packs/AccentureCTI/Playbooks/playbook-ACTI_Create_Report-Indicator_Associations_README.md b/Packs/AccentureCTI/Playbooks/playbook-ACTI_Create_Report-Indicator_Associations_README.md
index dc692b97bcd7..d5a64492e32e 100644
--- a/Packs/AccentureCTI/Playbooks/playbook-ACTI_Create_Report-Indicator_Associations_README.md
+++ b/Packs/AccentureCTI/Playbooks/playbook-ACTI_Create_Report-Indicator_Associations_README.md
@@ -25,14 +25,14 @@ This playbook does not use any commands.
| **Name** | **Description** | **Default Value** | **Required** |
| --- | --- | --- | --- |
| IP | The extracted IP address. | ${IP.Address} | Optional |
-| IA | The Intelligence Alert associated with the indicator. | ${intelligence_alerts}.None | Optional |
-| IR | The Intelligence Report associated with the indicator. | ${intelligence_reports}.None | Optional |
+| IA | The Intelligence Alert associated with the indicator. | ${intelligence_alerts} | Optional |
+| IR | The Intelligence Report associated with the indicator. | ${intelligence_reports} | Optional |
| URL | The extracted URL. | ${URL.Data} | Optional |
| Domain | The extracted Domain. | ${Domain.Name} | Optional |
-| MFam | The Malware Family associated with the indicator. | acti_malware_family_uuid.None | Optional |
-| TA | The Threat Actor associated with the indicator. | acti_threat_actors_uuid.None | Optional |
-| TG | The Threat Group associated with the indicator. | acti_threat_groups_uuid.None | Optional |
-| TC | The Threat Campaign associated with the indicator. | acti_threat_campaigns_uuid.None | Optional |
+| MFam | The Malware Family associated with the indicator. | acti_malware_family_uuid | Optional |
+| TA | The Threat Actor associated with the indicator. | acti_threat_actors_uuid | Optional |
+| TG | The Threat Group associated with the indicator. | acti_threat_groups_uuid | Optional |
+| TC | The Threat Campaign associated with the indicator. | acti_threat_campaigns_uuid | Optional |
## Playbook Outputs
---
diff --git a/Packs/AccentureCTI/Playbooks/playbook-ACTI_Incident_Enrichment_README.md b/Packs/AccentureCTI/Playbooks/playbook-ACTI_Incident_Enrichment_README.md
index 035145726d3f..1473a5b987bb 100644
--- a/Packs/AccentureCTI/Playbooks/playbook-ACTI_Incident_Enrichment_README.md
+++ b/Packs/AccentureCTI/Playbooks/playbook-ACTI_Incident_Enrichment_README.md
@@ -21,12 +21,12 @@ This playbook does not use any scripts.
| **Name** | **Description** | **Default Value** | **Required** |
| --- | --- | --- | --- |
-| ia_uuid | Intelligence Alert unique ID. | ${intelligence_alerts}.None | Optional |
-| ir_uuid | Intelligence Report unique ID. | ${intelligence_reports}.None | Optional |
-| MalwareFamily_uuid | Malware Family unique ID. | ${acti_malware_family_uuid}.None | Optional |
-| ThreatGroup_uuid | Threat Group unique ID. | ${acti_threat_groups_uuid}.None | Optional |
-| ThreatCampaign_uuid | Threat Campaign unique ID. | ${acti_threat_campaigns_uuid}.None | Optional |
-| ThreatActor_uuid | Threat Actor unique ID. | ${acti_threat_actors_uuid}.None | Optional |
+| ia_uuid | Intelligence Alert unique ID. | ${intelligence_alerts} | Optional |
+| ir_uuid | Intelligence Report unique ID. | ${intelligence_reports} | Optional |
+| MalwareFamily_uuid | Malware Family unique ID. | ${acti_malware_family_uuid} | Optional |
+| ThreatGroup_uuid | Threat Group unique ID. | ${acti_threat_groups_uuid} | Optional |
+| ThreatCampaign_uuid | Threat Campaign unique ID. | ${acti_threat_campaigns_uuid} | Optional |
+| ThreatActor_uuid | Threat Actor unique ID. | ${acti_threat_actors_uuid} | Optional |
## Playbook Image
---
diff --git a/Packs/AccentureCTI/Playbooks/playbook-ACTI_Report_Enrichment_README.md b/Packs/AccentureCTI/Playbooks/playbook-ACTI_Report_Enrichment_README.md
index 097e017cae30..d89e30934009 100644
--- a/Packs/AccentureCTI/Playbooks/playbook-ACTI_Report_Enrichment_README.md
+++ b/Packs/AccentureCTI/Playbooks/playbook-ACTI_Report_Enrichment_README.md
@@ -22,8 +22,8 @@ This playbook does not use any scripts.
| **Name** | **Description** | **Default Value** | **Required** |
| --- | --- | --- | --- |
-| ia_uuid | The Intelligence Alert uuid. | ${intelligence_alerts}.None | Optional |
-| ir_uuid | The Intelligence Report uuid. | ${intelligence_reports}.None | Optional |
+| ia_uuid | The Intelligence Alert uuid. | ${intelligence_alerts} | Optional |
+| ir_uuid | The Intelligence Report uuid. | ${intelligence_reports} | Optional |
| Domain | The extarcted Domain. | ${Domain} | Optional |
| IP | The extracted IP. | ${IP} | Optional |
| URL | The extracted URL. | ${URL} | Optional |
diff --git a/Packs/AccentureCTI/ReleaseNotes/2_2_3.md b/Packs/AccentureCTI/ReleaseNotes/2_2_3.md
new file mode 100644
index 000000000000..2360a65cc2f6
--- /dev/null
+++ b/Packs/AccentureCTI/ReleaseNotes/2_2_3.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### ACTI Indicator Query
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
+##### ACTI Vulnerability Query
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AccentureCTI/ReleaseNotes/2_2_4.md b/Packs/AccentureCTI/ReleaseNotes/2_2_4.md
new file mode 100644
index 000000000000..440c241054b5
--- /dev/null
+++ b/Packs/AccentureCTI/ReleaseNotes/2_2_4.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Query
+- Updated the Docker image to: *demisto/python3:3.10.9.45313*.
diff --git a/Packs/AccentureCTI/ReleaseNotes/2_2_5.md b/Packs/AccentureCTI/ReleaseNotes/2_2_5.md
new file mode 100644
index 000000000000..2101c787e265
--- /dev/null
+++ b/Packs/AccentureCTI/ReleaseNotes/2_2_5.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Query
+- Updated the Docker image to: *demisto/python3:3.10.10.47713*.
diff --git a/Packs/AccentureCTI/ReleaseNotes/2_2_6.md b/Packs/AccentureCTI/ReleaseNotes/2_2_6.md
new file mode 100644
index 000000000000..18273dcf6da6
--- /dev/null
+++ b/Packs/AccentureCTI/ReleaseNotes/2_2_6.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Query
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AccentureCTI/ReleaseNotes/2_2_7.md b/Packs/AccentureCTI/ReleaseNotes/2_2_7.md
new file mode 100644
index 000000000000..b34f638eda2b
--- /dev/null
+++ b/Packs/AccentureCTI/ReleaseNotes/2_2_7.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Vulnerability Query
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AccentureCTI/pack_metadata.json b/Packs/AccentureCTI/pack_metadata.json
index 0e00ff6a5e95..1701a5a0f732 100644
--- a/Packs/AccentureCTI/pack_metadata.json
+++ b/Packs/AccentureCTI/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Accenture CTI v2",
"description": "Accenture CTI provides intelligence regarding security threats and vulnerabilities.",
"support": "partner",
- "currentVersion": "2.2.2",
+ "currentVersion": "2.2.7",
"author": "Accenture",
"url": "https://www.accenture.com/us-en/services/security/cyber-defense",
"email": "CTI.AcctManagement@accenture.com",
diff --git a/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml b/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml
index e4c9dd147e06..eb30c26716e9 100644
--- a/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml
+++ b/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml
@@ -176,7 +176,7 @@ script:
description: Gets the feed indicators.
execution: false
name: acti-get-indicators
- dockerimage: demisto/jmespath:1.0.0.23980
+ dockerimage: demisto/py3-tools:1.0.0.48698
feed: true
isfetch: false
longRunning: false
diff --git a/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_2.md b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_2.md
new file mode 100644
index 000000000000..a6427a18c6f7
--- /dev/null
+++ b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_2.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Feed
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.41100*.
diff --git a/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_3.md b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_3.md
new file mode 100644
index 000000000000..e95c8078d065
--- /dev/null
+++ b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_3.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### ACTI Indicator Feed
+Fixed an issue where an indicator did not have a tag.
diff --git a/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_4.md b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_4.md
new file mode 100644
index 000000000000..d4ee1c8dd8d7
--- /dev/null
+++ b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_4.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Feed
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.45198*.
diff --git a/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_5.md b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_5.md
new file mode 100644
index 000000000000..40b49adbb39f
--- /dev/null
+++ b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_5.md
@@ -0,0 +1,4 @@
+#### Integrations
+##### ACTI Indicator Feed
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.47376*.
+- Fixed an issue where in some cases the integration did not fetch any indicator due to memory leaks.
diff --git a/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_6.md b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_6.md
new file mode 100644
index 000000000000..4a1082a9b656
--- /dev/null
+++ b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_6.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Feed
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.47433*.
diff --git a/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_7.md b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_7.md
new file mode 100644
index 000000000000..6eeadd7930fe
--- /dev/null
+++ b/Packs/AccentureCTI_Feed/ReleaseNotes/1_1_7.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ACTI Indicator Feed
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.48698*.
diff --git a/Packs/AccentureCTI_Feed/pack_metadata.json b/Packs/AccentureCTI_Feed/pack_metadata.json
index e0ce678dd5a2..a9d96effaf5a 100644
--- a/Packs/AccentureCTI_Feed/pack_metadata.json
+++ b/Packs/AccentureCTI_Feed/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Accenture CTI Feed",
"description": "Accenture Cyber Threat Intelligence Feed",
"support": "partner",
- "currentVersion": "1.1.1",
+ "currentVersion": "1.1.7",
"author": "Accenture",
"url": "https://www.accenture.com/us-en/services/security/cyber-defense",
"email": "CTI.AcctManagement@accenture.com",
diff --git a/Packs/AccessInvestigation/LayoutRules/Access.json b/Packs/AccessInvestigation/LayoutRules/Access.json
new file mode 100644
index 000000000000..5a7bbbe76976
--- /dev/null
+++ b/Packs/AccessInvestigation/LayoutRules/Access.json
@@ -0,0 +1,18 @@
+{
+ "rule_id": "Access_layout_rule",
+ "layout_id": "Access",
+ "description": "",
+ "rule_name": "Access Layout Rule",
+ "alerts_filter": {
+ "filter": {
+ "AND": [
+ {
+ "SEARCH_FIELD": "alert_type",
+ "SEARCH_TYPE": "EQ",
+ "SEARCH_VALUE": "Access"
+ }
+ ]
+ }
+ },
+ "fromVersion": "6.10.0"
+}
\ No newline at end of file
diff --git a/Packs/AccessInvestigation/ReleaseNotes/1_2_3.md b/Packs/AccessInvestigation/ReleaseNotes/1_2_3.md
new file mode 100644
index 000000000000..dd8120341e1a
--- /dev/null
+++ b/Packs/AccessInvestigation/ReleaseNotes/1_2_3.md
@@ -0,0 +1,4 @@
+
+#### Layout Rules
+##### New: Access Layout Rule
+- Added support for layouts and layout rules in XSIAM.
diff --git a/Packs/AccessInvestigation/pack_metadata.json b/Packs/AccessInvestigation/pack_metadata.json
index cd52a38b49fe..5a9f4822bca5 100644
--- a/Packs/AccessInvestigation/pack_metadata.json
+++ b/Packs/AccessInvestigation/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Access Investigation",
"description": "This Content Pack automates response to unauthorised access incidents and contains customer access incident views and layouts to aid investigation.",
"support": "xsoar",
- "currentVersion": "1.2.2",
+ "currentVersion": "1.2.3",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -10,10 +10,10 @@
"categories": [
"Network Security"
],
- "tags": ["Use Case"],
- "useCases": [
- "Access"
+ "tags": [
+ "Use Case"
],
+ "useCases": [],
"keywords": [],
"dependencies": {
"Active_Directory_Query": {
@@ -35,6 +35,7 @@
},
"marketplaces": [
"xsoar",
- "marketplacev2"
+ "marketplacev2",
+ "xpanse"
]
-}
+}
\ No newline at end of file
diff --git a/Packs/ActiveMQ/Integrations/ActiveMQ/ActiveMQ.yml b/Packs/ActiveMQ/Integrations/ActiveMQ/ActiveMQ.yml
index 84e87c5fbabd..adbfbba9715b 100644
--- a/Packs/ActiveMQ/Integrations/ActiveMQ/ActiveMQ.yml
+++ b/Packs/ActiveMQ/Integrations/ActiveMQ/ActiveMQ.yml
@@ -1,4 +1,4 @@
-category: Messaging
+category: Messaging and Conferencing
commonfields:
id: ActiveMQ
version: -1
@@ -106,7 +106,7 @@ script:
description: Subscribes to and reads messages from a topic or queue. Must provide either queue-name or topic-name. You can't provide both.
execution: false
name: activemq-subscribe
- dockerimage: demisto/py3-tools:1.0.0.40800
+ dockerimage: demisto/py3-tools:1.0.0.49475
feed: false
isfetch: true
longRunning: false
@@ -116,3 +116,5 @@ script:
subtype: python3
type: python
fromversion: 5.0.0
+tests:
+- No tests (auto formatted)
diff --git a/Packs/ActiveMQ/ReleaseNotes/1_1_11.md b/Packs/ActiveMQ/ReleaseNotes/1_1_11.md
new file mode 100644
index 000000000000..7b5753c5a20b
--- /dev/null
+++ b/Packs/ActiveMQ/ReleaseNotes/1_1_11.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ActiveMQ
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.41748*.
diff --git a/Packs/ActiveMQ/ReleaseNotes/1_1_12.md b/Packs/ActiveMQ/ReleaseNotes/1_1_12.md
new file mode 100644
index 000000000000..88ec42d6766e
--- /dev/null
+++ b/Packs/ActiveMQ/ReleaseNotes/1_1_12.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### ActiveMQ
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.44868*.
diff --git a/Packs/ActiveMQ/ReleaseNotes/1_1_13.md b/Packs/ActiveMQ/ReleaseNotes/1_1_13.md
new file mode 100644
index 000000000000..d3667623de68
--- /dev/null
+++ b/Packs/ActiveMQ/ReleaseNotes/1_1_13.md
@@ -0,0 +1,6 @@
+
+#### Integrations
+
+##### ActiveMQ
+- Updated the pack category to *Messaging and Conferencing*.
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.49475*.
diff --git a/Packs/ActiveMQ/pack_metadata.json b/Packs/ActiveMQ/pack_metadata.json
index 067c91201d56..25456250f6aa 100644
--- a/Packs/ActiveMQ/pack_metadata.json
+++ b/Packs/ActiveMQ/pack_metadata.json
@@ -2,13 +2,13 @@
"name": "ActiveMQ",
"description": "Uses Durable Topic Subscribers to fetch messages and ingest them as incidents in Demisto.",
"support": "xsoar",
- "currentVersion": "1.1.10",
+ "currentVersion": "1.1.13",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
"created": "2020-03-11T12:58:02Z",
"categories": [
- "Messaging"
+ "Messaging and Conferencing"
],
"tags": [],
"useCases": [],
diff --git a/Packs/Active_Directory_Query/.pack-ignore b/Packs/Active_Directory_Query/.pack-ignore
index d00b84728824..e387a0d74ad1 100644
--- a/Packs/Active_Directory_Query/.pack-ignore
+++ b/Packs/Active_Directory_Query/.pack-ignore
@@ -10,6 +10,11 @@ ignore=BA101
[file:classifier-User_Profile_-_Active_Directory_(Outgoing).json]
ignore=BA101
+[file:IAMInitADUser.yml]
+ignore=CJ105
+
[known_words]
LDAP
-TLS
\ No newline at end of file
+TLS
+userAccountControl
+
diff --git a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py
index 896aece2e0a5..4143c32c0f6e 100644
--- a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py
+++ b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py
@@ -677,13 +677,14 @@ def search_users(default_base_dn, page_size):
accounts = [account_entry(entry, custom_attributes) for entry in entries['flat']]
if 'userAccountControl' in attributes:
for user in entries['flat']:
- user_account_control = user.get('userAccountControl')[0]
- user['userAccountControlFields'] = user_account_to_boolean_fields(user_account_control)
-
- # display a literal translation of the numeric account control flag
- if args.get('user-account-control-out', '') == 'true':
- user['userAccountControl'] = COMMON_ACCOUNT_CONTROL_FLAGS.get(
- user_account_control) or user_account_control
+ if user.get('userAccountControl'):
+ user_account_control = user.get('userAccountControl')[0]
+ user['userAccountControlFields'] = user_account_to_boolean_fields(user_account_control)
+
+ # display a literal translation of the numeric account control flag
+ if args.get('user-account-control-out', '') == 'true':
+ user['userAccountControl'] = COMMON_ACCOUNT_CONTROL_FLAGS.get(
+ user_account_control) or user_account_control
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
diff --git a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.yml b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.yml
index 224251e9b17e..05feac2ae9ce 100644
--- a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.yml
+++ b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.yml
@@ -3,7 +3,7 @@ commonfields:
version: -1
name: Active Directory Query v2
display: Active Directory Query v2
-category: Data Enrichment & Threat Intelligence
+category: Authentication & Identity Management
description: The Active Directory Query integration enables you to access and manage Active Directory objects (users, contacts, and computers).
configuration:
- display: Server IP address (for example, 192.168.0.1)
@@ -804,7 +804,7 @@ script:
description: Value to set "Password Never Expire".
description: 'Modifies the AD account attribute "Password Never Expire".'
execution: true
- dockerimage: demisto/py3-tools:1.0.0.41100
+ dockerimage: demisto/py3-tools:1.0.0.49159
runonce: false
ismappable: true
isremotesyncout: true
diff --git a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query_test.py b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query_test.py
index 4cdd5ab5cfe7..a4147403ddc3 100644
--- a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query_test.py
+++ b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query_test.py
@@ -746,3 +746,63 @@ def test_get_ssl_version(ssl_version, expected_ssl_version):
from Active_Directory_Query import get_ssl_version
ssl_version_value = get_ssl_version(ssl_version)
assert ssl_version_value == expected_ssl_version
+
+
+def test_search_users_empty_userAccountControl(mocker):
+ """
+ Given:
+ The 'userAccountControl' attribute was returned empty
+ When:
+ Run the 'ad-get-user' command
+ Then:
+ The result returns without raise IndexError: list index out of range
+ """
+
+ import Active_Directory_Query
+
+ class EntryMocker:
+ def entry_to_json(self):
+ return '{"attributes": {"displayName": [], "mail": [], "manager": [], "memberOf": ["memberOf"], ' \
+ '"name": ["Guest"], "sAMAccountName": ["Guest"], "userAccountControl": []}, "dn": "test_dn"}'
+
+ class ConnectionMocker:
+ entries = [EntryMocker()]
+ result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': b''}}}}
+
+ def search(self, *args, **kwargs):
+ time.sleep(1)
+ return
+
+ expected_results = {'ContentsFormat': 'json',
+ 'Type': 1,
+ 'Contents': [{'attributes': {'displayName': [], 'mail': [], 'manager': [],
+ 'memberOf': ['memberOf'], 'name': ['Guest'],
+ 'sAMAccountName': ['Guest'],
+ 'userAccountControl': []}, 'dn': 'test_dn'}],
+ 'ReadableContentsFormat': 'markdown',
+ 'HumanReadable': '### Active Directory - Get Users\n|displayName|dn|mail|manager|memberOf|name'
+ '|sAMAccountName|userAccountControl|\n|---|---|---|---|---|---|---|---|\n| |'
+ ' test_dn | | | memberOf | Guest | Guest | |\n',
+ 'EntryContext': {'ActiveDirectory.Users(obj.dn == val.dn)': [{'dn': 'test_dn',
+ 'displayName': [], 'mail': [],
+ 'manager': [],
+ 'memberOf': ['memberOf'],
+ 'name': ['Guest'],
+ 'sAMAccountName': ['Guest'],
+ 'userAccountControl': []}],
+ 'Account(obj.ID == val.ID)': [{'Type': 'AD', 'ID': 'test_dn', 'Email': [],
+ 'Username': ['Guest'], 'DisplayName': [],
+ 'Managr': [], 'Manager': [],
+ 'Groups': ['memberOf']}],
+ 'ActiveDirectory(true)':
+ {'UsersPageCookie': base64.b64encode(b'').decode('utf-8')}}}
+
+ expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}'
+
+ mocker.patch.object(demisto, 'args', return_value={'page-size': '1'})
+
+ Active_Directory_Query.conn = ConnectionMocker()
+
+ with patch('logging.Logger.info') as mock:
+ Active_Directory_Query.search_users('dc', 1)
+ mock.assert_called_with(expected_results)
diff --git a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Pipfile.lock b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Pipfile.lock
index 9b9989a65503..15e7e5bc6d80 100644
--- a/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Pipfile.lock
+++ b/Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Pipfile.lock
@@ -29,21 +29,11 @@
},
"pyasn1": {
"hashes": [
- "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359",
- "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576",
- "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf",
- "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7",
- "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d",
- "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00",
- "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8",
- "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86",
- "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12",
- "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776",
- "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba",
- "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2",
- "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"
- ],
- "version": "==0.4.8"
+ "sha256:760db2dafe04091b000af018c45dff6e3d7a204cd9341b760d72689217a611cc",
+ "sha256:8fcd953d1e34ef6db82a5296bb5ca3762ce4d17f2241c48ac0de2739b2e8fbf2"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
+ "version": "==0.5.0rc2"
}
},
"develop": {
@@ -57,11 +47,11 @@
},
"attrs": {
"hashes": [
- "sha256:50f3c9b216dc9021042f71b392859a773b904ce1a029077f58f6598272432045",
- "sha256:8f7335278dedd26b58c38e006338242cc0977f06d51579b2b8b87b9b33bff66c"
+ "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836",
+ "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
- "version": "==21.3.0"
+ "markers": "python_version >= '3.6'",
+ "version": "==22.2.0"
},
"flake8": {
"hashes": [
@@ -80,54 +70,36 @@
},
"isort": {
"hashes": [
- "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7",
- "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"
+ "sha256:6db30c5ded9815d813932c04c2f85a360bcdd35fed496f4d8f35495ef0a261b6",
+ "sha256:c033fd0edb91000a7f09527fe5c75321878f98322a77ddcc81adbd83724afb7b"
],
- "markers": "python_version < '4.0' and python_full_version >= '3.6.1'",
- "version": "==5.10.1"
+ "markers": "python_version >= '3.7'",
+ "version": "==5.11.4"
},
"lazy-object-proxy": {
"hashes": [
- "sha256:043651b6cb706eee4f91854da4a089816a6606c1428fd391573ef8cb642ae4f7",
- "sha256:07fa44286cda977bd4803b656ffc1c9b7e3bc7dff7d34263446aec8f8c96f88a",
- "sha256:12f3bb77efe1367b2515f8cb4790a11cffae889148ad33adad07b9b55e0ab22c",
- "sha256:2052837718516a94940867e16b1bb10edb069ab475c3ad84fd1e1a6dd2c0fcfc",
- "sha256:2130db8ed69a48a3440103d4a520b89d8a9405f1b06e2cc81640509e8bf6548f",
- "sha256:39b0e26725c5023757fc1ab2a89ef9d7ab23b84f9251e28f9cc114d5b59c1b09",
- "sha256:46ff647e76f106bb444b4533bb4153c7370cdf52efc62ccfc1a28bdb3cc95442",
- "sha256:4dca6244e4121c74cc20542c2ca39e5c4a5027c81d112bfb893cf0790f96f57e",
- "sha256:553b0f0d8dbf21890dd66edd771f9b1b5f51bd912fa5f26de4449bfc5af5e029",
- "sha256:677ea950bef409b47e51e733283544ac3d660b709cfce7b187f5ace137960d61",
- "sha256:6a24357267aa976abab660b1d47a34aaf07259a0c3859a34e536f1ee6e76b5bb",
- "sha256:6a6e94c7b02641d1311228a102607ecd576f70734dc3d5e22610111aeacba8a0",
- "sha256:6aff3fe5de0831867092e017cf67e2750c6a1c7d88d84d2481bd84a2e019ec35",
- "sha256:6ecbb350991d6434e1388bee761ece3260e5228952b1f0c46ffc800eb313ff42",
- "sha256:7096a5e0c1115ec82641afbdd70451a144558ea5cf564a896294e346eb611be1",
- "sha256:70ed0c2b380eb6248abdef3cd425fc52f0abd92d2b07ce26359fcbc399f636ad",
- "sha256:8561da8b3dd22d696244d6d0d5330618c993a215070f473b699e00cf1f3f6443",
- "sha256:85b232e791f2229a4f55840ed54706110c80c0a210d076eee093f2b2e33e1bfd",
- "sha256:898322f8d078f2654d275124a8dd19b079080ae977033b713f677afcfc88e2b9",
- "sha256:8f3953eb575b45480db6568306893f0bd9d8dfeeebd46812aa09ca9579595148",
- "sha256:91ba172fc5b03978764d1df5144b4ba4ab13290d7bab7a50f12d8117f8630c38",
- "sha256:9d166602b525bf54ac994cf833c385bfcc341b364e3ee71e3bf5a1336e677b55",
- "sha256:a57d51ed2997e97f3b8e3500c984db50a554bb5db56c50b5dab1b41339b37e36",
- "sha256:b9e89b87c707dd769c4ea91f7a31538888aad05c116a59820f28d59b3ebfe25a",
- "sha256:bb8c5fd1684d60a9902c60ebe276da1f2281a318ca16c1d0a96db28f62e9166b",
- "sha256:c19814163728941bb871240d45c4c30d33b8a2e85972c44d4e63dd7107faba44",
- "sha256:c4ce15276a1a14549d7e81c243b887293904ad2d94ad767f42df91e75fd7b5b6",
- "sha256:c7a683c37a8a24f6428c28c561c80d5f4fd316ddcf0c7cab999b15ab3f5c5c69",
- "sha256:d609c75b986def706743cdebe5e47553f4a5a1da9c5ff66d76013ef396b5a8a4",
- "sha256:d66906d5785da8e0be7360912e99c9188b70f52c422f9fc18223347235691a84",
- "sha256:dd7ed7429dbb6c494aa9bc4e09d94b778a3579be699f9d67da7e6804c422d3de",
- "sha256:df2631f9d67259dc9620d831384ed7732a198eb434eadf69aea95ad18c587a28",
- "sha256:e368b7f7eac182a59ff1f81d5f3802161932a41dc1b1cc45c1f757dc876b5d2c",
- "sha256:e40f2013d96d30217a51eeb1db28c9ac41e9d0ee915ef9d00da639c5b63f01a1",
- "sha256:f769457a639403073968d118bc70110e7dce294688009f5c24ab78800ae56dc8",
- "sha256:fccdf7c2c5821a8cbd0a9440a456f5050492f2270bd54e94360cac663398739b",
- "sha256:fd45683c3caddf83abbb1249b653a266e7069a09f486daa8863fb0e7496a9fdb"
+ "sha256:0c1c7c0433154bb7c54185714c6929acc0ba04ee1b167314a779b9025517eada",
+ "sha256:14010b49a2f56ec4943b6cf925f597b534ee2fe1f0738c84b3bce0c1a11ff10d",
+ "sha256:4e2d9f764f1befd8bdc97673261b8bb888764dfdbd7a4d8f55e4fbcabb8c3fb7",
+ "sha256:4fd031589121ad46e293629b39604031d354043bb5cdf83da4e93c2d7f3389fe",
+ "sha256:5b51d6f3bfeb289dfd4e95de2ecd464cd51982fe6f00e2be1d0bf94864d58acd",
+ "sha256:6850e4aeca6d0df35bb06e05c8b934ff7c533734eb51d0ceb2d63696f1e6030c",
+ "sha256:6f593f26c470a379cf7f5bc6db6b5f1722353e7bf937b8d0d0b3fba911998858",
+ "sha256:71d9ae8a82203511a6f60ca5a1b9f8ad201cac0fc75038b2dc5fa519589c9288",
+ "sha256:7e1561626c49cb394268edd00501b289053a652ed762c58e1081224c8d881cec",
+ "sha256:8f6ce2118a90efa7f62dd38c7dbfffd42f468b180287b748626293bf12ed468f",
+ "sha256:ae032743794fba4d171b5b67310d69176287b5bf82a21f588282406a79498891",
+ "sha256:afcaa24e48bb23b3be31e329deb3f1858f1f1df86aea3d70cb5c8578bfe5261c",
+ "sha256:b70d6e7a332eb0217e7872a73926ad4fdc14f846e85ad6749ad111084e76df25",
+ "sha256:c219a00245af0f6fa4e95901ed28044544f50152840c5b6a3e7b2568db34d156",
+ "sha256:ce58b2b3734c73e68f0e30e4e725264d4d6be95818ec0a0be4bb6bf9a7e79aa8",
+ "sha256:d176f392dbbdaacccf15919c77f526edf11a34aece58b55ab58539807b85436f",
+ "sha256:e20bfa6db17a39c706d24f82df8352488d2943a3b7ce7d4c22579cb89ca8896e",
+ "sha256:eac3a9a5ef13b332c059772fd40b4b1c3d45a3a2b05e33a361dee48e54a4dad0",
+ "sha256:eb329f8d8145379bf5dbe722182410fe8863d186e51bf034d2075eb8d85ee25b"
],
- "markers": "python_version >= '3.6'",
- "version": "==1.7.1"
+ "markers": "python_version >= '3.7'",
+ "version": "==1.8.0"
},
"mccabe": {
"hashes": [
@@ -138,11 +110,11 @@
},
"packaging": {
"hashes": [
- "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
- "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
+ "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3",
+ "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"
],
- "markers": "python_version >= '3.6'",
- "version": "==21.3"
+ "markers": "python_version >= '3.7'",
+ "version": "==22.0"
},
"pluggy": {
"hashes": [
@@ -184,14 +156,6 @@
"index": "pypi",
"version": "==3.0.0a4"
},
- "pyparsing": {
- "hashes": [
- "sha256:04ff808a5b90911829c55c4e26f75fa5ca8a2f5f36aa3a51f68e27033341d3e4",
- "sha256:d9bdec0013ef1eb5a84ab39a3b3868911598afa494f5faa038647101504e2b81"
- ],
- "markers": "python_version >= '3.6'",
- "version": "==3.0.6"
- },
"pytest": {
"hashes": [
"sha256:8fc363e0b7407a9397e660ef81e1634e4504faaeb6ad1d2416da4c38d29a0f45",
@@ -210,11 +174,11 @@
},
"setuptools": {
"hashes": [
- "sha256:a4377723c53721515f72a3dfc1bfacdcd61edfa19a4cccf82e72e4f50d9cecbd",
- "sha256:ad0ea3d172404abb14d8f7bd7f54f2ccd4ed9dd00c9da0b1398862e69eb22c03"
+ "sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31",
+ "sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f"
],
- "markers": "python_version >= '3.7'",
- "version": "==60.1.0"
+ "index": "pypi",
+ "version": "==65.5.1"
},
"toml": {
"hashes": [
@@ -226,11 +190,11 @@
},
"tomli": {
"hashes": [
- "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224",
- "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"
+ "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
],
"markers": "python_version >= '3.7'",
- "version": "==2.0.0"
+ "version": "==2.0.1"
},
"wrapt": {
"hashes": [
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_0.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_0.md
new file mode 100644
index 000000000000..11089e2d198d
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_0.md
@@ -0,0 +1,4 @@
+
+#### Scripts
+##### IAMInitADUser
+- Added arguments for configuring the complexity of the generated password. These arguments are applicable when using "GeneratePassword" as the password generation script.
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_1.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_1.md
new file mode 100644
index 000000000000..cca83191f49a
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_1.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### Active Directory Query v2
+- Updated the IAMApiModule.
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.41100*.
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_2.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_2.md
new file mode 100644
index 000000000000..66c16896b05a
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_2.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Active Directory Query v2
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.44868*.
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_3.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_3.md
new file mode 100644
index 000000000000..240672377b03
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_3.md
@@ -0,0 +1,4 @@
+#### Integrations
+##### Active Directory Query v2
+- Fixed an issue where an error was raised in ***ad-get-user*** command if the 'userAccountControl' attribute was returned empty.
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.45904*.
\ No newline at end of file
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_4.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_4.md
new file mode 100644
index 000000000000..330a5d2fa878
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_4.md
@@ -0,0 +1,4 @@
+
+#### Scripts
+##### SendEmailToManager
+- Updated the Docker image to: *demisto/python3:3.10.10.47713*.
\ No newline at end of file
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_5.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_5.md
new file mode 100644
index 000000000000..de721efc421a
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_5.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Active Directory Query v2
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.49159*.
diff --git a/Packs/Active_Directory_Query/ReleaseNotes/1_6_6.md b/Packs/Active_Directory_Query/ReleaseNotes/1_6_6.md
new file mode 100644
index 000000000000..555fab45afcb
--- /dev/null
+++ b/Packs/Active_Directory_Query/ReleaseNotes/1_6_6.md
@@ -0,0 +1,6 @@
+
+#### Integrations
+
+##### Active Directory Query v2
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.49159*.
+- Updated the pack category to *Authentication & Identity Management*.
\ No newline at end of file
diff --git a/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.py b/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.py
index dc3f13e04b1b..1e8ee678f30b 100644
--- a/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.py
+++ b/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.py
@@ -1,6 +1,8 @@
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
+DEFAULT_PWD_GENERATION_SCRIPT = "GeneratePassword"
+
def main():
outputs: Dict[str, Any] = {}
@@ -15,11 +17,30 @@ def main():
to_email = args.get("to_email")
inc_id = args.get("inc_id")
email_subject = args.get("email_subject")
+ min_lcase = args.get("min_lcase", 0)
+ max_lcase = args.get("max_lcase", 10)
+ min_ucase = args.get("min_ucase", 0)
+ max_ucase = args.get("max_ucase", 10)
+ min_digits = args.get("min_digits", 0)
+ max_digits = args.get("max_digits", 10)
+ min_symbols = args.get("min_symbols", 0)
+ max_symbols = args.get("max_symbols", 10)
password = None
try:
# Generate a random password
- pwd_generation_script_output = demisto.executeCommand(pwd_generation_script, {})
+ if pwd_generation_script == DEFAULT_PWD_GENERATION_SCRIPT:
+ pwd_generation_script_output = demisto.executeCommand(pwd_generation_script,
+ {"min_lcase": min_lcase,
+ "max_lcase": max_lcase,
+ "min_ucase": min_ucase,
+ "max_ucase": max_ucase,
+ "min_digits": min_digits,
+ "max_digits": max_digits,
+ "min_symbols": min_symbols,
+ "max_symbols": max_symbols})
+ else:
+ pwd_generation_script_output = demisto.executeCommand(pwd_generation_script, {})
if is_error(pwd_generation_script_output):
raise Exception(f'An error occurred while trying to generate a new password for the user. '
f'Error is:\n{get_error(pwd_generation_script_output)}')
diff --git a/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.yml b/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.yml
index b4ab55a2db2c..d9216ae31622 100644
--- a/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.yml
+++ b/Packs/Active_Directory_Query/Scripts/IAMInitADUser/IAMInitADUser.yml
@@ -1,52 +1,51 @@
args:
-- default: false
- defaultValue: GeneratePassword
+- defaultValue: GeneratePassword
description: The password generator script.
- isArray: false
name: pwdGenerationScript
- required: false
- secret: false
-- default: false
- description: The sAMAccountName of the employee.
- isArray: false
+- description: The sAMAccountName of the employee.
name: sAMAccountName
required: true
- secret: false
-- default: false
- description: The email of the employee.
- isArray: false
+- description: The email of the employee.
name: email
required: true
- secret: false
-- default: false
- description: The display name of the employee.
- isArray: false
+- description: The display name of the employee.
name: displayname
- required: false
- secret: false
-- default: false
- description: The email address that the password will send to.
- isArray: false
+- description: The email address that the password will send to.
name: to_email
required: true
- secret: false
-- default: false
- description: The incident ID.
- isArray: false
+- description: The incident ID.
name: inc_id
- required: false
- secret: false
-- default: false
- description: The subject of the email sent to IT.
- isArray: false
+- description: The subject of the email sent to IT.
name: email_subject
- required: false
- secret: false
+- name: min_lcase
+ description: Minimum number of lower case characters to include in password. Used with the GeneratePassword script.
+ defaultValue: "0"
+- name: max_lcase
+ description: Maximum number of lower case characters to include in password. Used with the GeneratePassword script.
+ defaultValue: "10"
+- name: min_ucase
+ description: Minimum number of upper case characters to include in password. Used with the GeneratePassword script.
+ defaultValue: "0"
+- name: max_ucase
+ description: Maximum number of upper case characters to include in password. Used with the GeneratePassword script.
+ defaultValue: "10"
+- name: min_digits
+ description: Minimum number of digits to include in password. Used with the GeneratePassword script.
+ defaultValue: "0"
+- name: max_digits
+ description: Maximum number of digits to include in password. Used with the GeneratePassword script.
+ defaultValue: "10"
+- name: min_symbols
+ description: Minimum number of symbols to include in password. Used with the GeneratePassword script.
+ defaultValue: "0"
+- name: max_symbols
+ description: Maximum number of symbols to include in password. Used with the GeneratePassword script.
+ defaultValue: "10"
comment: |-
Generates password,
Set an AD user account with this password.
Enable the account.
- Send mail to the user with tha account information.
+ Send mail to the user with the account information.
This script is running `send-mail` command, make sure there is a matching Integration configurated.
commonfields:
id: IAMInitADUser
@@ -55,12 +54,10 @@ enabled: true
name: IAMInitADUser
outputs:
- contextPath: IAM.InitADUser.success
- description: True if the Active Directory user was successfully activated, false
- otherwise.
+ description: True if the Active Directory user was successfully activated, false otherwise.
type: Boolean
- contextPath: IAM.InitADUser.sentMail
- description: True if the mail containing the information about the user activation
- and its auto-generated password was successfully sent to IT, false otherwise.
+ description: True if the mail containing the information about the user activation and its auto-generated password was successfully sent to IT, false otherwise.
type: Boolean
- contextPath: IAM.InitADUser.errorDetails
description: The error details, if exists.
@@ -68,18 +65,21 @@ outputs:
- contextPath: IAM.InitADUser.sendMailError
description: The error received from send-mail command, if exists.
type: String
-script: '-'
+script: ''
subtype: python3
-system: false
+system: true
tags:
- IAM
- active directory
- Utility
-timeout: '0'
type: python
-dockerimage: demisto/python3:3.9.8.24399
+dockerimage: demisto/python3:3.10.9.40422
runas: DBotWeakRole
runonce: false
tests:
-- No tests (auto formatted)
+- Active Directory Test
fromversion: 5.0.0
+contentitemexportablefields:
+ contentitemfields:
+ fromServerVersion: ""
+scripttarget: 0
diff --git a/Packs/CommonScripts/Scripts/RegexGroups/README.md b/Packs/Active_Directory_Query/Scripts/IAMInitADUser/README.md
similarity index 100%
rename from Packs/CommonScripts/Scripts/RegexGroups/README.md
rename to Packs/Active_Directory_Query/Scripts/IAMInitADUser/README.md
diff --git a/Packs/Active_Directory_Query/Scripts/SendEmailToManager/SendEmailToManager.yml b/Packs/Active_Directory_Query/Scripts/SendEmailToManager/SendEmailToManager.yml
index 4341699bce75..ebccee8ed236 100644
--- a/Packs/Active_Directory_Query/Scripts/SendEmailToManager/SendEmailToManager.yml
+++ b/Packs/Active_Directory_Query/Scripts/SendEmailToManager/SendEmailToManager.yml
@@ -42,4 +42,4 @@ dependson:
timeout: 0s
tests:
- No test
-dockerimage: demisto/python3:3.10.7.33922
+dockerimage: demisto/python3:3.10.10.47713
diff --git a/Packs/Active_Directory_Query/pack_metadata.json b/Packs/Active_Directory_Query/pack_metadata.json
index 5e9a1d5bd83a..24f9c13cb019 100644
--- a/Packs/Active_Directory_Query/pack_metadata.json
+++ b/Packs/Active_Directory_Query/pack_metadata.json
@@ -2,13 +2,13 @@
"name": "Active Directory Query",
"description": "Active Directory Query integration enables you to access and manage Active Directory objects (users, contacts, and computers).",
"support": "xsoar",
- "currentVersion": "1.5.9",
+ "currentVersion": "1.6.6",
"author": "Cortex XSOAR",
"url": "",
"email": "",
"created": "2020-03-09T14:47:00Z",
"categories": [
- "Authentication"
+ "Authentication & Identity Management"
],
"tags": [],
"useCases": [],
diff --git a/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight.py b/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight.py
new file mode 100644
index 000000000000..0e7e775d3f99
--- /dev/null
+++ b/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight.py
@@ -0,0 +1,367 @@
+import demistomock as demisto # noqa: F401
+from CommonServerPython import * # noqa: F401
+import requests
+import json
+import time
+import os
+from typing import Any, Dict
+
+import urllib3
+
+from urllib3.exceptions import InsecureRequestWarning
+
+urllib3.disable_warnings(InsecureRequestWarning)
+
+if not demisto.params()['proxy']:
+ del os.environ['HTTP_PROXY']
+ del os.environ['HTTPS_PROXY']
+ del os.environ['http_proxy']
+ del os.environ['https_proxy']
+
+''' GLOBAL VARS '''
+URL = demisto.getParam('url') + '/aellaelastic'
+USERNAME = demisto.getParam('credentials')['identifier']
+PASSWORD = demisto.getParam('credentials')['password']
+FETCH_INTERVAL = demisto.getParam('fetch_interval')
+VALIDATE_CERT = not demisto.params().get('insecure', True)
+
+''' HELPER FUNCTIONS '''
+
+
+def make_rest_call(end_point, username, password, action_result,
+ headers={}, params=None,
+ data=None, method='get'):
+ headers.update({'Accept': 'application/json'})
+ headers.update({'Content-Type': 'application/json'})
+
+ resp_json = None
+ request_func = getattr(requests, method)
+ if (not request_func):
+ action_result['status'] = 'Unsupported method {}'.format(method)
+ return
+ try:
+ r = request_func(end_point, auth=(username, password),
+ data=json.dumps(data) if data else None,
+ headers=headers,
+ verify=VALIDATE_CERT, params=params)
+ except Exception as e:
+ action_result['status'] = 'Server REST API exception {}'.format(e)
+ return
+
+ if r is not None:
+ action_result['r_text'] = r.text
+ action_result['headers'] = r.headers
+ action_result['r_status_code'] = r.status_code
+
+ try:
+ resp_json = r.json()
+ except Exception as e:
+ demisto.debug(f"Error while parsing response JSON: {e}")
+ action_result['status'] = 'Json parse error {}'.format(
+ r.text.replace('{', ' ').replace('}', ' '))
+ return
+
+ if 200 <= r.status_code <= 399:
+ action_result['status'] = 'Success'
+ else:
+ action_result['status'] = 'Failed'
+
+ action_result['data'] = resp_json
+ return
+
+
+''' FUNCTIONS '''
+
+
+def fetch_incidents_command():
+ fetch_interval = demisto.getParam('fetch_interval')
+ if fetch_interval is None:
+ fetch_interval = 15 * 60 # 15 minutes
+ else:
+ try:
+ fetch_interval = int(fetch_interval) * 60
+ if fetch_interval < 15 * 60:
+ # Min is 15 minutes
+ fetch_interval = 15 * 60
+ except ValueError as e:
+ demisto.debug(f"Error in parsing fetch_interval: {e}")
+ fetch_interval = 15 * 60
+
+ cur_t = time.time()
+
+ checkTime = cur_t - fetch_interval
+
+ event = demisto.getParam('event_name')
+ if event is None:
+ event = '*'
+
+ score = demisto.getParam('severity')
+ if score:
+ try:
+ score = int(score)
+ if score < 0 or score > 100:
+ score = 50
+ except ValueError:
+ demisto.debug(f"Failed to convert the value of severity to an integer: {score}")
+ score = 50
+ else:
+ # Default score
+ score = 50
+
+ index_str = 'aella-ser*'
+
+ query_str = 'event_name:{} AND severity:>{}'.format(event, score)
+
+ ts_str = str(int(checkTime * 1000))
+
+ query_json = {
+ 'query': {
+ 'bool': {
+ 'must': [
+ {
+ 'query_string': {
+ 'query': query_str,
+ 'analyze_wildcard': True
+ }
+ },
+ {
+ 'range': {
+ 'timestamp': {
+ 'gt': ts_str
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+
+ end_point = URL + '/{0}/{1}/_search'.format(index_str, 'amsg')
+
+ action_result: Dict[Any, Any] = {}
+ make_rest_call(end_point,
+ USERNAME, PASSWORD, action_result, data=query_json
+ )
+
+ if action_result['status'] == 'Success':
+ demisto.info('Poll incidents ok')
+ data = action_result.get('data')
+ if not isinstance(data, dict):
+ demisto.error('Data returned in wrong format {}'.format(data))
+ demisto.incidents([])
+ return
+ hits = data.get('hits', {}).get('hits', [])
+ incidents = []
+
+ try:
+ cached_event = demisto.getLastRun().get("cached_event", {})
+ except Exception as e:
+ demisto.debug('Error while accessing the last run data: {}'.format(e))
+ cached_event = {}
+
+ new_cached_event = {}
+
+ for hit in hits:
+ source = hit.get('_source', None)
+ if not source:
+ continue
+ event_name = source.get('event_name', None)
+ try:
+ event_severity = int(source.get('severity', None))
+ if event_severity > 75:
+ severity = 3
+ elif event_severity > 50:
+ severity = 2
+ else:
+ severity = 1
+ except ValueError as e:
+ demisto.debug('Error while converting the severity value to int: {}'.format(e))
+ severity = 0
+
+ if not event_name:
+ continue
+ eid = hit['_id']
+
+ new_cached_event[eid] = True
+ if cached_event.get(eid, False):
+ continue
+
+ sdi = '{}_{}'.format(event_name, eid)
+ incident = {
+ 'name': sdi,
+ 'severity': severity,
+ 'rawJSON': json.dumps({'name': sdi,
+ 'label': 'Starlight event',
+ 'aella_eid': eid,
+ 'aella_event': event_name,
+ 'event_severity': event_severity
+ })
+ }
+ incidents.append(incident)
+ demisto.info('Incidents is {}'.format(incidents))
+ demisto.setLastRun({'cached_event': new_cached_event})
+ demisto.incidents(incidents)
+
+ else:
+ demisto.info('Poll incidents failed {}'.format(action_result))
+ demisto.incidents([])
+
+
+def aella_get_event_command():
+ demisto.info('Aella started get-event with {}'.format(demisto.args()['event_id']))
+ event_id = demisto.args()['event_id']
+ query_json = {'query': {'match': {'_id': event_id}}}
+ end_point = URL + '/{0}/{1}/_search'.format('aella-ser*', 'amsg')
+
+ action_result: Dict[Any, Any] = {}
+
+ make_rest_call(end_point,
+ USERNAME, PASSWORD, action_result, data=query_json
+ )
+ if action_result['status'] == 'Success':
+ demisto.info('Run Query is successful')
+ response = action_result.get('data', {})
+ timed_out = response.get('timed_out', False)
+ hits = response.get('hits', {}).get('hits', [])
+ source = {}
+ dbot_scores = []
+ if len(hits) == 0:
+ demisto.info('Get event got empty result')
+ for item in hits:
+ index = item.get('_index', '')
+ source = item.get('_source', {})
+ if index:
+ source['_index'] = index
+ source['timed_out'] = timed_out
+ demisto.debug('This is my run_query result aellaEvent {}'.format(source))
+
+ # Check url reputation
+ url_str = source.get('url', '')
+ if url_str:
+ url_reputation = source.get('url_reputation', '')
+ if url_reputation and url_reputation != 'Good':
+ dbot_score = {
+ 'Vendor': 'Aella Data',
+ 'Indicator': url_str,
+ 'Type': 'url',
+ 'Score': 3,
+ 'Malicious': {
+ 'Vendor': 'Aella Data',
+ 'Detections': 'URL reputation {0}'.format(url_reputation),
+ 'URL': url_str
+ }
+ }
+ else:
+ dbot_score = {
+ 'Vendor': 'Aella Data',
+ 'Indicator': url_str,
+ 'Type': 'url',
+ 'Malicious': None
+ }
+ if url_reputation is None:
+                    # Unknown
+ dbot_score['Score'] = 0
+ else:
+ # Good
+ dbot_score['Score'] = 1
+ dbot_scores.append(dbot_score)
+
+ # Check src ip reputation
+ srcip_str = source.get('srcip', '')
+ if srcip_str:
+ srcip_reputation = source.get('srcip_reputation', '')
+ if srcip_reputation and srcip_reputation != 'Good':
+ dbot_score = {
+ 'Vendor': 'Aella Data',
+ 'Indicator': srcip_str,
+ 'Type': 'ip',
+ 'Score': 3,
+ 'Malicious': {
+ 'Vendor': 'Aella Data',
+ 'Detections': 'Source IP reputation {0}'.format(srcip_reputation),
+ 'IP': srcip_str
+ }
+ }
+ else:
+ dbot_score = {
+ 'Vendor': 'Aella Data',
+ 'Indicator': srcip_str,
+ 'Type': 'ip',
+ 'Malicious': None
+ }
+ if srcip_reputation is None:
+                    # Unknown
+ dbot_score['Score'] = 0
+ else:
+ # Good
+ dbot_score['Score'] = 1
+ dbot_scores.append(dbot_score)
+
+ # Check dst ip reputation
+ dstip_str = source.get('dstip', '')
+ if dstip_str:
+ dstip_reputation = source.get('dstip_reputation', '')
+ if dstip_reputation and dstip_reputation != 'Good':
+ dbot_score = {
+ 'Vendor': 'Aella Data',
+ 'Indicator': dstip_str,
+ 'Type': 'ip',
+ 'Score': 3,
+ 'Malicious': {
+ 'Vendor': 'Aella Data',
+ 'Detections': 'Destination IP reputation {0}'.format(dstip_reputation),
+ 'IP': dstip_str
+ }
+ }
+ else:
+ dbot_score = {
+ 'Vendor': 'Aella Data',
+ 'Indicator': dstip_str,
+ 'Type': 'ip',
+ 'Malicious': None
+ }
+ if dstip_reputation is None:
+                    # Unknown
+ dbot_score['Score'] = 0
+ else:
+ # Good
+ dbot_score['Score'] = 1
+ dbot_scores.append(dbot_score)
+
+ break
+ demisto.results({
+ 'Type': entryTypes['note'],
+ 'ContentsFormat': formats['json'],
+ 'Contents': source,
+ 'HumanReadable': tableToMarkdown('Aella Star Light Event <{0}>'.format(event_id), source),
+ 'EntryContext': {
+ 'Aella.Event(val._id==obj._id)': source,
+ 'DBotScore': createContext(dbot_scores, removeNull=True),
+ }
+ })
+ else:
+ demisto.info('Get event failed {}'.format(action_result))
+ demisto.results(return_error('Failed to get event'))
+
+
+''' EXECUTION CODE '''
+demisto.info('Command is {}'.format(demisto.command()))
+
+if demisto.command() == 'test-module':
+ # This is the call made when pressing the integration test button.
+ action_result: Dict[Any, Any] = {}
+
+ make_rest_call(URL + '/_cluster/health',
+ USERNAME, PASSWORD, action_result
+ )
+
+ if action_result['status'] == 'Success':
+ demisto.results('ok')
+ else:
+ demisto.results('failed')
+
+if demisto.command() == 'fetch-incidents':
+ fetch_incidents_command()
+
+if demisto.command() == 'aella-get-event':
+ aella_get_event_command()
diff --git a/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight.yml b/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight.yml
new file mode 100644
index 000000000000..f75dd00ddfbd
--- /dev/null
+++ b/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight.yml
@@ -0,0 +1,93 @@
+commonfields:
+ id: Aella Star Light
+ version: -1
+name: Aella Star Light
+display: Aella Star Light
+category: Data Enrichment & Threat Intelligence
+description: Aella Star Light Integration
+configuration:
+- display: Server URL (e.g. https://starlight.companyname.com:8889)
+ name: url
+ defaultvalue: ""
+ type: 0
+ required: true
+- display: User name
+ name: credentials
+ defaultvalue: ""
+ type: 9
+ required: true
+- display: Fetch incidents
+ name: isFetch
+ type: 8
+ required: false
+- display: Incident type
+ name: incidentType
+ type: 13
+ required: false
+- display: Fetching interval in minutes (default is 15, minimum is 15)
+ name: fetch_interval
+ defaultvalue: "15"
+ type: 0
+ required: false
+- display: The specific security event to look for. Default is all events
+ name: event_name
+ defaultvalue: ""
+ type: 0
+ required: false
+- display: Security event severity threshold, between 0-100
+ name: severity
+ defaultvalue: "50"
+ type: 0
+ required: false
+- display: Trust any certificate (not secure)
+ name: insecure
+ type: 8
+ required: false
+- display: Use system proxy settings
+ name: proxy
+ defaultvalue: "false"
+ type: 8
+ required: false
+script:
+ script: ''
+ type: python
+ subtype: python3
+ commands:
+ - name: aella-get-event
+ arguments:
+ - name: event_id
+ required: true
+ description: event id from the Star Light incident
+ outputs:
+ - contextPath: Aella.Event.event_name
+ description: The event name
+ type: string
+ - contextPath: Aella.Event.severity
+ description: The severity score
+ type: string
+ - contextPath: Aella.Event.dstip
+ description: The Destination IP
+ type: string
+ - contextPath: Aella.Event.srcip
+ description: The source IP
+ type: string
+ - contextPath: Aella.Event.tenantid
+ description: The tenant ID
+ type: string
+ - contextPath: Aella.Event.srcip_reputation
+ description: The source IP reputation
+ type: string
+ - contextPath: Aella.Event.dstip_reputation
+ description: The destination IP reputation
+ type: string
+ - contextPath: Aella.Event.dstip_geo
+ description: The destination IP geo location
+ - contextPath: Aella.Event.srcip_geo
+ description: The source IP geo location
+  description: Query the details for a specific Star Light event
+ isfetch: true
+ runonce: false
+ dockerimage: demisto/python3:3.10.10.48392
+tests:
+- No test
+fromversion: 6.5.0
diff --git a/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight_image.png b/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight_image.png
new file mode 100644
index 000000000000..c652f427853b
Binary files /dev/null and b/Packs/Aella_StarLight/Integrations/AellaStarLight/AellaStarLight_image.png differ
diff --git a/Packs/Aella_StarLight/Integrations/integration-Aella_StarLight_README.md b/Packs/Aella_StarLight/Integrations/AellaStarLight/README.md
similarity index 100%
rename from Packs/Aella_StarLight/Integrations/integration-Aella_StarLight_README.md
rename to Packs/Aella_StarLight/Integrations/AellaStarLight/README.md
diff --git a/Packs/Aella_StarLight/Integrations/integration-Aella_StarLight.yml b/Packs/Aella_StarLight/Integrations/integration-Aella_StarLight.yml
deleted file mode 100644
index 58c2292876c4..000000000000
--- a/Packs/Aella_StarLight/Integrations/integration-Aella_StarLight.yml
+++ /dev/null
@@ -1,461 +0,0 @@
-commonfields:
- id: Aella Star Light
- version: -1
-name: Aella Star Light
-display: Aella Star Light
-category: Data Enrichment & Threat Intelligence
-image: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAANUAAAAuCAYAAAC/FVxZAAAACXBIWXMAAAWJAAAFiQFtaJ36AAAOgUlEQVR42u1dS29bxxX++JJESRYph0HsODJpNK5RFA0ZYJrWXUQM+ti0DVnkB4hapOhOzLItUFPbbiyvUsAL0Q36QFHDtIHs2phOgBopBghZIEldoxGZh23ZtEValkiKIm8XOlcaXc19kLqkEvUegJBEDueee+Z8c745c+bKpSgK7BDXud++ieMnlwFUARQ0H0cAROD2PAPf0JvKbOJfcMSRQypeWwA1+3oSoeM/BXDCsKHPB0xMNgGkHdM7cljFbVM/cTypbpq28g2XAcQdszvigMpckmjUw2ib4Gp4ZAxA1HX53YhjekccUOlTvxiAMADg4f26bkOPB3B7QtsgdMQRB1S6ktr+7cGdin6U8q9Iv+OIIw6oJNRPlSe1KV0K6B9rC385FNARB1Sm1E8VGQXcTf3gUEBHHFBZoX5GFHA39XMooCMOqCxRPyMKuJv6ORTQkUMtezZ//7xc+8mYx/3E7It/K3zwwh7qp9LCRw+aytPHhw2oHwDgO09N/LLcaP3Joq6l8IivZOfNM8bSAM5LPjrFOS8d5MAwxjIAzonvcc5d3bbpk25xANctNK1hp7qmACDPOc/9X4FqfulBxAX85ajPs/bcsDfkcemPz/e+FV1eSssDzfOjQ/cDXs8UABwb8q48O+ydlLWbGvH9GMDPLepaKzdakfCIr2rj/acNaG3GmXO7Ao1MggCm6fdpAHOMsRqABQALnPPqIJVljMUABDnn+UFGqrgC+B+22v7qZqf29VFfwO+WM8TPmi2PXqeVjfZUwLv18VGfZx2AFFS1zc6JSa/Hqq4BAFm7Ehw024YdUO1LCpzzuAVbR7BVSRMHMEPRNcUYS3LOCwPUd4HA3ddo7tZLHrQVJfDx2ga+aG7WZF/88EkzpNfparuzxfxcLoy4Xbr1gI8321CAehf6JsqNll11g0aJkjBjzMlO2iSc8xLnPMs5TwE4BeAqTWh5ih6HM1Exv/QgIoTqbVne2Ax8vNasbHR2qtk3OsrKw1Zbt9O2omCt3fls0us2BcyTdqfSpc6ZcqO1rwQHYyxoIeKlHDj0DWBJAJeIfeRpPA4fqAzWF6h3lNCHa816dbO9DgBLjY22WceVjXboKZ/HFDAPW+2pLnUOUBjfjySpH1GK2qh42Ab7SwauFIAbNo3nlw9U80sPgmYzswL4P6m3RkuN1kpxtREy67jRUfxjHrcpYHqggLBhvaOdQMo6fTrRqr+SpGTHDK27DoV4BScLWPnCneYmLt9/XHlpYjQ04dXf5nphfLg+6nFX1tsdU2A9aXcqRywAUGWq4RFfz4tbGryodgHLOc8xxsqa5EW6l1mUIlyKFuZi1UkRW9mybL8zUAa6xciZ49g6PKrqdkNNPpA9SgOIVlXGWBbAHNk6bYGyx+ilinooNqdNetB3xLZBej8uSbhUdXxFvaYI+pJwzT12clOUsrz4/2it0a53lNB71bX1/9Y3GnrtzgZGK0e9npCVPruggMXwiM/uKAUAOc1PMWER69Jp02T08wASGpBGsZX9us4Yyw9ydmaMBRljeQAfYCv7Nq3RbZpecwCWSL9B0N+sELX0dM+QTRdJP1WvCNn4HIAPGGM5jc4xbO2nqS91Mr2uecUktloAsETjOKPpc4beX6J2u0F1r7n5u/V2J2DVAv+s1UMA0AFGP1prjvyjtl5paY7k+91uRPy+qYDX7bfSZxcU0A46pu2jKMw2WYsg1Bv8LBnbij2nARQGkf0iR8vLElEm+vUdWBRdajSBBXVsqm5wzwKY5JzHOOdxznmENrtnicInNGNYAPCK8FLXza9oXgXJJDtH7X/GOXfR9eKc86DmmnME+h3690Wz9Yt7GzgKuJjXhcdGBthsbfjuvX+jtSvKAPi7C/Asf972tJoNABjz+9
v4za/f8rhcWPh3+QcfP157zsy4z4/7H7z10jfeNmhS2A/towGSJSiy4gBLKGCSMRY026gkw87oRViaXcOSpEueMRbrM93KSygvsHvzNiLRL0oOFu8z7gsE4hjpCoGmzZDzxvTGgHOeZYzlqJ8E2VOldGJ/VWqfNxjHFOlSBBA3uWaB+k8zxrY3s73XoierAH70avHTdFuRluxsy+d3736BxvqeDdMWgNbEUaBaqeHhcmCtiksqTXtn+ZEatg3lbr0ZDo/4vt3nwUsZUD/tLCU6flIniomDf07isGnOeVbD0bOaiKFuavfFcclJohLdklrnovvIamkhYywyANBPC7ROO14ps0mN1mcZ8rUkjCs9rCTBkhauWRDWhNsTwnam4Vr05AKAF7E3tbzj+HfvGD/YJRgKYOprlbFTZwoGTqsrrsvv9m3DlahFQvP2VYmzZHugnRmJ08ZFQNEglKgC4aqWakkWz3aJrN+4bLam92R0d1Brv5gkO1juIqlTssFHwjp+YTQh7F5TiX9ci54s0CBc0jbsdDr1e/fumF5h/EQkdOKHifkzN28nAUB57eWqEVAlRjzIKKVy/KLE6SMG2cRpSTaxYJIsqfW6dutSqpTZU19XTXQbaD2eiQT2C5QeQV3Yx0Swt0qd6GDq1eKnOZq1AwBQqdyvADDN0gVOTH1G7a6cuXn7As3i6gL+IEGVtgIqIVqdl3w/bVHnBRPaUKJMnBg5E31KBKRtsNOBSA8V9/F9Xi+P7usCI6agEsCVe7X4aYwcL1oulyylvUcnQ2IafQ5A/Phk8Fd3VyxNgAHX5XeTymsv23o8gDJsYQn101MqJwFVUsfh4hLqF2OMWYkge/QcZIGpWrWNnT2rOPSLjAch1R7vIwiTfa4+2C5CAWPGMqgIWCUAse++/d61e+VPforhEWPq9/QxuNx70ujRI37/H+ub7XvV1dVjFqOV3Wdu0gYZOz2pYXemMExV1VrdghLKcr1HPYN9doQU2TcCeTbwoMSUdpETqxu/ESFCxIRxKtp5XzTpqNeLCeMj0v0bWvpv6Qm17//191tl508900AwNGKB+u35KDQ+rlQVVwVrqyEYP2o6abMj6RXPJnqgXKk+AH4Qs2qKKKmV/bPyAUSriF6igbZB0hrHLVJUq9J9lQiQwX1MaOI11agX1oBH1TEv/Ixor2kKKtfs6ztZs4fLI1itVXAiEoLbY0b9dmdEXAgODw2h6ZmsYW01gM3WoChg0qIzWZGEhT0rs4N7ttMfC4DS29JQndPQSfoM+CBFl5o24yboXgYwD0kpkqZ93AZ9skTpigTYnFEmUJbA8lp0yh3ZaIRQutXA8fAI/GNm1G83t/F6KssdJYTxCaBZr6G+HhgABUzZ7AcpC4mIOL4EQg4r03UeOidvD6CwNSVLGgmAMtyEtdleCwSoS1RF3xNl7x5UAKAoI7hTAiYmK3j62ZAJ9duWIx63Z1n9Y9gfgNe3RQc7nb5QQJ10d7kLoMWxd1NXW2Sb11wjYKUCY0Aii9LznPPMQa3rDNa7Wc37qo7JAdpyjiJmNxNxrBdQ6a87Hq+EsP6kgmMnQ0bUT6CAk8NuF5rqgUePN4SJyTrWVhW0NkY1FDCuvPZyvg8JipzVzUQqQ9GCKqzJ0hV0rpuxSDO2wc45tztKyPrLWZhIBhVJM7RuKUrGJEw2KXU5ifSqS1yYJLEfULlN1lPmSm62Qr7HD+sut7tlRYMJr3tZ85YfY0dGMTpewe4HzdgRrWR9ZK1+mWbIq0ZgpWzgno1cIxpFWSVtKjbfB7+tWgSaGNmTAwKUyAL0UuGlLru1g+oXuriHiCzouO1Afutx9T+02Lxh1vaIxy1/0svQcAhHghV4vLaAirJG2ixWuYd9IBkIk5qKau26JYCtCvSkjjPJAJTpg+/K7nVBtqAnXQuQZ/4iNgMqJUTMNwyYQ7CLPi2fCdQ5GVDt4V6la2tbQAUge+vs6dKts6fjtAjWFa/LFfLpPfrM7Q7hSG
Adw/4GgLDr8rv7ORaRsmoEk2gli0QB0Ta0RilK2lxhjCl0NinPGCtRZk07+LP9KFjVcdYwts5zlTR6XTFwygVqF9snmCJUTb5I15rnnOuNyVUAUSsZPQLpeR1WIZtkYhJbqUdQklaOuxB9Twj+HjFdUxH1s5qK3ubpt86ezpy5eTuPvdXOO9HK61551GpP6vQ1Cv8o4PU+wtpqCj3skusUz1pZTxjd34wEtFnNWiQP+eaj0TmmN7SFtzbLLOQp9TD096O0G98ByKvIgxacXj19mxRsU8ZWBX/OhCEkAORo0z2vM87bGTuK9glsPf4sC6CqSXJkKRmxQBUvOc3nC0RJ84yxtM41I9QuAeACjfk5AmMOQNBrQ5QqKosXd82yt86ezp+5eTsmGGa3lb3u9qOWybNjfENH4R9LorfSE1mUKu4jGshAtetIBB09iAuDbCZlAJk+A0o99xMkhwtY0ClFILBSqxlFd3ta4t6P2bGKHGNMnRCu0zm3Ar0i9FInqwtqjSNjTK1wWKLlSFyMRkKfiwAWCVyvcM7znPMM2WqOrqk+/qCEnaqKKE06s+rYkW4JACsAbtgBKqlT3Dp7ugogeebm7bR2QFUK2DL7J95DQ+/sY4E+b1cigAZ4XmetUdIkNlKU1UrTIExrIoD6jAozMFnRN29R/wWaudPYeW6GWNqjPm9BjRx54QGYUUHvkpBAmO9iLArQeQ6EhQkhL9gyTs6rrt3fIFtWhe/EaX0Yk60phQONSUgqOTjnabJVSoiwAbpmSWdSiAl2zbtk/52eqN8Vi/d+ShuptCJErW1qVGm1jShgEUBa+f4383DEka+YuO2mfjpRSz2ndUGkgDrNLwCIO4By5Ksq3n5QPwM6mFaTGBIKWKPolHWGxZFDFanovyN2nfXrAlw54p43nhnafoJtjaKTAyhHDi39Ex/f9KKyeNGFvUfsy1aonw6wSrfOno4PuV1/GFfaFQJUwRkORw6DSBMVuo1nX89iJ118QVm8mHZM6Igj1iKVVJTFiykhYjlUzRFH9huphIiVURYvZhzzOeLIXvkf8C/oAqGVi1cAAAAASUVORK5CYII=
-description: Aella Star Light Integration
-configuration:
-- display: Server URL (e.g. https://starlight.companyname.com:8889)
- name: url
- defaultvalue: ""
- type: 0
- required: true
-- display: User name
- name: credentials
- defaultvalue: ""
- type: 9
- required: true
-- display: Fetch incidents
- name: isFetch
- type: 8
- required: false
-- display: Incident type
- name: incidentType
- type: 13
- required: false
-- display: Fetching interval in minutes (default is 15, minimum is 15 )
- name: fetch_interval
- defaultvalue: "15"
- type: 0
- required: false
-- display: The specific security event to look for. Default is all events
- name: event_name
- defaultvalue: ""
- type: 0
- required: false
-- display: Security event severity threshold, between 0-100
- name: severity
- defaultvalue: "50"
- type: 0
- required: false
-- display: Trust any certificate (not secure)
- name: insecure
- type: 8
- required: false
-- display: Use system proxy settings
- name: proxy
- defaultvalue: "false"
- type: 8
- required: false
-script:
- script: >-
- import requests
-
- import json
-
- import time
-
- import datetime
-
- import os
-
-
- import urllib3
-
- urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-
-
- from requests.packages.urllib3.exceptions import InsecureRequestWarning
-
- requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-
-
- if not demisto.params()['proxy']:
- del os.environ['HTTP_PROXY']
- del os.environ['HTTPS_PROXY']
- del os.environ['http_proxy']
- del os.environ['https_proxy']
-
- ''' GLOBAL VARS '''
-
- URL = demisto.getParam('url') + '/aellaelastic'
-
- USERNAME = demisto.getParam('credentials')['identifier']
-
- PASSWORD = demisto.getParam('credentials')['password']
-
- FETCH_INTERVAL = demisto.getParam('fetch_interval')
-
- VALIDATE_CERT = not demisto.params().get('insecure', True)
-
-
- ''' HELPER FUNCTIONS '''
-
- def make_rest_call(end_point, username, password, action_result,
- headers={}, params=None,
- data=None, method='get'):
- headers.update({'Accept': 'application/json'})
- headers.update({'Content-Type': 'application/json'})
-
- resp_json = None
- request_func = getattr(requests, method)
- if (not request_func):
- action_result['status']='Unsupported method {}'.format(method)
- return
- try:
- r = request_func(end_point, auth=(username, password),
- data=json.dumps(data) if data else None,
- headers=headers,
- verify=VALIDATE_CERT,params=params)
- except Exception as e:
- action_result['status']='Server REST API exception {}'.format(e)
- return
-
- if r is not None:
- action_result['r_text']=r.text
- action_result['headers']=r.headers
- action_result['r_status_code']=r.status_code
-
- try:
- resp_json = r.json()
- except Exception as e:
- action_result['status']='Json parse error {}'.format(
- r.text.replace('{', ' ').replace('}', ' '))
- return
-
- if (200 <= r.status_code <= 399):
- action_result['status'] = 'Success'
- else:
- action_result['status'] = 'Failed'
-
- action_result['data'] = resp_json
- return
-
- ''' FUNCTIONS '''
-
- def fetch_incidents_command():
- fetch_interval = demisto.getParam('fetch_interval')
- if fetch_interval is None:
- fetch_interval = 15 * 60 # 15 minutes
- else:
- try:
- fetch_interval = int(fetch_interval) * 60
- if fetch_interval < 15 * 60:
- # Min is 15 minutes
- fetch_interval = 15 * 60
- except:
- fetch_interval = 15 * 60
-
- cur_t = time.time()
-
- checkTime = cur_t - fetch_interval
-
- event = demisto.getParam('event_name')
- if event is None:
- event = '*'
-
- score = demisto.getParam('severity')
- if score:
- try:
- score = int(score)
- if score < 0 or score > 100:
- score = 50
- except:
- score = 50
- else:
- # Default score
- score = 50
-
- index_str = 'aella-ser*'
-
- query_str = 'event_name:{} AND severity:>{}'.format(event, score)
-
- ts_str = str(long(checkTime * 1000))
-
- query_json = { 'query':
- { 'bool':
- { 'must':
- [ { 'query_string':
- {
- 'query': query_str,
- 'analyze_wildcard': True
- }
- },
- { 'range':
- { 'timestamp':
- { 'gt': ts_str }
- }
- }
- ]
- }
- }
- }
- end_point = URL + '/{0}/{1}/_search'.format(index_str, 'amsg')
-
- action_result = {}
- make_rest_call(end_point,
- USERNAME, PASSWORD, action_result, data=query_json
- )
-
- if action_result['status'] == 'Success':
- demisto.info('Poll incidents ok')
- data = action_result.get('data')
- if not isinstance(data, dict):
- demisto.error('Data returned in wrong format {}'.format(data))
- demisto.incidents([])
- return
- hits = data.get('hits', {}).get('hits', [])
- incidents = []
-
- try:
- cached_event = demisto.getLastRun().get("cached_event", {})
- except:
- cached_event = {}
-
- new_cached_event = {}
-
- for hit in hits:
- source = hit.get('_source', None)
- if not source:
- continue
- event_name = source.get('event_name', None)
- try:
- event_severity = int(source.get('severity', None))
- if event_severity > 75:
- severity = 3
- elif event_severity > 50:
- severity = 2
- else:
- severity = 1
- except:
- severity = 0
-
- if not event_name:
- continue
- eid = hit['_id']
-
- new_cached_event[eid] = True
- if cached_event.get(eid, False):
- continue
-
- sdi = '{}_{}'.format(event_name, eid)
- incident = {
- 'name' : sdi,
- 'severity' : severity,
- 'rawJSON':json.dumps({ 'name':sdi,
- 'label':'Starlight event',
- 'aella_eid': eid,
- 'aella_event':event_name,
- 'event_severity': event_severity
- })
- }
- incidents.append(incident)
- demisto.info('Incidents is {}'.format(incidents))
- demisto.setLastRun({'cached_event': new_cached_event})
- demisto.incidents(incidents)
-
- else:
- demisto.info('Poll incidents failed {}'.format(action_result))
- demisto.incidents([])
-
- def aella_get_event_command():
- demisto.info('Aella started get-event with {}'.format(demisto.args()['event_id']))
- event_id = demisto.args()['event_id']
- query_json = { 'query': { 'match': { '_id': event_id } } }
- end_point = URL + '/{0}/{1}/_search'.format('aella-ser*', 'amsg')
-
- action_result = {}
- make_rest_call(end_point,
- USERNAME, PASSWORD, action_result, data=query_json
- )
- if action_result['status'] == 'Success':
- demisto.info('Run Query is successful')
- response = action_result.get('data')
- timed_out = response.get('timed_out', False)
- hits = response.get('hits', {}).get('hits', [])
- source = {}
- dbot_scores = []
- if len(hits) == 0:
- demisto.info('Get event got empty result')
- for item in hits:
- index = item.get('_index', '')
- source = item.get('_source', {})
- if index:
- source['_index'] = index
- source['timed_out'] = timed_out
- demisto.debug('This is my run_query result aellaEvent {}'.format(source))
-
- # Check url reputation
- url_str = source.get('url', '')
- if url_str:
- url_reputation = source.get('url_reputation', '')
- if url_reputation and url_reputation != 'Good':
- dbot_score = {
- 'Vendor' : 'Aella Data',
- 'Indicator' : url_str,
- 'Type' : 'url',
- 'Score' : 3,
- 'Malicious' : {
- 'Vendor' : 'Aella Data',
- 'Detections' : 'URL reputation {0}'.format(url_reputation),
- 'URL' : url_str
- }
- }
- else:
- dbot_score = {
- 'Vendor' : 'Aella Data',
- 'Indicator' : url_str,
- 'Type' : 'url',
- 'Malicious' : None
- }
- if url_reputation is None:
- # Unknonw
- dbot_score['Score'] = 0
- else:
- # Good
- dbot_score['Score'] = 1
- dbot_scores.append(dbot_score)
-
- # Check src ip reputation
- srcip_str = source.get('srcip', '')
- if srcip_str:
- srcip_reputation = source.get('srcip_reputation', '')
- if srcip_reputation and srcip_reputation != 'Good':
- dbot_score = {
- 'Vendor' : 'Aella Data',
- 'Indicator' : srcip_str,
- 'Type' : 'ip',
- 'Score' : 3,
- 'Malicious' : {
- 'Vendor' : 'Aella Data',
- 'Detections' : 'Source IP reputation {0}'.format(srcip_reputation),
- 'IP' : srcip_str
- }
- }
- else:
- dbot_score = {
- 'Vendor' : 'Aella Data',
- 'Indicator' : srcip_str,
- 'Type' : 'ip',
- 'Malicious' : None
- }
- if srcip_reputation is None:
- # Unknonw
- dbot_score['Score'] = 0
- else:
- # Good
- dbot_score['Score'] = 1
- dbot_scores.append(dbot_score)
-
- # Check dst ip reputation
- dstip_str = source.get('dstip', '')
- if dstip_str:
- dstip_reputation = source.get('dstip_reputation', '')
- if dstip_reputation and dstip_reputation != 'Good':
- dbot_score = {
- 'Vendor' : 'Aella Data',
- 'Indicator' : dstip_str,
- 'Type' : 'ip',
- 'Score' : 3,
- 'Malicious' : {
- 'Vendor' : 'Aella Data',
- 'Detections' : 'Destination IP reputation {0}'.format(dstip_reputation),
- 'IP' : dstip_str
- }
- }
- else:
- dbot_score = {
- 'Vendor' : 'Aella Data',
- 'Indicator' : dstip_str,
- 'Type' : 'ip',
- 'Malicious' : None
- }
- if dstip_reputation is None:
- # Unknonw
- dbot_score['Score'] = 0
- else:
- # Good
- dbot_score['Score'] = 1
- dbot_scores.append(dbot_score)
-
- break
- demisto.results({
- 'Type': entryTypes['note'],
- 'ContentsFormat': formats['json'],
- 'Contents': source,
- 'HumanReadable': tableToMarkdown('Aella Star Light Event <{0}>'.format(event_id), source),
- 'EntryContext': {
- 'Aella.Event(val._id==obj._id)': source,
- 'DBotScore' : createContext(dbot_scores, removeNull=True),
- }
- })
- else:
- demisto.info('Get event failed {}'.format(action_result))
- demisto.results(return_error('Failed to get event'))
-
- ''' EXECUTION CODE '''
-
- demisto.info('Command is {}'.format(demisto.command()))
-
-
- if demisto.command() == 'test-module':
- # This is the call made when pressing the integration test button.
- action_result = {}
- make_rest_call(URL + '/_cluster/health',
- USERNAME, PASSWORD, action_result
- )
-
- if action_result['status'] == 'Success':
- demisto.results('ok')
- else:
- demisto.results('failed')
-
- if demisto.command() == 'fetch-incidents':
- fetch_incidents_command()
-
- if demisto.command() == 'aella-get-event':
- aella_get_event_command()
-
- type: python
- subtype: python2
- commands:
- - name: aella-get-event
- arguments:
- - name: event_id
- required: true
- description: event id from the Star Light incident
- outputs:
- - contextPath: Aella.Event.event_name
- description: The event name
- type: string
- - contextPath: Aella.Event.severity
- description: The severity score
- type: string
- - contextPath: Aella.Event.dstip
- description: The Destination IP
- type: string
- - contextPath: Aella.Event.srcip
- description: The source IP
- type: string
- - contextPath: Aella.Event.tenantid
- description: The tenant ID
- type: string
- - contextPath: Aella.Event.srcip_reputation
- description: The source IP reputation
- type: string
- - contextPath: Aella.Event.dstip_reputation
- description: The destination IP reputation
- type: string
- - contextPath: Aella.Event.dstip_geo
- description: The destination IP geo location
- - contextPath: Aella.Event.srcip_geo
- description: The source IP geo location
- description: Query the details for a specific Start Light event
- isfetch: true
- runonce: false
- dockerimage: demisto/python:2.7.18.27799
-tests:
-- No test
-fromversion: 5.0.0
diff --git a/Packs/Aella_StarLight/ReleaseNotes/1_0_7.md b/Packs/Aella_StarLight/ReleaseNotes/1_0_7.md
new file mode 100644
index 000000000000..faf5033da2ac
--- /dev/null
+++ b/Packs/Aella_StarLight/ReleaseNotes/1_0_7.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### Aella Star Light
+- Updated the Docker image to: *demisto/python3:3.10.10.47713*.
diff --git a/Packs/Aella_StarLight/ReleaseNotes/1_0_8.md b/Packs/Aella_StarLight/ReleaseNotes/1_0_8.md
new file mode 100644
index 000000000000..3c22381de457
--- /dev/null
+++ b/Packs/Aella_StarLight/ReleaseNotes/1_0_8.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### Aella Star Light
+- Fixed an issue where the integration wasn't supported on machines running lower versions.
diff --git a/Packs/Aella_StarLight/ReleaseNotes/1_0_9.md b/Packs/Aella_StarLight/ReleaseNotes/1_0_9.md
new file mode 100644
index 000000000000..9782adac5a68
--- /dev/null
+++ b/Packs/Aella_StarLight/ReleaseNotes/1_0_9.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Aella Star Light
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/Aella_StarLight/pack_metadata.json b/Packs/Aella_StarLight/pack_metadata.json
index ff2d34c620ed..25b88ab04ae8 100644
--- a/Packs/Aella_StarLight/pack_metadata.json
+++ b/Packs/Aella_StarLight/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Aella Star Light",
"description": "Aella Star Light Integration",
"support": "community",
- "currentVersion": "1.0.6",
+ "currentVersion": "1.0.9",
"author": "Aella Star Light",
"url": "",
"email": "",
diff --git a/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml b/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml
index bb49fd376a6d..e9e531081e7b 100644
--- a/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml
+++ b/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml
@@ -443,7 +443,7 @@ script:
description: Remediate suspected message.
execution: false
name: apd-remediate-message
- dockerimage: demisto/python3:3.10.8.37233
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: true
longRunning: false
diff --git a/Packs/AgariPhishingDefense/Layouts/layoutscontainer-Agari_Phishing_Defense_Policy_Event.json b/Packs/AgariPhishingDefense/Layouts/layoutscontainer-Agari_Phishing_Defense_Policy_Event.json
index 519eca1bc297..b0a3def03334 100644
--- a/Packs/AgariPhishingDefense/Layouts/layoutscontainer-Agari_Phishing_Defense_Policy_Event.json
+++ b/Packs/AgariPhishingDefense/Layouts/layoutscontainer-Agari_Phishing_Defense_Policy_Event.json
@@ -715,5 +715,6 @@
"name": "Agari Phishing Defense Policy Event",
"system": false,
"version": -1,
- "fromVersion": "6.0.0"
+ "fromVersion": "6.0.0",
+ "marketplaces": ["xsoar"]
}
\ No newline at end of file
diff --git a/Packs/AgariPhishingDefense/ReleaseNotes/1_1_3.md b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_3.md
new file mode 100644
index 000000000000..2ca3932a40dd
--- /dev/null
+++ b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_3.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Agari Phishing Defense
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AgariPhishingDefense/ReleaseNotes/1_1_4.md b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_4.md
new file mode 100644
index 000000000000..1660b127e0e2
--- /dev/null
+++ b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_4.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Agari Phishing Defense
+- Updated the Docker image to: *demisto/python3:3.10.9.45313*.
diff --git a/Packs/AgariPhishingDefense/ReleaseNotes/1_1_5.md b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_5.md
new file mode 100644
index 000000000000..b482ee059d84
--- /dev/null
+++ b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_5.md
@@ -0,0 +1,4 @@
+
+#### Layouts
+##### Agari Phishing Defense Policy Event
+- This item is no longer supported in XSIAM.
diff --git a/Packs/AgariPhishingDefense/ReleaseNotes/1_1_6.md b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_6.md
new file mode 100644
index 000000000000..abaf6e63495a
--- /dev/null
+++ b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_6.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Agari Phishing Defense
+- Updated the Docker image to: *demisto/python3:3.10.10.47713*.
diff --git a/Packs/AgariPhishingDefense/ReleaseNotes/1_1_7.md b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_7.md
new file mode 100644
index 000000000000..1f3ad7495b99
--- /dev/null
+++ b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_7.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Agari Phishing Defense
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AgariPhishingDefense/pack_metadata.json b/Packs/AgariPhishingDefense/pack_metadata.json
index bd249581c039..570cf1ef09ea 100644
--- a/Packs/AgariPhishingDefense/pack_metadata.json
+++ b/Packs/AgariPhishingDefense/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Agari Phishing Defense",
"description": "Use the Agari Phishing Defense integration to retrieve Policy Events as Incidents, retrieve messages and remediate suspected messages.",
"support": "partner",
- "currentVersion": "1.1.2",
+ "currentVersion": "1.1.7",
"author": "Agari",
"url": "https://www.agari.com/support/",
"email": "support@agari.com",
diff --git a/Packs/Akamai_SIEM/.pack-ignore b/Packs/Akamai_SIEM/.pack-ignore
index bbcc8ca5e05d..7fbf2c4801fa 100644
--- a/Packs/Akamai_SIEM/.pack-ignore
+++ b/Packs/Akamai_SIEM/.pack-ignore
@@ -1,2 +1,6 @@
[file:Akamai_SIEM.yml]
ignore=IN126,BA108,BA109,IN145
+
+[known_words]
+Akamai
+WAF
diff --git a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.py b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.py
index 009b4efbec38..54f7f05b20c7 100644
--- a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.py
+++ b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.py
@@ -36,8 +36,8 @@
class Client(BaseClient):
- def get_events(self, config_ids: str, offset: Optional[str] = None, limit: Optional[Union[str, int]] = None,
- from_epoch: Optional[str] = None, to_epoch: Optional[str] = None) \
+ def get_events(self, config_ids: str, offset: Optional[str] = '', limit: Optional[Union[str, int]] = None,
+ from_epoch: Optional[str] = '', to_epoch: Optional[str] = '') \
-> Tuple[List[Any], Any]:
"""
Get security events from Akamai WAF service by - https://developer.akamai.com/api/cloud_security/siem/v1.html,
@@ -81,8 +81,10 @@ def get_events(self, config_ids: str, offset: Optional[str] = None, limit: Optio
events: List = []
if '{ "total": 0' not in raw_response:
events = [json.loads(event) for event in raw_response.split('\n')[:-2]]
- offset_new = json.loads(raw_response.split('\n')[-2]).get('offset')
- return events, offset_new
+ new_offset = str(max([int(event.get('httpMessage', {}).get('start')) for event in events]))
+ else:
+ new_offset = str(from_epoch)
+ return events, new_offset
'''HELPER FUNCIONS'''
@@ -269,15 +271,8 @@ def fetch_incidents_command(
"""
raw_response: Optional[List] = []
if not last_run:
- datetime_new_last_run, _ = parse_date_range(date_range=fetch_time,
- date_format='%s')
- raw_response, offset = client.get_events(config_ids=config_ids,
- from_epoch=datetime_new_last_run,
- limit=fetch_limit)
- else:
- raw_response, offset = client.get_events(config_ids=config_ids,
- offset=last_run,
- limit=fetch_limit)
+ last_run, _ = parse_date_range(date_range=fetch_time, date_format='%s')
+ raw_response, offset = client.get_events(config_ids=config_ids, from_epoch=last_run, limit=fetch_limit)
incidents = []
if raw_response:
diff --git a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.yml b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.yml
index 1099053d34fc..c1a2dd8aaa12 100644
--- a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.yml
+++ b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM.yml
@@ -171,7 +171,7 @@ script:
- contextPath: IP.Geo.Country
description: The country in which the IP address is located.
type: String
- dockerimage: demisto/akamai:1.0.0.40905
+ dockerimage: demisto/akamai:1.0.0.45817
isfetch: true
longRunning: false
longRunningPort: false
diff --git a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM_test.py b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM_test.py
index da395300d5aa..6682104e38de 100644
--- a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM_test.py
+++ b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/Akamai_SIEM_test.py
@@ -51,7 +51,7 @@ def test_fetch_incidents_command_1(self, client, datadir, requests_mock):
config_ids='50170',
last_run={})
expected_incidents = load_params_from_json(datadir['expected_fetch.json'], type='incidents')
- expected_last_run = {'lastRun': "318d8"}
+ expected_last_run = {'lastRun': "1576002507"}
assert expected_incidents == tested_incidents, "Incidents - No last time exsits and event available"
assert tested_last_run == expected_last_run, "Last run - No last time exsits and event available"
@@ -59,14 +59,14 @@ def test_fetch_incidents_command_1(self, client, datadir, requests_mock):
def test_fetch_incidents_command_2(self, client, datadir, requests_mock):
"""Test - Last time exsits and events available"""
from Akamai_SIEM import fetch_incidents_command
- requests_mock.get(f'{BASE_URL}/50170?offset=318d8&limit=5', text=SEC_EVENTS_TXT)
+ requests_mock.get(f'{BASE_URL}/50170?from=1575966002&limit=5', text=SEC_EVENTS_TXT)
tested_incidents, tested_last_run = fetch_incidents_command(client=client,
fetch_time='12 hours',
fetch_limit='5',
config_ids='50170',
- last_run='318d8')
+ last_run='1575966002')
expected_incidents = load_params_from_json(datadir['expected_fetch.json'], type='incidents')
- expected_last_run = {'lastRun': "318d8"}
+ expected_last_run = {'lastRun': "1576002507"}
assert expected_incidents == tested_incidents, "Incidents - Last time exsits and events available"
assert tested_last_run == expected_last_run, "Last run - No last time exsits and event available"
@@ -74,13 +74,13 @@ def test_fetch_incidents_command_2(self, client, datadir, requests_mock):
def test_fetch_incidents_command_3(self, client, datadir, requests_mock):
"""Test - Last time exsits and no available data"""
from Akamai_SIEM import fetch_incidents_command
- requests_mock.get(f'{BASE_URL}/50170?offset=318d8&limit=5', text=SEC_EVENTS_EMPTY_TXT)
+ requests_mock.get(f'{BASE_URL}/50170?from=1575966002&limit=5', text=SEC_EVENTS_EMPTY_TXT)
tested_incidents, tested_last_run = fetch_incidents_command(client=client,
fetch_time='12 hours',
fetch_limit=5,
config_ids='50170',
- last_run='318d8')
- expected_last_run = {'lastRun': "318d8"}
+ last_run='1575966002')
+ expected_last_run = {'lastRun': "1575966002"}
expected_incidents = []
assert expected_incidents == tested_incidents, "Incidents - Last time exsits and no available data"
assert tested_last_run == expected_last_run, "Last run - No last time exsits and event available"
@@ -95,7 +95,7 @@ def test_fetch_incidents_command_4(self, client, datadir, requests_mock):
fetch_limit=5,
config_ids='50170',
last_run={})
- expected_last_run = {'lastRun': "318d8"}
+ expected_last_run = {'lastRun': "1575966002"}
expected_incidents = []
assert expected_incidents == tested_incidents, "Incidents - No last time exsits and no available data"
assert tested_last_run == expected_last_run, "Last run - No last time exsits and no available data"
diff --git a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/pytest.ini b/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/pytest.ini
deleted file mode 100644
index 460f63e5f209..000000000000
--- a/Packs/Akamai_SIEM/Integrations/Akamai_SIEM/pytest.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[pytest]
-markers =
- helper: run all helper tests
- commands: run all commands tests
- fetch: run all fetch tests
- get_events: run all get events tests
diff --git a/Packs/Akamai_SIEM/ReleaseNotes/1_0_10.md b/Packs/Akamai_SIEM/ReleaseNotes/1_0_10.md
new file mode 100644
index 000000000000..4242d1dafa77
--- /dev/null
+++ b/Packs/Akamai_SIEM/ReleaseNotes/1_0_10.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Akamai WAF SIEM
+- Updated the Docker image to: *demisto/akamai:1.0.0.45817*.
diff --git a/Packs/Akamai_SIEM/ReleaseNotes/1_0_8.md b/Packs/Akamai_SIEM/ReleaseNotes/1_0_8.md
new file mode 100644
index 000000000000..4f30efa43595
--- /dev/null
+++ b/Packs/Akamai_SIEM/ReleaseNotes/1_0_8.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### Akamai WAF SIEM
+- Fixed an issue with the ***fetch incidents*** command when the *offset* parameter had expired.
+
diff --git a/Packs/Akamai_SIEM/ReleaseNotes/1_0_9.md b/Packs/Akamai_SIEM/ReleaseNotes/1_0_9.md
new file mode 100644
index 000000000000..7b85bff5da08
--- /dev/null
+++ b/Packs/Akamai_SIEM/ReleaseNotes/1_0_9.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Akamai WAF SIEM
+- Updated the Docker image to: *demisto/akamai:1.0.0.43032*.
diff --git a/Packs/Akamai_SIEM/pack_metadata.json b/Packs/Akamai_SIEM/pack_metadata.json
index dbe9217247ec..231b6fb6a8a6 100644
--- a/Packs/Akamai_SIEM/pack_metadata.json
+++ b/Packs/Akamai_SIEM/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Akamai WAF SIEM",
"description": "Use the Akamai WAF SIEM integration to retrieve security events from Akamai Web Application Firewall (WAF) service.",
"support": "xsoar",
- "currentVersion": "1.0.7",
+ "currentVersion": "1.0.10",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.py b/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.py
index 3f7114be6ade..de7bc9cdf832 100644
--- a/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.py
+++ b/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.py
@@ -163,6 +163,7 @@ def list_enrollments(self,
return self._http_request(method='GET',
url_suffix='/cps/v2/enrollments',
headers=headers,
+ timeout=(60, 180),
params=params
)
@@ -250,6 +251,29 @@ def acknowledge_warning(self, change_path: str) -> dict:
data=payload
)
+ def acknowledge_pre_verification_warning(self, change_path: str) -> dict:
+ """
+ Acknowledge the pre-verification warning message after initiating an enrollment change
+
+ Args:
+ change_path: The path that includes enrollmentId and changeId: e.g. /cps/v2/enrollments/enrollmentId/changes/changeId
+
+ Returns:
+ Json response as dictionary
+ """
+ headers = {
+ "Content-Type": "application/vnd.akamai.cps.acknowledgement.v1+json",
+ "Accept": "application/vnd.akamai.cps.change-id.v1+json"
+ }
+
+ payload = '{"acknowledgement": "acknowledge"}'
+ return self._http_request(
+ method='POST',
+ url_suffix=f"{change_path}/input/update/pre-verification-warnings-ack",
+ headers=headers,
+ data=payload
+ )
+
# Created by C.L. Oct-06-22
def get_production_deployment(self,
@@ -1443,19 +1467,21 @@ def list_security_policy(self,
# created by D.S.
def clone_appsec_config_version(self,
config_id: str,
- create_from_version: str) -> Dict:
+ create_from_version: str,
+ rule_update: bool = True) -> Dict:
"""
Create a new version of security configuration from a previous version
Args:
- config_id
- versionId
+ config_id: AppSec configuration ID
+ create_from_version: AppSec configuration version number to create from
+ rule_update: Specifies whether the application rules should be migrated to the latest version.
Returns:
"""
body = {
"createFromVersion": int(create_from_version),
- "ruleUpdate": True
+ "ruleUpdate": rule_update
}
return self._http_request(method='Post',
url_suffix=f'appsec/v1/configs/{config_id}/versions',
@@ -2714,7 +2740,18 @@ def acknowledge_warning_command(client: Client, change_path: str) -> Tuple[objec
else:
return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
-# Created by C.L. Oct-06-22
+
+@logger
+def acknowledge_pre_verification_warning_command(client: Client, change_path: str) -> Tuple[object, dict, Union[List, Dict]]:
+
+ raw_response: Dict = client.acknowledge_pre_verification_warning(change_path)
+
+ if raw_response:
+ human_readable = f'{INTEGRATION_NAME} - Acknowledge_warning'
+
+ return human_readable, {"Akamai.Acknowledge": raw_response}, raw_response
+ else:
+ return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
def get_production_deployment_command(client: Client, enrollment_id: str) -> Tuple[object, dict, Union[List, Dict]]:
@@ -4150,12 +4187,14 @@ def clone_appsec_config_version_command(client: Client,
config_id: str,
create_from_version: str,
do_not_clone: str,
+ rule_update: bool = True,
) -> Tuple[str, Dict[str, Any], Union[List, Dict]]:
"""
Appsec Configurtion - create a new version by clone the latest version
Args:
- config_id
- create_from_version
+ config_id: AppSec configuration ID
+ create_from_version: AppSec configuration version number to create from
+ rule_update: Specifies whether the application rules should be migrated to the latest version.
do_not_clone: Do not clone to create a new version, use in the test
Returns:
@@ -4169,6 +4208,7 @@ def clone_appsec_config_version_command(client: Client,
else:
raw_response = client.clone_appsec_config_version(config_id=config_id,
create_from_version=create_from_version,
+ rule_update=rule_update,
)
title = f'{INTEGRATION_NAME} - Appsec Configurtion - create a new version by clone the latest version'
@@ -4738,7 +4778,9 @@ def main():
f'{INTEGRATION_COMMAND_NAME}-update-appsec-config-version-notes': update_appsec_config_version_notes_command,
f'{INTEGRATION_COMMAND_NAME}-new-or-renew-match-target': new_or_renew_match_target_command,
f'{INTEGRATION_COMMAND_NAME}-patch-papi-property-rule-generic': patch_papi_property_rule_command,
- f'{INTEGRATION_COMMAND_NAME}-get-papi-property-rule': get_papi_property_rule_command
+ f'{INTEGRATION_COMMAND_NAME}-get-papi-property-rule': get_papi_property_rule_command,
+ f'{INTEGRATION_COMMAND_NAME}-acknowledge-pre-verification-warning': acknowledge_pre_verification_warning_command,
+
}
try:
readable_output, outputs, raw_response = commands[command](client=client, **demisto.args())
diff --git a/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.yml b/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.yml
index b57a009755cd..a369d3621b29 100644
--- a/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.yml
+++ b/Packs/Akamai_WAF/Integrations/Akamai_WAF/Akamai_WAF.yml
@@ -993,6 +993,14 @@ script:
- description: AppSec configuration version.
name: create_from_version
required: true
+ - description: Specifies whether the application rules should be migrated to the latest version.
+ name: rule_update
+ required: false
+ auto: PREDEFINED
+ defaultValue: 'True'
+ predefined:
+ - 'True'
+ - 'False'
- description: Do not clone to create a new version. Use in the test.
name: do_not_clone
required: true
@@ -1201,6 +1209,7 @@ script:
predefined:
- 'yes'
- 'no'
+ auto: PREDEFINED
description: Generic JSON patch command for Papi Property Default Rule
name: akamai-patch-papi-property-rule-generic
- arguments:
@@ -1224,7 +1233,13 @@ script:
outputs:
- contextPath: Akamai.PapiProperty.DefaultRule
description: Papi Property default rule
- dockerimage: demisto/akamai:1.0.0.39599
+ - arguments:
+ - description: The path that includes enrollmentId and changeId.
+ name: change_path
+ required: true
+  description: Acknowledge pre-verification warning.
+ name: akamai-acknowledge-pre-verification-warning
+ dockerimage: demisto/akamai:1.0.0.48112
runonce: false
script: ''
subtype: python3
diff --git a/Packs/Akamai_WAF/Integrations/Akamai_WAF/README.md b/Packs/Akamai_WAF/Integrations/Akamai_WAF/README.md
index e770326144b9..00868cabc059 100644
--- a/Packs/Akamai_WAF/Integrations/Akamai_WAF/README.md
+++ b/Packs/Akamai_WAF/Integrations/Akamai_WAF/README.md
@@ -1517,3 +1517,22 @@ get papi property rule json and dump into string
| **Path** | **Type** | **Description** |
| --- | --- | --- |
| Akamai.PapiProperty.DefaultRule | unknown | Papi Property default rule |
+
+### akamai-acknowledge-pre-verification-warning
+***
+Acknowledge pre-verification warning.
+
+
+#### Base Command
+
+`akamai-acknowledge-pre-verification-warning`
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| change_path | The path that includes enrollmentId and changeId. | Required |
+
+
+#### Context Output
+
+There is no context output for this command
\ No newline at end of file
diff --git a/Packs/Akamai_WAF/ReleaseNotes/2_0_3.md b/Packs/Akamai_WAF/ReleaseNotes/2_0_3.md
new file mode 100644
index 000000000000..4f54fbf2e9a2
--- /dev/null
+++ b/Packs/Akamai_WAF/ReleaseNotes/2_0_3.md
@@ -0,0 +1,19 @@
+
+#### Integrations
+##### Akamai WAF
+- Adding one new command and modifying two commands.
+- Updated the Docker image to: *demisto/akamai:1.0.0.48112*.
+
+Command added:
+acknowledge_pre_verification_warning
+This command acknowledges the pre-verification warning message after initiating an enrollment change. When there is a conflict between the existing certificate and the new certificate, a warning message appears, which needs to be acknowledged either manually or automatically by an API call. This command acknowledges that warning. After it is acknowledged, the status of the enrollment moves to the next phase.
+
+
+Command modified:
+list_enrollments:
+Changing the read timeout value from 60 to 180 to avoid the potential timeout issue.
+
+clone_appsec_config_version:
+Adding a new parameter "rule_update".
+This allows "rule_update" to be set to "False", so that the command execution takes less than 60 seconds by eliminating the rule schema update.
+For any Akamai deployment with "ASE" enabled for the security configuration, the rule schema update is conducted automatically anyway.
diff --git a/Packs/Akamai_WAF/pack_metadata.json b/Packs/Akamai_WAF/pack_metadata.json
index e8487d7e4141..732683c7dffa 100644
--- a/Packs/Akamai_WAF/pack_metadata.json
+++ b/Packs/Akamai_WAF/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Akamai WAF",
"description": "Use the Akamai WAF integration to manage common sets of lists used by various Akamai security products and features.",
"support": "xsoar",
- "currentVersion": "2.0.2",
+ "currentVersion": "2.0.3",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Alexa/Integrations/Alexa/Alexa.yml b/Packs/Alexa/Integrations/Alexa/Alexa.yml
index fbf5c4f45bda..cebead4f1bb2 100644
--- a/Packs/Alexa/Integrations/Alexa/Alexa.yml
+++ b/Packs/Alexa/Integrations/Alexa/Alexa.yml
@@ -36,8 +36,9 @@ configuration:
name: insecure
required: false
type: 8
-description: Alexa provides website ranking information that can be useful in determining if the domain in question has a strong web presence.
-display: Alexa Rank Indicator
+description: Deprecated. Vendor has declared end of life for this product. No available replacement.
+deprecated: true
+display: Alexa Rank Indicator (Deprecated)
name: Alexa Rank Indicator
script:
commands:
diff --git a/Packs/Alexa/Integrations/AlexaV2/AlexaV2.yml b/Packs/Alexa/Integrations/AlexaV2/AlexaV2.yml
index 44613ea7ed28..51f411a89b17 100644
--- a/Packs/Alexa/Integrations/AlexaV2/AlexaV2.yml
+++ b/Packs/Alexa/Integrations/AlexaV2/AlexaV2.yml
@@ -46,9 +46,9 @@ configuration:
- F - Reliability cannot be judged
required: true
type: 15
-description: Alexa provides website ranking information that can be useful when determining
- if a domain has a strong web presence.
-display: Alexa Rank Indicator v2
+description: Deprecated. Vendor has declared end of life for this product. No available replacement.
+deprecated: true
+display: Alexa Rank Indicator v2 (Deprecated)
name: Alexa Rank Indicator v2
script:
commands:
@@ -88,7 +88,7 @@ script:
- contextPath: Alexa.Domain.Rank
description: Alexa rank as determined by Amazon.
type: String
- dockerimage: demisto/python3:3.10.8.39276
+ dockerimage: demisto/python3:3.10.9.42476
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/Alexa/ReleaseNotes/2_0_21.md b/Packs/Alexa/ReleaseNotes/2_0_21.md
new file mode 100644
index 000000000000..d0ebfaf9b5c2
--- /dev/null
+++ b/Packs/Alexa/ReleaseNotes/2_0_21.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Alexa Rank Indicator v2
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/Alexa/ReleaseNotes/2_0_22.md b/Packs/Alexa/ReleaseNotes/2_0_22.md
new file mode 100644
index 000000000000..c44f25a75449
--- /dev/null
+++ b/Packs/Alexa/ReleaseNotes/2_0_22.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Alexa Rank Indicator v2
+- Updated the Docker image to: *demisto/python3:3.10.9.42476*.
diff --git a/Packs/Alexa/ReleaseNotes/2_0_23.md b/Packs/Alexa/ReleaseNotes/2_0_23.md
new file mode 100644
index 000000000000..02ce94bbc5a9
--- /dev/null
+++ b/Packs/Alexa/ReleaseNotes/2_0_23.md
@@ -0,0 +1,6 @@
+#### Integrations
+##### Alexa Rank Indicator (Deprecated)
+- Deprecated. Vendor has declared end of life for this product. No available replacement.
+
+##### Alexa Rank Indicator v2 (Deprecated)
+- Deprecated. Vendor has declared end of life for this product. No available replacement.
diff --git a/Packs/Alexa/pack_metadata.json b/Packs/Alexa/pack_metadata.json
index 80c27137795f..a9adc1ccfbc0 100644
--- a/Packs/Alexa/pack_metadata.json
+++ b/Packs/Alexa/pack_metadata.json
@@ -1,8 +1,9 @@
{
- "name": "Alexa Rank Indicator",
- "description": "Alexa provides website ranking information that can be useful in determining if the domain in question has a strong web presence.",
+ "name": "Alexa Rank Indicator (Deprecated)",
+ "description": "Deprecated. Vendor has declared end of life for this product. No available replacement.",
"support": "xsoar",
- "currentVersion": "2.0.20",
+ "hidden": true,
+ "currentVersion": "2.0.23",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -17,4 +18,4 @@
"xsoar",
"marketplacev2"
]
-}
\ No newline at end of file
+}
diff --git a/Packs/Algosec/ReleaseNotes/1_0_9.md b/Packs/Algosec/ReleaseNotes/1_0_9.md
new file mode 100644
index 000000000000..5763dee034a9
--- /dev/null
+++ b/Packs/Algosec/ReleaseNotes/1_0_9.md
@@ -0,0 +1,8 @@
+
+#### Scripts
+##### AlgosecCreateTicket
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
+##### AlgosecGetApplications
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
+##### AlgosecGetNetworkObject
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
\ No newline at end of file
diff --git a/Packs/Algosec/Scripts/AlgosecCreateTicket/AlgosecCreateTicket.yml b/Packs/Algosec/Scripts/AlgosecCreateTicket/AlgosecCreateTicket.yml
index 1f979aecadc7..47ef81fa8292 100644
--- a/Packs/Algosec/Scripts/AlgosecCreateTicket/AlgosecCreateTicket.yml
+++ b/Packs/Algosec/Scripts/AlgosecCreateTicket/AlgosecCreateTicket.yml
@@ -48,6 +48,6 @@ dependson:
must:
- algosec-create-ticket
fromversion: 5.0.0
-dockerimage: demisto/python3:3.10.6.33415
+dockerimage: demisto/python3:3.10.10.48392
tests:
- No tests (auto formatted)
diff --git a/Packs/Algosec/Scripts/AlgosecGetApplications/AlgosecGetApplications.yml b/Packs/Algosec/Scripts/AlgosecGetApplications/AlgosecGetApplications.yml
index 79430fbbde2b..6d24bdc38bab 100644
--- a/Packs/Algosec/Scripts/AlgosecGetApplications/AlgosecGetApplications.yml
+++ b/Packs/Algosec/Scripts/AlgosecGetApplications/AlgosecGetApplications.yml
@@ -28,6 +28,6 @@ dependson:
must:
- algosec-get-applications
fromversion: 5.0.0
-dockerimage: demisto/python3:3.10.6.33415
+dockerimage: demisto/python3:3.10.10.48392
tests:
- No tests (auto formatted)
diff --git a/Packs/Algosec/Scripts/AlgosecGetNetworkObject/AlgosecGetNetworkObject.yml b/Packs/Algosec/Scripts/AlgosecGetNetworkObject/AlgosecGetNetworkObject.yml
index 4e24e6d56dbf..0831de39df47 100644
--- a/Packs/Algosec/Scripts/AlgosecGetNetworkObject/AlgosecGetNetworkObject.yml
+++ b/Packs/Algosec/Scripts/AlgosecGetNetworkObject/AlgosecGetNetworkObject.yml
@@ -28,6 +28,6 @@ dependson:
must:
- algosec-get-network-object
fromversion: 5.0.0
-dockerimage: demisto/python3:3.10.6.33415
+dockerimage: demisto/python3:3.10.10.48392
tests:
- No tests (auto formatted)
diff --git a/Packs/Algosec/pack_metadata.json b/Packs/Algosec/pack_metadata.json
index 96cd4a0968ea..9d1ab928f82a 100644
--- a/Packs/Algosec/pack_metadata.json
+++ b/Packs/Algosec/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AlgoSec",
"description": "Algosec BusinessFlow(ABF), Firewall Analyzer (AFA) and FireFlow(AFF).",
"support": "xsoar",
- "currentVersion": "1.0.8",
+ "currentVersion": "1.0.9",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AlibabaActionTrail/CorrelationRules/AlibabaActionTrail_Correlation.yml b/Packs/AlibabaActionTrail/CorrelationRules/AlibabaActionTrail_Correlation.yml
new file mode 100644
index 000000000000..95dd87f4ec73
--- /dev/null
+++ b/Packs/AlibabaActionTrail/CorrelationRules/AlibabaActionTrail_Correlation.yml
@@ -0,0 +1,32 @@
+alert_category: EXECUTION
+alert_description: This alert will trigger in an event where multiple attempts of unauthorized actions were detected
+alert_fields:
+ actor_image:
+ actor_path: xdm.source.user.user_type
+ cmd:
+ domain:
+ hash:
+ hostname:
+ local_ip:
+ remote_ip: xdm.source.ipv4
+ remote_port:
+ username: xdm.source.user.username
+alert_name: Alibaba ActionTrail - multiple unauthorized action attempts detected by a user
+crontab: '*/10 * * * *'
+dataset: alerts
+description: This alert will trigger in an event where multiple attempts of unauthorized actions were detected in the Alibaba ActionTrail account
+drilldown_query_timeframe: ALERT
+execution_mode: SCHEDULED
+global_rule_id: bb268634-30a7-4989-90b6-cb833b5591cf
+investigation_query_link:
+mapping_strategy: CUSTOM
+mitre_defs: {}
+name: Alibaba ActionTrail - multiple unauthorized action attempts detected by a user
+search_window: 10 minutes
+severity: SEV_030_MEDIUM
+suppression_duration:
+suppression_enabled: false
+suppression_fields:
+user_defined_category:
+user_defined_severity:
+xql_query: "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.event.type=\"ApiCall\"\r\n|filter xdm.event.outcome in (\"This API is not authorized for caller.\",\"no permission\",\"The user has no permission\",\"caller has no permission\")\r\n|comp count() as TotalCount by xdm.source.user.username, xdm.source.ipv4, xdm.source.user.user_type\r\n|filter TotalCount > 5"
diff --git a/Packs/AlibabaActionTrail/CorrelationRules/Alibaba_Correlation.yml b/Packs/AlibabaActionTrail/CorrelationRules/Alibaba_Correlation.yml
deleted file mode 100644
index 067f961cae4b..000000000000
--- a/Packs/AlibabaActionTrail/CorrelationRules/Alibaba_Correlation.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-
- alert_category: EXECUTION
- alert_description: This alert will trigger in an event where multiple attempts of
- unauthorized actions were detected
- alert_fields:
- actor_image: null
- actor_path: xdm.source.user.user_type
- cmd: null
- domain: null
- hash: null
- hostname: null
- local_ip: null
- remote_ip: xdm.source.ipv4
- remote_port: null
- username: xdm.source.user.username
- alert_name: Alibaba ActionTrail - multiple unauthorized action attempts detected
- by a user
- crontab: '*/10 * * * *'
- dataset: alerts
- description: This alert will trigger in an event where multiple attempts of unauthorized
- actions were detected in the Alibaba ActionTrail account
- drilldown_query_timeframe: ALERT
- execution_mode: SCHEDULED
- global_rule_id: bb268634-30a7-4989-90b6-cb833b5591cf
- investigation_query_link: null
- mapping_strategy: CUSTOM
- mitre_defs: {}
- name: Alibaba ActionTrail - multiple unauthorized action attempts detected by a
- user
- search_window: 10 minutes
- severity: SEV_030_MEDIUM
- suppression_duration: null
- suppression_enabled: false
- suppression_fields: null
- user_defined_category: null
- fromversion: 6.10.0
- user_defined_severity: null
- xql_query: "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"\
- action-trail\"\r\n|filter xdm.event.type=\"ApiCall\"\r\n|filter xdm.event.outcome\
- \ in (\"This API is not authorized for caller.\",\"no permission\",\"The user\
- \ has no permission\",\"caller has no permission\")\r\n|comp count() as TotalCount\
- \ by xdm.source.user.username, xdm.source.ipv4, xdm.source.user.user_type\r\n\
- |filter TotalCount > 5"
diff --git a/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3.xif b/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3.xif
index 1e3d64ee557e..400fae265b61 100644
--- a/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3.xif
+++ b/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3.xif
@@ -9,10 +9,10 @@ filter
xdm.target.resource.name = event_resourcename,
xdm.target.resource.type = event_resourcetype,
xdm.source.ipv4 = event_sourceipaddress,
- xdm.source.user.user_type = event_useridentity_type,
+ xdm.source.user.user_type = if(event_useridentity_type in("root-account","cloudsso-user","saml-user","alibaba-cloud-account"),XDM_CONST.USER_TYPE_REGULAR, event_useridentity_type in("ram-user","assumed-role"),XDM_CONST.USER_TYPE_SERVICE_ACCOUNT ,to_string(event_useridentity_type)),
xdm.source.user.identifier = event_useridentity_principalid,
xdm.source.user.username = event_useridentity_username,
- xdm.event.outcome=event_errormessage,
+ xdm.event.description=event_errormessage,
xdm.observer.vendor=_vendor,
xdm.observer.product=_product;
@@ -23,10 +23,10 @@ filter
xdm.event.id = event_eventid,
xdm.event.operation = event_eventname,
xdm.source.ipv4 = event_sourceipaddress,
- xdm.source.user.user_type = event_useridentity_type,
+ xdm.source.user.user_type = if(event_useridentity_type in("root-account","cloudsso-user","saml-user","alibaba-cloud-account"),XDM_CONST.USER_TYPE_REGULAR, event_useridentity_type in("ram-user","assumed-role"),XDM_CONST.USER_TYPE_SERVICE_ACCOUNT ,to_string(event_useridentity_type)),
xdm.source.user.identifier = event_useridentity_principalid,
xdm.source.user.username = event_useridentity_username,
xdm.session_context_id=event_useridentity_accesskeyid,
- xdm.event.outcome=event_errormessage,
+ xdm.event.description=event_errormessage,
xdm.observer.vendor=_vendor,
xdm.observer.product=_product;
\ No newline at end of file
diff --git a/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_schema.json b/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_schema.json
index 52fd0e833c83..500d11389aac 100644
--- a/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_schema.json
+++ b/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_schema.json
@@ -39,10 +39,15 @@
"event_useridentity_principalid": {
"type": "string",
"is_array": false
+ },
+ "event_errormessage": {
+ "type": "string",
+ "is_array": false
},
"event_useridentity_username": {
"type": "string",
"is_array": false
}
+
}
}
diff --git a/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_testdata.json b/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_testdata.json
new file mode 100644
index 000000000000..0f835137e776
--- /dev/null
+++ b/Packs/AlibabaActionTrail/ModelingRules/AlibabaModelingRules_1_3/AlibabaModelingRules_1_3_testdata.json
@@ -0,0 +1,58 @@
+{
+ "data": [
+ {
+ "test_data_event_id": "60ace5ee-448f-46dc-95e5-982a502bbe43",
+ "vendor": "alibaba",
+ "product": "action_trail",
+ "dataset": "alibaba_action_trail_raw",
+ "event_data": {
+ "event": "null",
+ "_raw_log": "",
+ "event_eventid": "abc123",
+ "event_eventrw": "Read",
+ "event_acsregion": "TLV",
+ "event_errorcode": "null",
+ "event_eventname": "DescribeDBClusters",
+ "event_eventtype": "ApiCall",
+ "event_requestid": "abc123",
+ "event_useragent": "audit.log.aliyuncs.com",
+ "event_apiversion": "2017-08-01",
+ "event_eventsource": "polardb.aliyuncs.com",
+ "event_servicename": "polardb",
+ "event_errormessage": "null",
+ "event_eventversion": "1",
+ "event_resourcename": "null",
+ "event_resourcetype": "null",
+ "event_sourceipaddress": "audit.log.aliyuncs.com",
+ "event_useridentity_type": "testuser:Common_Data_Access",
+ "event_requestparameterjson": "{\"stsTokenPrincipalName\":\"aliyunserviceroleforslsaudit/Common_Data_Access\",\"AcsProduct\":\"polardb\",\"PageSize\":30,\"PageNumber\":1,\"UserAgent\":\"AlibabaCloud (linux\",\"ClientPort\":2962,\"SignatureType\":\"\",\"RegionId\":\"TLV\",\"stsTokenPlayerUid\":abc123}",
+ "event_useridentity_username": "aliyunserviceroleforslsaudit:Common_Data_Access",
+ "event_requestparameters_name": "null",
+ "event_useridentity_accountid": "abc123",
+ "event_requestparameters_hostid": "null",
+ "event_requestparameters_region": "null",
+ "event_useridentity_accesskeyid": "abc123.abc123",
+ "event_useridentity_principalid": "7067606:Common_Data_Access",
+ "event_insightdetails_insighttype": "null",
+ "event_additionaleventdata_mfachecked": "null",
+ "event_insightdetails_sourceipaddress": "null",
+ "event_insightdetails_insightcontext_statistics_insightcount": "null"
+ },
+ "expected_values": {
+ "xdm.event.type": "ApiCall",
+ "xdm.observer.product": "action_trail",
+ "xdm.observer.vendor": "alibaba",
+ "xdm.event.operation": "DescribeDBClusters",
+ "xdm.target.resource.name": null,
+ "xdm.target.resource.type": null,
+ "xdm.event.description": null,
+ "xdm.target.cloud.region": "TLV",
+ "xdm.source.user.username": "aliyunserviceroleforslsaudit:Common_Data_Access",
+ "xdm.source.user.user_type": "testuser:Common_Data_Access",
+ "xdm.source.user.identifier": "7067606:Common_Data_Access",
+ "xdm.source.ipv4": "audit.log.aliyuncs.com",
+ "xdm.event.id": "abc123"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/Packs/AlibabaActionTrail/ReleaseNotes/1_0_11.md b/Packs/AlibabaActionTrail/ReleaseNotes/1_0_11.md
new file mode 100644
index 000000000000..97ee14b2020d
--- /dev/null
+++ b/Packs/AlibabaActionTrail/ReleaseNotes/1_0_11.md
@@ -0,0 +1,4 @@
+
+#### Modeling Rules
+##### Alibaba Modeling Rule
+- Fixed XDM mapping
diff --git a/Packs/AlibabaActionTrail/ReleaseNotes/1_0_12.md b/Packs/AlibabaActionTrail/ReleaseNotes/1_0_12.md
new file mode 100644
index 000000000000..d06b62681304
--- /dev/null
+++ b/Packs/AlibabaActionTrail/ReleaseNotes/1_0_12.md
@@ -0,0 +1,4 @@
+
+#### Modeling Rules
+##### Alibaba Modeling Rule
+- Updated Modeling Rules
diff --git a/Packs/AlibabaActionTrail/ReleaseNotes/1_0_13.md b/Packs/AlibabaActionTrail/ReleaseNotes/1_0_13.md
new file mode 100644
index 000000000000..1169de6c31f5
--- /dev/null
+++ b/Packs/AlibabaActionTrail/ReleaseNotes/1_0_13.md
@@ -0,0 +1,4 @@
+
+#### Modeling Rules
+##### Alibaba Modeling Rule
+- Update Modeling Rules.
diff --git a/Packs/AlibabaActionTrail/Triggers/Trigger_-Alibaba_ActionTrail_-_Multiple_Unauthorized_Action_Attempts_Detected_By_a_User.json b/Packs/AlibabaActionTrail/Triggers/Trigger_-Alibaba_ActionTrail_-_Multiple_Unauthorized_Action_Attempts_Detected_By_a_User.json
index 7fc8988254a3..49e825f1cb82 100644
--- a/Packs/AlibabaActionTrail/Triggers/Trigger_-Alibaba_ActionTrail_-_Multiple_Unauthorized_Action_Attempts_Detected_By_a_User.json
+++ b/Packs/AlibabaActionTrail/Triggers/Trigger_-Alibaba_ActionTrail_-_Multiple_Unauthorized_Action_Attempts_Detected_By_a_User.json
@@ -1,6 +1,6 @@
{
- "trigger_id": "73545719a1bdeba6ba91f6a16044c021",
- "playbook_id": "Alibaba ActionTrail - multiple unauthorized action_attempts detected by a user.yml",
+ "trigger_id": "abfa084a92abfb031de08b57b3f78ec8",
+ "playbook_id": "Alibaba ActionTrail - multiple unauthorized action_attempts detected by a user",
"suggestion_reason": "Recommended for multiple unauthorized action_attempts detected by a user related alerts",
"description": "This trigger is responsible for handling 'Alibaba ActionTrail - multiple unauthorized action_attempts detected by a user' alerts",
"trigger_name": "Alibaba ActionTrail - Multiple Unauthorized Action Attempts Detected By a User Alerts",
@@ -10,7 +10,7 @@
{
"SEARCH_FIELD": "alert_name",
"SEARCH_TYPE": "EQ",
- "SEARCH_VALUE": "Alibaba ActionTrail - multiple unauthorized action attempts detected by a user.yml"
+ "SEARCH_VALUE": "Alibaba ActionTrail - multiple unauthorized action attempts detected by a user"
}
]
}
diff --git a/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaActionTrail_Dashboard.json b/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaActionTrail_Dashboard.json
new file mode 100644
index 000000000000..1528a286b786
--- /dev/null
+++ b/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaActionTrail_Dashboard.json
@@ -0,0 +1,550 @@
+{
+ "dashboards_data": [
+ {
+ "name": "Alibaba Overview Dashboard",
+ "description": "This dashboard displays key information from the Alibaba account such as Top IP locations, TOP users with unauthorized events, etc.",
+ "status": "ENABLED",
+ "layout": [
+ {
+ "id": "row-1768",
+ "data": [
+ {
+ "key": "xql_1668676732415",
+ "data": {
+ "type": "Custom XQL",
+ "width": 50,
+ "height": 434,
+ "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.target.cloud.region!=null\r\n|alter countrycode=arrayindex(regextract(xdm.target.cloud.region,\"(\\w+)-\"),0)\r\n|comp count(countrycode) as Count by countrycode\r\n| view graph type = map header = \"Alibaba Cloud Regions\" xaxis = countrycode yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "map",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Alibaba Cloud Regions\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "countrycode"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "headerfontsize",
+ "value": "15"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legendfontsize",
+ "value": "10"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legend",
+ "value": "`false`"
+ }
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "xql_1668676934356",
+ "data": {
+ "type": "Custom XQL",
+ "width": 50,
+ "height": 434,
+ "params": [],
+ "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.source.ipv4!=null\r\n|comp count(xdm.source.ipv4) as Count by xdm.source.ipv4 \r\n|iploc xdm.source.ipv4 loc_country as country\r\n| view graph type = map header = \"IP Locations\" xaxis = country yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "map",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"IP Locations\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "country"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "headerfontsize",
+ "value": "15"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legendfontsize",
+ "value": "10"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legend",
+ "value": "`false`"
+ }
+ }
+ ]
+ }
+ }
+ }
+ ]
+ },
+ {
+ "id": "row-7469",
+ "data": [
+ {
+ "key": "xql_1668676995963",
+ "data": {
+ "type": "Custom XQL",
+ "width": 33.333333333333336,
+ "height": 434,
+ "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.event.type=\"ApiCall\"\r\n|filter xdm.event.outcome in (\"This API is not authorized for caller.\",\"no permission\",\"The user has no permission\",\"caller has no permission\")\r\n|alter username=arrayindex(regextract(xdm.source.user.username,\"\\:(\\w+)\\@\"),0)\r\n|comp count() as TotalCount by username\r\n| view graph type = pie header = \"Unauthorized events by Username\" xaxis = username yaxis = TotalCount ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "pie",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Unauthorized events by Username\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "username"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "TotalCount"
+ }
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "xql_1668676812432",
+ "data": {
+ "type": "Custom XQL",
+ "width": 33.333333333333336,
+ "height": 434,
+ "phrase": "dataset = incidents \r\n|filter description contains \"Alibaba\"\r\n|filter status contains \"New\" \r\n|comp count (severity) as Count by severity\r\n| view graph type = pie header = \"Open Incidents by Severity (Last 30 days)\" xaxis = severity yaxis = Count ",
+ "time_frame": {
+ "relativeTime": 604800000
+ },
+ "viewOptions": {
+ "type": "pie",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Open Incidents by Severity (Last 30 days)\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "severity"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "xql_1668677213404",
+ "data": {
+ "type": "Custom XQL",
+ "width": 33.333333333333336,
+ "height": 434,
+ "phrase": "datamodel \n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\n|comp count() as Count by xdm.event.type\n| view graph type = column subtype = grouped header = \"Count of Event Types\" xaxis = xdm.event.type yaxis = Count seriescolor(\"Count\",\"#7cecc6\") ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "column",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "subtype",
+ "value": "grouped"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Count of Event Types\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "xdm.event.type"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ },
+ {
+ "func": {
+ "args": [
+ "\"Count\"",
+ "\"#7cecc6\""
+ ],
+ "name": "seriescolor"
+ }
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "default_dashboard_id": 1,
+ "global_id": "f9c52470483a41e4a6afa65c93f70a4b"
+ }
+ ],
+ "widgets_data": [
+ {
+ "widget_key": "xql_1668676732415",
+ "title": "Alibaba Cloud Regions",
+ "creation_time": 1668676732415,
+      "description": "This widget displays the cloud regions associated with the account and their usage volume",
+ "data": {
+ "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.target.cloud.region!=null\r\n|alter countrycode=arrayindex(regextract(xdm.target.cloud.region,\"(\\w+)-\"),0)\r\n|comp count(countrycode) as Count by countrycode\r\n| view graph type = map header = \"Alibaba Cloud Regions\" xaxis = countrycode yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "map",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Alibaba Cloud Regions\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "countrycode"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "headerfontsize",
+ "value": "15"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legendfontsize",
+ "value": "10"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legend",
+ "value": "`false`"
+ }
+ }
+ ]
+ }
+ },
+ "support_time_range": true,
+ "additional_info": {
+ "query_tables": [],
+ "query_uses_library": false
+ }
+ },
+ {
+ "widget_key": "xql_1668676812432",
+ "title": "Alibaba Open Incidents",
+ "creation_time": 1668676812432,
+ "description": "This widget displays the open incidents related to Alibaba in XSIAM",
+ "data": {
+ "phrase": "dataset = incidents \r\n|filter description contains \"Alibaba\"\r\n|filter status contains \"New\" \r\n|comp count (severity) as Count by severity\r\n| view graph type = pie header = \"Open Incidents by Severity (Last 30 days)\" xaxis = severity yaxis = Count ",
+ "time_frame": {
+ "relativeTime": 604800000
+ },
+ "viewOptions": {
+ "type": "pie",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Open Incidents by Severity (Last 30 days)\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "severity"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ }
+ ]
+ }
+ },
+ "support_time_range": true,
+ "additional_info": {
+ "query_tables": [
+ "incidents"
+ ],
+ "query_uses_library": false
+ }
+ },
+ {
+ "widget_key": "xql_1668676934356",
+ "title": "Alibaba Top IP Sources",
+ "creation_time": 1668676934356,
+ "description": "This widget displays the top IP sources acceesing the Alibaba account",
+ "data": {
+ "params": [],
+ "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.source.ipv4!=null\r\n|comp count(xdm.source.ipv4) as Count by xdm.source.ipv4 \r\n|iploc xdm.source.ipv4 loc_country as country\r\n| view graph type = map header = \"IP Locations\" xaxis = country yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "map",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"IP Locations\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "country"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "headerfontsize",
+ "value": "15"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legendfontsize",
+ "value": "10"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "legend",
+ "value": "`false`"
+ }
+ }
+ ]
+ }
+ },
+ "support_time_range": true,
+ "additional_info": {
+ "query_tables": [],
+ "query_uses_library": false
+ }
+ },
+ {
+ "widget_key": "xql_1668676995963",
+ "title": "Alibaba Top unauthorized events by Username",
+ "creation_time": 1668676995963,
+ "description": "This widget displays count of unauthorized events made a user within the account",
+ "data": {
+ "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.event.type=\"ApiCall\"\r\n|filter xdm.event.outcome in (\"This API is not authorized for caller.\",\"no permission\",\"The user has no permission\",\"caller has no permission\")\r\n|alter username=arrayindex(regextract(xdm.source.user.username,\"\\:(\\w+)\\@\"),0)\r\n|comp count() as TotalCount by username\r\n| view graph type = pie header = \"Unauthorized events by Username\" xaxis = username yaxis = TotalCount ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "pie",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Unauthorized events by Username\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "username"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "TotalCount"
+ }
+ }
+ ]
+ }
+ },
+ "support_time_range": true,
+ "additional_info": {
+ "query_tables": [],
+ "query_uses_library": false
+ }
+ },
+ {
+ "widget_key": "xql_1668677213404",
+ "title": "Alibaba TOP Event Types in Account",
+ "creation_time": 1668677213404,
+ "description": "This widget disaplys the top event types occuring in the Alibaba account",
+ "data": {
+ "phrase": "datamodel \n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\n|comp count() as Count by xdm.event.type\n| view graph type = column subtype = grouped header = \"Count of Event Types\" xaxis = xdm.event.type yaxis = Count seriescolor(\"Count\",\"#7cecc6\") ",
+ "time_frame": {
+ "relativeTime": 2592000000
+ },
+ "viewOptions": {
+ "type": "column",
+ "commands": [
+ {
+ "command": {
+ "op": "=",
+ "name": "subtype",
+ "value": "grouped"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "header",
+ "value": "\"Count of Event Types\""
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "xaxis",
+ "value": "xdm.event.type"
+ }
+ },
+ {
+ "command": {
+ "op": "=",
+ "name": "yaxis",
+ "value": "Count"
+ }
+ },
+ {
+ "func": {
+ "args": [
+ "\"Count\"",
+ "\"#7cecc6\""
+ ],
+ "name": "seriescolor"
+ }
+ }
+ ]
+ }
+ },
+ "support_time_range": true,
+ "additional_info": {
+ "query_tables": [],
+ "query_uses_library": false
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaDashboard_image.png b/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaActionTrail_Dashboard_image.png
similarity index 100%
rename from Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaDashboard_image.png
rename to Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaActionTrail_Dashboard_image.png
diff --git a/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaDashboard.json b/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaDashboard.json
deleted file mode 100644
index a828f95c7ac2..000000000000
--- a/Packs/AlibabaActionTrail/XSIAMDashboards/AlibabaDashboard.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "fromVersion": "6.10.0", "dashboards_data": [{"name": "Alibaba Overview Dashboard", "description": "This dashboard displays key information from the Alibaba account such as Top IP locations, TOP users with unauthorized events, etc.", "status": "ENABLED", "layout": [{"id": "row-1768", "data": [{"key": "xql_1668676732415", "data": {"type": "Custom XQL", "width": 50, "height": 434, "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.target.cloud.region!=null\r\n|alter countrycode=arrayindex(regextract(xdm.target.cloud.region,\"(\\w+)-\"),0)\r\n|comp count(countrycode) as Count by countrycode\r\n| view graph type = map header = \"Alibaba Cloud Regions\" xaxis = countrycode yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "map", "commands": [{"command": {"op": "=", "name": "header", "value": "\"Alibaba Cloud Regions\""}}, {"command": {"op": "=", "name": "xaxis", "value": "countrycode"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}, {"command": {"op": "=", "name": "headerfontsize", "value": "15"}}, {"command": {"op": "=", "name": "legendfontsize", "value": "10"}}, {"command": {"op": "=", "name": "legend", "value": "`false`"}}]}}}, {"key": "xql_1668676934356", "data": {"type": "Custom XQL", "width": 50, "height": 434, "params": [], "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.source.ipv4!=null\r\n|comp count(xdm.source.ipv4) as Count by xdm.source.ipv4 \r\n|iploc xdm.source.ipv4 loc_country as country\r\n| view graph type = map header = \"IP Locations\" xaxis = country yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "map", "commands": [{"command": {"op": "=", "name": "header", "value": "\"IP Locations\""}}, {"command": {"op": 
"=", "name": "xaxis", "value": "country"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}, {"command": {"op": "=", "name": "headerfontsize", "value": "15"}}, {"command": {"op": "=", "name": "legendfontsize", "value": "10"}}, {"command": {"op": "=", "name": "legend", "value": "`false`"}}]}}}]}, {"id": "row-7469", "data": [{"key": "xql_1668676995963", "data": {"type": "Custom XQL", "width": 33.333333333333336, "height": 434, "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.event.type=\"ApiCall\"\r\n|filter xdm.event.outcome in (\"This API is not authorized for caller.\",\"no permission\",\"The user has no permission\",\"caller has no permission\")\r\n|alter username=arrayindex(regextract(xdm.source.user.username,\"\\:(\\w+)\\@\"),0)\r\n|comp count() as TotalCount by username\r\n| view graph type = pie header = \"Unauthorized events by Username\" xaxis = username yaxis = TotalCount ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "pie", "commands": [{"command": {"op": "=", "name": "header", "value": "\"Unauthorized events by Username\""}}, {"command": {"op": "=", "name": "xaxis", "value": "username"}}, {"command": {"op": "=", "name": "yaxis", "value": "TotalCount"}}]}}}, {"key": "xql_1668676812432", "data": {"type": "Custom XQL", "width": 33.333333333333336, "height": 434, "phrase": "dataset = incidents \r\n|filter description contains \"Alibaba\"\r\n|filter status contains \"New\" \r\n|comp count (severity) as Count by severity\r\n| view graph type = pie header = \"Open Incidents by Severity (Last 30 days)\" xaxis = severity yaxis = Count ", "time_frame": {"relativeTime": 604800000}, "viewOptions": {"type": "pie", "commands": [{"command": {"op": "=", "name": "header", "value": "\"Open Incidents by Severity (Last 30 days)\""}}, {"command": {"op": "=", "name": "xaxis", "value": "severity"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}]}}}, 
{"key": "xql_1668677213404", "data": {"type": "Custom XQL", "width": 33.333333333333336, "height": 434, "phrase": "datamodel \n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\n|comp count() as Count by xdm.event.type\n| view graph type = column subtype = grouped header = \"Count of Event Types\" xaxis = xdm.event.type yaxis = Count seriescolor(\"Count\",\"#7cecc6\") ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "column", "commands": [{"command": {"op": "=", "name": "subtype", "value": "grouped"}}, {"command": {"op": "=", "name": "header", "value": "\"Count of Event Types\""}}, {"command": {"op": "=", "name": "xaxis", "value": "xdm.event.type"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}, {"func": {"args": ["\"Count\"", "\"#7cecc6\""], "name": "seriescolor"}}]}}}]}], "default_dashboard_id": 1, "global_id": "f9c52470483a41e4a6afa65c93f70a4b"}], "widgets_data": [{"widget_key": "xql_1668676732415", "title": "Alibaba Cloud Regions", "creation_time": 1668676732415, "description": "This widget disaplys the cloud regions associated to the account and their usage volume", "data": {"phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.target.cloud.region!=null\r\n|alter countrycode=arrayindex(regextract(xdm.target.cloud.region,\"(\\w+)-\"),0)\r\n|comp count(countrycode) as Count by countrycode\r\n| view graph type = map header = \"Alibaba Cloud Regions\" xaxis = countrycode yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "map", "commands": [{"command": {"op": "=", "name": "header", "value": "\"Alibaba Cloud Regions\""}}, {"command": {"op": "=", "name": "xaxis", "value": "countrycode"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}, {"command": {"op": "=", "name": "headerfontsize", "value": "15"}}, {"command": {"op": "=", 
"name": "legendfontsize", "value": "10"}}, {"command": {"op": "=", "name": "legend", "value": "`false`"}}]}}, "support_time_range": true, "additional_info": {"query_tables": [], "query_uses_library": false}, "creator_mail": ""}, {"widget_key": "xql_1668676812432", "title": "Alibaba Open Incidents", "creation_time": 1668676812432, "description": "This widget displays the open incidents related to Alibaba in XSIAM", "data": {"phrase": "dataset = incidents \r\n|filter description contains \"Alibaba\"\r\n|filter status contains \"New\" \r\n|comp count (severity) as Count by severity\r\n| view graph type = pie header = \"Open Incidents by Severity (Last 30 days)\" xaxis = severity yaxis = Count ", "time_frame": {"relativeTime": 604800000}, "viewOptions": {"type": "pie", "commands": [{"command": {"op": "=", "name": "header", "value": "\"Open Incidents by Severity (Last 30 days)\""}}, {"command": {"op": "=", "name": "xaxis", "value": "severity"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}]}}, "support_time_range": true, "additional_info": {"query_tables": ["incidents"], "query_uses_library": false}, "creator_mail": ""}, {"widget_key": "xql_1668676934356", "title": "Alibaba Top IP Sources", "creation_time": 1668676934356, "description": "This widget displays the top IP sources acceesing the Alibaba account", "data": {"params": [], "phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.source.ipv4!=null\r\n|comp count(xdm.source.ipv4) as Count by xdm.source.ipv4 \r\n|iploc xdm.source.ipv4 loc_country as country\r\n| view graph type = map header = \"IP Locations\" xaxis = country yaxis = Count headerfontsize = 15 legendfontsize = 10 legend = `false` ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "map", "commands": [{"command": {"op": "=", "name": "header", "value": "\"IP Locations\""}}, {"command": {"op": "=", "name": "xaxis", "value": "country"}}, {"command": {"op": 
"=", "name": "yaxis", "value": "Count"}}, {"command": {"op": "=", "name": "headerfontsize", "value": "15"}}, {"command": {"op": "=", "name": "legendfontsize", "value": "10"}}, {"command": {"op": "=", "name": "legend", "value": "`false`"}}]}}, "support_time_range": true, "additional_info": {"query_tables": [], "query_uses_library": false}, "creator_mail": ""}, {"widget_key": "xql_1668676995963", "title": "Alibaba Top unauthorized events by Username", "creation_time": 1668676995963, "description": "This widget displays count of unauthorized events made a user within the account", "data": {"phrase": "datamodel \r\n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\r\n|filter xdm.event.type=\"ApiCall\"\r\n|filter xdm.event.outcome in (\"This API is not authorized for caller.\",\"no permission\",\"The user has no permission\",\"caller has no permission\")\r\n|alter username=arrayindex(regextract(xdm.source.user.username,\"\\:(\\w+)\\@\"),0)\r\n|comp count() as TotalCount by username\r\n| view graph type = pie header = \"Unauthorized events by Username\" xaxis = username yaxis = TotalCount ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "pie", "commands": [{"command": {"op": "=", "name": "header", "value": "\"Unauthorized events by Username\""}}, {"command": {"op": "=", "name": "xaxis", "value": "username"}}, {"command": {"op": "=", "name": "yaxis", "value": "TotalCount"}}]}}, "support_time_range": true, "additional_info": {"query_tables": [], "query_uses_library": false}, "creator_mail": ""}, {"widget_key": "xql_1668677213404", "title": "Alibaba TOP Event Types in Account", "creation_time": 1668677213404, "description": "This widget disaplys the top event types occuring in the Alibaba account", "data": {"phrase": "datamodel \n|filter xdm.observer.vendor=\"alibaba\" and xdm.observer.product=\"action-trail\"\n|comp count() as Count by xdm.event.type\n| view graph type = column subtype = grouped header = \"Count of 
Event Types\" xaxis = xdm.event.type yaxis = Count seriescolor(\"Count\",\"#7cecc6\") ", "time_frame": {"relativeTime": 2592000000}, "viewOptions": {"type": "column", "commands": [{"command": {"op": "=", "name": "subtype", "value": "grouped"}}, {"command": {"op": "=", "name": "header", "value": "\"Count of Event Types\""}}, {"command": {"op": "=", "name": "xaxis", "value": "xdm.event.type"}}, {"command": {"op": "=", "name": "yaxis", "value": "Count"}}, {"func": {"args": ["\"Count\"", "\"#7cecc6\""], "name": "seriescolor"}}]}}, "support_time_range": true, "additional_info": {"query_tables": [], "query_uses_library": false}, "creator_mail": ""}]}
\ No newline at end of file
diff --git a/Packs/AlibabaActionTrail/pack_metadata.json b/Packs/AlibabaActionTrail/pack_metadata.json
index b0dfb9c72903..2faaf231e22a 100644
--- a/Packs/AlibabaActionTrail/pack_metadata.json
+++ b/Packs/AlibabaActionTrail/pack_metadata.json
@@ -2,16 +2,14 @@
"name": "Alibaba Action Trail",
"description": "An Integration Pack to fetch Alibaba action trail events.",
"support": "xsoar",
- "currentVersion": "1.0.10",
+ "currentVersion": "1.0.13",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
"categories": [
"Analytics & SIEM"
],
- "tags": [
- "marketplacev2:Data Source"
- ],
+ "tags": [],
"useCases": [],
"keywords": [],
"marketplaces": [
diff --git a/Packs/AlienVault_OTX/.pack-ignore b/Packs/AlienVault_OTX/.pack-ignore
index efdd537f8c13..f7b3ccfc5b34 100644
--- a/Packs/AlienVault_OTX/.pack-ignore
+++ b/Packs/AlienVault_OTX/.pack-ignore
@@ -1,5 +1,5 @@
[file:AlienVault_OTX_v2.yml]
-ignore=BA108,BA109,IN145
+ignore=BA108,BA109
[file:README.md]
ignore=RM106
diff --git a/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.py b/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.py
index fa52781940c9..cea6ba5d0ffb 100644
--- a/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.py
+++ b/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.py
@@ -872,7 +872,7 @@ def main():
proxy = params.get('proxy')
default_threshold = int(params.get('default_threshold', 2))
max_indicator_relationships = arg_to_number(params.get('max_indicator_relationships', 0))
- token = params.get('api_token')
+ token = params.get('credentials', {}).get('password', '') or params.get('api_token', '')
reliability = params.get('integrationReliability')
reliability = reliability if reliability else DBotScoreReliability.C
if DBotScoreReliability.is_valid_type(reliability):
diff --git a/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.yml b/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.yml
index 85cdd4f80882..5ff73b754683 100644
--- a/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.yml
+++ b/Packs/AlienVault_OTX/Integrations/AlienVault_OTX_v2/AlienVault_OTX_v2.yml
@@ -1,4 +1,7 @@
category: Data Enrichment & Threat Intelligence
+sectionOrder:
+- Connect
+- Collect
commonfields:
id: AlienVault OTX v2
version: -1
@@ -8,27 +11,39 @@ configuration:
name: url
required: true
type: 0
+ section: Connect
+- name: credentials
+ type: 9
+ displaypassword: API Token
+ hiddenusername: true
+ section: Connect
- display: API Token
name: api_token
required: false
type: 4
+ hidden: true
+ section: Connect
- defaultvalue: '2'
- display: Indicator Threshold. The minimum number of pulses to consider the indicator
- as malicious.
+ display: Indicator Threshold. The minimum number of pulses to consider the indicator as malicious.
name: default_threshold
required: false
type: 0
+ section: Collect
+ advanced: true
- defaultvalue: '10'
display: Maximum number of relationships for indicators
name: max_indicator_relationships
required: false
type: 0
- additionalinfo: If not provided, no relationships will be added. This field is relevant
- only for url, file hash and ip / domain indicators.
+ additionalinfo: If not provided, no relationships will be added. This field is relevant only for url, file hash and ip / domain indicators.
+ section: Collect
+ advanced: true
- additionalinfo: Reliability of the source providing the intelligence data.
defaultvalue: C - Fairly reliable
display: Source Reliability
name: integrationReliability
+ required: true
+ type: 15
options:
- A+ - 3rd party enrichment
- A - Completely reliable
@@ -37,22 +52,27 @@ configuration:
- D - Not usually reliable
- E - Unreliable
- F - Reliability cannot be judged
- required: true
- type: 15
+ section: Collect
- defaultvalue: 'true'
additionalinfo: Create relationships between indicators as part of Enrichment.
display: Create relationships
name: create_relationships
required: false
type: 8
+ section: Collect
+ advanced: true
- display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
+ section: Connect
+ advanced: true
- display: Use system proxy settings
name: proxy
required: false
type: 8
+ section: Connect
+ advanced: true
description: Query Indicators of Compromise in AlienVault OTX.
display: AlienVault OTX v2
name: AlienVault OTX v2
@@ -66,9 +86,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the IP address
- is considered as malicious. If the threshold is not specified, the default
- indicator threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the IP address is considered as malicious. If the threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -88,8 +106,7 @@ script:
description: The country where the IP address is located.
type: String
- contextPath: IP.Geo.Location
- description: 'The geolocation where the IP address is located, in the format:
- latitude:longitude.'
+ description: 'The geolocation where the IP address is located, in the format: latitude:longitude.'
type: String
- contextPath: AlienVaultOTX.IP.Reputation
description: The reputation of the IP address.
@@ -132,9 +149,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the domain
- is considered as malicious. If the threshold is not specified, the default
- indicator threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the domain is considered as malicious. If the threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -188,9 +203,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the IP address
- is considered as malicious. If the threshold is not specified, the default
- indicator threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the IP address is considered as malicious. If the threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -229,9 +242,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the host
- name is considered as malicious. If the threshold is not specified, the default
- indicator threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the host name is considered as malicious. If the threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -273,9 +284,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the file
- is considered as malicious. If the threshold is not specified, the default
- indicator threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the file is considered as malicious. If the threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -298,8 +307,7 @@ script:
description: IDs of pulses which are marked as malicious.
type: String
- contextPath: File.Type
- description: The file type, as determined by libmagic (same as displayed in
- file entries).
+ description: The file type, as determined by libmagic (same as displayed in file entries).
type: String
- contextPath: File.Size
description: The size of the file in bytes.
@@ -342,9 +350,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the CVE is
- considered as malicious. If the threshold is not specified, the default indicator
- threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the CVE is considered as malicious. If the threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -384,8 +390,7 @@ script:
- arguments:
- auto: PREDEFINED
default: false
- description: 'The indicator type. Can be: "IPv4", "IPv6", "domain", "hostname",
- or "url".'
+ description: 'The indicator type. Can be: "IPv4", "IPv6", "domain", "hostname", or "url".'
isArray: false
name: indicator_type
predefined:
@@ -562,9 +567,7 @@ script:
required: true
secret: false
- default: false
- description: If the number of pulses is bigger than the threshold, the URL is
- considered as malicious. If threshold is not specified, the default indicator
- threshold is used, which is configured in the instance settings.
+ description: If the number of pulses is bigger than the threshold, the URL is considered as malicious. If threshold is not specified, the default indicator threshold is used, which is configured in the instance settings.
isArray: false
name: threshold
required: false
@@ -619,7 +622,7 @@ script:
- contextPath: URL.Relationships.EntityBType
description: The type of the destination of the relationship.
type: string
- dockerimage: demisto/python3:3.10.8.39276
+ dockerimage: demisto/python3:3.10.10.48392
isfetch: false
longRunning: false
longRunningPort: false
diff --git a/Packs/AlienVault_OTX/ReleaseNotes/1_1_26.md b/Packs/AlienVault_OTX/ReleaseNotes/1_1_26.md
new file mode 100644
index 000000000000..5178b1462556
--- /dev/null
+++ b/Packs/AlienVault_OTX/ReleaseNotes/1_1_26.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlienVault OTX v2
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AlienVault_OTX/ReleaseNotes/1_1_27.md b/Packs/AlienVault_OTX/ReleaseNotes/1_1_27.md
new file mode 100644
index 000000000000..b56cfb7c470f
--- /dev/null
+++ b/Packs/AlienVault_OTX/ReleaseNotes/1_1_27.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlienVault OTX v2
+- Updated the Docker image to: *demisto/python3:3.10.9.42476*.
diff --git a/Packs/AlienVault_OTX/ReleaseNotes/1_1_28.md b/Packs/AlienVault_OTX/ReleaseNotes/1_1_28.md
new file mode 100644
index 000000000000..42d8915d1033
--- /dev/null
+++ b/Packs/AlienVault_OTX/ReleaseNotes/1_1_28.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AlienVault OTX v2
+- Added the *API Token* integration parameter to support credentials fetching object.
diff --git a/Packs/AlienVault_OTX/ReleaseNotes/1_1_29.md b/Packs/AlienVault_OTX/ReleaseNotes/1_1_29.md
new file mode 100644
index 000000000000..dc4c634da8ca
--- /dev/null
+++ b/Packs/AlienVault_OTX/ReleaseNotes/1_1_29.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### AlienVault OTX v2
+- Note: Organized the the integrations' parameters by sections. Relevant for XSIAM and XSOAR 8.1 and above.
diff --git a/Packs/AlienVault_OTX/ReleaseNotes/1_1_30.md b/Packs/AlienVault_OTX/ReleaseNotes/1_1_30.md
new file mode 100644
index 000000000000..a7c95fdb96f6
--- /dev/null
+++ b/Packs/AlienVault_OTX/ReleaseNotes/1_1_30.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlienVault OTX v2
+- Updated the Docker image to: *demisto/python3:3.10.9.46807*.
diff --git a/Packs/AlienVault_OTX/ReleaseNotes/1_1_31.md b/Packs/AlienVault_OTX/ReleaseNotes/1_1_31.md
new file mode 100644
index 000000000000..e45b56efcde1
--- /dev/null
+++ b/Packs/AlienVault_OTX/ReleaseNotes/1_1_31.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlienVault OTX v2
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AlienVault_OTX/pack_metadata.json b/Packs/AlienVault_OTX/pack_metadata.json
index d0babe0502e4..bb6f79870574 100644
--- a/Packs/AlienVault_OTX/pack_metadata.json
+++ b/Packs/AlienVault_OTX/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AlienVault OTX",
"description": "Query Indicators of Compromise in AlienVault OTX.",
"support": "xsoar",
- "currentVersion": "1.1.25",
+ "currentVersion": "1.1.31",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.py b/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.py
index bed91d5af5a6..9e6aa6b41dea 100644
--- a/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.py
+++ b/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.py
@@ -6,11 +6,12 @@
import json
import requests
import dateparser
+import urllib3
from datetime import datetime
from typing import Dict
# Disable insecure warnings
-requests.packages.urllib3.disable_warnings()
+urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
diff --git a/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.yml b/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.yml
index 7eab5f8d29da..d2e245b44198 100644
--- a/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.yml
+++ b/Packs/AlienVault_USM_Anywhere/Integrations/AlienVault_USM_Anywhere/AlienVault_USM_Anywhere.yml
@@ -412,7 +412,7 @@ script:
- contextPath: AlienVault.Event.Subcategory
description: The event subcategory.
type: String
- dockerimage: demisto/python3:3.10.5.31928
+ dockerimage: demisto/python3:3.10.10.48392
isfetch: true
longRunning: false
longRunningPort: false
diff --git a/Packs/AlienVault_USM_Anywhere/ReleaseNotes/1_0_13.md b/Packs/AlienVault_USM_Anywhere/ReleaseNotes/1_0_13.md
new file mode 100644
index 000000000000..16b09cf7a8b8
--- /dev/null
+++ b/Packs/AlienVault_USM_Anywhere/ReleaseNotes/1_0_13.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlienVault USM Anywhere
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AlienVault_USM_Anywhere/ReleaseNotes/1_0_14.md b/Packs/AlienVault_USM_Anywhere/ReleaseNotes/1_0_14.md
new file mode 100644
index 000000000000..ee6c11080540
--- /dev/null
+++ b/Packs/AlienVault_USM_Anywhere/ReleaseNotes/1_0_14.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlienVault USM Anywhere
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AlienVault_USM_Anywhere/pack_metadata.json b/Packs/AlienVault_USM_Anywhere/pack_metadata.json
index 3c4c6dd8d2d5..338a1af4f99a 100644
--- a/Packs/AlienVault_USM_Anywhere/pack_metadata.json
+++ b/Packs/AlienVault_USM_Anywhere/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AlienVault USM Anywhere",
"description": "Searches for and monitors alarms and events from AlienVault USM Anywhere.",
"support": "xsoar",
- "currentVersion": "1.0.12",
+ "currentVersion": "1.0.14",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AlphaVantage/Integrations/AlphaVantage/AlphaVantage.yml b/Packs/AlphaVantage/Integrations/AlphaVantage/AlphaVantage.yml
index db40ab10649f..de614c3aa25a 100755
--- a/Packs/AlphaVantage/Integrations/AlphaVantage/AlphaVantage.yml
+++ b/Packs/AlphaVantage/Integrations/AlphaVantage/AlphaVantage.yml
@@ -117,7 +117,7 @@ script:
- contextPath: AlphaVantage.StockHistory.Time Series
description: 'List of all stock data samples'
type: Unknown
- dockerimage: demisto/python3:3.10.5.31928
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/AlphaVantage/ReleaseNotes/1_0_8.md b/Packs/AlphaVantage/ReleaseNotes/1_0_8.md
new file mode 100644
index 000000000000..150bb121a4f2
--- /dev/null
+++ b/Packs/AlphaVantage/ReleaseNotes/1_0_8.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlphaVantage
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AlphaVantage/ReleaseNotes/1_0_9.md b/Packs/AlphaVantage/ReleaseNotes/1_0_9.md
new file mode 100644
index 000000000000..7195c9c6ae71
--- /dev/null
+++ b/Packs/AlphaVantage/ReleaseNotes/1_0_9.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### AlphaVantage
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AlphaVantage/pack_metadata.json b/Packs/AlphaVantage/pack_metadata.json
index 2bb38fb9b32b..293a2d1fe0d0 100755
--- a/Packs/AlphaVantage/pack_metadata.json
+++ b/Packs/AlphaVantage/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AlphaVantage",
"description": "The Alpha Vantage content pack provides accessible APIs for financial market data such as stock prices. Utilize this pack to get open stock prices, high/low price, trade volume, and so on.",
"support": "community",
- "currentVersion": "1.0.7",
+ "currentVersion": "1.0.9",
"author": "Ohad Valtzer",
"url": "https://live.paloaltonetworks.com/t5/cortex-xsoar-discussions/bd-p/Cortex_XSOAR_Discussions",
"email": "",
diff --git a/Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise.yml b/Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise.yml
index c05d1b3eb7b7..b6869b6ba9b6 100644
--- a/Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise.yml
+++ b/Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise.yml
@@ -205,7 +205,7 @@ script:
- contextPath: Domain.Malicious.Description
description: A description of the malicious domain.
type: String
- dockerimage: demisto/python3:3.10.8.39276
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/Anomali_Enterprise/ReleaseNotes/1_0_25.md b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_25.md
new file mode 100644
index 000000000000..23110f630470
--- /dev/null
+++ b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_25.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Anomali Match
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/Anomali_Enterprise/ReleaseNotes/1_0_26.md b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_26.md
new file mode 100644
index 000000000000..e3acf6f731e2
--- /dev/null
+++ b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_26.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Anomali Match
+- Updated the Docker image to: *demisto/python3:3.10.9.42476*.
diff --git a/Packs/Anomali_Enterprise/ReleaseNotes/1_0_27.md b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_27.md
new file mode 100644
index 000000000000..486a89120b01
--- /dev/null
+++ b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_27.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Anomali Match
+- Updated the Docker image to: *demisto/python3:3.10.9.46032*.
diff --git a/Packs/Anomali_Enterprise/ReleaseNotes/1_0_28.md b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_28.md
new file mode 100644
index 000000000000..c109808abf88
--- /dev/null
+++ b/Packs/Anomali_Enterprise/ReleaseNotes/1_0_28.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Anomali Match
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/Anomali_Enterprise/pack_metadata.json b/Packs/Anomali_Enterprise/pack_metadata.json
index ea99af0b2ddb..8e658cfc03d1 100644
--- a/Packs/Anomali_Enterprise/pack_metadata.json
+++ b/Packs/Anomali_Enterprise/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Anomali Enterprise",
"description": "Use Anomali Match to query IOCs and conduct forensic searches.",
"support": "xsoar",
- "currentVersion": "1.0.24",
+ "currentVersion": "1.0.28",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Anomali_ThreatStream/.pack-ignore b/Packs/Anomali_ThreatStream/.pack-ignore
index 5ea7a548ab31..efb425242274 100644
--- a/Packs/Anomali_ThreatStream/.pack-ignore
+++ b/Packs/Anomali_ThreatStream/.pack-ignore
@@ -2,11 +2,14 @@
ignore=RM104
[file:Anomali_ThreatStream_v2.yml]
-ignore=BA108,BA109,IN145
+ignore=BA108,BA109
[file:AnomaliThreatStream.yml]
ignore=IN144,IN145
+[file:AnomaliThreatStream_image.png]
+ignore=IM111
+
[known_words]
Anomali
ThreatStream
diff --git a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.py b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.py
index 1c7cf1ea7051..77b913f96a83 100644
--- a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.py
+++ b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.py
@@ -985,6 +985,10 @@ def import_ioc_without_approval(client: Client, file_id, classification, confide
tags = argToList(tags)
if trustedcircles:
trustedcircles = argToList(trustedcircles)
+ if confidence:
+ confidence = int(confidence)
+ if source_confidence_weight:
+ source_confidence_weight = int(source_confidence_weight)
try:
# entry id of uploaded file to war room
file_info = demisto.getFilePath(file_id)
@@ -994,7 +998,8 @@ def import_ioc_without_approval(client: Client, file_id, classification, confide
raise DemistoException(f'{THREAT_STREAM} - Entry {file_id} does not contain a valid json file.')
except Exception:
raise DemistoException(f'{THREAT_STREAM} - Entry {file_id} does not contain a file.')
- ioc_to_import.update({'meta': assign_params(
+ meta = ioc_to_import.get('meta', {})
+ meta |= assign_params(
classification=classification,
confidence=confidence,
allow_unresolved=argToBoolean(allow_unresolved),
@@ -1003,8 +1008,9 @@ def import_ioc_without_approval(client: Client, file_id, classification, confide
severity=severity,
tags=tags,
trustedcircles=trustedcircles
- )})
+ )
+ ioc_to_import.update({"meta": meta})
client.http_request("PATCH", "v1/intelligence/", json=ioc_to_import, resp_type='text')
return "The data was imported successfully."
@@ -1178,17 +1184,18 @@ def file_name_to_valid_string(file_name):
return file_name
-def submit_report(client: Client, submission_type, submission_value, submission_classification="private",
- report_platform="WINDOWS7",
+def submit_report(client: Client, submission_type, submission_value, import_indicators=True,
+ submission_classification="private", report_platform="WINDOWS7",
premium_sandbox="false", detail=None):
"""
Detonates URL or file that was uploaded to war room to ThreatStream sandbox.
"""
-
+ import_indicators = argToBoolean(import_indicators)
data = {
'report_radio-classification': submission_classification,
'report_radio-platform': report_platform,
'use_premium_sandbox': premium_sandbox,
+ 'import_indicators': import_indicators
}
if detail:
data['detail'] = detail
@@ -1325,6 +1332,13 @@ def search_intelligence(client: Client, **kwargs):
url = 'v2/intelligence/'
if 'query' in kwargs:
url += f"?q={kwargs.pop('query')}"
+ if 'confidence' in kwargs:
+ conf = kwargs.get('confidence', '').split(' ')
+ if len(conf) > 1:
+ if conf[0] not in {'gt', 'lt'}:
+ raise DemistoException(f'Confidence operator must be on of gt or lt, if used.{conf[0]} is not a legal value.')
+ kwargs[f'confidence__{conf[0]}'] = conf[1]
+ del kwargs['confidence']
intelligence_list = client.http_request('GET', url, params=kwargs).get('objects', None)
if not intelligence_list:
return 'No intelligence found from ThreatStream'
diff --git a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.yml b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.yml
index b7b3ddc589a6..ea880d0d6467 100644
--- a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.yml
+++ b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3.yml
@@ -1,4 +1,7 @@
category: Data Enrichment & Threat Intelligence
+sectionOrder:
+- Connect
+- Collect
commonfields:
id: Anomali ThreatStream v3
version: -1
@@ -8,37 +11,52 @@ configuration:
name: url
required: true
type: 0
+ section: Connect
- display: Username
name: credentials
required: true
type: 9
+ displaypassword: API Key
+ section: Connect
- display: URL threshold
name: url_threshold
required: false
type: 0
+ section: Collect
+ advanced: true
- display: IP threshold
name: ip_threshold
required: false
type: 0
+ section: Collect
+ advanced: true
- display: Domain threshold
name: domain_threshold
required: false
type: 0
+ section: Collect
+ advanced: true
- display: File threshold
name: file_threshold
required: false
type: 0
+ section: Collect
+ advanced: true
- display: Email threshold
additionalinfo: Email indicators with confidence value above this threshold are considered malicious.
name: email_threshold
required: false
type: 0
+ section: Collect
+ advanced: true
- defaultvalue: 'false'
additionalinfo: Whether to include inactive indicators in reputation commands.
display: Include inactive results
name: include_inactive
required: false
type: 8
+ section: Collect
+ advanced: true
- additionalinfo: Reliability of the source providing the intelligence data.
defaultvalue: B - Usually reliable
display: Source Reliability
@@ -53,21 +71,28 @@ configuration:
- F - Reliability cannot be judged
required: true
type: 15
+ section: Collect
- defaultvalue: 'false'
display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
+ section: Connect
+ advanced: true
- display: Use system proxy settings
name: proxy
required: false
type: 8
+ section: Connect
+ advanced: true
- defaultvalue: 'true'
additionalinfo: Create relationships between indicators as part of enrichment.
display: Create relationships
name: create_relationships
required: false
type: 8
+ section: Collect
+ advanced: true
defaultclassifier: 'null'
description: Use Anomali ThreatStream to query and submit threats.
display: Anomali ThreatStream v3
@@ -2807,6 +2832,16 @@ script:
name: detail
required: false
secret: false
+ - default: true
+ auto: PREDEFINED
+ description: If you want to initiate an import job for observables discovered during detonation, set this value to true. Default value is true.
+ isArray: false
+ name: import_indicators
+ required: false
+ secret: false
+ predefined:
+ - 'false'
+ - 'true'
deprecated: false
description: Submits a file or URL to the ThreatStream-hosted sandbox for detonation.
execution: false
@@ -4335,7 +4370,7 @@ script:
- contextPath: ThreatStream.Intelligence.severity
description: The severity of the intelligence.
type: String
- dockerimage: demisto/py3-tools:1.0.0.40800
+ dockerimage: demisto/py3-tools:1.0.0.48698
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3_test.py b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3_test.py
index b2e837194a49..ea7d10096646 100644
--- a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3_test.py
+++ b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/AnomaliThreatStreamv3_test.py
@@ -16,15 +16,15 @@ def util_load_json(path):
return json.loads(f.read())
-def util_tmp_json_file():
+def util_tmp_json_file(mock_object, file_name: str):
tmp_dir = mkdtemp()
- file_name = 'test_file.txt'
+ file_name = f'{file_name}.txt'
file_obj = {
'name': file_name,
'path': os.path.join(tmp_dir, file_name)
}
with open(file_obj['path'], 'w') as f:
- json.dump(MOCK_OBJECTS, f)
+ json.dump(mock_object, f)
return file_obj
@@ -43,6 +43,38 @@ def mock_client():
MOCK_OBJECTS = {"objects": [{"srcip": "8.8.8.8", "itype": "mal_ip", "confidence": 50},
{"srcip": "1.1.1.1", "itype": "apt_ip"}]}
+MOCK_OBJECTS_2 = {
+ "objects": [
+ {
+ "email": "email_test@domain.com",
+ "itype": "compromised_email",
+ "confidence": 50
+ },
+ {
+ "srcip": "78.78.78.67",
+ "classification": "private",
+ "itype": "bot_ip",
+ "confidence": 50,
+ "severity": "low"
+ },
+ {
+ "domain": "szqylwjzq.biz",
+ "classification": "private",
+ "itype": "mal_domain",
+ "confidence": 95,
+ "severity": "very-high"
+ }
+ ],
+ "meta": {
+ "confidence": 50,
+ "classification": "Private",
+ "allow_unresolved": True,
+ "tags": [
+ "test1",
+ "test2"
+ ]
+ }
+}
INDICATOR = [{
"resource_uri": "/api/v2/intelligence/123456789/",
@@ -367,9 +399,10 @@ def test_import_indicator_with_approval__happy_path(self, mocker, import_type):
"""
# prepare
- mocked_file_path = util_tmp_json_file()
+ mocked_file_path = util_tmp_json_file(MOCK_OBJECTS, 'test_file')
mocker.patch.object(demisto, 'getFilePath', return_value=mocked_file_path)
- mocker.patch.object(Client, 'http_request', return_value={'success': True, 'import_session_id': 'test_session_id'})
+ mocker.patch.object(Client, 'http_request',
+ return_value={'success': True, 'import_session_id': 'test_session_id'})
# run
result = import_ioc_with_approval(mock_client(), import_type, 'test_value')
@@ -388,7 +421,49 @@ def test_import_indicator_with_approval__happy_path(self, mocker, import_type):
assert result.outputs == 'test_session_id'
- def test_import_indicator_without_approval__happy_path(self, mocker):
+ @pytest.mark.parametrize(
+ 'mock_object, file_name, args, expected_meta_data_keys, expected_meta_data_changed',
+ [
+ (
+ MOCK_OBJECTS,
+ 'test_file',
+ {
+ 'file_id': 'test_file_id',
+ 'classification': 'Private',
+ 'confidence': "50",
+ 'severity': 'low',
+ 'allow_unresolved': True
+ },
+ ('classification', 'confidence', 'severity', 'allow_unresolved'),
+ {
+ 'classification': 'Private',
+ 'confidence': 50,
+ 'severity': 'low',
+ 'allow_unresolved': True
+ }
+ ),
+ (
+ MOCK_OBJECTS_2,
+ 'test_file',
+ {
+ 'file_id': 'test_file_id',
+ 'classification': 'Private',
+ 'confidence': "70",
+ 'severity': 'high',
+ 'allow_unresolved': True
+ },
+ ('classification', 'confidence', 'severity', 'allow_unresolved', 'tags'),
+ {'severity': 'high', 'confidence': 70}
+ )
+ ]
+ )
+ def test_import_indicator_without_approval__happy_path(self,
+ mocker,
+ mock_object: dict,
+ file_name: str,
+ args: dict,
+ expected_meta_data_keys: tuple,
+ expected_meta_data_changed: dict):
"""
Given:
- Indicator to import without approval
@@ -401,23 +476,25 @@ def test_import_indicator_without_approval__happy_path(self, mocker):
"""
# prepare
- mocked_file_path = util_tmp_json_file()
+ mocked_file_path = util_tmp_json_file(mock_object, file_name)
mocker.patch.object(demisto, 'getFilePath', return_value=mocked_file_path)
mocker.patch.object(Client, 'http_request')
# run
result = import_ioc_without_approval(
mock_client(),
- file_id='test_file_id',
- classification='Private',
- confidence=50,
- severity='low',
- allow_unresolved=True,
+ file_id=args['file_id'],
+ classification=args['classification'],
+ confidence=args.get('confidence'),
+ severity=args.get('severity'),
+ allow_unresolved=args.get('allow_unresolved'),
)
# validate
json_data = Client.http_request.call_args[1]['json']['meta']
- assert all(key in json_data for key in ['classification', 'confidence', 'severity', 'allow_unresolved'])
+ assert set(expected_meta_data_keys).issubset(json_data.keys())
+ for key in expected_meta_data_changed:
+ assert json_data[key] == expected_meta_data_changed[key]
assert result == 'The data was imported successfully.'
@pytest.mark.parametrize(argnames='command', argvalues=[import_ioc_with_approval, import_ioc_without_approval])
@@ -466,6 +543,7 @@ class TestGetCommands:
"""
Group the 'get' commands test
"""
+
@staticmethod
def mocked_http_get_response(command, **kwargs):
mocked_file_path = 'test_data/mocked_get_commands_response.json'
@@ -770,7 +848,7 @@ def test_submit_file_report__happy_path(self, mocker):
"""
# prepare
- file_obj = util_tmp_json_file()
+ file_obj = util_tmp_json_file(MOCK_OBJECTS, 'test_file')
mocker.patch.object(demisto, 'getFilePath', return_value=file_obj)
mocked_report = dict(success=True, reports=dict(test_platform=dict(id='report_id')))
mocker.patch.object(Client, 'http_request', return_value=mocked_report)
@@ -961,3 +1039,28 @@ def test_search_intelligence(mocker):
assert result.outputs[0].get('itype') == 'c2_ip'
assert result.outputs_prefix == 'ThreatStream.Intelligence'
+
+
+def test_search_intelligence_with_confidence(mocker):
+ """
+
+ Given:
+ - Various parameters to search intelligence by
+
+ When:
+ - Call search_intelligence command
+
+ Then:
+ - Validate the params passed correctly
+
+ """
+ mocked_ip_result = util_load_json('test_data/mocked_ip_response.json')
+ mocker.patch.object(Client, 'http_request', return_value=mocked_ip_result)
+
+ args = {'uuid': '9807794e-3de0-4340-91ca-cd82dd7b6d24',
+ 'confidence': 'lt 80'}
+ client = mock_client()
+ search_intelligence(client, **args)
+ http_call_args = client.http_request.call_args.kwargs.get('params')
+ assert 'confidence' not in http_call_args
+ assert 'confidence__lt' in http_call_args
diff --git a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/README.md b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/README.md
index 1f9f8b1cf5b8..9a3a5917568b 100644
--- a/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/README.md
+++ b/Packs/Anomali_ThreatStream/Integrations/AnomaliThreatStreamv3/README.md
@@ -1763,6 +1763,7 @@ Submits a file or URL to the ThreatStream-hosted sandbox for detonation.
| submission_value | The submission value. Possible values are a valid URL or a file ID that was uploaded to the War Room to detonate. | Required |
| premium_sandbox | Whether the premium sandbox should be used for detonation. Possible values are: false, true. Default is false. | Optional |
| detail | A comma-separated list of additional details for the indicator. This information is displayed in the Tag column of the ThreatStream UI. | Optional |
+| import_indicators | If you want to initiate an import job for observables discovered during detonation, set this value to true. Default value is true. | Optional |
#### Context Output
diff --git a/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.py b/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.py
index 9cf37592c767..4a11e20dd1fb 100644
--- a/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.py
+++ b/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.py
@@ -6,10 +6,11 @@
import json
import requests
+import urllib3
from requests.exceptions import MissingSchema, ConnectionError
# Disable insecure warnings
-requests.packages.urllib3.disable_warnings()
+urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
@@ -1026,8 +1027,8 @@ def main():
params = demisto.params()
- user_name = params.get('username')
- api_key = params.get('apikey')
+ user_name = params.get('credentials', {}).get('identifier', '') or params.get('username', '')
+ api_key = params.get('credentials', {}).get('password', '') or params.get('apikey', '')
server_url = params.get('url', '').strip('/')
CREDENTIALS['username'] = user_name
diff --git a/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.yml b/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.yml
index 7f55e2e6391c..140594020960 100644
--- a/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.yml
+++ b/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.yml
@@ -2,20 +2,28 @@ category: Data Enrichment & Threat Intelligence
commonfields:
id: Anomali ThreatStream v2
version: -1
+deprecated: true
configuration:
- defaultvalue: https://api.threatstream.com
display: Server URL (e.g., https://www.test.com)
name: url
required: true
type: 0
+- display: Username
+ name: credentials
+ required: false
+ type: 9
+ displaypassword: API Key
- display: Username
name: username
- required: true
+ required: false
type: 0
+ hidden: true
- display: API Key
name: apikey
- required: true
+ required: false
type: 4
+ hidden: true
- defaultvalue: high
display: Threshold of the indicator.
name: default_threshold
@@ -26,10 +34,12 @@ configuration:
- very-high
required: true
type: 15
-- additionalinfo: Reliability of the source providing the intelligence data.
- defaultvalue: B - Usually reliable
+- defaultvalue: 'B - Usually reliable'
display: Source Reliability
name: integrationReliability
+ required: true
+ type: 15
+ additionalinfo: Reliability of the source providing the intelligence data.
options:
- A+ - 3rd party enrichment
- A - Completely reliable
@@ -38,20 +48,18 @@ configuration:
- D - Not usually reliable
- E - Unreliable
- F - Reliability cannot be judged
- required: true
- type: 15
-- defaultvalue: 'false'
- display: Trust any certificate (not secure)
+- display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
+ defaultvalue: 'false'
- display: Use system proxy settings
name: proxy
required: false
type: 8
defaultclassifier: 'null'
-description: Use Anomali ThreatStream to query and submit threats.
-display: Anomali ThreatStream v2
+description: Deprecated. Use Anomali ThreatStream v3 integration instead.
+display: Anomali ThreatStream v2 (Deprecated)
name: Anomali ThreatStream v2
script:
commands:
@@ -64,9 +72,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: If severity is greater than or equal to the threshold, then the
- IP address will be considered malicious. This argument will override the default
- threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
+ description: If severity is greater than or equal to the threshold, then the IP address will be considered malicious. This argument will override the default threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
isArray: false
name: threshold
predefined:
@@ -79,8 +85,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'False'
- description: Whether to include results with an inactive status. Default is
- "False".
+ description: Whether to include results with an inactive status. Default is "False".
isArray: false
name: include_inactive
predefined:
@@ -127,23 +132,19 @@ script:
description: The indicator type.
type: String
- contextPath: ThreatStream.IP.Modified
- description: 'The time when the indicator was last updated. The date format
- is: YYYYMMDDThhmmss, where "T" denotes the start of the value for time, in
- UTC time.'
+ description: 'The time when the indicator was last updated. The date format is: YYYYMMDDThhmmss, where "T" denotes the start of the value for time, in UTC time.'
type: String
- contextPath: ThreatStream.IP.Severity
description: The indicator severity ("very-high", "high", "medium", or "low").
type: String
- contextPath: ThreatStream.IP.Confidence
- description: The observable certainty level of a reported indicator type. Confidence
- score can range from 0-100, in increasing order of confidence.
+ description: The observable certainty level of a reported indicator type. Confidence score can range from 0-100, in increasing order of confidence.
type: String
- contextPath: ThreatStream.IP.Status
description: The status assigned to the indicator.
type: String
- contextPath: ThreatStream.IP.Organization
- description: The name of the business that owns the IP address associated with
- the indicator.
+ description: The name of the business that owns the IP address associated with the indicator.
type: String
- contextPath: ThreatStream.IP.Source
description: The source of the indicator.
@@ -169,9 +170,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: If the severity is greater than or equal to the threshold, the
- IP address is considered as malicious. This argument will override the default
- threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
+ description: If the severity is greater than or equal to the threshold, the IP address is considered as malicious. This argument will override the default threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
isArray: false
name: threshold
predefined:
@@ -184,8 +183,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'False'
- description: Whether to include results with an inactive status. Default is
- "False".
+ description: Whether to include results with an inactive status. Default is "False".
isArray: false
name: include_inactive
predefined:
@@ -246,15 +244,13 @@ script:
description: The indicator severity ("very-high", "high", "medium", "low").
type: String
- contextPath: ThreatStream.Domain.Confidence
- description: The observable certainty level of a reported indicator type. Confidence
- score can range from 0-100, in increasing order of confidence.
+ description: The observable certainty level of a reported indicator type. Confidence score can range from 0-100, in increasing order of confidence.
type: String
- contextPath: ThreatStream.Domain.Status
description: The status assigned to the indicator.
type: String
- contextPath: ThreatStream.Domain.Organization
- description: The name of the business that owns the IP address associated with
- the indicator.
+ description: The name of the business that owns the IP address associated with the indicator.
type: String
- contextPath: ThreatStream.Domain.Source
description: The source of the indicator.
@@ -289,9 +285,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: If the severity is greater than or equal to the threshold, the
- hash of file is considered as malicious. This argument will override the default
- threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
+ description: If the severity is greater than or equal to the threshold, the hash of file is considered as malicious. This argument will override the default threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
isArray: false
name: threshold
predefined:
@@ -304,8 +298,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'False'
- description: Whether to include results with an inactive status. Default is
- "False".
+ description: Whether to include results with an inactive status. Default is "False".
isArray: false
name: include_inactive
predefined:
@@ -349,8 +342,7 @@ script:
description: The indicator severity ("very-high", "high", "medium", "low").
type: String
- contextPath: ThreatStream.File.Confidence
- description: The observable certainty level of a reported indicator type. Confidence
- score can range from 0-100, in increasing order of confidence.
+ description: The observable certainty level of a reported indicator type. Confidence score can range from 0-100, in increasing order of confidence.
type: String
- contextPath: ThreatStream.File.Status
description: The status assigned to the indicator.
@@ -393,9 +385,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: If the severity is greater or equal than the threshold, the IP
- address is considered as malicious. This argument will override the default
- threshold defined as a parameter. Can be "low", "medium", "high", "very-high".
+ description: If the severity is greater or equal than the threshold, the IP address is considered as malicious. This argument will override the default threshold defined as a parameter. Can be "low", "medium", "high", "very-high".
isArray: false
name: threshold
predefined:
@@ -408,8 +398,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'False'
- description: Whether to include results with an inactive status. Default is
- "False".
+ description: Whether to include results with an inactive status. Default is "False".
isArray: false
name: include_inactive
predefined:
@@ -438,8 +427,7 @@ script:
description: The indicator severity ("very-high", "high", "medium", "low").
type: String
- contextPath: ThreatStream.EmailReputation.Confidence
- description: The observable certainty level of a reported indicator type. Confidence
- score can range from 0-100, in increasing order of confidence.
+ description: The observable certainty level of a reported indicator type. Confidence score can range from 0-100, in increasing order of confidence.
type: String
- contextPath: ThreatStream.EmailReputation.Status
description: The status assigned to the indicator.
@@ -516,8 +504,7 @@ script:
- arguments:
- default: false
defaultValue: '50'
- description: The observable certainty level of a reported indicator type. Default
- is 50.
+ description: The observable certainty level of a reported indicator type. Default is 50.
isArray: false
name: confidence
required: false
@@ -525,8 +512,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: private
- description: Whether the indicator data is public or private to the organization.
- Default is "private".
+ description: Whether the indicator data is public or private to the organization. Default is "private".
isArray: false
name: classification
predefined:
@@ -537,8 +523,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: exploit
- description: Type of threat associated with the imported observables. Pre-defined
- values, such as "adware", "bot", "malware", etc. Default is "exploit".
+ description: Type of threat associated with the imported observables. Pre-defined values, such as "adware", "bot", "malware", etc. Default is "exploit".
isArray: false
name: threat_type
predefined:
@@ -575,9 +560,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: low
- description: The potential impact of the indicator type with which the observable
- is believed to be associated. Can be "high", "medium", or "high", or "very-high".
- Default is "low".
+ description: The potential impact of the indicator type with which the observable is believed to be associated. Can be "high", "medium", or "high", or "very-high". Default is "low".
isArray: false
name: severity
predefined:
@@ -589,8 +572,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: The import type of the indicator. Can be "datatext", "file-id",
- or "url".
+ description: The import type of the indicator. Can be "datatext", "file-id", or "url".
isArray: false
name: import_type
predefined:
@@ -600,87 +582,67 @@ script:
required: true
secret: false
- default: false
- description: 'The source of imported data. Can be one of the following: url,
- datatext of file-id of uploaded file to the War Room. Supported file types
- for file-id are: CSV, HTML, IOC, JSON, PDF, TXT.'
+ description: 'The source of imported data. Can be one of the following: url, datatext of file-id of uploaded file to the War Room. Supported file types for file-id are: CSV, HTML, IOC, JSON, PDF, TXT.'
isArray: false
name: import_value
required: true
secret: false
- default: false
- description: Indicator type to assign if a specific type is not associated with
- an observable. This is a global setting that applies to any imported IP-type
- observable when an explicit itype is not specified for it.
+ description: Indicator type to assign if a specific type is not associated with an observable. This is a global setting that applies to any imported IP-type observable when an explicit itype is not specified for it.
isArray: false
name: ip_mapping
required: false
secret: false
- default: false
- description: Indicator type to assign if a specific type is not associated with
- an observable. This is a global setting that applies to any imported domain-type
- observable when an explicit itype is not specified for it.
+ description: Indicator type to assign if a specific type is not associated with an observable. This is a global setting that applies to any imported domain-type observable when an explicit itype is not specified for it.
isArray: false
name: domain_mapping
required: false
secret: false
- default: false
- description: Indicator type to assign if a specific type is not associated with
- an observable. This is a global setting that applies to any imported URL-type
- observable when an explicit itype is not specified for it.
+ description: Indicator type to assign if a specific type is not associated with an observable. This is a global setting that applies to any imported URL-type observable when an explicit itype is not specified for it.
isArray: false
name: url_mapping
required: false
secret: false
- default: false
- description: Indicator type to assign if a specific type is not associated with
- an observable. This is a global setting that applies to any imported email-type
- observable when an explicit itype is not specified for it.
+ description: Indicator type to assign if a specific type is not associated with an observable. This is a global setting that applies to any imported email-type observable when an explicit itype is not specified for it.
isArray: false
name: email_mapping
required: false
secret: false
- default: false
- description: Indicator type to assign if a specific type is not associated with
- an observable. This is a global setting that applies to any imported MD5-type
- observable when an explicit itype is not specified for it.
+ description: Indicator type to assign if a specific type is not associated with an observable. This is a global setting that applies to any imported MD5-type observable when an explicit itype is not specified for it.
isArray: false
name: md5_mapping
required: false
secret: false
deprecated: false
- description: 'Imports indicators (observables) into ThreatStream. Approval of
- the imported data is required, using the ThreatStream UI. The data can be imported
- using one of three methods: plain-text, file, or URL. Only one argument can
- be used.'
+ description: 'Imports indicators (observables) into ThreatStream. Approval of the imported data is required, using the ThreatStream UI. The data can be imported using one of three methods: plain-text, file, or URL. Only one argument can be used.'
execution: false
name: threatstream-import-indicator-with-approval
- arguments:
- default: false
defaultValue: '50'
- description: The observable certainty level of a reported indicator type. Default
- is 50.
+ description: The observable certainty level of a reported indicator type. Default is 50.
isArray: false
name: confidence
required: false
secret: false
- default: false
- description: To use your specified confidence entirely and not re-assess the
- value using machine learning algorithms, set source_confidence_ weight to
- 100.
+ description: To use your specified confidence entirely and not re-assess the value using machine learning algorithms, set source_confidence_ weight to 100.
isArray: false
name: source_confidence_weight
required: false
secret: false
- default: false
- description: The time stamp of when intelligence will expire on ThreatStream,
- in ISO format. For example, 2020-12-24T00:00:00.
+ description: The time stamp of when intelligence will expire on ThreatStream, in ISO format. For example, 2020-12-24T00:00:00.
isArray: false
name: expiration_ts
required: false
secret: false
- default: false
- description: The severity you want to assign to the observable when it is imported.
- Can be "low", "medium", "high" , or "very-high".
+ description: The severity you want to assign to the observable when it is imported. Can be "low", "medium", "high" , or "very-high".
isArray: false
name: severity
predefined:
@@ -697,17 +659,14 @@ script:
required: false
secret: false
- default: false
- description: The ID of the trusted circle with which this threat data should
- be shared. If you want to import the threat data to multiple trusted circles,
- enter a list of comma-separated IDs.
+ description: The ID of the trusted circle with which this threat data should be shared. If you want to import the threat data to multiple trusted circles, enter a list of comma-separated IDs.
isArray: false
name: trustedcircles
required: false
secret: false
- auto: PREDEFINED
default: false
- description: Denotes whether the indicator data is public or private to the
- organization.
+ description: Denotes whether the indicator data is public or private to the organization.
isArray: false
name: classification
predefined:
@@ -717,8 +676,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Whether domain observables that are included in the file and do
- not resolve will be accepted as valid in ThreatStream and imported.
+ description: Whether domain observables that are included in the file and do not resolve will be accepted as valid in ThreatStream and imported.
isArray: false
name: allow_unresolved
predefined:
@@ -727,23 +685,19 @@ script:
required: false
secret: false
- default: false
- description: The entry ID of an uploaded file to the War Room containing a json
- with "objects" array and "meta" maps.
+ description: The entry ID of an uploaded file to the War Room containing a json with "objects" array and "meta" maps.
isArray: false
name: file_id
required: true
secret: false
deprecated: false
- description: Imports indicators (observables) into ThreatStream. Approval is not
- required for the imported data. You must have the Approve Intel user permission
- to import without approval using the API.
+ description: Imports indicators (observables) into ThreatStream. Approval is not required for the imported data. You must have the Approve Intel user permission to import without approval using the API.
execution: false
name: threatstream-import-indicator-without-approval
- arguments:
- auto: PREDEFINED
default: false
- description: The threat model of the returned list. Can be "actor", "campaign",
- "incident", "signature", "ttp", "vulnerability", or "tipreport".
+ description: The threat model of the returned list. Can be "actor", "campaign", "incident", "signature", "ttp", "vulnerability", or "tipreport".
isArray: false
name: model
predefined:
@@ -758,9 +712,7 @@ script:
secret: false
- default: false
defaultValue: '50'
- description: Limits the list of models size. Specifying limit=0 returns up to
- a maximum of 1000 models. In case of limit=0 the output won't be set in the
- context.
+ description: Limits the list of models size. Specifying limit=0 returns up to a maximum of 1000 models. In case of limit=0 the output won't be set in the context.
isArray: false
name: limit
required: false
@@ -780,15 +732,12 @@ script:
description: The ID of the threat model.
type: String
- contextPath: ThreatStream.List.CreatedTime
- description: 'The date and time of threat model creation. The date format is:
- YYYYMMDDThhmmss, where "T" denotes the start of the value for time, in UTC
- time.'
+ description: 'The date and time of threat model creation. The date format is: YYYYMMDDThhmmss, where "T" denotes the start of the value for time, in UTC time.'
type: String
- arguments:
- auto: PREDEFINED
default: false
- description: The threat model. Can be "actor", "campaign", "incident", "signature",
- "ttp", "vulnerability", or "tipreport".
+ description: The threat model. Can be "actor", "campaign", "incident", "signature", "ttp", "vulnerability", or "tipreport".
isArray: false
name: model
predefined:
@@ -821,8 +770,7 @@ script:
- arguments:
- auto: PREDEFINED
default: false
- description: The threat model. Can be "actor", "campaign", "incident", "signature",
- "ttp", "vulnerability", or "tipreport".
+ description: The threat model. Can be "actor", "campaign", "incident", "signature", "ttp", "vulnerability", or "tipreport".
isArray: false
name: model
predefined:
@@ -849,8 +797,7 @@ script:
required: false
secret: false
deprecated: false
- description: Returns a list of indicators associated with the specified model
- and ID of the model.
+ description: Returns a list of indicators associated with the specified model and ID of the model.
execution: false
name: threatstream-get-indicators-by-model
outputs:
@@ -879,8 +826,7 @@ script:
description: The country of the indicator associated with the specified model
type: String
- contextPath: ThreatStream.Model.Indicators.Organization
- description: The organization of the indicator associated with the specified
- model.
+ description: The organization of the indicator associated with the specified model.
type: String
- contextPath: ThreatStream.Model.Indicators.ASN
description: The ASN of the indicator associated with the specified model.
@@ -904,8 +850,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: private
- description: Classification of the Sandbox submission. Can be "private" or "public".
- Default is "private".
+ description: Classification of the Sandbox submission. Can be "private" or "public". Default is "private".
isArray: false
name: submission_classification
predefined:
@@ -916,9 +861,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: WINDOWS7
- description: The platform on which the submitted URL or file will be run. To
- obtain a list supported platforms run the threatstream-supported-platforms
- command. Can be "WINDOWS7", or "WINDOWSXP". Default is "WINDOWS7".
+ description: The platform on which the submitted URL or file will be run. To obtain a list supported platforms run the threatstream-supported-platforms command. Can be "WINDOWS7", or "WINDOWSXP". Default is "WINDOWS7".
isArray: false
name: report_platform
predefined:
@@ -938,8 +881,7 @@ script:
required: true
secret: false
- default: false
- description: The submission value. Possible values are a valid URL or a file
- ID that was uploaded to the War Room to detonate.
+ description: The submission value. Possible values are a valid URL or a file ID that was uploaded to the War Room to detonate.
isArray: false
name: submission_value
required: true
@@ -947,8 +889,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'false'
- description: Whether the premium sandbox should be used for detonation. Default
- is "false".
+ description: Whether the premium sandbox should be used for detonation. Default is "false".
isArray: false
name: premium_sandbox
predefined:
@@ -957,8 +898,7 @@ script:
required: false
secret: false
- default: false
- description: A comma separated list of additional details for the indicator.
- This information is displayed in the Tag column of the ThreatStream UI.
+ description: A comma separated list of additional details for the indicator. This information is displayed in the Tag column of the ThreatStream UI.
isArray: false
name: detail
required: false
@@ -985,8 +925,7 @@ script:
required: true
secret: false
deprecated: false
- description: Returns the current status of the report that was submitted to the
- sandbox. The report ID is returned from the threatstream-submit-to-sandbox command.
+ description: Returns the current status of the report that was submitted to the sandbox. The report ID is returned from the threatstream-submit-to-sandbox command.
execution: false
name: threatstream-get-analysis-status
outputs:
@@ -994,15 +933,13 @@ script:
description: The report ID of the file or URL that was detonated to sandbox.
type: String
- contextPath: ThreatStream.Analysis.Status
- description: The report status of the file or URL that was detonated in the
- sandbox.
+ description: The report status of the file or URL that was detonated in the sandbox.
type: String
- contextPath: ThreatStream.Analysis.Platform
description: The platform that was used for detonation.
type: String
- contextPath: ThreatStream.Analysis.Verdict
- description: The report verdict of the file or URL that was detonated in the
- sandbox. The verdict will remain "benign" until detonation is complete.
+ description: The report verdict of the file or URL that was detonated in the sandbox. The verdict will remain "benign" until detonation is complete.
type: String
- arguments:
- default: false
@@ -1090,8 +1027,7 @@ script:
type: String
- arguments:
- default: false
- description: The Anomali Observable Search Filter Language query to filter indicator
- results. If a query is passed as an argument, it overrides all other arguments.
+ description: The Anomali Observable Search Filter Language query to filter indicator results. If a query is passed as an argument, it overrides all other arguments.
isArray: false
name: query
required: false
@@ -1134,8 +1070,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Whether the classification of the indicator is public. Default
- is "false".
+ description: Whether the classification of the indicator is public. Default is "false".
isArray: false
name: is_public
predefined:
@@ -1150,16 +1085,14 @@ script:
required: false
secret: false
- default: false
- description: The registered owner (organization) of the IP address associated
- with the indicator.
+ description: The registered owner (organization) of the IP address associated with the indicator.
isArray: false
name: org
required: false
secret: false
- auto: PREDEFINED
default: false
- description: The status assigned to the indicator. Can be "active", "inactive",
- or "falsepos".
+ description: The status assigned to the indicator. Can be "active", "inactive", or "falsepos".
isArray: false
name: status
predefined:
@@ -1176,8 +1109,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: The type of indicator. Can be "domain", "email", "ip", "MD5", "string",
- or "url".
+ description: The type of indicator. Can be "domain", "email", "ip", "MD5", "string", or "url".
isArray: false
name: type
predefined:
@@ -1197,15 +1129,13 @@ script:
secret: false
- default: false
defaultValue: '20'
- description: The maximum number of results to return from ThreatStream. Default
- is 20.
+ description: The maximum number of results to return from ThreatStream. Default is 20.
isArray: false
name: limit
required: false
secret: false
deprecated: false
- description: Return filtered indicators from ThreatStream. If a query is defined,
- it overrides all other arguments that were passed to the command.
+ description: Return filtered indicators from ThreatStream. If a query is defined, it overrides all other arguments that were passed to the command.
execution: false
name: threatstream-get-indicators
outputs:
@@ -1227,8 +1157,7 @@ script:
description: The indicator status.
type: String
- contextPath: ThreatStream.Indicators.Organization
- description: The registered owner (organization) of the IP address associated
- with the indicator.
+ description: The registered owner (organization) of the IP address associated with the indicator.
type: String
- contextPath: ThreatStream.Indicators.Country
description: The country associated with the indicator.
@@ -1252,9 +1181,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: intelligence
- description: The type of threat model entity on which to add the tag. Can be
- "actor", "campaign", "incident", "intelligence", "signature", "tipreport",
- "ttp", or "vulnerability". Default is "intelligence" (indicator).
+ description: The type of threat model entity on which to add the tag. Can be "actor", "campaign", "incident", "intelligence", "signature", "tipreport", "ttp", or "vulnerability". Default is "intelligence" (indicator).
isArray: false
name: model
predefined:
@@ -1269,8 +1196,7 @@ script:
required: false
secret: false
- default: false
- description: 'A comma separated list of tags applied to the specified threat
- model entities or observable. '
+ description: 'A comma separated list of tags applied to the specified threat model entities or observable. '
isArray: true
name: tags
required: true
@@ -1288,8 +1214,7 @@ script:
- arguments:
- auto: PREDEFINED
default: false
- description: The type of threat model to create. Can be "actor", "campaign",
- "incident", "ttp", "vulnerability", or "tipreport".
+ description: The type of threat model to create. Can be "actor", "campaign", "incident", "ttp", "vulnerability", or "tipreport".
isArray: false
name: model
predefined:
@@ -1321,8 +1246,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: red
- description: The Traffic Light Protocol designation for the threat model. Can
- be "red", "amber", "green", or "white".
+ description: The Traffic Light Protocol designation for the threat model. Can be "red", "amber", "green", or "white".
isArray: false
name: tlp
predefined:
@@ -1339,8 +1263,7 @@ script:
required: false
secret: false
- default: false
- description: A comma separated list of indicators IDs associated with the threat
- model on the ThreatStream platform.
+ description: A comma separated list of indicators IDs associated with the threat model on the ThreatStream platform.
isArray: false
name: intelligence
required: false
@@ -1381,8 +1304,7 @@ script:
description: The country of the indicator associated with the specified model
type: String
- contextPath: ThreatStream.Model.Indicators.Organization
- description: The organization of the indicator associated with the specified
- model.
+ description: The organization of the indicator associated with the specified model.
type: String
- contextPath: ThreatStream.Model.Indicators.ASN
description: The ASN of the indicator associated with the specified model.
@@ -1405,8 +1327,7 @@ script:
- arguments:
- auto: PREDEFINED
default: false
- description: The type of threat model to update. Can be "actor", "campaign",
- "incident", "ttp", "vulnerability", or "tipreport".
+ description: The type of threat model to update. Can be "actor", "campaign", "incident", "ttp", "vulnerability", or "tipreport".
isArray: false
name: model
predefined:
@@ -1444,8 +1365,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: red
- description: The Traffic Light Protocol designation for the threat model. Can
- be "red", "amber", "green", or "white".
+ description: The Traffic Light Protocol designation for the threat model. Can be "red", "amber", "green", or "white".
isArray: false
name: tlp
predefined:
@@ -1462,8 +1382,7 @@ script:
required: false
secret: false
- default: false
- description: A comma separated list of indicators IDs associated with the threat
- model on the ThreatStream platform.
+ description: A comma separated list of indicators IDs associated with the threat model on the ThreatStream platform.
isArray: false
name: intelligence
required: false
@@ -1475,8 +1394,7 @@ script:
required: false
secret: false
deprecated: false
- description: Updates a threat model with specific parameters. If one or more optional
- parameters are defined, the command overrides previous data stored in ThreatStream.
+ description: Updates a threat model with specific parameters. If one or more optional parameters are defined, the command overrides previous data stored in ThreatStream.
execution: false
name: threatstream-update-model
outputs:
@@ -1505,8 +1423,7 @@ script:
description: The country of the indicator associated with the specified model.
type: String
- contextPath: ThreatStream.Model.Indicators.Organization
- description: The organization of the indicator associated with the specified
- model.
+ description: The organization of the indicator associated with the specified model.
type: String
- contextPath: ThreatStream.Model.Indicators.ASN
description: The ASN of the indicator associated with the specified model.
@@ -1570,9 +1487,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: If the severity is greater than or equal to the threshold, the
- URL is considered as malicious. This argument will override the default threshold
- defined as a parameter. Can be "low", "medium", "high", or "very-high".
+ description: If the severity is greater than or equal to the threshold, the URL is considered as malicious. This argument will override the default threshold defined as a parameter. Can be "low", "medium", "high", or "very-high".
isArray: false
name: threshold
predefined:
@@ -1585,8 +1500,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: 'False'
- description: Whether to include results with an inactive status. Default is
- "False".
+ description: Whether to include results with an inactive status. Default is "False".
isArray: false
name: include_inactive
predefined:
@@ -1623,15 +1537,13 @@ script:
for time, in UTC time.
type: String
- contextPath: ThreatStream.URL.Confidence
- description: The observable certainty level of a reported indicator type. Confidence
- score can range from 0-100, in increasing order of confidence.
+ description: The observable certainty level of a reported indicator type. Confidence score can range from 0-100, in increasing order of confidence.
type: String
- contextPath: ThreatStream.URL.Status
description: The status of the indicator.
type: String
- contextPath: ThreatStream.URL.Organization
- description: The name of the business that owns the IP address associated with
- the indicator.
+ description: The name of the business that owns the IP address associated with the indicator.
type: String
- contextPath: ThreatStream.URL.Address
description: The URL of the indicator.
@@ -1654,7 +1566,7 @@ script:
- contextPath: URL.Tags
description: (List) Tags of the URL.
type: Unknown
- dockerimage: demisto/py3-tools:0.0.1.26536
+ dockerimage: demisto/py3-tools:1.0.0.43697
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_22.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_22.md
new file mode 100644
index 000000000000..3e8a77b6f46a
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_22.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Anomali ThreatStream v3
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.41748*.
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_23.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_23.md
new file mode 100644
index 000000000000..56bee4e6cb30
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_23.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Anomali ThreatStream v3
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.43697*.
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_24.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_24.md
new file mode 100644
index 000000000000..57f00399c5e4
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_24.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### Anomali ThreatStream v2
+- Added the *Username* and *API Key* integration parameters to support credentials fetching object.
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_25.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_25.md
new file mode 100644
index 000000000000..b50af6eb19d4
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_25.md
@@ -0,0 +1,8 @@
+
+#### Integrations
+##### Anomali ThreatStream v2 (Deprecated)
+- Deprecated. Use Anomali ThreatStream v3 integration instead.
+##### Anomali ThreatStream v3
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.45904*.
+- Added the *import_indicators* to the ***threatstream-submit-to-sandbox*** command.
+- Changed the `password` parameter display name to `API Key`.
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_26.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_26.md
new file mode 100644
index 000000000000..731563d9bd2b
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_26.md
@@ -0,0 +1,4 @@
+
+#### Integrations
+##### Anomali ThreatStream v3
+- Note: Organized the the integrations' parameters by sections. Relevant for XSIAM and XSOAR 8.1 and above.
\ No newline at end of file
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_27.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_27.md
new file mode 100644
index 000000000000..93734edcde6f
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_27.md
@@ -0,0 +1,5 @@
+
+#### Integrations
+##### Anomali ThreatStream v3
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.47433*.
+- Fixed an issue in the ***threatstream-search-intelligence*** command where specifying *gt* or *lt* for the *confidence* argument did not work properly.
\ No newline at end of file
diff --git a/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_28.md b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_28.md
new file mode 100644
index 000000000000..4db7ee3d30b1
--- /dev/null
+++ b/Packs/Anomali_ThreatStream/ReleaseNotes/2_0_28.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### Anomali ThreatStream v3
+- Updated the Docker image to: *demisto/py3-tools:1.0.0.48698*.
+- Fixed an issue where the ***threatstream-import-indicator-without-approval*** command deleted data from metadata of the file.
+- Fixed an issue where the ***threatstream-import-indicator-without-approval*** command did not convert the `confidence` and `source_confidence_weight` arguments to numeric.
\ No newline at end of file
diff --git a/Packs/Anomali_ThreatStream/pack_metadata.json b/Packs/Anomali_ThreatStream/pack_metadata.json
index 3259ebab9150..38e15cc87d3f 100644
--- a/Packs/Anomali_ThreatStream/pack_metadata.json
+++ b/Packs/Anomali_ThreatStream/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Anomali ThreatStream",
"description": "Use Anomali ThreatStream to query and submit threats.",
"support": "xsoar",
- "currentVersion": "2.0.21",
+ "currentVersion": "2.0.28",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleAlibabaCloud/Integrations/AnsibleAlibabaCloud/README.md b/Packs/AnsibleAlibabaCloud/Integrations/AnsibleAlibabaCloud/README.md
index b9505148e502..b58e49a6a5f6 100644
--- a/Packs/AnsibleAlibabaCloud/Integrations/AnsibleAlibabaCloud/README.md
+++ b/Packs/AnsibleAlibabaCloud/Integrations/AnsibleAlibabaCloud/README.md
@@ -349,7 +349,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -359,4 +359,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleAlibabaCloud/ReleaseNotes/1_0_4.md b/Packs/AnsibleAlibabaCloud/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleAlibabaCloud/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleAlibabaCloud/pack_metadata.json b/Packs/AnsibleAlibabaCloud/pack_metadata.json
index 9df7934a7905..dbd9cb2030a0 100644
--- a/Packs/AnsibleAlibabaCloud/pack_metadata.json
+++ b/Packs/AnsibleAlibabaCloud/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Alibaba Cloud",
"description": "Manage and control Alibaba Cloud Compute services.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleAzure/Integrations/AnsibleAzure/README.md b/Packs/AnsibleAzure/Integrations/AnsibleAzure/README.md
index 55f85ce0fb6f..b7e91fcb2365 100644
--- a/Packs/AnsibleAzure/Integrations/AnsibleAzure/README.md
+++ b/Packs/AnsibleAzure/Integrations/AnsibleAzure/README.md
@@ -6127,7 +6127,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -6137,4 +6137,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleAzure/ReleaseNotes/1_0_4.md b/Packs/AnsibleAzure/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleAzure/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleAzure/pack_metadata.json b/Packs/AnsibleAzure/pack_metadata.json
index efdb18293059..5ad638fec5cb 100644
--- a/Packs/AnsibleAzure/pack_metadata.json
+++ b/Packs/AnsibleAzure/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Azure",
"description": "Manage and control Azure services.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleCiscoIOS/Integrations/AnsibleCiscoIOS/README.md b/Packs/AnsibleCiscoIOS/Integrations/AnsibleCiscoIOS/README.md
index 2be53b402752..b6c37496c84d 100644
--- a/Packs/AnsibleCiscoIOS/Integrations/AnsibleCiscoIOS/README.md
+++ b/Packs/AnsibleCiscoIOS/Integrations/AnsibleCiscoIOS/README.md
@@ -1891,7 +1891,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -1901,4 +1901,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleCiscoIOS/ReleaseNotes/1_0_4.md b/Packs/AnsibleCiscoIOS/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleCiscoIOS/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleCiscoIOS/pack_metadata.json b/Packs/AnsibleCiscoIOS/pack_metadata.json
index 3fbcbc450296..6be636abcdac 100644
--- a/Packs/AnsibleCiscoIOS/pack_metadata.json
+++ b/Packs/AnsibleCiscoIOS/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Cisco IOS",
"description": "Manage and control Cisco IOS based network devices.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleCiscoNXOS/.secrets-ignore b/Packs/AnsibleCiscoNXOS/.secrets-ignore
index 790edf8b882d..f91a933ddc8a 100644
--- a/Packs/AnsibleCiscoNXOS/.secrets-ignore
+++ b/Packs/AnsibleCiscoNXOS/.secrets-ignore
@@ -2,3 +2,4 @@
192.168.3.0
11:11:11:11:11:11:11:11
1.1.1
+Chicago
\ No newline at end of file
diff --git a/Packs/AnsibleCiscoNXOS/Integrations/AnsibleCiscoNXOS/README.md b/Packs/AnsibleCiscoNXOS/Integrations/AnsibleCiscoNXOS/README.md
index c1d52f5882cb..42f0d10dfbec 100644
--- a/Packs/AnsibleCiscoNXOS/Integrations/AnsibleCiscoNXOS/README.md
+++ b/Packs/AnsibleCiscoNXOS/Integrations/AnsibleCiscoNXOS/README.md
@@ -4289,7 +4289,7 @@ Creates a Virtual Network Identifier member (VNI)
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -4299,4 +4299,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleCiscoNXOS/ReleaseNotes/1_0_4.md b/Packs/AnsibleCiscoNXOS/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleCiscoNXOS/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleCiscoNXOS/pack_metadata.json b/Packs/AnsibleCiscoNXOS/pack_metadata.json
index 94e5cd4fb13c..aba130d909ca 100644
--- a/Packs/AnsibleCiscoNXOS/pack_metadata.json
+++ b/Packs/AnsibleCiscoNXOS/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Cisco NXOS",
"description": "Manage and control Cisco NXOS based network devices.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleHetznerCloud/Integrations/AnsibleHCloud/README.md b/Packs/AnsibleHetznerCloud/Integrations/AnsibleHCloud/README.md
index 1201e3b29838..0b137859befd 100644
--- a/Packs/AnsibleHetznerCloud/Integrations/AnsibleHCloud/README.md
+++ b/Packs/AnsibleHetznerCloud/Integrations/AnsibleHCloud/README.md
@@ -1135,7 +1135,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -1145,4 +1145,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleHetznerCloud/ReleaseNotes/1_0_4.md b/Packs/AnsibleHetznerCloud/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleHetznerCloud/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleHetznerCloud/pack_metadata.json b/Packs/AnsibleHetznerCloud/pack_metadata.json
index 1638d374e193..449fc7ee09b0 100644
--- a/Packs/AnsibleHetznerCloud/pack_metadata.json
+++ b/Packs/AnsibleHetznerCloud/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Hetzner Cloud",
"description": "Manage and control Hetzner Cloud services.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleKubernetes/Integrations/AnsibleKubernetes/README.md b/Packs/AnsibleKubernetes/Integrations/AnsibleKubernetes/README.md
index 1bcd258793a0..daecd184c76c 100644
--- a/Packs/AnsibleKubernetes/Integrations/AnsibleKubernetes/README.md
+++ b/Packs/AnsibleKubernetes/Integrations/AnsibleKubernetes/README.md
@@ -828,7 +828,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -838,4 +838,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleKubernetes/ReleaseNotes/1_0_4.md b/Packs/AnsibleKubernetes/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleKubernetes/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleKubernetes/pack_metadata.json b/Packs/AnsibleKubernetes/pack_metadata.json
index 1382ec2d0dc7..25b27d774118 100644
--- a/Packs/AnsibleKubernetes/pack_metadata.json
+++ b/Packs/AnsibleKubernetes/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Kubernetes",
"description": "Manage and control Kubernetes clusters.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleLinux/Integrations/AnsibleACME/README.md b/Packs/AnsibleLinux/Integrations/AnsibleACME/README.md
index 730e9d866077..eff2a201c210 100644
--- a/Packs/AnsibleLinux/Integrations/AnsibleACME/README.md
+++ b/Packs/AnsibleLinux/Integrations/AnsibleACME/README.md
@@ -551,7 +551,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -561,4 +561,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleLinux/Integrations/AnsibleDNS/README.md b/Packs/AnsibleLinux/Integrations/AnsibleDNS/README.md
index 80e68799c805..81c808d5f538 100644
--- a/Packs/AnsibleLinux/Integrations/AnsibleDNS/README.md
+++ b/Packs/AnsibleLinux/Integrations/AnsibleDNS/README.md
@@ -147,7 +147,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -157,4 +157,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleLinux/Integrations/AnsibleLinux/README.md b/Packs/AnsibleLinux/Integrations/AnsibleLinux/README.md
index ed6275a01aae..fc0086c4d0d4 100644
--- a/Packs/AnsibleLinux/Integrations/AnsibleLinux/README.md
+++ b/Packs/AnsibleLinux/Integrations/AnsibleLinux/README.md
@@ -8272,7 +8272,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -8282,4 +8282,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleLinux/Integrations/AnsibleOpenSSL/README.md b/Packs/AnsibleLinux/Integrations/AnsibleOpenSSL/README.md
index 1f6b115511f8..92ac34403c3c 100644
--- a/Packs/AnsibleLinux/Integrations/AnsibleOpenSSL/README.md
+++ b/Packs/AnsibleLinux/Integrations/AnsibleOpenSSL/README.md
@@ -1187,7 +1187,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -1197,4 +1197,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleLinux/ReleaseNotes/1_0_7.md b/Packs/AnsibleLinux/ReleaseNotes/1_0_7.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleLinux/ReleaseNotes/1_0_7.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleLinux/pack_metadata.json b/Packs/AnsibleLinux/pack_metadata.json
index 6be8313283d1..f83ec8a84e8c 100644
--- a/Packs/AnsibleLinux/pack_metadata.json
+++ b/Packs/AnsibleLinux/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Linux",
"description": "Manage and control Linux hosts.",
"support": "xsoar",
- "currentVersion": "1.0.6",
+ "currentVersion": "1.0.7",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleMicrosoftWindows/Integrations/AnsibleMicrosoftWindows/README.md b/Packs/AnsibleMicrosoftWindows/Integrations/AnsibleMicrosoftWindows/README.md
index 7ec1f726d27b..b1ea1cedddc3 100644
--- a/Packs/AnsibleMicrosoftWindows/Integrations/AnsibleMicrosoftWindows/README.md
+++ b/Packs/AnsibleMicrosoftWindows/Integrations/AnsibleMicrosoftWindows/README.md
@@ -7074,7 +7074,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -7084,4 +7084,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleMicrosoftWindows/ReleaseNotes/1_0_6.md b/Packs/AnsibleMicrosoftWindows/ReleaseNotes/1_0_6.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleMicrosoftWindows/ReleaseNotes/1_0_6.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleMicrosoftWindows/pack_metadata.json b/Packs/AnsibleMicrosoftWindows/pack_metadata.json
index 4e3942f150b5..985c5ad35143 100644
--- a/Packs/AnsibleMicrosoftWindows/pack_metadata.json
+++ b/Packs/AnsibleMicrosoftWindows/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Microsoft Windows",
"description": "Manage and control Windows hosts.",
"support": "xsoar",
- "currentVersion": "1.0.5",
+ "currentVersion": "1.0.6",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleTower/Integrations/AnsibleTower/AnsibleTower.yml b/Packs/AnsibleTower/Integrations/AnsibleTower/AnsibleTower.yml
index 5373c5425951..c384215708e0 100644
--- a/Packs/AnsibleTower/Integrations/AnsibleTower/AnsibleTower.yml
+++ b/Packs/AnsibleTower/Integrations/AnsibleTower/AnsibleTower.yml
@@ -1655,7 +1655,7 @@ script:
- contextPath: AnsibleAWX.JobEvents.event_data
description: Job's raw event data
type: String
- dockerimage: demisto/python3:3.10.8.39276
+ dockerimage: demisto/python3:3.10.10.48392
feed: false
isfetch: false
longRunning: false
diff --git a/Packs/AnsibleTower/ReleaseNotes/1_0_27.md b/Packs/AnsibleTower/ReleaseNotes/1_0_27.md
new file mode 100644
index 000000000000..d94be062d8cf
--- /dev/null
+++ b/Packs/AnsibleTower/ReleaseNotes/1_0_27.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Ansible Tower
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AnsibleTower/ReleaseNotes/1_0_28.md b/Packs/AnsibleTower/ReleaseNotes/1_0_28.md
new file mode 100644
index 000000000000..55c7b2068456
--- /dev/null
+++ b/Packs/AnsibleTower/ReleaseNotes/1_0_28.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Ansible Tower
+- Updated the Docker image to: *demisto/python3:3.10.9.42476*.
diff --git a/Packs/AnsibleTower/ReleaseNotes/1_0_29.md b/Packs/AnsibleTower/ReleaseNotes/1_0_29.md
new file mode 100644
index 000000000000..0fd35c2949fe
--- /dev/null
+++ b/Packs/AnsibleTower/ReleaseNotes/1_0_29.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Ansible Tower
+- Updated the Docker image to: *demisto/python3:3.10.9.46032*.
diff --git a/Packs/AnsibleTower/ReleaseNotes/1_0_30.md b/Packs/AnsibleTower/ReleaseNotes/1_0_30.md
new file mode 100644
index 000000000000..26e0632fc438
--- /dev/null
+++ b/Packs/AnsibleTower/ReleaseNotes/1_0_30.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Ansible Tower
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AnsibleTower/TestPlaybooks/AnsibleTower-_Test_playbook.yml b/Packs/AnsibleTower/TestPlaybooks/AnsibleTower-_Test_playbook.yml
index 1ac136351678..d37fd4d40eb9 100644
--- a/Packs/AnsibleTower/TestPlaybooks/AnsibleTower-_Test_playbook.yml
+++ b/Packs/AnsibleTower/TestPlaybooks/AnsibleTower-_Test_playbook.yml
@@ -137,3 +137,5 @@ inputs:
playbookInputQuery: null
outputs: []
fromversion: 5.0.0
+marketplaces:
+ - xsoar
diff --git a/Packs/AnsibleTower/pack_metadata.json b/Packs/AnsibleTower/pack_metadata.json
index ef5f1a5aecc9..19ba7e8a745e 100644
--- a/Packs/AnsibleTower/pack_metadata.json
+++ b/Packs/AnsibleTower/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible Tower",
"description": "Scale IT automation, manage complex deployments and speed productivity.",
"support": "xsoar",
- "currentVersion": "1.0.26",
+ "currentVersion": "1.0.30",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AnsibleVMware/Integrations/AnsibleVMware/README.md b/Packs/AnsibleVMware/Integrations/AnsibleVMware/README.md
index 8e9d179b8e7c..92fda203da6a 100644
--- a/Packs/AnsibleVMware/Integrations/AnsibleVMware/README.md
+++ b/Packs/AnsibleVMware/Integrations/AnsibleVMware/README.md
@@ -9189,7 +9189,7 @@ Further documentation available at https://docs.ansible.com/ansible/2.9/modules/
### Troubleshooting
The Ansible-Runner container is not suitable for running as a non-root user.
-Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide).
+Therefore, the Ansible integrations will fail if you follow the instructions in the Cortex XSOAR [Docker Hardening Guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Docker-Hardening-Guide).
The `docker.run.internal.asuser` server configuration causes the software that is run inside of the Docker containers utilized by Cortex XSOAR to run as a non-root user account inside the container.
@@ -9199,4 +9199,4 @@ This is a limitation of the Ansible-Runner software itself https://github.com/an
A workaround is to use the `docker.run.internal.asuser.ignore` server setting and to configure Cortex XSOAR to ignore the Ansible container image by setting the value of `demisto/ansible-runner` and afterwards running /reset_containers to reload any containers that might be running to ensure they receive the configuration.
-See step 2 of this [guide](https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-5/cortex-xsoar-admin/docker/docker-hardening-guide/run-docker-with-non-root-internal-users) for complete instructions.
+See step 2 of this [guide](https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Run-Docker-with-Non-Root-Internal-Users) for complete instructions.
diff --git a/Packs/AnsibleVMware/ReleaseNotes/1_0_4.md b/Packs/AnsibleVMware/ReleaseNotes/1_0_4.md
new file mode 100644
index 000000000000..c98d7ff0fc60
--- /dev/null
+++ b/Packs/AnsibleVMware/ReleaseNotes/1_0_4.md
@@ -0,0 +1 @@
+Documentation and metadata improvements.
\ No newline at end of file
diff --git a/Packs/AnsibleVMware/pack_metadata.json b/Packs/AnsibleVMware/pack_metadata.json
index 2ba83fb76251..daa426527d5f 100644
--- a/Packs/AnsibleVMware/pack_metadata.json
+++ b/Packs/AnsibleVMware/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Ansible VMware",
"description": "Manage and control VMware virtualisation hosts.",
"support": "xsoar",
- "currentVersion": "1.0.3",
+ "currentVersion": "1.0.4",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Ansible_Powered_Integrations/.secrets-ignore b/Packs/Ansible_Powered_Integrations/.secrets-ignore
index 51b50dd904f7..a27530798d4e 100644
--- a/Packs/Ansible_Powered_Integrations/.secrets-ignore
+++ b/Packs/Ansible_Powered_Integrations/.secrets-ignore
@@ -7,4 +7,5 @@ https://www.rfc-editor.org
http://partnerweb.vmware.com
1.3.6.1
4.1.1.0
-4.1.1.1
\ No newline at end of file
+4.1.1.1
+Chicago
\ No newline at end of file
diff --git a/Packs/Ansible_Powered_Integrations/Integrations/ACME/ACME.yml b/Packs/Ansible_Powered_Integrations/Integrations/ACME/ACME.yml
index 7e4aceabe801..a8ad6f81dab6 100644
--- a/Packs/Ansible_Powered_Integrations/Integrations/ACME/ACME.yml
+++ b/Packs/Ansible_Powered_Integrations/Integrations/ACME/ACME.yml
@@ -3,21 +3,18 @@ commonfields:
id: ACME
version: -1
configuration:
-- additionalinfo: The credentials to associate with the instance. SSH keys can be
- configured using the credential manager.
+- additionalinfo: The credentials to associate with the instance. SSH keys can be configured using the credential manager, under the Certificate field.
display: Username
name: creds
required: true
type: 9
-- additionalinfo: The default port to use if one is not specified in the commands
- `host` argument.
+- additionalinfo: The default port to use if one is not specified in the commands `host` argument.
defaultvalue: "22"
display: Default SSH Port
name: port
required: true
type: 0
-- additionalinfo: If multiple hosts are specified in a command, how many hosts should
- be interacted with concurrently.
+- additionalinfo: If multiple hosts are specified in a command, how many hosts should be interacted with concurrently.
defaultvalue: "4"
display: Concurrecy Factor
name: concurrency
@@ -29,9 +26,7 @@ name: ACME
script:
commands:
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -87,9 +82,7 @@ script:
`Warning`: the content will be written into a temporary file, which will be deleted by Ansible when the module completes. Since this is an important private key — it can be used to change the account key, or to revoke your certificates without knowing their private keys —, this might not be acceptable.
In case `cryptography` is used, the content is not written into a temporary file. It can still happen that it is written to disk by Ansible in the process of moving the module with its argument to the node where it is executed.
name: account_key_content
- - description: If specified, assumes that the account URI is as given. If the
- account key does not match this account, or an account with this URI does
- not exist, the module fails.
+ - description: If specified, assumes that the account URI is as given. If the account key does not match this account, or an account with this URI does not exist, the module fails.
name: account_uri
- auto: PREDEFINED
defaultValue: "1"
@@ -129,9 +122,7 @@ script:
description: Create, modify or delete ACME accounts
name: acme-account
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -159,9 +150,7 @@ script:
`Warning`: the content will be written into a temporary file, which will be deleted by Ansible when the module completes. Since this is an important private key — it can be used to change the account key, or to revoke your certificates without knowing their private keys —, this might not be acceptable.
In case `cryptography` is used, the content is not written into a temporary file. It can still happen that it is written to disk by Ansible in the process of moving the module with its argument to the node where it is executed.
name: account_key_content
- - description: If specified, assumes that the account URI is as given. If the
- account key does not match this account, or an account with this URI does
- not exist, the module fails.
+ - description: If specified, assumes that the account URI is as given. If the account key does not match this account, or an account with this URI does not exist, the module fails.
name: account_uri
- auto: PREDEFINED
defaultValue: "1"
@@ -207,9 +196,7 @@ script:
description: the account's status
type: string
- contextPath: ACME.account.orders
- description: '[''A URL where a list of orders can be retrieved for this account.'',
- ''Use the I(retrieve_orders) option to query this URL and retrieve the complete
- list of orders.'']'
+ description: '[''A URL where a list of orders can be retrieved for this account.'', ''Use the I(retrieve_orders) option to query this URL and retrieve the complete list of orders.'']'
type: string
- contextPath: ACME.account.public_account_key
description: the public account key as a L(JSON Web Key,https://tools.ietf.org/html/rfc7517).
@@ -218,26 +205,18 @@ script:
description: The order's status.
type: string
- contextPath: ACME.orders.expires
- description: '[''When the order expires.'', ''Timestamp should be formatted
- as described in RFC3339.'', ''Only required to be included in result when
- I(status) is C(pending) or C(valid).'']'
+ description: '[''When the order expires.'', ''Timestamp should be formatted as described in RFC3339.'', ''Only required to be included in result when I(status) is C(pending) or C(valid).'']'
type: string
- contextPath: ACME.orders.identifiers
description: '[''List of identifiers this order is for.'']'
- contextPath: ACME.orders.notBefore
- description: '[''The requested value of the C(notBefore) field in the certificate.'',
- ''Date should be formatted as described in RFC3339.'', ''Server is not required
- to return this.'']'
+ description: '[''The requested value of the C(notBefore) field in the certificate.'', ''Date should be formatted as described in RFC3339.'', ''Server is not required to return this.'']'
type: string
- contextPath: ACME.orders.notAfter
- description: '[''The requested value of the C(notAfter) field in the certificate.'',
- ''Date should be formatted as described in RFC3339.'', ''Server is not required
- to return this.'']'
+ description: '[''The requested value of the C(notAfter) field in the certificate.'', ''Date should be formatted as described in RFC3339.'', ''Server is not required to return this.'']'
type: string
- contextPath: ACME.orders.error
- description: '[''In case an error occurred during processing, this contains
- information about the error.'', ''The field is structured as a problem document
- (RFC7807).'']'
+ description: '[''In case an error occurred during processing, this contains information about the error.'', ''The field is structured as a problem document (RFC7807).'']'
- contextPath: ACME.orders.authorizations
description: '[''A list of URLs for authorizations for this order.'']'
- contextPath: ACME.orders.finalize
@@ -247,9 +226,7 @@ script:
description: '[''The URL for retrieving the certificate.'']'
type: string
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -305,8 +282,7 @@ script:
The destination file for the full chain (i.e. certificate followed by chain of intermediate certificates).
Required if `dest` is not specified.
name: fullchain_dest
- - description: If specified, the intermediate certificate will be written to this
- file.
+ - description: If specified, the intermediate certificate will be written to this file.
name: chain_dest
- defaultValue: "10"
description: |-
@@ -324,10 +300,7 @@ script:
This is especially helpful when having an updated CSR e.g. with additional domains for which a new certificate is desired.
name: force
- defaultValue: "False"
- description: When set to `yes`, will retrieve all alternate chains offered by
- the ACME CA. These will not be written to disk, but will be returned together
- with the main chain as `all_chains`. See the documentation for the `all_chains`
- return value for details.
+ description: When set to `yes`, will retrieve all alternate chains offered by the ACME CA. These will not be written to disk, but will be returned together with the main chain as `all_chains`. See the documentation for the `all_chains` return value for details.
name: retrieve_all_alternates
- description: |-
Path to a file containing the ACME account RSA or Elliptic Curve key.
@@ -342,9 +315,7 @@ script:
`Warning`: the content will be written into a temporary file, which will be deleted by Ansible when the module completes. Since this is an important private key — it can be used to change the account key, or to revoke your certificates without knowing their private keys —, this might not be acceptable.
In case `cryptography` is used, the content is not written into a temporary file. It can still happen that it is written to disk by Ansible in the process of moving the module with its argument to the node where it is executed.
name: account_key_content
- - description: If specified, assumes that the account URI is as given. If the
- account key does not match this account, or an account with this URI does
- not exist, the module fails.
+ - description: If specified, assumes that the account URI is as given. If the account key does not match this account, or an account with this URI does not exist, the module fails.
name: account_uri
- auto: PREDEFINED
defaultValue: "1"
@@ -388,17 +359,10 @@ script:
description: The challenge resource that must be created for validation.
type: string
- contextPath: ACME.challenge_data.resource_original
- description: '[''The original challenge resource including type identifier for
- C(tls-alpn-01) challenges.'']'
+ description: '[''The original challenge resource including type identifier for C(tls-alpn-01) challenges.'']'
type: string
- contextPath: ACME.challenge_data.resource_value
- description: '[''The value the resource has to produce for the validation.'',
- ''For C(http-01) and C(dns-01) challenges, the value can be used as-is.'',
- ''For C(tls-alpn-01) challenges, note that this return value contains a Base64
- encoded version of the correct binary blob which has to be put into the acmeValidation
- x509 extension; see U(https://www.rfc-editor.org/rfc/rfc8737.html#section-3)
- for details. To do this, you might need the C(b64decode) Jinja filter to extract
- the binary blob from this return value.'']'
+ description: '[''The value the resource has to produce for the validation.'', ''For C(http-01) and C(dns-01) challenges, the value can be used as-is.'', ''For C(tls-alpn-01) challenges, note that this return value contains a Base64 encoded version of the correct binary blob which has to be put into the acmeValidation x509 extension; see U(https://www.rfc-editor.org/rfc/rfc8737.html#section-3) for details. To do this, you might need the C(b64decode) Jinja filter to extract the binary blob from this return value.'']'
type: string
- contextPath: ACME.challenge_data.record
description: The full DNS record's name for the challenge.
@@ -407,17 +371,13 @@ script:
description: '[''The leaf certificate itself, in PEM format.'']'
type: string
- contextPath: ACME.all_chains.chain
- description: '[''The certificate chain, excluding the root, as concatenated
- PEM certificates.'']'
+ description: '[''The certificate chain, excluding the root, as concatenated PEM certificates.'']'
type: string
- contextPath: ACME.all_chains.full_chain
- description: '[''The certificate chain, excluding the root, but including the
- leaf certificate, as concatenated PEM certificates.'']'
+ description: '[''The certificate chain, excluding the root, but including the leaf certificate, as concatenated PEM certificates.'']'
type: string
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -450,9 +410,7 @@ script:
One of the revocation reasonCodes defined in `Section 5.3.1 of RFC5280,https://tools.ietf.org/html/rfc5280#section-5.3.1`.
Possible values are `0` (unspecified), `1` (keyCompromise), `2` (cACompromise), `3` (affiliationChanged), `4` (superseded), `5` (cessationOfOperation), `6` (certificateHold), `8` (removeFromCRL), `9` (privilegeWithdrawn), `10` (aACompromise)
name: revoke_reason
- - description: If specified, assumes that the account URI is as given. If the
- account key does not match this account, or an account with this URI does
- not exist, the module fails.
+ - description: If specified, assumes that the account URI is as given. If the account key does not match this account, or an account with this URI does not exist, the module fails.
name: account_uri
- auto: PREDEFINED
defaultValue: "1"
@@ -492,9 +450,7 @@ script:
description: Revoke certificates with the ACME protocol
name: acme-certificate-revoke
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -504,8 +460,7 @@ script:
predefined:
- tls-alpn-01
required: true
- - description: The `challenge_data` entry provided by `acme_certificate` for the
- challenge.
+ - description: The `challenge_data` entry provided by `acme_certificate` for the challenge.
isArray: true
name: challenge_data
required: true
@@ -520,9 +475,7 @@ script:
description: Prepare certificates required for ACME challenges such as C(tls-alpn-01)
name: acme-challenge-cert-helper
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -547,8 +500,7 @@ script:
Required when `method` is `post`, and not allowed otherwise.
name: content
- defaultValue: "True"
- description: If `method` is `post` or `get`, make the module fail in case an
- ACME error is returned.
+ description: If `method` is `post` or `get`, make the module fail in case an ACME error is returned.
name: fail_on_acme_error
- description: |-
Path to a file containing the ACME account RSA or Elliptic Curve key.
@@ -563,9 +515,7 @@ script:
`Warning`: the content will be written into a temporary file, which will be deleted by Ansible when the module completes. Since this is an important private key — it can be used to change the account key, or to revoke your certificates without knowing their private keys —, this might not be acceptable.
In case `cryptography` is used, the content is not written into a temporary file. It can still happen that it is written to disk by Ansible in the process of moving the module with its argument to the node where it is executed.
name: account_key_content
- - description: If specified, assumes that the account URI is as given. If the
- account key does not match this account, or an account with this URI does
- not exist, the module fails.
+ - description: If specified, assumes that the account URI is as given. If the account key does not match this account, or an account with this URI does not exist, the module fails.
name: account_uri
- auto: PREDEFINED
defaultValue: "1"
diff --git a/Packs/Ansible_Powered_Integrations/Integrations/CiscoIOS/CiscoIOS.yml b/Packs/Ansible_Powered_Integrations/Integrations/CiscoIOS/CiscoIOS.yml
index 14a08d0a6884..a32379717399 100644
--- a/Packs/Ansible_Powered_Integrations/Integrations/CiscoIOS/CiscoIOS.yml
+++ b/Packs/Ansible_Powered_Integrations/Integrations/CiscoIOS/CiscoIOS.yml
@@ -3,21 +3,18 @@ commonfields:
id: CiscoIOS
version: -1
configuration:
-- additionalinfo: The credentials to associate with the instance. SSH keys can be
- configured using the credential manager.
+- additionalinfo: The credentials to associate with the instance. SSH keys can be configured using the credential manager, under the Certificate field.
display: Username
name: creds
required: true
type: 9
-- additionalinfo: The default port to use if one is not specified in the commands
- `host` argument.
+- additionalinfo: The default port to use if one is not specified in the commands `host` argument.
defaultvalue: "22"
display: Default SSH Port
name: port
required: true
type: 0
-- additionalinfo: If multiple hosts are specified in a command, how many hosts should
- be interacted with concurrently.
+- additionalinfo: If multiple hosts are specified in a command, how many hosts should be interacted with concurrently.
defaultvalue: "4"
display: Concurrecy Factor
name: concurrency
@@ -29,15 +26,12 @@ name: CiscoIOS
script:
commands:
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
- description: Specifies which banner should be configured on the remote device.
- In Ansible 2.4 and earlier only `login` and `motd` were supported.
+ description: Specifies which banner should be configured on the remote device. In Ansible 2.4 and earlier only `login` and `motd` were supported.
name: banner
predefined:
- login
@@ -46,15 +40,12 @@ script:
- incoming
- slip-ppp
required: true
- - description: The banner text that should be present in the remote device running
- configuration. This argument accepts a multiline string, with no empty lines.
- Requires `state=present`.
+ - description: The banner text that should be present in the remote device running configuration. This argument accepts a multiline string, with no empty lines. Requires `state=present`.
name: text
- auto: PREDEFINED
default: false
defaultValue: present
- description: Specifies whether or not the configuration is present in the current
- devices active running configuration.
+ description: Specifies whether or not the configuration is present in the current devices active running configuration.
name: state
predefined:
- present
@@ -62,9 +53,7 @@ script:
description: Manage multiline banners on Cisco IOS devices
name: ios-banner
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -88,94 +77,52 @@ script:
description: Configure global BGP protocol settings on Cisco IOS.
name: ios-bgp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: List of commands to send to the remote ios device over the configured
- provider. The resulting output from the command is returned. If the `wait_for`
- argument is provided, the module is not returned until the condition is satisfied
- or the number of retries has expired. If a command sent to the device requires
- answering a prompt, it is possible to pass a dict containing `command`, `answer`
- and `prompt`. Common answers are 'y' or "\r" (carriage return, must be double
- quotes). See examples.
+ - description: List of commands to send to the remote ios device over the configured provider. The resulting output from the command is returned. If the `wait_for` argument is provided, the module is not returned until the condition is satisfied or the number of retries has expired. If a command sent to the device requires answering a prompt, it is possible to pass a dict containing `command`, `answer` and `prompt`. Common answers are 'y' or "\r" (carriage return, must be double quotes). See examples.
name: commands
required: true
- - description: List of conditions to evaluate against the output of the command.
- The task will wait for each condition to be true before moving forward. If
- the conditional is not true within the configured number of retries, the task
- fails. See examples.
+ - description: List of conditions to evaluate against the output of the command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of retries, the task fails. See examples.
name: wait_for
- auto: PREDEFINED
default: false
defaultValue: all
- description: The `match` argument is used in conjunction with the `wait_for`
- argument to specify the match policy. Valid values are `all` or `any`. If
- the value is set to `all` then all conditionals in the wait_for must be satisfied. If
- the value is set to `any` then only one of the values must be satisfied.
+ description: The `match` argument is used in conjunction with the `wait_for` argument to specify the match policy. Valid values are `all` or `any`. If the value is set to `all` then all conditionals in the wait_for must be satisfied. If the value is set to `any` then only one of the values must be satisfied.
name: match
predefined:
- any
- all
- default: false
defaultValue: "10"
- description: Specifies the number of retries a command should by tried before
- it is considered failed. The command is run on the target device every retry
- and evaluated against the `wait_for` conditions.
+ description: Specifies the number of retries a command should by tried before it is considered failed. The command is run on the target device every retry and evaluated against the `wait_for` conditions.
name: retries
- default: false
defaultValue: "1"
- description: Configures the interval in seconds to wait between retries of the
- command. If the command does not pass the specified conditions, the interval
- indicates how long to wait before trying the command again.
+ description: Configures the interval in seconds to wait between retries of the command. If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again.
name: interval
description: Run commands on remote devices running Cisco IOS
name: ios-command
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The ordered set of commands that should be configured in the section. The
- commands must be the exact same commands as found in the device running-config. Be
- sure to note the configuration command syntax as some commands are automatically
- modified by the device config parser.
+ - description: The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser.
name: lines
- - description: The ordered set of parents that uniquely identify the section or
- hierarchy the commands should be checked against. If the parents argument
- is omitted, the commands are checked against the set of top level or global
- commands.
+ - description: The ordered set of parents that uniquely identify the section or hierarchy the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands.
name: parents
- - description: Specifies the source path to the file that contains the configuration
- or configuration template to load. The path to the source file can either
- be the full path on the Ansible control host or a relative path from the playbook
- or role root directory. This argument is mutually exclusive with `lines`,
- `parents`.
+ - description: Specifies the source path to the file that contains the configuration or configuration template to load. The path to the source file can either be the full path on the Ansible control host or a relative path from the playbook or role root directory. This argument is mutually exclusive with `lines`, `parents`.
name: src
- - description: The ordered set of commands to push on to the command stack if
- a change needs to be made. This allows the playbook designer the opportunity
- to perform configuration commands prior to pushing any changes without affecting
- how the set of commands are matched against the system.
+ - description: The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system.
name: before
- - description: The ordered set of commands to append to the end of the command
- stack if a change needs to be made. Just like with `before` this allows the
- playbook designer to append a set of commands to be executed after the command
- set.
+ - description: The ordered set of commands to append to the end of the command stack if a change needs to be made. Just like with `before` this allows the playbook designer to append a set of commands to be executed after the command set.
name: after
- auto: PREDEFINED
default: false
defaultValue: line
- description: Instructs the module on the way to perform the matching of the
- set of commands against the current device config. If match is set to `line`,
- commands are matched line by line. If match is set to `strict`, command lines
- are matched with respect to position. If match is set to `exact`, command
- lines must be an equal match. Finally, if match is set to `none`, the module
- will not attempt to compare the source configuration with the running configuration
- on the remote device.
+ description: Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to `line`, commands are matched line by line. If match is set to `strict`, command lines are matched with respect to position. If match is set to `exact`, command lines must be an equal match. Finally, if match is set to `none`, the module will not attempt to compare the source configuration with the running configuration on the remote device.
name: match
predefined:
- line
@@ -185,56 +132,29 @@ script:
- auto: PREDEFINED
default: false
defaultValue: line
- description: Instructs the module on the way to perform the configuration on
- the device. If the replace argument is set to `line` then the modified lines
- are pushed to the device in configuration mode. If the replace argument is
- set to `block` then the entire command block is pushed to the device in configuration
- mode if any line is not correct.
+ description: Instructs the module on the way to perform the configuration on the device. If the replace argument is set to `line` then the modified lines are pushed to the device in configuration mode. If the replace argument is set to `block` then the entire command block is pushed to the device in configuration mode if any line is not correct.
name: replace
predefined:
- line
- block
- default: false
defaultValue: '@'
- description: This argument is used when pushing a multiline configuration element
- to the IOS device. It specifies the character to use as the delimiting character. This
- only applies to the configuration action.
+ description: This argument is used when pushing a multiline configuration element to the IOS device. It specifies the character to use as the delimiting character. This only applies to the configuration action.
name: multiline_delimiter
- default: false
defaultValue: "no"
- description: This argument will cause the module to create a full backup of
- the current `running-config` from the remote device before any changes are
- made. If the `backup_options` value is not given, the backup file is written
- to the `backup` folder in the playbook root directory or role root directory,
- if playbook is part of an ansible role. If the directory does not exist, it
- is created.
+ description: This argument will cause the module to create a full backup of the current `running-config` from the remote device before any changes are made. If the `backup_options` value is not given, the backup file is written to the `backup` folder in the playbook root directory or role root directory, if playbook is part of an ansible role. If the directory does not exist, it is created.
name: backup
- - description: The module, by default, will connect to the remote device and retrieve
- the current running-config to use as a base for comparing against the contents
- of source. There are times when it is not desirable to have the task get the
- current running-config for every task in a playbook. The `running_config`
- argument allows the implementer to pass in the configuration to use as the
- base config for comparison.
+ - description: The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The `running_config` argument allows the implementer to pass in the configuration to use as the base config for comparison.
name: running_config
- default: false
defaultValue: "no"
- description: This argument specifies whether or not to collect all defaults
- when getting the remote device running config. When enabled, the module will
- get the current config by issuing the command `show running-config all`.
+ description: This argument specifies whether or not to collect all defaults when getting the remote device running config. When enabled, the module will get the current config by issuing the command `show running-config all`.
name: defaults
- auto: PREDEFINED
default: false
defaultValue: never
- description: When changes are made to the device running-configuration, the
- changes are not copied to non-volatile storage by default. Using this argument
- will change that before. If the argument is set to `always`, then the running-config
- will always be copied to the startup-config and the `modified` flag will always
- be set to True. If the argument is set to `modified`, then the running-config
- will only be copied to the startup-config if it has changed since the last
- save to startup-config. If the argument is set to `never`, the running-config
- will never be copied to the startup-config. If the argument is set to `changed`,
- then the running-config will only be copied to the startup-config if the task
- has made a change. `changed` was added in Ansible 2.5.
+ description: When changes are made to the device running-configuration, the changes are not copied to non-volatile storage by default. Using this argument will change that before. If the argument is set to `always`, then the running-config will always be copied to the startup-config and the `modified` flag will always be set to True. If the argument is set to `modified`, then the running-config will only be copied to the startup-config if it has changed since the last save to startup-config. If the argument is set to `never`, the running-config will never be copied to the startup-config. If the argument is set to `changed`, then the running-config will only be copied to the startup-config if the task has made a change. `changed` was added in Ansible 2.5.
name: save_when
predefined:
- always
@@ -252,29 +172,17 @@ script:
- running
- startup
- intended
- - description: Use this argument to specify one or more lines that should be ignored
- during the diff. This is used for lines in the configuration that are automatically
- updated by the system. This argument takes a list of regular expressions
- or exact line matches.
+ - description: Use this argument to specify one or more lines that should be ignored during the diff. This is used for lines in the configuration that are automatically updated by the system. This argument takes a list of regular expressions or exact line matches.
name: diff_ignore_lines
- - description: The `intended_config` provides the master configuration that the
- node should conform to and is used to check the final running-config against.
- This argument will not modify any settings on the remote device and is strictly
- used to check the compliance of the current device's configuration against. When
- specifying this argument, the task should also modify the `diff_against` value
- and set it to `intended`.
+ - description: The `intended_config` provides the master configuration that the node should conform to and is used to check the final running-config against. This argument will not modify any settings on the remote device and is strictly used to check the compliance of the current device's configuration against. When specifying this argument, the task should also modify the `diff_against` value and set it to `intended`.
name: intended_config
- - description: This is a dict object containing configurable options related to
- backup file path. The value of this option is read only when `backup` is set
- to `yes`, if `backup` is set to `no` this option will be silently ignored.
+ - description: This is a dict object containing configurable options related to backup file path. The value of this option is read only when `backup` is set to `yes`, if `backup` is set to `no` this option will be silently ignored.
isArray: true
name: backup_options
description: Manage Cisco IOS configuration sections
name: ios-config
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -286,20 +194,12 @@ script:
Specify a list of values to include a larger subset.
Use a value with an initial `!` to collect all facts except that subset.
name: gather_subset
- - description: When supplied, this argument will restrict the facts collected
- to a given subset. Possible values for this argument include all and the resources
- like interfaces, vlans etc. Can specify a list of values to include a larger
- subset. Values can also be used with an initial `M(!`) to specify that a specific
- subset should not be collected. Valid subsets are 'all', 'interfaces', 'l2_interfaces',
- 'vlans', 'lag_interfaces', 'lacp', 'lacp_interfaces', 'lldp_global', 'lldp_interfaces',
- 'l3_interfaces'.
+ - description: When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all and the resources like interfaces, vlans etc. Can specify a list of values to include a larger subset. Values can also be used with an initial `M(!`) to specify that a specific subset should not be collected. Valid subsets are 'all', 'interfaces', 'l2_interfaces', 'vlans', 'lag_interfaces', 'lacp', 'lacp_interfaces', 'lldp_global', 'lldp_interfaces', 'l3_interfaces'.
name: gather_network_resources
description: Collect facts from remote devices running Cisco IOS
name: ios-facts
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -319,9 +219,7 @@ script:
description: Manages interface attributes of Cisco IOS network devices
name: ios-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -341,9 +239,7 @@ script:
description: Manage Layer-2 interface on Cisco IOS devices.
name: ios-l2-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -363,9 +259,7 @@ script:
description: Manage Layer-3 interface on Cisco IOS devices.
name: ios-l3-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -381,13 +275,10 @@ script:
- merged
- replaced
- deleted
- description: Manage Global Link Aggregation Control Protocol (LACP) on Cisco IOS
- devices.
+ description: Manage Global Link Aggregation Control Protocol (LACP) on Cisco IOS devices.
name: ios-lacp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -404,13 +295,10 @@ script:
- replaced
- overridden
- deleted
- description: Manage Link Aggregation Control Protocol (LACP) on Cisco IOS devices
- interface.
+ description: Manage Link Aggregation Control Protocol (LACP) on Cisco IOS devices interface.
name: ios-lacp-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -430,14 +318,11 @@ script:
description: Manage Link Aggregation on Cisco IOS devices.
name: ios-lag-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Channel-group number for the port-channel Link aggregation group.
- Range 1-255.
+ - description: Channel-group number for the port-channel Link aggregation group. Range 1-255.
name: group
- auto: PREDEFINED
description: Mode of the link aggregation group.
@@ -467,17 +352,14 @@ script:
description: Manage link aggregation groups on Cisco IOS network devices
name: ios-linkagg
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: State of the LLDP configuration. If value is `present` lldp will
- be enabled else if it is `absent` it will be disabled.
+ description: State of the LLDP configuration. If value is `present` lldp will be enabled else if it is `absent` it will be disabled.
name: state
predefined:
- present
@@ -485,9 +367,7 @@ script:
description: Manage LLDP configuration on Cisco IOS network devices.
name: ios-lldp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -503,13 +383,10 @@ script:
- merged
- replaced
- deleted
- description: Configure and manage Link Layer Discovery Protocol(LLDP) attributes
- on IOS platforms.
+ description: Configure and manage Link Layer Discovery Protocol(LLDP) attributes on IOS platforms.
name: ios-lldp-global
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -526,13 +403,10 @@ script:
- replaced
- overridden
- deleted
- description: Manage link layer discovery protocol (LLDP) attributes of interfaces
- on Cisco IOS devices.
+ description: Manage link layer discovery protocol (LLDP) attributes of interfaces on Cisco IOS devices.
name: ios-lldp-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -552,8 +426,7 @@ script:
name: name
- default: false
defaultValue: "4096"
- description: Size of buffer. The acceptable value is in range from 4096 to 4294967295
- bytes.
+ description: Size of buffer. The acceptable value is in range from 4096 to 4294967295 bytes.
name: size
- description: Set logging facility.
name: facility
@@ -584,9 +457,7 @@ script:
description: Manage logging on network devices
name: ios-logging
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -619,9 +490,7 @@ script:
description: Manages core NTP configuration.
name: ios-ntp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -629,8 +498,7 @@ script:
defaultValue: "5"
description: Number of packets to send.
name: count
- - description: The IP Address or hostname (resolvable by switch) of the remote
- node.
+ - description: The IP Address or hostname (resolvable by switch) of the remote node.
name: dest
required: true
- description: The source IP Address.
@@ -650,9 +518,7 @@ script:
description: Tests reachability using ping from Cisco IOS network devices
name: ios-ping
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -687,42 +553,26 @@ script:
description: Manage static IP routes on Cisco IOS network devices
name: ios-static-route
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Configure the device hostname parameter. This option takes an ASCII
- string value.
+ - description: Configure the device hostname parameter. This option takes an ASCII string value.
name: hostname
- - description: Configure the IP domain name on the remote device to the provided
- value. Value should be in the dotted name form and will be appended to the
- `hostname` to create a fully-qualified domain name.
+ - description: Configure the IP domain name on the remote device to the provided value. Value should be in the dotted name form and will be appended to the `hostname` to create a fully-qualified domain name.
name: domain_name
- - description: Provides the list of domain suffixes to append to the hostname
- for the purpose of doing name resolution. This argument accepts a list of
- names and will be reconciled with the current active configuration on the
- running node.
+ - description: Provides the list of domain suffixes to append to the hostname for the purpose of doing name resolution. This argument accepts a list of names and will be reconciled with the current active configuration on the running node.
name: domain_search
- - description: Provides one or more source interfaces to use for performing DNS
- lookups. The interface provided in `lookup_source` must be a valid interface
- configured on the device.
+ - description: Provides one or more source interfaces to use for performing DNS lookups. The interface provided in `lookup_source` must be a valid interface configured on the device.
name: lookup_source
- - description: Administrative control for enabling or disabling DNS lookups. When
- this argument is set to True, lookups are performed and when it is set to
- False, lookups are not performed.
+ - description: Administrative control for enabling or disabling DNS lookups. When this argument is set to True, lookups are performed and when it is set to False, lookups are not performed.
name: lookup_enabled
- - description: List of DNS name servers by IP address to use to perform name resolution
- lookups. This argument accepts either a list of DNS servers See examples.
+ - description: List of DNS name servers by IP address to use to perform name resolution lookups. This argument accepts either a list of DNS servers. See examples.
name: name_servers
- auto: PREDEFINED
default: false
defaultValue: present
- description: State of the configuration values in the device's current active
- configuration. When set to `present`, the values should be configured in
- the device active configuration and when set to `absent` the values should
- not be in the device active configuration
+ description: State of the configuration values in the device's current active configuration. When set to `present`, the values should be configured in the device active configuration and when set to `absent` the values should not be in the device active configuration
name: state
predefined:
- present
@@ -730,31 +580,20 @@ script:
description: Manage the system attributes on Cisco IOS devices
name: ios-system
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The set of username objects to be configured on the remote Cisco
- IOS device. The list entries can either be the username or a hash of username
- and properties. This argument is mutually exclusive with the `name` argument.
+ - description: The set of username objects to be configured on the remote Cisco IOS device. The list entries can either be the username or a hash of username and properties. This argument is mutually exclusive with the `name` argument.
name: aggregate
- - description: The username to be configured on the Cisco IOS device. This argument
- accepts a string value and is mutually exclusive with the `aggregate` argument.
- Please note that this option is not same as `provider username`.
+ - description: The username to be configured on the Cisco IOS device. This argument accepts a string value and is mutually exclusive with the `aggregate` argument. Please note that this option is not same as `provider username`.
name: name
- - description: The password to be configured on the Cisco IOS device. The password
- needs to be provided in clear and it will be encrypted on the device. Please
- note that this option is not same as `provider password`.
+ - description: The password to be configured on the Cisco IOS device. The password needs to be provided in clear and it will be encrypted on the device. Please note that this option is not same as `provider password`.
name: configured_password
- auto: PREDEFINED
default: false
defaultValue: always
- description: Since passwords are encrypted in the device running config, this
- argument will instruct the module when to change the password. When set to
- `always`, the password will always be updated in the device and when set to
- `on_create` the password will be updated only if the username is created.
+ description: Since passwords are encrypted in the device running config, this argument will instruct the module when to change the password. When set to `always`, the password will always be updated in the device and when set to `on_create` the password will be updated only if the username is created.
name: update_password
predefined:
- on_create
@@ -762,42 +601,31 @@ script:
- auto: PREDEFINED
default: false
defaultValue: secret
- description: This argument determines whether a 'password' or 'secret' will
- be configured.
+ description: This argument determines whether a 'password' or 'secret' will be configured.
name: password_type
predefined:
- secret
- password
- description: This option allows configuring hashed passwords on Cisco IOS devices.
name: hashed_password
- - description: The `privilege` argument configures the privilege level of the
- user when logged into the system. This argument accepts integer values in
- the range of 1 to 15.
+ - description: The `privilege` argument configures the privilege level of the user when logged into the system. This argument accepts integer values in the range of 1 to 15.
name: privilege
- - description: Configures the view for the username in the device running configuration.
- The argument accepts a string value defining the view name. This argument
- does not check if the view has been configured on the device.
+ - description: Configures the view for the username in the device running configuration. The argument accepts a string value defining the view name. This argument does not check if the view has been configured on the device.
name: view
- description: |-
Specifies one or more SSH public key(s) to configure for the given username.
This argument accepts a valid SSH key value.
name: sshkey
- - description: Defines the username without assigning a password. This will allow
- the user to login to the system without being authenticated by a password.
+ - description: Defines the username without assigning a password. This will allow the user to login to the system without being authenticated by a password.
name: nopassword
- default: false
defaultValue: "False"
- description: Instructs the module to consider the resource definition absolute.
- It will remove any previously configured usernames on the device with the
- exception of the `admin` user (the current defined set of users).
+ description: Instructs the module to consider the resource definition absolute. It will remove any previously configured usernames on the device with the exception of the `admin` user (the current defined set of users).
name: purge
- auto: PREDEFINED
default: false
defaultValue: present
- description: Configures the state of the username definition as it relates to
- the device operational configuration. When set to `present`, the username(s)
- should be configured in the device active configuration and when set to `absent`
- the username(s) should not be in the device active configuration
+ description: Configures the state of the username definition as it relates to the device operational configuration. When set to `present`, the username(s) should be configured in the device active configuration and when set to `absent` the username(s) should not be in the device active configuration
name: state
predefined:
- present
@@ -805,9 +633,7 @@ script:
description: Manage the aggregate of local users on Cisco IOS device
name: ios-user
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -827,84 +653,55 @@ script:
description: Manage VLANs on Cisco IOS devices.
name: ios-vlans
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The set of VRF definition objects to be configured on the remote
- IOS device. Ths list entries can either be the VRF name or a hash of VRF
- definitions and attributes. This argument is mutually exclusive with the
- `name` argument.
+ - description: The set of VRF definition objects to be configured on the remote IOS device. The list entries can either be the VRF name or a hash of VRF definitions and attributes. This argument is mutually exclusive with the `name` argument.
name: vrfs
- - description: The name of the VRF definition to be managed on the remote IOS
- device. The VRF definition name is an ASCII string name used to uniquely
- identify the VRF. This argument is mutually exclusive with the `vrfs` argument
+ - description: The name of the VRF definition to be managed on the remote IOS device. The VRF definition name is an ASCII string name used to uniquely identify the VRF. This argument is mutually exclusive with the `vrfs` argument
name: name
- - description: Provides a short description of the VRF definition in the current
- active configuration. The VRF definition value accepts alphanumeric characters
- used to provide additional information about the VRF.
+ - description: Provides a short description of the VRF definition in the current active configuration. The VRF definition value accepts alphanumeric characters used to provide additional information about the VRF.
name: description
- - description: The router-distinguisher value uniquely identifies the VRF to routing
- processes on the remote IOS system. The RD value takes the form of `A:B`
- where `A` and `B` are both numeric values.
+ - description: The router-distinguisher value uniquely identifies the VRF to routing processes on the remote IOS system. The RD value takes the form of `A:B` where `A` and `B` are both numeric values.
name: rd
- - description: Identifies the set of interfaces that should be configured in the
- VRF. Interfaces must be routed interfaces in order to be placed into a VRF.
+ - description: Identifies the set of interfaces that should be configured in the VRF. Interfaces must be routed interfaces in order to be placed into a VRF.
name: interfaces
- - description: This is a intent option and checks the operational state of the
- for given vrf `name` for associated interfaces. If the value in the `associated_interfaces`
- does not match with the operational state of vrf interfaces on device it will
- result in failure.
+ - description: This is an intent option and checks the operational state of the given vrf `name` for associated interfaces. If the value in the `associated_interfaces` does not match with the operational state of vrf interfaces on device it will result in failure.
name: associated_interfaces
- default: false
defaultValue: "10"
- description: Time in seconds to wait before checking for the operational state
- on remote device.
+ description: Time in seconds to wait before checking for the operational state on remote device.
name: delay
- default: false
defaultValue: "False"
- description: Instructs the module to consider the VRF definition absolute. It
- will remove any previously configured VRFs on the device.
+ description: Instructs the module to consider the VRF definition absolute. It will remove any previously configured VRFs on the device.
name: purge
- auto: PREDEFINED
default: false
defaultValue: present
- description: Configures the state of the VRF definition as it relates to the
- device operational configuration. When set to `present`, the VRF should be
- configured in the device active configuration and when set to `absent` the
- VRF should not be in the device active configuration
+ description: Configures the state of the VRF definition as it relates to the device operational configuration. When set to `present`, the VRF should be configured in the device active configuration and when set to `absent` the VRF should not be in the device active configuration
name: state
predefined:
- present
- absent
- - description: Adds an export and import list of extended route target communities
- to the VRF.
+ - description: Adds an export and import list of extended route target communities to the VRF.
name: route_both
- - description: Adds an export list of extended route target communities to the
- VRF.
+ - description: Adds an export list of extended route target communities to the VRF.
name: route_export
- - description: Adds an import list of extended route target communities to the
- VRF.
+ - description: Adds an import list of extended route target communities to the VRF.
name: route_import
- - description: Adds an export and import list of extended route target communities
- in address-family configuration submode to the VRF.
+ - description: Adds an export and import list of extended route target communities in address-family configuration submode to the VRF.
name: route_both_ipv4
- - description: Adds an export list of extended route target communities in address-family
- configuration submode to the VRF.
+ - description: Adds an export list of extended route target communities in address-family configuration submode to the VRF.
name: route_export_ipv4
- - description: Adds an import list of extended route target communities in address-family
- configuration submode to the VRF.
+ - description: Adds an import list of extended route target communities in address-family configuration submode to the VRF.
name: route_import_ipv4
- - description: Adds an export and import list of extended route target communities
- in address-family configuration submode to the VRF.
+ - description: Adds an export and import list of extended route target communities in address-family configuration submode to the VRF.
name: route_both_ipv6
- - description: Adds an export list of extended route target communities in address-family
- configuration submode to the VRF.
+ - description: Adds an export list of extended route target communities in address-family configuration submode to the VRF.
name: route_export_ipv6
- - description: Adds an import list of extended route target communities in address-family
- configuration submode to the VRF.
+ - description: Adds an import list of extended route target communities in address-family configuration submode to the VRF.
name: route_import_ipv6
description: Manage the collection of VRF definitions on Cisco IOS devices
name: ios-vrf
diff --git a/Packs/Ansible_Powered_Integrations/Integrations/CiscoNXOS/CiscoNXOS.yml b/Packs/Ansible_Powered_Integrations/Integrations/CiscoNXOS/CiscoNXOS.yml
index 98b9bb3ec2db..a790013dc38b 100644
--- a/Packs/Ansible_Powered_Integrations/Integrations/CiscoNXOS/CiscoNXOS.yml
+++ b/Packs/Ansible_Powered_Integrations/Integrations/CiscoNXOS/CiscoNXOS.yml
@@ -3,21 +3,18 @@ commonfields:
id: CiscoNX-OS
version: -1
configuration:
-- additionalinfo: The credentials to associate with the instance. SSH keys can be
- configured using the credential manager.
+- additionalinfo: The credentials to associate with the instance. SSH keys can be configured using the credential manager, under the Certificate field.
display: Username
name: creds
required: true
type: 9
-- additionalinfo: The default port to use if one is not specified in the commands
- `host` argument.
+- additionalinfo: The default port to use if one is not specified in the commands `host` argument.
defaultvalue: "22"
display: Default SSH Port
name: port
required: true
type: 0
-- additionalinfo: If multiple hosts are specified in a command, how many hosts should
- be interacted with concurrently.
+- additionalinfo: If multiple hosts are specified in a command, how many hosts should be interacted with concurrently.
defaultvalue: "4"
display: Concurrecy Factor
name: concurrency
@@ -29,9 +26,7 @@ name: CiscoNX-OS
script:
commands:
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -45,21 +40,17 @@ script:
- description: Global AAA shared secret or keyword 'default'.
name: global_key
- auto: PREDEFINED
- description: The state of encryption applied to the entered global key. O clear
- text, 7 encrypted. Type-6 encryption is not supported.
+    description: The state of encryption applied to the entered global key. 0 for clear text, 7 for encrypted. Type-6 encryption is not supported.
name: encrypt_type
predefined:
- "0"
- "7"
- - description: Duration for which a non-reachable AAA server is skipped, in minutes
- or keyword 'default. Range is 1-1440. Device default is 0.
+  - description: Duration for which a non-reachable AAA server is skipped, in minutes or keyword 'default'. Range is 1-1440. Device default is 0.
name: deadtime
- - description: Global AAA server timeout period, in seconds or keyword 'default.
- Range is 1-60. Device default is 5.
+  - description: Global AAA server timeout period, in seconds or keyword 'default'. Range is 1-60. Device default is 5.
name: server_timeout
- auto: PREDEFINED
- description: Enables direct authentication requests to AAA server or keyword
- 'default' Device default is disabled.
+    description: Enables direct authentication requests to AAA server or keyword 'default'. Device default is disabled.
name: directed_request
predefined:
- enabled
@@ -75,9 +66,7 @@ script:
description: Manages AAA server global configuration.
name: nxos-aaa-server
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -94,14 +83,12 @@ script:
- description: Shared secret for the specified host or keyword 'default'.
name: key
- auto: PREDEFINED
- description: The state of encryption applied to the entered key. O for clear
- text, 7 for encrypted. Type-6 encryption is not supported.
+    description: The state of encryption applied to the entered key. 0 for clear text, 7 for encrypted. Type-6 encryption is not supported.
name: encrypt_type
predefined:
- "0"
- "7"
- - description: Timeout period for specified host, in seconds or keyword 'default.
- Range is 1-60.
+  - description: Timeout period for specified host, in seconds or keyword 'default'. Range is 1-60.
name: host_timeout
- description: Alternate UDP port for RADIUS authentication or keyword 'default'.
name: auth_port
@@ -120,9 +107,7 @@ script:
description: Manages AAA server host-specific configuration.
name: nxos-aaa-server-host
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -142,8 +127,7 @@ script:
name: remark
- description: Port number or protocol (as supported by the switch).
name: proto
- - description: Source ip and mask using IP/MASK notation and supports keyword
- 'any'.
+ - description: Source ip and mask using IP/MASK notation and supports keyword 'any'.
name: src
- auto: PREDEFINED
description: Source port operands such as eq, neq, gt, lt, range.
@@ -159,8 +143,7 @@ script:
name: src_port1
- description: Second (end) port when using range operand.
name: src_port2
- - description: Destination ip and mask using IP/MASK notation and supports the
- keyword 'any'.
+ - description: Destination ip and mask using IP/MASK notation and supports the keyword 'any'.
name: dest
- auto: PREDEFINED
description: Destination port operands such as eq, neq, gt, lt, range.
@@ -272,9 +255,7 @@ script:
description: Manages access list entries for ACLs.
name: nxos-acl
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -302,29 +283,23 @@ script:
description: Manages applying ACLs to interfaces.
name: nxos-acl-interface
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
- description: Specifies which banner that should be configured on the remote
- device.
+ description: Specifies which banner that should be configured on the remote device.
name: banner
predefined:
- exec
- motd
required: true
- - description: The banner text that should be present in the remote device running
- configuration. This argument accepts a multiline string, with no empty lines.
- Requires `state=present`.
+ - description: The banner text that should be present in the remote device running configuration. This argument accepts a multiline string, with no empty lines. Requires `state=present`.
name: text
- auto: PREDEFINED
default: false
defaultValue: present
- description: Specifies whether or not the configuration is present in the current
- devices active running configuration.
+ description: Specifies whether or not the configuration is present in the current devices active running configuration.
name: state
predefined:
- present
@@ -332,9 +307,7 @@ script:
description: Manage multiline banners on Cisco NXOS devices
name: nxos-banner
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -386,9 +359,7 @@ script:
description: Bidirectional Forwarding Detection (BFD) global-level configuration
name: nxos-bfd-global
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -408,40 +379,30 @@ script:
description: Manages BFD attributes of nxos interfaces.
name: nxos-bfd-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: BGP autonomous system number. Valid values are String, Integer
- in ASPLAIN or ASDOT notation.
+ - description: BGP autonomous system number. Valid values are String, Integer in ASPLAIN or ASDOT notation.
name: asn
required: true
- - description: Name of the VRF. The name 'default' is a valid VRF representing
- the global BGP.
+ - description: Name of the VRF. The name 'default' is a valid VRF representing the global BGP.
name: vrf
- - description: Enable/Disable MED comparison on paths from different autonomous
- systems.
+ - description: Enable/Disable MED comparison on paths from different autonomous systems.
name: bestpath_always_compare_med
- - description: Enable/Disable load sharing across the providers with different
- (but equal-length) AS paths.
+ - description: Enable/Disable load sharing across the providers with different (but equal-length) AS paths.
name: bestpath_aspath_multipath_relax
- description: Enable/Disable comparison of router IDs for identical eBGP paths.
name: bestpath_compare_routerid
- - description: Enable/Disable neighborid. Use this when more paths available than
- max path config.
+ - description: Enable/Disable neighborid. Use this when more paths available than max path config.
name: bestpath_compare_neighborid
- description: Enable/Disable Ignores the cost community for BGP best-path calculations.
name: bestpath_cost_community_ignore
- - description: Enable/Disable enforcement of bestpath to do a MED comparison only
- between paths originated within a confederation.
+ - description: Enable/Disable enforcement of bestpath to do a MED comparison only between paths originated within a confederation.
name: bestpath_med_confed
- - description: Enable/Disable assigns the value of infinity to received routes
- that do not carry the MED attribute, making these routes the least desirable.
+ - description: Enable/Disable assigns the value of infinity to received routes that do not carry the MED attribute, making these routes the least desirable.
name: bestpath_med_missing_as_worst
- - description: Enable/Disable deterministic selection of the best MED pat from
- among the paths from the same autonomous system.
+  - description: Enable/Disable deterministic selection of the best MED path from among the paths from the same autonomous system.
name: bestpath_med_non_deterministic
- description: Route Reflector Cluster-ID.
name: cluster_id
@@ -449,18 +410,13 @@ script:
name: confederation_id
- description: AS confederation parameters.
name: confederation_peers
- - description: Enable/Disable the batching evaluation of prefix advertisement
- to all peers.
+ - description: Enable/Disable the batching evaluation of prefix advertisement to all peers.
name: disable_policy_batching
- - description: Enable/Disable the batching evaluation of prefix advertisements
- to all peers with prefix list.
+ - description: Enable/Disable the batching evaluation of prefix advertisements to all peers with prefix list.
name: disable_policy_batching_ipv4_prefix_list
- - description: Enable/Disable the batching evaluation of prefix advertisements
- to all peers with prefix list.
+ - description: Enable/Disable the batching evaluation of prefix advertisements to all peers with prefix list.
name: disable_policy_batching_ipv6_prefix_list
- - description: Enable/Disable enforces the neighbor autonomous system to be the
- first AS number listed in the AS path attribute for eBGP. On NX-OS, this property
- is only supported in the global BGP context.
+ - description: Enable/Disable enforces the neighbor autonomous system to be the first AS number listed in the AS path attribute for eBGP. On NX-OS, this property is only supported in the global BGP context.
name: enforce_first_as
- auto: PREDEFINED
description: Enable/Disable cli event history buffer.
@@ -498,11 +454,9 @@ script:
- size_large
- size_disable
- default
- - description: Enable/Disable immediately reset the session if the link to a directly
- connected BGP peer goes down. Only supported in the global BGP context.
+ - description: Enable/Disable immediately reset the session if the link to a directly connected BGP peer goes down. Only supported in the global BGP context.
name: fast_external_fallover
- - description: Enable/Disable flush routes in RIB upon controlled restart. On
- NX-OS, this property is only supported in the global BGP context.
+ - description: Enable/Disable flush routes in RIB upon controlled restart. On NX-OS, this property is only supported in the global BGP context.
name: flush_routes
- description: Enable/Disable graceful restart.
name: graceful_restart
@@ -510,8 +464,7 @@ script:
name: graceful_restart_helper
- description: Set maximum time for a restart sent to the BGP peer.
name: graceful_restart_timers_restart
- - description: Set maximum time that BGP keeps the stale routes from the restarting
- BGP peer.
+ - description: Set maximum time that BGP keeps the stale routes from the restarting BGP peer.
name: graceful_restart_timers_stalepath_time
- description: Enable/Disable isolate this router from BGP perspective.
name: isolate
@@ -519,20 +472,17 @@ script:
name: local_as
- description: Enable/Disable message logging for neighbor up/down event.
name: log_neighbor_changes
- - description: Specify Maximum number of AS numbers allowed in the AS-path attribute.
- Valid values are between 1 and 512.
+ - description: Specify Maximum number of AS numbers allowed in the AS-path attribute. Valid values are between 1 and 512.
name: maxas_limit
- description: Enable/Disable handle BGP neighbor down event, due to various reasons.
name: neighbor_down_fib_accelerate
- - description: The BGP reconnection interval for dropped sessions. Valid values
- are between 1 and 60.
+ - description: The BGP reconnection interval for dropped sessions. Valid values are between 1 and 60.
name: reconnect_interval
- description: Router Identifier (ID) of the BGP router VRF instance.
name: router_id
- description: Administratively shutdown the BGP protocol.
name: shutdown
- - description: Enable/Disable advertise only routes programmed in hardware to
- peers.
+ - description: Enable/Disable advertise only routes programmed in hardware to peers.
name: suppress_fib_pending
- description: Specify timeout for the first best path after a restart, in seconds.
name: timer_bestpath_limit
@@ -551,18 +501,14 @@ script:
description: Manages BGP configuration.
name: nxos-bgp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: BGP autonomous system number. Valid values are String, Integer
- in ASPLAIN or ASDOT notation.
+ - description: BGP autonomous system number. Valid values are String, Integer in ASPLAIN or ASDOT notation.
name: asn
required: true
- - description: Name of the VRF. The name 'default' is a valid VRF representing
- the global bgp.
+ - description: Name of the VRF. The name 'default' is a valid VRF representing the global bgp.
name: vrf
required: true
- auto: PREDEFINED
@@ -583,94 +529,59 @@ script:
- multicast
- evpn
required: true
- - description: Install a backup path into the forwarding table and provide prefix
- independent convergence (PIC) in case of a PE-CE link failure.
+ - description: Install a backup path into the forwarding table and provide prefix independent convergence (PIC) in case of a PE-CE link failure.
name: additional_paths_install
- - description: Enables the receive capability of additional paths for all of the
- neighbors under this address family for which the capability has not been
- disabled.
+ - description: Enables the receive capability of additional paths for all of the neighbors under this address family for which the capability has not been disabled.
name: additional_paths_receive
- - description: Configures the capability of selecting additional paths for a prefix.
- Valid values are a string defining the name of the route-map.
+ - description: Configures the capability of selecting additional paths for a prefix. Valid values are a string defining the name of the route-map.
name: additional_paths_selection
- - description: Enables the send capability of additional paths for all of the
- neighbors under this address family for which the capability has not been
- disabled.
+ - description: Enables the send capability of additional paths for all of the neighbors under this address family for which the capability has not been disabled.
name: additional_paths_send
- description: Advertise evpn routes.
name: advertise_l2vpn_evpn
- description: Configure client-to-client route reflection.
name: client_to_client
- - description: Specify dampen value for IGP metric-related changes, in seconds.
- Valid values are integer and keyword 'default'.
+ - description: Specify dampen value for IGP metric-related changes, in seconds. Valid values are integer and keyword 'default'.
name: dampen_igp_metric
- description: Enable/disable route-flap dampening.
name: dampening_state
- - description: Specify decay half-life in minutes for route-flap dampening. Valid
- values are integer and keyword 'default'.
+ - description: Specify decay half-life in minutes for route-flap dampening. Valid values are integer and keyword 'default'.
name: dampening_half_time
- - description: Specify max suppress time for route-flap dampening stable route.
- Valid values are integer and keyword 'default'.
+ - description: Specify max suppress time for route-flap dampening stable route. Valid values are integer and keyword 'default'.
name: dampening_max_suppress_time
- - description: Specify route reuse time for route-flap dampening. Valid values
- are integer and keyword 'default'.
+ - description: Specify route reuse time for route-flap dampening. Valid values are integer and keyword 'default'.
name: dampening_reuse_time
- - description: Specify route-map for route-flap dampening. Valid values are a
- string defining the name of the route-map.
+ - description: Specify route-map for route-flap dampening. Valid values are a string defining the name of the route-map.
name: dampening_routemap
- - description: Specify route suppress time for route-flap dampening. Valid values
- are integer and keyword 'default'.
+ - description: Specify route suppress time for route-flap dampening. Valid values are integer and keyword 'default'.
name: dampening_suppress_time
- description: Default information originate.
name: default_information_originate
- - description: Sets default metrics for routes redistributed into BGP. Valid values
- are Integer or keyword 'default'
+  - description: Sets default metrics for routes redistributed into BGP. Valid values are Integer or keyword 'default'.
name: default_metric
- - description: Sets the administrative distance for eBGP routes. Valid values
- are Integer or keyword 'default'.
+ - description: Sets the administrative distance for eBGP routes. Valid values are Integer or keyword 'default'.
name: distance_ebgp
- - description: Sets the administrative distance for iBGP routes. Valid values
- are Integer or keyword 'default'.
+ - description: Sets the administrative distance for iBGP routes. Valid values are Integer or keyword 'default'.
name: distance_ibgp
- - description: Sets the administrative distance for local BGP routes. Valid values
- are Integer or keyword 'default'.
+ - description: Sets the administrative distance for local BGP routes. Valid values are Integer or keyword 'default'.
name: distance_local
- - description: An array of route-map names which will specify prefixes to inject.
- Each array entry must first specify the inject-map name, secondly an exist-map
- name, and optionally the copy-attributes keyword which indicates that attributes
- should be copied from the aggregate. For example [['lax_inject_map', 'lax_exist_map'],
- ['nyc_inject_map', 'nyc_exist_map', 'copy-attributes'], ['fsd_inject_map',
- 'fsd_exist_map']].
+ - description: An array of route-map names which will specify prefixes to inject. Each array entry must first specify the inject-map name, secondly an exist-map name, and optionally the copy-attributes keyword which indicates that attributes should be copied from the aggregate. For example [['lax_inject_map', 'lax_exist_map'], ['nyc_inject_map', 'nyc_exist_map', 'copy-attributes'], ['fsd_inject_map', 'fsd_exist_map']].
name: inject_map
- - description: Configures the maximum number of equal-cost paths for load sharing.
- Valid value is an integer in the range 1-64.
+ - description: Configures the maximum number of equal-cost paths for load sharing. Valid value is an integer in the range 1-64.
name: maximum_paths
- - description: Configures the maximum number of ibgp equal-cost paths for load
- sharing. Valid value is an integer in the range 1-64.
+ - description: Configures the maximum number of ibgp equal-cost paths for load sharing. Valid value is an integer in the range 1-64.
name: maximum_paths_ibgp
- - description: Networks to configure. Valid value is a list of network prefixes
- to advertise. The list must be in the form of an array. Each entry in the
- array must include a prefix address and an optional route-map. For example
- [['10.0.0.0/16', 'routemap_LA'], ['192.168.1.1', 'Chicago'], ['192.168.2.0/24'],
- ['192.168.3.0/24', 'routemap_NYC']].
+ - description: Networks to configure. Valid value is a list of network prefixes to advertise. The list must be in the form of an array. Each entry in the array must include a prefix address and an optional route-map. For example [['10.0.0.0/16', 'routemap_LA'], ['192.168.1.1', 'Chicago'], ['192.168.2.0/24'], ['192.168.3.0/24', 'routemap_NYC']].
name: networks
- - description: Configure a route-map for valid nexthops. Valid values are a string
- defining the name of the route-map.
+ - description: Configure a route-map for valid nexthops. Valid values are a string defining the name of the route-map.
name: next_hop_route_map
- - description: A list of redistribute directives. Multiple redistribute entries
- are allowed. The list must be in the form of a nested array. the first entry
- of each array defines the source-protocol to redistribute from; the second
- entry defines a route-map name. A route-map is highly advised but may be optional
- on some platforms, in which case it may be omitted from the array list. For
- example [['direct', 'rm_direct'], ['lisp', 'rm_lisp']].
+ - description: A list of redistribute directives. Multiple redistribute entries are allowed. The list must be in the form of a nested array. the first entry of each array defines the source-protocol to redistribute from; the second entry defines a route-map name. A route-map is highly advised but may be optional on some platforms, in which case it may be omitted from the array list. For example [['direct', 'rm_direct'], ['lisp', 'rm_lisp']].
name: redistribute
- description: Advertises only active routes to peers.
name: suppress_inactive
- - description: Apply table-map to filter routes downloaded into URIB. Valid values
- are a string.
+ - description: Apply table-map to filter routes downloaded into URIB. Valid values are a string.
name: table_map
- - description: Filters routes rejected by the route-map and does not download
- them to the RIB.
+ - description: Filters routes rejected by the route-map and does not download them to the RIB.
name: table_map_filter
- auto: PREDEFINED
default: false
@@ -683,23 +594,18 @@ script:
description: Manages BGP Address-family configuration.
name: nxos-bgp-af
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: BGP autonomous system number. Valid values are string, Integer
- in ASPLAIN or ASDOT notation.
+ - description: BGP autonomous system number. Valid values are string, Integer in ASPLAIN or ASDOT notation.
name: asn
required: true
- default: false
defaultValue: default
- description: Name of the VRF. The name 'default' is a valid VRF representing
- the global bgp.
+ description: Name of the VRF. The name 'default' is a valid VRF representing the global bgp.
name: vrf
- - description: Neighbor Identifier. Valid values are string. Neighbors may use
- IPv4 or IPv6 notation, with or without prefix length.
+ - description: Neighbor Identifier. Valid values are string. Neighbors may use IPv4 or IPv6 notation, with or without prefix length.
name: neighbor
required: true
- description: Description of the neighbor.
@@ -718,47 +624,34 @@ script:
name: capability_negotiation
- description: Configure whether or not to enable dynamic capability.
name: dynamic_capability
- - description: Specify multihop TTL for a remote peer. Valid values are integers
- between 2 and 255, or keyword 'default' to disable this property.
+ - description: Specify multihop TTL for a remote peer. Valid values are integers between 2 and 255, or keyword 'default' to disable this property.
name: ebgp_multihop
- - description: Specify the local-as number for the eBGP neighbor. Valid values
- are String or Integer in ASPLAIN or ASDOT notation, or 'default', which means
- not to configure it.
+ - description: Specify the local-as number for the eBGP neighbor. Valid values are String or Integer in ASPLAIN or ASDOT notation, or 'default', which means not to configure it.
name: local_as
- auto: PREDEFINED
- description: Specify whether or not to enable log messages for neighbor up/down
- event.
+ description: Specify whether or not to enable log messages for neighbor up/down event.
name: log_neighbor_changes
predefined:
- enable
- disable
- inherit
- - description: Specify whether or not to shut down this neighbor under memory
- pressure.
+ - description: Specify whether or not to shut down this neighbor under memory pressure.
name: low_memory_exempt
- - description: Specify Maximum number of peers for this neighbor prefix Valid
- values are between 1 and 1000, or 'default', which does not impose the limit.
- Note that this parameter is accepted only on neighbors with address/prefix.
+  - description: Specify Maximum number of peers for this neighbor prefix. Valid values are between 1 and 1000, or 'default', which does not impose the limit. Note that this parameter is accepted only on neighbors with address/prefix.
name: maximum_peers
- description: Specify the password for neighbor. Valid value is string.
name: pwd
- auto: PREDEFINED
- description: Specify the encryption type the password will use. Valid values
- are '3des' or 'cisco_type_7' encryption or keyword 'default'.
+ description: Specify the encryption type the password will use. Valid values are '3des' or 'cisco_type_7' encryption or keyword 'default'.
name: pwd_type
predefined:
- 3des
- cisco_type_7
- default
- - description: Specify Autonomous System Number of the neighbor. Valid values
- are String or Integer in ASPLAIN or ASDOT notation, or 'default', which means
- not to configure it.
+ - description: Specify Autonomous System Number of the neighbor. Valid values are String or Integer in ASPLAIN or ASDOT notation, or 'default', which means not to configure it.
name: remote_as
- auto: PREDEFINED
- description: Specify the config to remove private AS number from outbound updates.
- Valid values are 'enable' to enable this config, 'disable' to disable this
- config, 'all' to remove all private AS number, or 'replace-as', to replace
- the private AS number.
+ description: Specify the config to remove private AS number from outbound updates. Valid values are 'enable' to enable this config, 'disable' to disable this config, 'all' to remove all private AS number, or 'replace-as', to replace the private AS number.
name: remove_private_as
predefined:
- enable
@@ -769,16 +662,11 @@ script:
name: shutdown
- description: Configure to suppress 4-byte AS Capability.
name: suppress_4_byte_as
- - description: Specify keepalive timer value. Valid values are integers between
- 0 and 3600 in terms of seconds, or 'default', which is 60.
+ - description: Specify keepalive timer value. Valid values are integers between 0 and 3600 in terms of seconds, or 'default', which is 60.
name: timers_keepalive
- - description: Specify holdtime timer value. Valid values are integers between
- 0 and 3600 in terms of seconds, or 'default', which is 180.
+ - description: Specify holdtime timer value. Valid values are integers between 0 and 3600 in terms of seconds, or 'default', which is 180.
name: timers_holdtime
- - description: Specify whether or not to only allow passive connection setup.
- Valid values are 'true', 'false', and 'default', which defaults to 'false'.
- This property can only be configured when the neighbor is in 'ip' address
- format without prefix length.
+ - description: Specify whether or not to only allow passive connection setup. Valid values are 'true', 'false', and 'default', which defaults to 'false'. This property can only be configured when the neighbor is in 'ip' address format without prefix length.
name: transport_passive_only
- description: Specify source interface of BGP session and updates.
name: update_source
@@ -793,23 +681,18 @@ script:
description: Manages BGP neighbors configurations.
name: nxos-bgp-neighbor
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: BGP autonomous system number. Valid values are String, Integer
- in ASPLAIN or ASDOT notation.
+ - description: BGP autonomous system number. Valid values are String, Integer in ASPLAIN or ASDOT notation.
name: asn
required: true
- default: false
defaultValue: default
- description: Name of the VRF. The name 'default' is a valid VRF representing
- the global bgp.
+ description: Name of the VRF. The name 'default' is a valid VRF representing the global bgp.
name: vrf
- - description: Neighbor Identifier. Valid values are string. Neighbors may use
- IPv4 or IPv6 notation, with or without prefix length.
+ - description: Neighbor Identifier. Valid values are string. Neighbors may use IPv4 or IPv6 notation, with or without prefix length.
name: neighbor
required: true
- auto: PREDEFINED
@@ -831,49 +714,32 @@ script:
- evpn
required: true
- auto: PREDEFINED
- description: Valid values are enable for basic command enablement; disable for
- disabling the command at the neighbor af level (it adds the disable keyword
- to the basic command); and inherit to remove the command at this level (the
- command value is inherited from a higher BGP layer).
+ description: Valid values are enable for basic command enablement; disable for disabling the command at the neighbor af level (it adds the disable keyword to the basic command); and inherit to remove the command at this level (the command value is inherited from a higher BGP layer).
name: additional_paths_receive
predefined:
- enable
- disable
- inherit
- auto: PREDEFINED
- description: Valid values are enable for basic command enablement; disable for
- disabling the command at the neighbor af level (it adds the disable keyword
- to the basic command); and inherit to remove the command at this level (the
- command value is inherited from a higher BGP layer).
+ description: Valid values are enable for basic command enablement; disable for disabling the command at the neighbor af level (it adds the disable keyword to the basic command); and inherit to remove the command at this level (the command value is inherited from a higher BGP layer).
name: additional_paths_send
predefined:
- enable
- disable
- inherit
- - description: Conditional route advertisement. This property requires two route
- maps, an advertise-map and an exist-map. Valid values are an array specifying
- both the advertise-map name and the exist-map name, or simply 'default' e.g.
- ['my_advertise_map', 'my_exist_map']. This command is mutually exclusive with
- the advertise_map_non_exist property.
+ - description: Conditional route advertisement. This property requires two route maps, an advertise-map and an exist-map. Valid values are an array specifying both the advertise-map name and the exist-map name, or simply 'default' e.g. ['my_advertise_map', 'my_exist_map']. This command is mutually exclusive with the advertise_map_non_exist property.
name: advertise_map_exist
- - description: Conditional route advertisement. This property requires two route
- maps, an advertise-map and an exist-map. Valid values are an array specifying
- both the advertise-map name and the non-exist-map name, or simply 'default'
- e.g. ['my_advertise_map', 'my_non_exist_map']. This command is mutually exclusive
- with the advertise_map_exist property.
+ - description: Conditional route advertisement. This property requires two route maps, an advertise-map and an exist-map. Valid values are an array specifying both the advertise-map name and the non-exist-map name, or simply 'default' e.g. ['my_advertise_map', 'my_non_exist_map']. This command is mutually exclusive with the advertise_map_exist property.
name: advertise_map_non_exist
- description: Activate allowas-in property
name: allowas_in
- - description: Max-occurrences value for allowas_in. Valid values are an integer
- value or 'default'. This is mutually exclusive with allowas_in.
+ - description: Max-occurrences value for allowas_in. Valid values are an integer value or 'default'. This is mutually exclusive with allowas_in.
name: allowas_in_max
- description: Activate the as-override feature.
name: as_override
- description: Activate the default-originate feature.
name: default_originate
- - description: Route-map for the default_originate property. Valid values are
- a string defining a route-map name, or 'default'. This is mutually exclusive
- with default_originate.
+ - description: Route-map for the default_originate property. Valid values are a string defining a route-map name, or 'default'. This is mutually exclusive with default_originate.
name: default_originate_route_map
- description: Disable checking of peer AS-number while advertising
name: disable_peer_as_check
@@ -881,17 +747,13 @@ script:
name: filter_list_in
- description: Valid values are a string defining a filter-list name, or 'default'.
name: filter_list_out
- - description: maximum-prefix limit value. Valid values are an integer value or
- 'default'.
+ - description: maximum-prefix limit value. Valid values are an integer value or 'default'.
name: max_prefix_limit
- - description: Optional restart interval. Valid values are an integer. Requires
- max_prefix_limit. May not be combined with max_prefix_warning.
+ - description: Optional restart interval. Valid values are an integer. Requires max_prefix_limit. May not be combined with max_prefix_warning.
name: max_prefix_interval
- - description: Optional threshold percentage at which to generate a warning. Valid
- values are an integer value. Requires max_prefix_limit.
+ - description: Optional threshold percentage at which to generate a warning. Valid values are an integer value. Requires max_prefix_limit.
name: max_prefix_threshold
- - description: Optional warning-only keyword. Requires max_prefix_limit. May not
- be combined with max_prefix_interval.
+ - description: Optional warning-only keyword. Requires max_prefix_limit. May not be combined with max_prefix_interval.
name: max_prefix_warning
- description: Activate the next-hop-self feature.
name: next_hop_self
@@ -917,21 +779,17 @@ script:
- standard
- default
- auto: PREDEFINED
- description: Valid values are 'enable' for basic command enablement; 'always'
- to add the always keyword to the basic command; and 'inherit' to remove the
- command at this level (the command value is inherited from a higher BGP layer).
+ description: Valid values are 'enable' for basic command enablement; 'always' to add the always keyword to the basic command; and 'inherit' to remove the command at this level (the command value is inherited from a higher BGP layer).
name: soft_reconfiguration_in
predefined:
- enable
- always
- inherit
- - description: Site-of-origin. Valid values are a string defining a VPN extcommunity
- or 'default'.
+ - description: Site-of-origin. Valid values are a string defining a VPN extcommunity or 'default'.
name: soo
- description: suppress-inactive feature.
name: suppress_inactive
- - description: unsuppress-map. Valid values are a string defining a route-map
- name or 'default'.
+ - description: unsuppress-map. Valid values are a string defining a route-map name or 'default'.
name: unsuppress_map
- description: Weight value. Valid values are an integer value or 'default'.
name: weight
@@ -946,9 +804,7 @@ script:
description: Manages BGP address-family's neighbors configuration.
name: nxos-bgp-neighbor-af
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -957,83 +813,43 @@ script:
The `commands` argument also accepts an alternative form that allows for complex values that specify the command to run and the output format to return. This can be done on a command by command basis. The complex argument supports the keywords `command` and `output` where `command` is the command to run and `output` is one of 'text' or 'json'.
name: commands
required: true
- - description: Specifies what to evaluate from the output of the command and what
- conditionals to apply. This argument will cause the task to wait for a particular
- conditional to be true before moving forward. If the conditional is not
- true by the configured retries, the task fails. See examples.
+ - description: Specifies what to evaluate from the output of the command and what conditionals to apply. This argument will cause the task to wait for a particular conditional to be true before moving forward. If the conditional is not true by the configured retries, the task fails. See examples.
name: wait_for
- default: false
defaultValue: all
- description: The `match` argument is used in conjunction with the `wait_for`
- argument to specify the match policy. Valid values are `all` or `any`. If
- the value is set to `all` then all conditionals in the `wait_for` must be
- satisfied. If the value is set to `any` then only one of the values must
- be satisfied.
+ description: The `match` argument is used in conjunction with the `wait_for` argument to specify the match policy. Valid values are `all` or `any`. If the value is set to `all` then all conditionals in the `wait_for` must be satisfied. If the value is set to `any` then only one of the values must be satisfied.
name: match
- default: false
defaultValue: "10"
- description: Specifies the number of retries a command should by tried before
- it is considered failed. The command is run on the target device every retry
- and evaluated against the `wait_for` conditionals.
+ description: Specifies the number of retries a command should be tried before it is considered failed. The command is run on the target device every retry and evaluated against the `wait_for` conditionals.
name: retries
- default: false
defaultValue: "1"
- description: Configures the interval in seconds to wait between retries of the
- command. If the command does not pass the specified conditional, the interval
- indicates how to long to wait before trying the command again.
+ description: Configures the interval in seconds to wait between retries of the command. If the command does not pass the specified conditional, the interval indicates how long to wait before trying the command again.
name: interval
description: Run arbitrary command on Cisco NXOS devices
name: nxos-command
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The ordered set of commands that should be configured in the section. The
- commands must be the exact same commands as found in the device running-config. Be
- sure to note the configuration command syntax as some commands are automatically
- modified by the device config parser.
+ - description: The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser.
name: lines
- - description: The ordered set of parents that uniquely identify the section or
- hierarchy the commands should be checked against. If the parents argument
- is omitted, the commands are checked against the set of top level or global
- commands.
+ - description: The ordered set of parents that uniquely identify the section or hierarchy the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands.
name: parents
- - description: The `src` argument provides a path to the configuration file to
- load into the remote system. The path can either be a full system path to
- the configuration file if the value starts with / or relative to the root
- of the implemented role or playbook. This argument is mutually exclusive with
- the `lines` and `parents` arguments.
+ - description: The `src` argument provides a path to the configuration file to load into the remote system. The path can either be a full system path to the configuration file if the value starts with / or relative to the root of the implemented role or playbook. This argument is mutually exclusive with the `lines` and `parents` arguments.
name: src
- - description: The `replace_src` argument provides path to the configuration file
- to load into the remote system. This argument is used to replace the entire
- config with a flat-file. This is used with argument `replace` with value `config`.
- This is mutually exclusive with the `lines` and `src` arguments. This argument
- is supported on Nexus 9K device. Use `nxos_file_copy` module to copy the flat
- file to remote device and then use the path with this argument.
+ - description: The `replace_src` argument provides path to the configuration file to load into the remote system. This argument is used to replace the entire config with a flat-file. This is used with argument `replace` with value `config`. This is mutually exclusive with the `lines` and `src` arguments. This argument is supported on Nexus 9K device. Use `nxos_file_copy` module to copy the flat file to remote device and then use the path with this argument.
name: replace_src
- - description: The ordered set of commands to push on to the command stack if
- a change needs to be made. This allows the playbook designer the opportunity
- to perform configuration commands prior to pushing any changes without affecting
- how the set of commands are matched against the system.
+ - description: The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system.
name: before
- - description: The ordered set of commands to append to the end of the command
- stack if a change needs to be made. Just like with `before` this allows the
- playbook designer to append a set of commands to be executed after the command
- set.
+ - description: The ordered set of commands to append to the end of the command stack if a change needs to be made. Just like with `before` this allows the playbook designer to append a set of commands to be executed after the command set.
name: after
- auto: PREDEFINED
default: false
defaultValue: line
- description: Instructs the module on the way to perform the matching of the
- set of commands against the current device config. If match is set to `line`,
- commands are matched line by line. If match is set to `strict`, command lines
- are matched with respect to position. If match is set to `exact`, command
- lines must be an equal match. Finally, if match is set to `none`, the module
- will not attempt to compare the source configuration with the running configuration
- on the remote device.
+ description: Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to `line`, commands are matched line by line. If match is set to `strict`, command lines are matched with respect to position. If match is set to `exact`, command lines must be an equal match. Finally, if match is set to `none`, the module will not attempt to compare the source configuration with the running configuration on the remote device.
name: match
predefined:
- line
@@ -1043,12 +859,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: line
- description: Instructs the module on the way to perform the configuration on
- the device. If the replace argument is set to `line` then the modified lines
- are pushed to the device in configuration mode. If the replace argument is
- set to `block` then the entire command block is pushed to the device in configuration
- mode if any line is not correct. replace `config` is supported only on Nexus
- 9K device.
+ description: Instructs the module on the way to perform the configuration on the device. If the replace argument is set to `line` then the modified lines are pushed to the device in configuration mode. If the replace argument is set to `block` then the entire command block is pushed to the device in configuration mode if any line is not correct. replace `config` is supported only on Nexus 9K device.
name: replace
predefined:
- line
@@ -1056,40 +867,18 @@ script:
- config
- default: false
defaultValue: "no"
- description: This argument will cause the module to create a full backup of
- the current `running-config` from the remote device before any changes are
- made. If the `backup_options` value is not given, the backup file is written
- to the `backup` folder in the playbook root directory or role root directory,
- if playbook is part of an ansible role. If the directory does not exist, it
- is created.
+ description: This argument will cause the module to create a full backup of the current `running-config` from the remote device before any changes are made. If the `backup_options` value is not given, the backup file is written to the `backup` folder in the playbook root directory or role root directory, if playbook is part of an ansible role. If the directory does not exist, it is created.
name: backup
- - description: The module, by default, will connect to the remote device and retrieve
- the current running-config to use as a base for comparing against the contents
- of source. There are times when it is not desirable to have the task get
- the current running-config for every task in a playbook. The `running_config`
- argument allows the implementer to pass in the configuration to use as the
- base config for comparison.
+ - description: The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The `running_config` argument allows the implementer to pass in the configuration to use as the base config for comparison.
name: running_config
- default: false
defaultValue: "no"
- description: The `defaults` argument will influence how the running-config is
- collected from the device. When the value is set to true, the command used
- to collect the running-config is append with the all keyword. When the value
- is set to false, the command is issued without the all keyword
+ description: The `defaults` argument will influence how the running-config is collected from the device. When the value is set to true, the command used to collect the running-config is appended with the all keyword. When the value is set to false, the command is issued without the all keyword.
name: defaults
- auto: PREDEFINED
default: false
defaultValue: never
- description: When changes are made to the device running-configuration, the
- changes are not copied to non-volatile storage by default. Using this argument
- will change that before. If the argument is set to `always`, then the running-config
- will always be copied to the startup-config and the `modified` flag will always
- be set to True. If the argument is set to `modified`, then the running-config
- will only be copied to the startup-config if it has changed since the last
- save to startup-config. If the argument is set to `never`, the running-config
- will never be copied to the startup-config. If the argument is set to `changed`,
- then the running-config will only be copied to the startup-config if the task
- has made a change. `changed` was added in Ansible 2.6.
+ description: When changes are made to the device running-configuration, the changes are not copied to non-volatile storage by default. Using this argument will change that behavior. If the argument is set to `always`, then the running-config will always be copied to the startup-config and the `modified` flag will always be set to True. If the argument is set to `modified`, then the running-config will only be copied to the startup-config if it has changed since the last save to startup-config. If the argument is set to `never`, the running-config will never be copied to the startup-config. If the argument is set to `changed`, then the running-config will only be copied to the startup-config if the task has made a change. `changed` was added in Ansible 2.6.
name: save_when
predefined:
- always
@@ -1109,29 +898,17 @@ script:
- startup
- intended
- running
- - description: Use this argument to specify one or more lines that should be ignored
- during the diff. This is used for lines in the configuration that are automatically
- updated by the system. This argument takes a list of regular expressions
- or exact line matches.
+ - description: Use this argument to specify one or more lines that should be ignored during the diff. This is used for lines in the configuration that are automatically updated by the system. This argument takes a list of regular expressions or exact line matches.
name: diff_ignore_lines
- - description: The `intended_config` provides the master configuration that the
- node should conform to and is used to check the final running-config against. This
- argument will not modify any settings on the remote device and is strictly
- used to check the compliance of the current device's configuration against. When
- specifying this argument, the task should also modify the `diff_against` value
- and set it to `intended`.
+ - description: The `intended_config` provides the master configuration that the node should conform to and is used to check the final running-config against. This argument will not modify any settings on the remote device and is strictly used to check the compliance of the current device's configuration against. When specifying this argument, the task should also modify the `diff_against` value and set it to `intended`.
name: intended_config
- - description: This is a dict object containing configurable options related to
- backup file path. The value of this option is read only when `backup` is set
- to `True`, if `backup` is set to `false` this option will be silently ignored.
+ - description: This is a dict object containing configurable options related to backup file path. The value of this option is read only when `backup` is set to `True`, if `backup` is set to `false` this option will be silently ignored.
isArray: true
name: backup_options
description: Manage Cisco NXOS configuration sections
name: nxos-config
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1141,21 +918,17 @@ script:
description: Handles the EVPN control plane for VXLAN.
name: nxos-evpn-global
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: The EVPN VXLAN Network Identifier.
name: vni
required: true
- - description: The VPN Route Distinguisher (RD). The RD is combined with the IPv4
- or IPv6 prefix learned by the PE router to create a globally unique address.
+ - description: The VPN Route Distinguisher (RD). The RD is combined with the IPv4 or IPv6 prefix learned by the PE router to create a globally unique address.
name: route_distinguisher
required: true
- - description: Enables/Disables route-target settings for both import and export
- target communities using a single property.
+ - description: Enables/Disables route-target settings for both import and export target communities using a single property.
name: route_target_both
- description: Sets the route-target 'import' extended communities.
name: route_target_import
@@ -1172,34 +945,20 @@ script:
description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
name: nxos-evpn-vni
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: '!config'
- description: When supplied, this argument will restrict the facts collected
- to a given subset. Possible values for this argument include all, hardware,
- config, legacy, and interfaces. Can specify a list of values to include a
- larger subset. Values can also be used with an initial `M(!`) to specify
- that a specific subset should not be collected.
+ description: When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, hardware, config, legacy, and interfaces. Can specify a list of values to include a larger subset. Values can also be used with an initial `M(!`) to specify that a specific subset should not be collected.
name: gather_subset
- - description: When supplied, this argument will restrict the facts collected
- to a given subset. Possible values for this argument include all and the resources
- like interfaces, vlans etc. Can specify a list of values to include a larger
- subset. Values can also be used with an initial `M(!`) to specify that a specific
- subset should not be collected. Valid subsets are 'all', 'bfd_interfaces',
- 'lag_interfaces', 'telemetry', 'vlans', 'lacp', 'lacp_interfaces', 'interfaces',
- 'l3_interfaces', 'l2_interfaces', 'lldp_global'.
+ - description: When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all and the resources like interfaces, vlans etc. Can specify a list of values to include a larger subset. Values can also be used with an initial `M(!`) to specify that a specific subset should not be collected. Valid subsets are 'all', 'bfd_interfaces', 'lag_interfaces', 'telemetry', 'vlans', 'lacp', 'lacp_interfaces', 'interfaces', 'l3_interfaces', 'l2_interfaces', 'lldp_global'.
name: gather_network_resources
description: Gets facts about NX-OS switches
name: nxos-facts
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1217,37 +976,20 @@ script:
description: Manage features in NX-OS switches.
name: nxos-feature
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: When `system_mode_maintenance=true` it puts all enabled protocols
- in maintenance mode (using the isolate command). When `system_mode_maintenance=false`
- it puts all enabled protocols in normal mode (using the no isolate command).
+ - description: When `system_mode_maintenance=true` it puts all enabled protocols in maintenance mode (using the isolate command). When `system_mode_maintenance=false` it puts all enabled protocols in normal mode (using the no isolate command).
name: system_mode_maintenance
- - description: When `system_mode_maintenance_dont_generate_profile=true` it prevents
- the dynamic searching of enabled protocols and executes commands configured
- in a maintenance-mode profile. Use this option if you want the system to use
- a maintenance-mode profile that you have created. When `system_mode_maintenance_dont_generate_profile=false`
- it prevents the dynamic searching of enabled protocols and executes commands
- configured in a normal-mode profile. Use this option if you want the system
- to use a normal-mode profile that you have created.
+ - description: When `system_mode_maintenance_dont_generate_profile=true` it prevents the dynamic searching of enabled protocols and executes commands configured in a maintenance-mode profile. Use this option if you want the system to use a maintenance-mode profile that you have created. When `system_mode_maintenance_dont_generate_profile=false` it prevents the dynamic searching of enabled protocols and executes commands configured in a normal-mode profile. Use this option if you want the system to use a normal-mode profile that you have created.
name: system_mode_maintenance_dont_generate_profile
- - description: Keeps the switch in maintenance mode for a specified number of
- minutes. Range is 5-65535.
+ - description: Keeps the switch in maintenance mode for a specified number of minutes. Range is 5-65535.
name: system_mode_maintenance_timeout
- - description: Shuts down all protocols, vPC domains, and interfaces except the
- management interface (using the shutdown command). This option is disruptive
- while `system_mode_maintenance` (which uses the isolate command) is not.
+ - description: Shuts down all protocols, vPC domains, and interfaces except the management interface (using the shutdown command). This option is disruptive while `system_mode_maintenance` (which uses the isolate command) is not.
name: system_mode_maintenance_shutdown
- auto: PREDEFINED
- description: Boots the switch into maintenance mode automatically in the event
- of a specified system crash. Note that not all reset reasons are applicable
- for all platforms. Also if reset reason is set to match_any, it is not idempotent
- as it turns on all reset reasons. If reset reason is match_any and state is
- absent, it turns off all the reset reasons.
+ description: Boots the switch into maintenance mode automatically in the event of a specified system crash. Note that not all reset reasons are applicable for all platforms. Also if reset reason is set to match_any, it is not idempotent as it turns on all reset reasons. If reset reason is match_any and state is absent, it turns off all the reset reasons.
name: system_mode_maintenance_on_reload_reset_reason
predefined:
- hw_error
@@ -1272,9 +1014,7 @@ script:
description: Trigger a graceful removal or insertion (GIR) of the switch.
name: nxos-gir
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1298,9 +1038,7 @@ script:
description: Create a maintenance-mode or normal-mode profile for GIR.
name: nxos-gir-profile-management
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1328,11 +1066,7 @@ script:
- disabled
- description: HSRP virtual IP address or keyword 'default'
name: vip
- - description: Authentication string. If this needs to be hidden(for md5 type),
- the string should be 7 followed by the key string. Otherwise, it can be 0
- followed by key string or just key string (for backward compatibility). For
- text type, this should be just be a key string. if this is 'default', authentication
- is removed.
+ - description: Authentication string. If this needs to be hidden (for md5 type), the string should be 7 followed by the key string. Otherwise, it can be 0 followed by key string or just key string (for backward compatibility). For text type, this should just be a key string. If this is 'default', authentication is removed.
name: auth_string
- auto: PREDEFINED
description: Authentication type.
@@ -1351,17 +1085,13 @@ script:
description: Manages HSRP configuration on NX-OS switches.
name: nxos-hsrp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Removes routes when the IGMP process is restarted. By default,
- routes are not flushed.
+ - description: Removes routes when the IGMP process is restarted. By default, routes are not flushed.
name: flush_routes
- - description: Enables or disables the enforce router alert option check for IGMPv2
- and IGMPv3 packets.
+ - description: Enables or disables the enforce router alert option check for IGMPv2 and IGMPv3 packets.
name: enforce_rtr_alert
- description: Restarts the igmp process (using an exec config command).
name: restart
@@ -1376,9 +1106,7 @@ script:
description: Manages IGMP global configuration.
name: nxos-igmp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1392,61 +1120,35 @@ script:
- "2"
- "3"
- default
- - description: Query interval used when the IGMP process starts up. The range
- is from 1 to 18000 or keyword 'default'. The default is 31.
+ - description: Query interval used when the IGMP process starts up. The range is from 1 to 18000 or keyword 'default'. The default is 31.
name: startup_query_interval
- - description: Query count used when the IGMP process starts up. The range is
- from 1 to 10 or keyword 'default'. The default is 2.
+ - description: Query count used when the IGMP process starts up. The range is from 1 to 10 or keyword 'default'. The default is 2.
name: startup_query_count
- - description: Sets the robustness variable. Values can range from 1 to 7 or keyword
- 'default'. The default is 2.
+ - description: Sets the robustness variable. Values can range from 1 to 7 or keyword 'default'. The default is 2.
name: robustness
- - description: Sets the querier timeout that the software uses when deciding to
- take over as the querier. Values can range from 1 to 65535 seconds or keyword
- 'default'. The default is 255 seconds.
+ - description: Sets the querier timeout that the software uses when deciding to take over as the querier. Values can range from 1 to 65535 seconds or keyword 'default'. The default is 255 seconds.
name: querier_timeout
- - description: Sets the response time advertised in IGMP queries. Values can range
- from 1 to 25 seconds or keyword 'default'. The default is 10 seconds.
+ - description: Sets the response time advertised in IGMP queries. Values can range from 1 to 25 seconds or keyword 'default'. The default is 10 seconds.
name: query_mrt
- - description: Sets the frequency at which the software sends IGMP host query
- messages. Values can range from 1 to 18000 seconds or keyword 'default'. The
- default is 125 seconds.
+ - description: Sets the frequency at which the software sends IGMP host query messages. Values can range from 1 to 18000 seconds or keyword 'default'. The default is 125 seconds.
name: query_interval
- - description: Sets the query interval waited after sending membership reports
- before the software deletes the group state. Values can range from 1 to 25
- seconds or keyword 'default'. The default is 1 second.
+ - description: Sets the query interval waited after sending membership reports before the software deletes the group state. Values can range from 1 to 25 seconds or keyword 'default'. The default is 1 second.
name: last_member_qrt
- - description: Sets the number of times that the software sends an IGMP query
- in response to a host leave message. Values can range from 1 to 5 or keyword
- 'default'. The default is 2.
+ - description: Sets the number of times that the software sends an IGMP query in response to a host leave message. Values can range from 1 to 5 or keyword 'default'. The default is 2.
name: last_member_query_count
- - description: Sets the group membership timeout for IGMPv2. Values can range
- from 3 to 65,535 seconds or keyword 'default'. The default is 260 seconds.
+ - description: Sets the group membership timeout for IGMPv2. Values can range from 3 to 65,535 seconds or keyword 'default'. The default is 260 seconds.
name: group_timeout
- - description: Configures report-link-local-groups. Enables sending reports for
- groups in 224.0.0.0/24. Reports are always sent for nonlink local groups.
- By default, reports are not sent for link local groups.
+ - description: Configures report-link-local-groups. Enables sending reports for groups in 224.0.0.0/24. Reports are always sent for nonlink local groups. By default, reports are not sent for link local groups.
name: report_llg
- - description: Enables the device to remove the group entry from the multicast
- routing table immediately upon receiving a leave message for the group. Use
- this command to minimize the leave latency of IGMPv2 group memberships on
- a given IGMP interface because the device does not send group-specific queries.
- The default is disabled.
+ - description: Enables the device to remove the group entry from the multicast routing table immediately upon receiving a leave message for the group. Use this command to minimize the leave latency of IGMPv2 group memberships on a given IGMP interface because the device does not send group-specific queries. The default is disabled.
name: immediate_leave
- - description: Configure a routemap for static outgoing interface (OIF) or keyword
- 'default'.
+ - description: Configure a routemap for static outgoing interface (OIF) or keyword 'default'.
name: oif_routemap
- - description: This argument is deprecated, please use oif_ps instead. Configure
- a prefix for static outgoing interface (OIF).
+ - description: This argument is deprecated, please use oif_ps instead. Configure a prefix for static outgoing interface (OIF).
name: oif_prefix
- - description: This argument is deprecated, please use oif_ps instead. Configure
- a source for static outgoing interface (OIF).
+ - description: This argument is deprecated, please use oif_ps instead. Configure a source for static outgoing interface (OIF).
name: oif_source
- - description: Configure prefixes and sources for static outgoing interface (OIF).
- This is a list of dict where each dict has source and prefix defined or just
- prefix if source is not needed. The specified values will be configured on
- the device and if any previous prefix/sources exist, they will be removed.
- Keyword 'default' is also accepted which removes all existing prefix/sources.
+ - description: Configure prefixes and sources for static outgoing interface (OIF). This is a list of dict where each dict has source and prefix defined or just prefix if source is not needed. The specified values will be configured on the device and if any previous prefix/sources exist, they will be removed. Keyword 'default' is also accepted which removes all existing prefix/sources.
name: oif_ps
- default: false
defaultValue: "False"
@@ -1464,16 +1166,13 @@ script:
description: Manages IGMP interface configuration.
name: nxos-igmp-interface
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Enables/disables IGMP snooping on the switch.
name: snooping
- - description: Group membership timeout value for all VLANs on the device. Accepted
- values are integer in range 1-10080, `never` and `default`.
+ - description: Group membership timeout value for all VLANs on the device. Accepted values are integer in range 1-10080, `never` and `default`.
name: group_timeout
- description: Global link-local groups suppression.
name: link_local_grp_supp
@@ -1492,17 +1191,14 @@ script:
description: Manages IGMP snooping global configuration.
name: nxos-igmp-snooping
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Name of the system (or combined) image file on flash.
name: system_image_file
required: true
- - description: Name of the kickstart image file on flash. (Not required on all
- Nexus platforms)
+ - description: Name of the kickstart image file on flash. (Not required on all Nexus platforms)
name: kickstart_image_file
- auto: PREDEFINED
default: false
@@ -1521,9 +1217,7 @@ script:
description: Set boot options like boot, kickstart image and issu.
name: nxos-install-os
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1533,9 +1227,7 @@ script:
- description: Name of the ospf instance.
name: ospf
required: true
- - description: Ospf area associated with this cisco_interface_ospf instance. Valid
- values are a string, formatted as an IP address (i.e. "0.0.0.0") or as an
- integer.
+ - description: Ospf area associated with this cisco_interface_ospf instance. Valid values are a string, formatted as an IP address (i.e. "0.0.0.0") or as an integer.
name: area
required: true
- auto: PREDEFINED
@@ -1550,40 +1242,30 @@ script:
- default
- description: The cost associated with this cisco_interface_ospf instance.
name: cost
- - description: Time between sending successive hello packets. Valid values are
- an integer or the keyword 'default'.
+ - description: Time between sending successive hello packets. Valid values are an integer or the keyword 'default'.
name: hello_interval
- - description: Time interval an ospf neighbor waits for a hello packet before
- tearing down adjacencies. Valid values are an integer or the keyword 'default'.
+ - description: Time interval an ospf neighbor waits for a hello packet before tearing down adjacencies. Valid values are an integer or the keyword 'default'.
name: dead_interval
- - description: Enable or disable passive-interface state on this interface. true
- - (enable) Prevent OSPF from establishing an adjacency or sending routing
- updates on this interface. false - (disable) Override global 'passive-interface
- default' for this interface.
+ - description: Enable or disable passive-interface state on this interface. true - (enable) Prevent OSPF from establishing an adjacency or sending routing updates on this interface. false - (disable) Override global 'passive-interface default' for this interface.
name: passive_interface
- auto: PREDEFINED
- description: Specifies interface ospf network type. Valid values are 'point-to-point'
- or 'broadcast'.
+ description: Specifies interface ospf network type. Valid values are 'point-to-point' or 'broadcast'.
name: network
predefined:
- point-to-point
- broadcast
- description: Enables or disables the usage of message digest authentication.
name: message_digest
- - description: Md5 authentication key-id associated with the ospf instance. If
- this is present, message_digest_encryption_type, message_digest_algorithm_type
- and message_digest_password are mandatory. Valid value is an integer and 'default'.
+ - description: Md5 authentication key-id associated with the ospf instance. If this is present, message_digest_encryption_type, message_digest_algorithm_type and message_digest_password are mandatory. Valid value is an integer and 'default'.
name: message_digest_key_id
- auto: PREDEFINED
- description: Algorithm used for authentication among neighboring routers within
- an area. Valid values are 'md5' and 'default'.
+ description: Algorithm used for authentication among neighboring routers within an area. Valid values are 'md5' and 'default'.
name: message_digest_algorithm_type
predefined:
- md5
- default
- auto: PREDEFINED
- description: Specifies the scheme used for encrypting message_digest_password.
- Valid values are '3des' or 'cisco_type_7' encryption or 'default'.
+ description: Specifies the scheme used for encrypting message_digest_password. Valid values are '3des' or 'cisco_type_7' encryption or 'default'.
name: message_digest_encryption_type
predefined:
- cisco_type_7
@@ -1602,9 +1284,7 @@ script:
description: Manages configuration of an OSPF interface instance.
name: nxos-interface-ospf
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1624,9 +1304,7 @@ script:
description: Manages interface attributes of NX-OS Interfaces
name: nxos-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1646,9 +1324,7 @@ script:
description: Manages Layer-2 Interfaces attributes of NX-OS Interfaces
name: nxos-l2-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1668,9 +1344,7 @@ script:
description: Manages Layer-3 Interfaces attributes of NX-OS Interfaces
name: nxos-l3-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1686,13 +1360,10 @@ script:
- merged
- replaced
- deleted
- description: Manage Global Link Aggregation Control Protocol (LACP) on Cisco NX-OS
- devices.
+ description: Manage Global Link Aggregation Control Protocol (LACP) on Cisco NX-OS devices.
name: nxos-lacp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1709,13 +1380,10 @@ script:
- replaced
- overridden
- deleted
- description: Manage Link Aggregation Control Protocol (LACP) attributes of interfaces
- on Cisco NX-OS devices.
+ description: Manage Link Aggregation Control Protocol (LACP) attributes of interfaces on Cisco NX-OS devices.
name: nxos-lacp-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1735,17 +1403,14 @@ script:
description: Manages link aggregation groups of NX-OS Interfaces
name: nxos-lag-interfaces
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: State of the LLDP configuration. If value is `present` lldp will
- be enabled else if it is `absent` it will be disabled.
+ description: State of the LLDP configuration. If value is `present` lldp will be enabled else if it is `absent` it will be disabled.
name: state
predefined:
- present
@@ -1753,9 +1418,7 @@ script:
description: Manage LLDP configuration on Cisco NXOS network devices.
name: nxos-lldp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1771,13 +1434,10 @@ script:
- merged
- replaced
- deleted
- description: Configure and manage Link Layer Discovery Protocol(LLDP) attributes
- on NX-OS platforms.
+ description: Configure and manage Link Layer Discovery Protocol (LLDP) attributes on NX-OS platforms.
name: nxos-lldp-global
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1794,8 +1454,7 @@ script:
name: remote_server
- description: VRF to be used while configuring remote logging (when dest is 'server').
name: use_vrf
- - description: Interface to be used while configuring source-interface for logging
- (e.g., 'Ethernet1/2', 'mgmt0')
+ - description: Interface to be used while configuring source-interface for logging (e.g., 'Ethernet1/2', 'mgmt0')
name: interface
- description: If value of `dest` is `logfile` it indicates file-name.
name: name
@@ -1824,16 +1483,14 @@ script:
- trunk-enable
- trunk-default
- auto: PREDEFINED
- description: Add interface description to interface syslogs. Does not work with
- version 6.0 images using nxapi as a transport.
+ description: Add interface description to interface syslogs. Does not work with version 6.0 images using nxapi as a transport.
name: interface_message
predefined:
- add-interface-description
- description: Set logfile size
name: file_size
- auto: PREDEFINED
- description: Set logging facility ethpm link status. Not idempotent with version
- 6.0 images.
+ description: Set logging facility ethpm link status. Not idempotent with version 6.0 images.
name: facility_link_status
predefined:
- link-down-notif
@@ -1849,16 +1506,12 @@ script:
- seconds
- default: false
defaultValue: "False"
- description: Remove any switch logging configuration that does not match what
- has been configured Not supported for ansible_connection local. All nxos-logging
- tasks must use the same ansible_connection type.
+ description: Remove any switch logging configuration that does not match what has been configured. Not supported for ansible_connection local. All nxos-logging tasks must use the same ansible_connection type.
name: purge
description: Manage logging on network devices
name: nxos-logging
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1866,24 +1519,19 @@ script:
name: server
- description: Network address of NTP peer.
name: peer
- - description: Authentication key identifier to use with given NTP server or peer
- or keyword 'default'.
+ - description: Authentication key identifier to use with given NTP server or peer or keyword 'default'.
name: key_id
- auto: PREDEFINED
- description: Makes given NTP server or peer the preferred NTP server or peer
- for the device.
+ description: Makes given NTP server or peer the preferred NTP server or peer for the device.
name: prefer
predefined:
- enabled
- disabled
- - description: Makes the device communicate with the given NTP server or peer
- over a specific VRF or keyword 'default'.
+ - description: Makes the device communicate with the given NTP server or peer over a specific VRF or keyword 'default'.
name: vrf_name
- - description: Local source address from which NTP messages are sent or keyword
- 'default'
+ - description: Local source address from which NTP messages are sent or keyword 'default'.
name: source_addr
- - description: Local source interface from which NTP messages are sent. Must be
- fully qualified interface name or keyword 'default'
+ - description: Local source interface from which NTP messages are sent. Must be fully qualified interface name or keyword 'default'.
name: source_int
- auto: PREDEFINED
default: false
@@ -1896,9 +1544,7 @@ script:
description: Manages core NTP configuration.
name: nxos-ntp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1909,8 +1555,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: text
- description: Whether the given md5string is in cleartext or has been encrypted.
- If in cleartext, the device will encrypt it before storing it.
+ description: Whether the given md5string is in cleartext or has been encrypted. If in cleartext, the device will encrypt it before storing it.
name: auth_type
predefined:
- text
@@ -1918,8 +1563,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: "false"
- description: Whether the given key is required to be supplied by a time source
- for the device to synchronize to the time source.
+ description: Whether the given key is required to be supplied by a time source for the device to synchronize to the time source.
name: trusted_key
predefined:
- "false"
@@ -1941,16 +1585,13 @@ script:
description: Manages NTP authentication.
name: nxos-ntp-auth
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Sets whether the device is an authoritative NTP server.
name: master
- - description: If `master=true`, an optional stratum can be supplied (1-15). The
- device default is 8.
+ - description: If `master=true`, an optional stratum can be supplied (1-15). The device default is 8.
name: stratum
- description: Sets whether NTP logging is enabled on the device.
name: logging
@@ -1965,87 +1606,58 @@ script:
description: Manages NTP options.
name: nxos-ntp-options
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: "80"
- description: Configure the port with which the HTTP server will listen on for
- requests. By default, NXAPI will bind the HTTP service to the standard HTTP
- port 80. This argument accepts valid port values in the range of 1 to 65535.
+ description: Configure the port with which the HTTP server will listen on for requests. By default, NXAPI will bind the HTTP service to the standard HTTP port 80. This argument accepts valid port values in the range of 1 to 65535.
name: http_port
- default: false
defaultValue: "True"
- description: Controls the operating state of the HTTP protocol as one of the
- underlying transports for NXAPI. By default, NXAPI will enable the HTTP transport
- when the feature is first configured. To disable the use of the HTTP transport,
- set the value of this argument to False.
+ description: Controls the operating state of the HTTP protocol as one of the underlying transports for NXAPI. By default, NXAPI will enable the HTTP transport when the feature is first configured. To disable the use of the HTTP transport, set the value of this argument to False.
name: http
- default: false
defaultValue: "443"
- description: Configure the port with which the HTTPS server will listen on for
- requests. By default, NXAPI will bind the HTTPS service to the standard HTTPS
- port 443. This argument accepts valid port values in the range of 1 to 65535.
+ description: Configure the port with which the HTTPS server will listen on for requests. By default, NXAPI will bind the HTTPS service to the standard HTTPS port 443. This argument accepts valid port values in the range of 1 to 65535.
name: https_port
- default: false
defaultValue: "False"
- description: Controls the operating state of the HTTPS protocol as one of the
- underlying transports for NXAPI. By default, NXAPI will disable the HTTPS
- transport when the feature is first configured. To enable the use of the
- HTTPS transport, set the value of this argument to True.
+ description: Controls the operating state of the HTTPS protocol as one of the underlying transports for NXAPI. By default, NXAPI will disable the HTTPS transport when the feature is first configured. To enable the use of the HTTPS transport, set the value of this argument to True.
name: https
- default: false
defaultValue: "False"
- description: The NXAPI feature provides a web base UI for developers for entering
- commands. This feature is initially disabled when the NXAPI feature is configured
- for the first time. When the `sandbox` argument is set to True, the developer
- sandbox URL will accept requests and when the value is set to False, the sandbox
- URL is unavailable. This is supported on NX-OS 7K series.
+ description: The NXAPI feature provides a web base UI for developers for entering commands. This feature is initially disabled when the NXAPI feature is configured for the first time. When the `sandbox` argument is set to True, the developer sandbox URL will accept requests and when the value is set to False, the sandbox URL is unavailable. This is supported on NX-OS 7K series.
name: sandbox
- auto: PREDEFINED
default: false
defaultValue: present
- description: The `state` argument controls whether or not the NXAPI feature
- is configured on the remote device. When the value is `present` the NXAPI
- feature configuration is present in the device running-config. When the values
- is `absent` the feature configuration is removed from the running-config.
+ description: The `state` argument controls whether or not the NXAPI feature is configured on the remote device. When the value is `present` the NXAPI feature configuration is present in the device running-config. When the values is `absent` the feature configuration is removed from the running-config.
name: state
predefined:
- present
- absent
- default: false
defaultValue: "False"
- description: Controls the use of whether strong or weak ciphers are configured.
- By default, this feature is disabled and weak ciphers are configured. To
- enable the use of strong ciphers, set the value of this argument to True.
+ description: Controls the use of whether strong or weak ciphers are configured. By default, this feature is disabled and weak ciphers are configured. To enable the use of strong ciphers, set the value of this argument to True.
name: ssl_strong_ciphers
- default: false
defaultValue: "True"
- description: Controls the use of the Transport Layer Security version 1.0 is
- configured. By default, this feature is enabled. To disable the use of TLSV1.0,
- set the value of this argument to True.
+ description: Controls the use of the Transport Layer Security version 1.0 is configured. By default, this feature is enabled. To disable the use of TLSV1.0, set the value of this argument to True.
name: tlsv1_0
- default: false
defaultValue: "False"
- description: Controls the use of the Transport Layer Security version 1.1 is
- configured. By default, this feature is disabled. To enable the use of TLSV1.1,
- set the value of this argument to True.
+ description: Controls the use of the Transport Layer Security version 1.1 is configured. By default, this feature is disabled. To enable the use of TLSV1.1, set the value of this argument to True.
name: tlsv1_1
- default: false
defaultValue: "False"
- description: Controls the use of the Transport Layer Security version 1.2 is
- configured. By default, this feature is disabled. To enable the use of TLSV1.2,
- set the value of this argument to True.
+ description: Controls the use of the Transport Layer Security version 1.2 is configured. By default, this feature is disabled. To enable the use of TLSV1.2, set the value of this argument to True.
name: tlsv1_2
description: Manage NXAPI configuration on an NXOS device.
name: nxos-nxapi
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2063,57 +1675,41 @@ script:
description: Manages configuration of an ospf instance.
name: nxos-ospf
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: default
- description: Name of the resource instance. Valid value is a string. The name
- 'default' is a valid VRF representing the global OSPF.
+ description: Name of the resource instance. Valid value is a string. The name 'default' is a valid VRF representing the global OSPF.
name: vrf
- description: Name of the OSPF instance.
name: ospf
required: true
- description: Router Identifier (ID) of the OSPF router VRF instance.
name: router_id
- - description: Specify the default Metric value. Valid values are an integer or
- the keyword 'default'.
+ - description: Specify the default Metric value. Valid values are an integer or the keyword 'default'.
name: default_metric
- auto: PREDEFINED
- description: Controls the level of log messages generated whenever a neighbor
- changes state. Valid values are 'log', 'detail', and 'default'.
+ description: Controls the level of log messages generated whenever a neighbor changes state. Valid values are 'log', 'detail', and 'default'.
name: log_adjacency
predefined:
- log
- detail
- default
- - description: Specify the start interval for rate-limiting Link-State Advertisement
- (LSA) generation. Valid values are an integer, in milliseconds, or the keyword
- 'default'.
+ - description: Specify the start interval for rate-limiting Link-State Advertisement (LSA) generation. Valid values are an integer, in milliseconds, or the keyword 'default'.
name: timer_throttle_lsa_start
- - description: Specify the hold interval for rate-limiting Link-State Advertisement
- (LSA) generation. Valid values are an integer, in milliseconds, or the keyword
- 'default'.
+ - description: Specify the hold interval for rate-limiting Link-State Advertisement (LSA) generation. Valid values are an integer, in milliseconds, or the keyword 'default'.
name: timer_throttle_lsa_hold
- - description: Specify the max interval for rate-limiting Link-State Advertisement
- (LSA) generation. Valid values are an integer, in milliseconds, or the keyword
- 'default'.
+ - description: Specify the max interval for rate-limiting Link-State Advertisement (LSA) generation. Valid values are an integer, in milliseconds, or the keyword 'default'.
name: timer_throttle_lsa_max
- - description: Specify initial Shortest Path First (SPF) schedule delay. Valid
- values are an integer, in milliseconds, or the keyword 'default'.
+ - description: Specify initial Shortest Path First (SPF) schedule delay. Valid values are an integer, in milliseconds, or the keyword 'default'.
name: timer_throttle_spf_start
- - description: Specify minimum hold time between Shortest Path First (SPF) calculations.
- Valid values are an integer, in milliseconds, or the keyword 'default'.
+ - description: Specify minimum hold time between Shortest Path First (SPF) calculations. Valid values are an integer, in milliseconds, or the keyword 'default'.
name: timer_throttle_spf_hold
- - description: Specify the maximum wait time between Shortest Path First (SPF)
- calculations. Valid values are an integer, in milliseconds, or the keyword
- 'default'.
+ - description: Specify the maximum wait time between Shortest Path First (SPF) calculations. Valid values are an integer, in milliseconds, or the keyword 'default'.
name: timer_throttle_spf_max
- - description: Specifies the reference bandwidth used to assign OSPF cost. Valid
- values are an integer, in Mbps, or the keyword 'default'.
+ - description: Specifies the reference bandwidth used to assign OSPF cost. Valid values are an integer, in Mbps, or the keyword 'default'.
name: auto_cost
- auto: PREDEFINED
description: |-
@@ -2136,9 +1732,7 @@ script:
description: Manages a VRF for an OSPF router.
name: nxos-ospf-vrf
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2148,9 +1742,7 @@ script:
description: Configures anycast gateway MAC of the switch.
name: nxos-overlay-global
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2162,19 +1754,13 @@ script:
predefined:
- enable
- disable
- - description: Configure group ranges for Source Specific Multicast (SSM). Valid
- values are multicast addresses or the keyword `none` or keyword `default`.
- `none` removes all SSM group ranges. `default` will set ssm_range to the default
- multicast address. If you set multicast address, please ensure that it is
- not the same as the `default`, otherwise use the `default` option.
+ - description: Configure group ranges for Source Specific Multicast (SSM). Valid values are multicast addresses or the keyword `none` or keyword `default`. `none` removes all SSM group ranges. `default` will set ssm_range to the default multicast address. If you set multicast address, please ensure that it is not the same as the `default`, otherwise use the `default` option.
name: ssm_range
required: true
description: Manages configuration of a PIM instance.
name: nxos-pim
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2240,23 +1826,18 @@ script:
description: Manages PIM interface configuration.
name: nxos-pim-interface
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Configures a Protocol Independent Multicast (PIM) static rendezvous
- point (RP) address. Valid values are unicast addresses.
+ - description: Configures a Protocol Independent Multicast (PIM) static rendezvous point (RP) address. Valid values are unicast addresses.
name: rp_address
required: true
- description: Group range for static RP. Valid values are multicast addresses.
name: group_list
- - description: Prefix list policy for static RP. Valid values are prefix-list
- policy names.
+ - description: Prefix list policy for static RP. Valid values are prefix-list policy names.
name: prefix_list
- - description: Route map policy for static RP. Valid values are route-map policy
- names.
+ - description: Route map policy for static RP. Valid values are route-map policy names.
name: route_map
- description: Group range is treated in PIM bidirectional mode.
name: bidir
@@ -2273,9 +1854,7 @@ script:
description: Manages configuration of an PIM static RP address instance.
name: nxos-pim-rp-address
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2301,9 +1880,7 @@ script:
description: Tests reachability using ping from Nexus switch.
name: nxos-ping
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2314,23 +1891,18 @@ script:
description: Reboot a network device.
name: nxos-reboot
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Name of checkpoint file to create. Mutually exclusive with rollback_to.
name: checkpoint_file
- - description: Name of checkpoint file to rollback to. Mutually exclusive with
- checkpoint_file.
+ - description: Name of checkpoint file to rollback to. Mutually exclusive with checkpoint_file.
name: rollback_to
description: Set a checkpoint or rollback to a checkpoint.
name: nxos-rollback
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2339,16 +1911,14 @@ script:
required: true
- default: false
defaultValue: bootflash
- description: The remote file system of the device. If omitted, devices that
- support a file_system parameter will use their default values.
+ description: The remote file system of the device. If omitted, devices that support a file_system parameter will use their default values.
name: file_system
- description: List of RPM/patch definitions.
name: aggregate
- auto: PREDEFINED
default: false
defaultValue: present
- description: If the state is present, the rpm will be installed, If the state
- is absent, it will be removed.
+ description: If the state is present, the rpm will be installed, If the state is absent, it will be removed.
name: state
predefined:
- present
@@ -2356,24 +1926,19 @@ script:
description: Install patch or feature rpms on Cisco NX-OS devices.
name: nxos-rpm
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Name of the remote package.
name: pkg
required: true
- - description: The remote file system of the device. If omitted, devices that
- support a file_system parameter will use their default values.
+ - description: The remote file system of the device. If omitted, devices that support a file_system parameter will use their default values.
name: file_system
description: Perform SMUs on Cisco NX-OS devices.
name: nxos-smu
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2395,8 +1960,7 @@ script:
name: snapshot1
- description: Second snapshot to be used when `action=compare`.
name: snapshot2
- - description: Name of the file where snapshots comparison will be stored when
- `action=compare`.
+ - description: Name of the file where snapshots comparison will be stored when `action=compare`.
name: comparison_results_file
- auto: PREDEFINED
description: Snapshot options to be used when `action=compare`.
@@ -2409,32 +1973,24 @@ script:
name: section
- description: Specify a new show command, to be used when `action=add`.
name: show_command
- - description: Specifies the tag of each row entry of the show command's XML output,
- to be used when `action=add`.
+ - description: Specifies the tag of each row entry of the show command's XML output, to be used when `action=add`.
name: row_id
- - description: Specify the tags used to distinguish among row entries, to be used
- when `action=add`.
+ - description: Specify the tags used to distinguish among row entries, to be used when `action=add`.
name: element_key1
- - description: Specify the tags used to distinguish among row entries, to be used
- when `action=add`.
+ - description: Specify the tags used to distinguish among row entries, to be used when `action=add`.
name: element_key2
- default: false
defaultValue: "no"
- description: Specify to locally store a new created snapshot, to be used when
- `action=create`.
+ description: Specify to locally store a new created snapshot, to be used when `action=create`.
name: save_snapshot_locally
- default: false
defaultValue: ./
- description: Specify the path of the file where new created snapshot or snapshots
- comparison will be stored, to be used when `action=create` and `save_snapshot_locally=true`
- or `action=compare`.
+ description: Specify the path of the file where new created snapshot or snapshots comparison will be stored, to be used when `action=create` and `save_snapshot_locally=true` or `action=compare`.
name: path
description: Manage snapshots of the running states of selected features.
name: nxos-snapshot
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2462,9 +2018,7 @@ script:
description: Manages SNMP community configs.
name: nxos-snmp-community
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2483,9 +2037,7 @@ script:
description: Manages SNMP contact info.
name: nxos-snmp-contact
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2513,30 +2065,21 @@ script:
description: UDP port number (0-65535).
name: udp
- auto: PREDEFINED
- description: type of message to send to host. If this is not specified, trap
- type is used.
+ description: type of message to send to host. If this is not specified, trap type is used.
name: snmp_type
predefined:
- trap
- inform
- - description: VRF to use to source traffic to source. If state = absent, the
- vrf is removed.
+ - description: VRF to use to source traffic to source. If state = absent, the vrf is removed.
name: vrf
- - description: Name of VRF to filter. If state = absent, the vrf is removed from
- the filter.
+ - description: Name of VRF to filter. If state = absent, the vrf is removed from the filter.
name: vrf_filter
- - description: Source interface. Must be fully qualified interface name. If state
- = absent, the interface is removed.
+ - description: Source interface. Must be fully qualified interface name. If state = absent, the interface is removed.
name: src_intf
- auto: PREDEFINED
default: false
defaultValue: present
- description: Manage the state of the resource. If state = present, the host
- is added to the configuration. If only vrf and/or vrf_filter and/or src_intf
- are given, they will be added to the existing host configuration. If state
- = absent, the host is removed if community parameter is given. It is possible
- to remove only vrf and/or src_int and/or vrf_filter by providing only those
- parameters and no community parameter.
+ description: Manage the state of the resource. If state = present, the host is added to the configuration. If only vrf and/or vrf_filter and/or src_intf are given, they will be added to the existing host configuration. If state = absent, the host is removed if community parameter is given. It is possible to remove only vrf and/or src_int and/or vrf_filter by providing only those parameters and no community parameter.
name: state
predefined:
- present
@@ -2544,9 +2087,7 @@ script:
description: Manages SNMP host configuration.
name: nxos-snmp-host
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2564,9 +2105,7 @@ script:
description: Manages SNMP location information.
name: nxos-snmp-location
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2616,21 +2155,14 @@ script:
description: Manages SNMP traps.
name: nxos-snmp-traps
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Name of the user.
name: user
required: true
- - description: Group to which the user will belong to. If state = present, and
- the user is existing, the group is added to the user. If the user is not existing,
- user entry is created with this group argument. If state = absent, only the
- group is removed from the user entry. However, to maintain backward compatibility,
- if the existing user belongs to only one group, and if group argument is same
- as the existing user's group, then the user entry also is deleted.
+ - description: Group to which the user will belong to. If state = present, and the user is existing, the group is added to the user. If the user is not existing, user entry is created with this group argument. If state = absent, only the group is removed from the user entry. However, to maintain backward compatibility, if the existing user belongs to only one group, and if group argument is same as the existing user's group, then the user entry also is deleted.
name: group
- auto: PREDEFINED
description: Authentication parameters for the user.
@@ -2655,17 +2187,14 @@ script:
description: Manages SNMP users for monitoring.
name: nxos-snmp-user
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Destination prefix of static route.
name: prefix
required: true
- - description: Next hop address or interface of static route. If interface, it
- must be the fully-qualified interface name.
+ - description: Next hop address or interface of static route. If interface, it must be the fully-qualified interface name.
name: next_hop
required: true
- default: false
@@ -2674,16 +2203,13 @@ script:
name: vrf
- description: Route tag value (numeric) or keyword 'default'.
name: tag
- - description: Name of the route or keyword 'default'. Used with the name parameter
- on the CLI.
+ - description: Name of the route or keyword 'default'. Used with the name parameter on the CLI.
name: route_name
- - description: Preference or administrative difference of route (range 1-255)
- or keyword 'default'.
+ - description: Preference or administrative difference of route (range 1-255) or keyword 'default'.
name: pref
- description: List of static route definitions
name: aggregate
- - description: Track value (range 1 - 512). Track must already be configured on
- the device before adding the route.
+ - description: Track value (range 1 - 512). Track must already be configured on the device before adding the route.
name: track
- auto: PREDEFINED
default: false
@@ -2696,43 +2222,26 @@ script:
description: Manages static route configuration
name: nxos-static-route
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Configure the device hostname parameter. This option takes an ASCII
- string value or keyword 'default'
+ - description: Configure the device hostname parameter. This option takes an ASCII string value or keyword 'default'
name: hostname
- - description: Configures the default domain name suffix to be used when referencing
- this node by its FQDN. This argument accepts either a list of domain names
- or a list of dicts that configure the domain name and VRF name or keyword
- 'default'. See examples.
+ - description: Configures the default domain name suffix to be used when referencing this node by its FQDN. This argument accepts either a list of domain names or a list of dicts that configure the domain name and VRF name or keyword 'default'. See examples.
name: domain_name
- - description: Enables or disables the DNS lookup feature in Cisco NXOS. This
- argument accepts boolean values. When enabled, the system will try to resolve
- hostnames using DNS and when disabled, hostnames will not be resolved.
+ - description: Enables or disables the DNS lookup feature in Cisco NXOS. This argument accepts boolean values. When enabled, the system will try to resolve hostnames using DNS and when disabled, hostnames will not be resolved.
name: domain_lookup
- - description: Configures a list of domain name suffixes to search when performing
- DNS name resolution. This argument accepts either a list of domain names or
- a list of dicts that configure the domain name and VRF name or keyword 'default'.
- See examples.
+ - description: Configures a list of domain name suffixes to search when performing DNS name resolution. This argument accepts either a list of domain names or a list of dicts that configure the domain name and VRF name or keyword 'default'. See examples.
name: domain_search
- - description: List of DNS name servers by IP address to use to perform name resolution
- lookups. This argument accepts either a list of DNS servers or a list of
- hashes that configure the name server and VRF name or keyword 'default'. See
- examples.
+ - description: List of DNS name servers by IP address to use to perform name resolution lookups. This argument accepts either a list of DNS servers or a list of hashes that configure the name server and VRF name or keyword 'default'. See examples.
name: name_servers
- description: Specifies the mtu, must be an integer or keyword 'default'.
name: system_mtu
- auto: PREDEFINED
default: false
defaultValue: present
- description: State of the configuration values in the device's current active
- configuration. When set to `present`, the values should be configured in
- the device active configuration and when set to `absent` the values should
- not be in the device active configuration
+ description: State of the configuration values in the device's current active configuration. When set to `present`, the values should be configured in the device active configuration and when set to `absent` the values should not be in the device active configuration
name: state
predefined:
- present
@@ -2740,9 +2249,7 @@ script:
description: Manage the system attributes on Cisco NXOS devices
name: nxos-system
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2761,9 +2268,7 @@ script:
description: Telemetry Monitoring Service (TMS) configuration
name: nxos-telemetry
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2777,14 +2282,12 @@ script:
name: msg_time
- default: false
defaultValue: "no"
- description: Ability to reset all ports shut down by UDLD. 'state' parameter
- cannot be 'absent' when this is present.
+ description: Ability to reset all ports shut down by UDLD. 'state' parameter cannot be 'absent' when this is present.
name: reset
- auto: PREDEFINED
default: false
defaultValue: present
- description: Manage the state of the resource. When set to 'absent', aggressive
- and msg_time are set to their default values.
+ description: Manage the state of the resource. When set to 'absent', aggressive and msg_time are set to their default values.
name: state
predefined:
- present
@@ -2792,9 +2295,7 @@ script:
description: Manages UDLD global configuration params.
name: nxos-udld
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2820,57 +2321,36 @@ script:
description: Manages UDLD interface configuration params.
name: nxos-udld-interface
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The set of username objects to be configured on the remote Cisco
- Nexus device. The list entries can either be the username or a hash of username
- and properties. This argument is mutually exclusive with the `name` argument.
+ - description: The set of username objects to be configured on the remote Cisco Nexus device. The list entries can either be the username or a hash of username and properties. This argument is mutually exclusive with the `name` argument.
name: aggregate
- - description: The username to be configured on the remote Cisco Nexus device. This
- argument accepts a string value and is mutually exclusive with the `aggregate`
- argument.
+ - description: The username to be configured on the remote Cisco Nexus device. This argument accepts a string value and is mutually exclusive with the `aggregate` argument.
name: name
- - description: The password to be configured on the network device. The password
- needs to be provided in cleartext and it will be encrypted on the device.
- Please note that this option is not same as `provider password`.
+ - description: The password to be configured on the network device. The password needs to be provided in cleartext and it will be encrypted on the device. Please note that this option is not the same as `provider password`.
name: configured_password
- auto: PREDEFINED
default: false
defaultValue: always
- description: Since passwords are encrypted in the device running config, this
- argument will instruct the module when to change the password. When set to
- `always`, the password will always be updated in the device and when set to
- `on_create` the password will be updated only if the username is created.
+ description: Since passwords are encrypted in the device running config, this argument will instruct the module when to change the password. When set to `always`, the password will always be updated in the device and when set to `on_create` the password will be updated only if the username is created.
name: update_password
predefined:
- on_create
- always
- - description: The `role` argument configures the role for the username in the
- device running configuration. The argument accepts a string value defining
- the role name. This argument does not check if the role has been configured
- on the device.
+ - description: The `role` argument configures the role for the username in the device running configuration. The argument accepts a string value defining the role name. This argument does not check if the role has been configured on the device.
name: role
- - description: The `sshkey` argument defines the SSH public key to configure for
- the username. This argument accepts a valid SSH key value.
+ - description: The `sshkey` argument defines the SSH public key to configure for the username. This argument accepts a valid SSH key value.
name: sshkey
- default: false
defaultValue: "no"
- description: The `purge` argument instructs the module to consider the resource
- definition absolute. It will remove any previously configured usernames on
- the device with the exception of the `admin` user which cannot be deleted
- per nxos constraints.
+ description: The `purge` argument instructs the module to consider the resource definition absolute. It will remove any previously configured usernames on the device with the exception of the `admin` user which cannot be deleted per nxos constraints.
name: purge
- auto: PREDEFINED
default: false
defaultValue: present
- description: The `state` argument configures the state of the username definition
- as it relates to the device operational configuration. When set to `present`,
- the username(s) should be configured in the device active configuration and
- when set to `absent` the username(s) should not be in the device active configuration
+ description: The `state` argument configures the state of the username definition as it relates to the device operational configuration. When set to `present`, the username(s) should be configured in the device active configuration and when set to `absent` the username(s) should not be in the device active configuration
name: state
predefined:
- present
@@ -2878,9 +2358,7 @@ script:
description: Manage the collection of local users on Nexus devices
name: nxos-user
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2900,9 +2378,7 @@ script:
description: Create VLAN and manage VLAN configurations on NX-OS Interfaces
name: nxos-vlans
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2957,9 +2433,7 @@ script:
description: Manages global VPC configuration
name: nxos-vpc
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2982,9 +2456,7 @@ script:
description: Manages interface VPC configuration
name: nxos-vpc-interface
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2999,20 +2471,13 @@ script:
predefined:
- up
- down
- - description: Specify virtual network identifier. Valid values are Integer or
- keyword 'default'.
+ - description: Specify virtual network identifier. Valid values are Integer or keyword 'default'.
name: vni
- - description: VPN Route Distinguisher (RD). Valid values are a string in one
- of the route-distinguisher formats (ASN2:NN, ASN4:NN, or IPV4:NN); the keyword
- 'auto', or the keyword 'default'.
+ - description: VPN Route Distinguisher (RD). Valid values are a string in one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or IPV4:NN); the keyword 'auto', or the keyword 'default'.
name: rd
- - description: List of interfaces to check the VRF has been configured correctly
- or keyword 'default'.
+ - description: List of interfaces to check the VRF has been configured correctly or keyword 'default'.
name: interfaces
- - description: This is a intent option and checks the operational state of the
- for given vrf `name` for associated interfaces. If the value in the `associated_interfaces`
- does not match with the operational state of vrf interfaces on device it will
- result in failure.
+ - description: This is an intent option and checks the operational state of the given vrf `name` for associated interfaces. If the value in the `associated_interfaces` does not match the operational state of vrf interfaces on the device it will result in failure.
name: associated_interfaces
- description: List of VRFs definitions.
name: aggregate
@@ -3032,15 +2497,12 @@ script:
name: description
- default: false
defaultValue: "10"
- description: Time in seconds to wait before checking for the operational state
- on remote device. This wait is applicable for operational state arguments.
+ description: Time in seconds to wait before checking for the operational state on remote device. This wait is applicable for operational state arguments.
name: delay
description: Manages global VRF configuration.
name: nxos-vrf
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3054,8 +2516,7 @@ script:
- ipv4
- ipv6
required: true
- - description: Enable/Disable the EVPN route-target 'auto' setting for both import
- and export target communities.
+ - description: Enable/Disable the EVPN route-target 'auto' setting for both import and export target communities.
name: route_target_both_auto_evpn
- auto: PREDEFINED
default: false
@@ -3068,9 +2529,7 @@ script:
description: Manages VRF AF.
name: nxos-vrf-af
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3091,9 +2550,7 @@ script:
description: Manages interface specific VRF configuration.
name: nxos-vrf-interface
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3139,9 +2596,7 @@ script:
description: Manages VRRP configuration on NX-OS switches.
name: nxos-vrrp
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3151,9 +2606,7 @@ script:
description: Manages VTP domain configuration.
name: nxos-vtp-domain
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3170,9 +2623,7 @@ script:
description: Manages VTP password configuration.
name: nxos-vtp-password
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3186,9 +2637,7 @@ script:
description: Manages VTP version configuration.
name: nxos-vtp-version
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3201,23 +2650,17 @@ script:
name: host_reachability
- description: Administratively shutdown the NVE interface.
name: shutdown
- - description: Specify the loopback interface whose IP address should be used
- for the NVE interface.
+ - description: Specify the loopback interface whose IP address should be used for the NVE interface.
name: source_interface
- - description: Suppresses advertisement of the NVE loopback address until the
- overlay has converged.
+ - description: Suppresses advertisement of the NVE loopback address until the overlay has converged.
name: source_interface_hold_down_time
- - description: Global multicast ip prefix for L3 VNIs or the keyword 'default'
- This is available on NX-OS 9K series running 9.2.x or higher.
+ - description: Global multicast ip prefix for L3 VNIs or the keyword 'default'. This is available on NX-OS 9K series running 9.2.x or higher.
name: global_mcast_group_L3
- - description: Global multicast ip prefix for L2 VNIs or the keyword 'default'
- This is available on NX-OS 9K series running 9.2.x or higher.
+ - description: Global multicast ip prefix for L2 VNIs or the keyword 'default'. This is available on NX-OS 9K series running 9.2.x or higher.
name: global_mcast_group_L2
- - description: Enables ARP suppression for all VNIs This is available on NX-OS
- 9K series running 9.2.x or higher.
+ - description: Enables ARP suppression for all VNIs. This is available on NX-OS 9K series running 9.2.x or higher.
name: global_suppress_arp
- - description: Configures ingress replication protocol as bgp for all VNIs This
- is available on NX-OS 9K series running 9.2.x or higher.
+ - description: Configures ingress replication protocol as bgp for all VNIs. This is available on NX-OS 9K series running 9.2.x or higher.
name: global_ingress_replication_bgp
- auto: PREDEFINED
default: false
@@ -3230,9 +2673,7 @@ script:
description: Manages VXLAN Network Virtualization Endpoint (NVE).
name: nxos-vxlan-vtep
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3242,9 +2683,7 @@ script:
- description: ID of the Virtual Network Identifier.
name: vni
required: true
- - description: This attribute is used to identify and separate processing VNIs
- that are associated with a VRF and used for routing. The VRF and VNI specified
- with this command must match the configuration of the VNI under the VRF.
+ - description: This attribute is used to identify and separate processing VNIs that are associated with a VRF and used for routing. The VRF and VNI specified with this command must match the configuration of the VNI under the VRF.
name: assoc_vrf
- auto: PREDEFINED
description: Specifies mechanism for host reachability advertisement.
@@ -3253,16 +2692,13 @@ script:
- bgp
- static
- default
- - description: The multicast group (range) of the VNI. Valid values are string
- and keyword 'default'.
+ - description: The multicast group (range) of the VNI. Valid values are string and keyword 'default'.
name: multicast_group
- - description: Set the ingress-replication static peer list. Valid values are
- an array, a space-separated string of ip addresses, or the keyword 'default'.
+ - description: Set the ingress-replication static peer list. Valid values are an array, a space-separated string of ip addresses, or the keyword 'default'.
name: peer_list
- description: Suppress arp under layer 2 VNI.
name: suppress_arp
- - description: Overrides the global ARP suppression config. This is available
- on NX-OS 9K series running 9.2.x or higher.
+ - description: Overrides the global ARP suppression config. This is available on NX-OS 9K series running 9.2.x or higher.
name: suppress_arp_disable
- auto: PREDEFINED
default: false
diff --git a/Packs/Ansible_Powered_Integrations/Integrations/Linux/Linux.yml b/Packs/Ansible_Powered_Integrations/Integrations/Linux/Linux.yml
index b2082205dd9c..eebc3609a032 100644
--- a/Packs/Ansible_Powered_Integrations/Integrations/Linux/Linux.yml
+++ b/Packs/Ansible_Powered_Integrations/Integrations/Linux/Linux.yml
@@ -3,21 +3,18 @@ commonfields:
id: Linux
version: -1
configuration:
-- additionalinfo: The credentials to associate with the instance. SSH keys can be
- configured using the credential manager.
+- additionalinfo: The credentials to associate with the instance. SSH keys can be configured using the credential manager, under the Certificate field.
display: Username
name: creds
required: true
type: 9
-- additionalinfo: The default port to use if one is not specified in the commands
- `host` argument.
+- additionalinfo: The default port to use if one is not specified in the commands `host` argument.
defaultvalue: "22"
display: Default SSH Port
name: port
required: true
type: 0
-- additionalinfo: If multiple hosts are specified in a command, how many hosts should
- be interacted with concurrently.
+- additionalinfo: If multiple hosts are specified in a command, how many hosts should be interacted with concurrently.
defaultvalue: "4"
display: Concurrecy Factor
name: concurrency
@@ -29,9 +26,7 @@ name: Linux
script:
commands:
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -52,9 +47,7 @@ script:
description: Manages alternative programs for common commands
name: linux-alternatives
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -62,13 +55,11 @@ script:
name: command
- description: An existing script file to be executed in the future.
name: script_file
- - description: The count of units in the future to execute the command or script
- file.
+ - description: The count of units in the future to execute the command or script file.
name: count
required: true
- auto: PREDEFINED
- description: The type of units in the future to execute the command or script
- file.
+ description: The type of units in the future to execute the command or script file.
name: units
predefined:
- minutes
@@ -79,8 +70,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: The state dictates if the command or script file should be evaluated
- as present(added) or absent(deleted).
+ description: The state dictates if the command or script file should be evaluated as present(added) or absent(deleted).
name: state
predefined:
- absent
@@ -92,14 +82,11 @@ script:
description: Schedule the execution of a command or script file via the at command
name: linux-at
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The username on the remote host whose authorized_keys file will
- be modified.
+ - description: The username on the remote host whose authorized_keys file will be modified.
name: user
required: true
- description: The SSH public key(s), as a string or (since Ansible 1.9) url (https://github.com/username.keys).
@@ -120,14 +107,12 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the given key (with the given key_options) should or should
- not be in the file.
+ description: Whether the given key (with the given key_options) should or should not be in the file.
name: state
predefined:
- absent
- present
- - description: A string of ssh key options to be prepended to the key in the authorized_keys
- file.
+ - description: A string of ssh key options to be prepended to the key in the authorized_keys file.
name: key_options
- default: false
defaultValue: "False"
@@ -157,17 +142,14 @@ script:
description: Adds or removes an SSH authorized key
name: linux-authorized-key
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: Specifies the path to the file to be managed.
name: path
required: true
- - description: Desired capability to set (with operator and flags, if state is
- `present`) or remove (if state is `absent`)
+ - description: Desired capability to set (with operator and flags, if state is `present`) or remove (if state is `absent`)
name: capability
required: true
- auto: PREDEFINED
@@ -181,9 +163,7 @@ script:
description: Manage Linux capabilities
name: linux-capabilities
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -205,8 +185,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether to ensure the job or environment variable is present or
- absent.
+ description: Whether to ensure the job or environment variable is present or absent.
name: state
predefined:
- absent
@@ -220,8 +199,7 @@ script:
name: cron_file
- default: false
defaultValue: "False"
- description: If set, create a backup of the crontab before it is modified. The
- location of the backup is returned in the `backup_file` variable by this module.
+ description: If set, create a backup of the crontab before it is modified. The location of the backup is returned in the `backup_file` variable by this module.
name: backup
- default: false
defaultValue: '*'
@@ -241,13 +219,11 @@ script:
name: month
- default: false
defaultValue: '*'
- description: Day of the week that the job should run ( 0-6 for Sunday-Saturday,
- *, etc )
+ description: Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
name: weekday
- default: false
defaultValue: "False"
- description: If the job should be run at reboot. This option is deprecated.
- Users should use special_time.
+ description: If the job should be run at reboot. This option is deprecated. Users should use special_time.
name: reboot
- auto: PREDEFINED
description: Special time specification nickname.
@@ -284,9 +260,7 @@ script:
description: Manage cron.d and crontab entries
name: linux-cron
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -301,8 +275,7 @@ script:
If specified, the variable will be inserted after the variable specified.
Used with `state=present`.
name: insertafter
- - description: Used with `state=present`. If specified, the variable will be inserted
- just before the variable specified.
+ - description: Used with `state=present`. If specified, the variable will be inserted just before the variable specified.
name: insertbefore
- auto: PREDEFINED
default: false
@@ -323,24 +296,19 @@ script:
name: cron_file
- default: false
defaultValue: "False"
- description: If set, create a backup of the crontab before it is modified. The
- location of the backup is returned in the `backup` variable by this module.
+ description: If set, create a backup of the crontab before it is modified. The location of the backup is returned in the `backup` variable by this module.
name: backup
description: Manage variables in crontabs
name: linux-cronvar
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: A dconf key to modify or read from the dconf database.
name: key
required: true
- - description: Value to set for the specified dconf key. Value should be specified
- in GVariant format. Due to complexity of this format, it is best to have a
- look at existing values in the dconf database. Required for `state=present`.
+ - description: Value to set for the specified dconf key. Value should be specified in GVariant format. Due to complexity of this format, it is best to have a look at existing values in the dconf database. Required for `state=present`.
name: value
- auto: PREDEFINED
default: false
@@ -354,9 +322,7 @@ script:
description: Modify and read dconf database
name: linux-dconf
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -391,9 +357,7 @@ script:
description: Configure a .deb package
name: linux-debconf
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -427,8 +391,7 @@ script:
required: true
- default: false
defaultValue: "no"
- description: If `yes`, allows to create new filesystem on devices that already
- has filesystem.
+ description: If `yes`, allows to create new filesystem on devices that already has filesystem.
name: force
- default: false
defaultValue: "no"
@@ -443,9 +406,7 @@ script:
description: Makes a filesystem
name: linux-filesystem
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -498,17 +459,14 @@ script:
defaultValue: "0"
description: The amount of time the rule should be in effect for when non-permanent.
name: timeout
- - description: The masquerade setting you would like to enable/disable to/from
- zones within firewalld.
+ - description: The masquerade setting you would like to enable/disable to/from zones within firewalld.
name: masquerade
- description: Whether to run this module even when firewalld is offline.
name: offline
description: Manage arbitrary ports/services with firewalld
name: linux-firewalld
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -519,19 +477,14 @@ script:
description: Gathers facts about remote hosts
name: linux-gather-facts
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: A GConf preference key is an element in the GConf repository that
- corresponds to an application preference. See man gconftool-2(1)
+ - description: A GConf preference key is an element in the GConf repository that corresponds to an application preference. See man gconftool-2(1)
name: key
required: true
- - description: Preference keys typically have simple values such as strings, integers,
- or lists of strings and integers. This is ignored if the state is "get". See
- man gconftool-2(1)
+ - description: Preference keys typically have simple values such as strings, integers, or lists of strings and integers. This is ignored if the state is "get". See man gconftool-2(1)
name: value
- auto: PREDEFINED
description: The type of value being set. This is ignored if the state is "get".
@@ -549,38 +502,30 @@ script:
- get
- present
required: true
- - description: Specify a configuration source to use rather than the default path.
- See man gconftool-2(1)
+ - description: Specify a configuration source to use rather than the default path. See man gconftool-2(1)
name: config_source
- default: false
defaultValue: "no"
- description: Access the config database directly, bypassing server. If direct
- is specified then the config_source must be specified as well. See man gconftool-2(1)
+ description: Access the config database directly, bypassing server. If direct is specified then the config_source must be specified as well. See man gconftool-2(1)
name: direct
description: Edit GNOME Configurations
name: linux-gconftool2
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The name of a getent database supported by the target system (passwd,
- group, hosts, etc).
+ - description: The name of a getent database supported by the target system (passwd, group, hosts, etc).
name: database
required: true
- default: false
- description: Key from which to return values from the specified database, otherwise
- the full contents are returned.
+ description: Key from which to return values from the specified database, otherwise the full contents are returned.
name: key
- description: |-
Override all databases with the specified service
The underlying system must support the service flag which is not always available.
name: service
- - description: "Character used to split the database values into lists/arrays\
- \ such as ':' or '\t', otherwise it will try to pick one depending on the\
- \ database."
+ - description: "Character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database."
name: split
- default: false
defaultValue: "yes"
@@ -589,9 +534,7 @@ script:
description: A wrapper to the unix getent utility
name: linux-getent
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -628,9 +571,7 @@ script:
description: Add or remove groups
name: linux-group
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -656,9 +597,7 @@ script:
description: Manage hostname
name: linux-hostname
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -668,27 +607,20 @@ script:
name: dest
- description: Name of the interface, required for value changes or option remove
name: iface
- - description: Address family of the interface, useful if same interface name
- is used for both inet and inet6
+ - description: Address family of the interface, useful if same interface name is used for both inet and inet6
name: address_family
- description: Name of the option, required for value changes or option remove
name: option
- - description: If `option` is not presented for the `interface` and `state` is
- `present` option will be added. If `option` already exists and is not `pre-up`,
- `up`, `post-up` or `down`, it's value will be updated. `pre-up`, `up`, `post-up`
- and `down` options can't be updated, only adding new options, removing existing
- ones or cleaning the whole option set are supported
+ - description: If `option` is not presented for the `interface` and `state` is `present` option will be added. If `option` already exists and is not `pre-up`, `up`, `post-up` or `down`, its value will be updated. `pre-up`, `up`, `post-up` and `down` options can't be updated, only adding new options, removing existing ones or cleaning the whole option set are supported
name: value
- default: false
defaultValue: "no"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- auto: PREDEFINED
default: false
defaultValue: present
- description: If set to `absent` the option or section will be removed if present
- instead of created.
+ description: If set to `absent` the option or section will be removed if present instead of created.
name: state
predefined:
- present
@@ -701,11 +633,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -747,9 +677,7 @@ script:
- contextPath: Linux.ifaces.ifaces
description: interface dictionary
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -890,8 +818,7 @@ script:
Since there is no way to tell the source or destination ports of such a packet (or ICMP type), such a packet will not match any rules which specify them.
When the "!" argument precedes fragment argument, the rule will only match head fragments, or unfragmented packets.
name: fragment
- - description: This enables the administrator to initialize the packet and byte
- counters of a rule (during `INSERT`, `APPEND`, `REPLACE` operations).
+ - description: This enables the administrator to initialize the packet and byte counters of a rule (during `INSERT`, `APPEND`, `REPLACE` operations).
name: set_counters
- description: |-
Source port or port range specification.
@@ -900,12 +827,7 @@ script:
If the first port is omitted, `0` is assumed; if the last is omitted, `65535` is assumed.
If the first port is greater than the second one they will be swapped.
name: source_port
- - description: 'Destination port or port range specification. This can either
- be a service name or a port number. An inclusive range can also be specified,
- using the format first:last. If the first port is omitted, ''0'' is assumed;
- if the last is omitted, ''65535'' is assumed. If the first port is greater
- than the second one they will be swapped. This is only valid if the rule also
- specifies one of the following protocols: tcp, udp, dccp or sctp.'
+ - description: 'Destination port or port range specification. This can either be a service name or a port number. An inclusive range can also be specified, using the format first:last. If the first port is omitted, ''0'' is assumed; if the last is omitted, ''65535'' is assumed. If the first port is greater than the second one they will be swapped. This is only valid if the rule also specifies one of the following protocols: tcp, udp, dccp or sctp.'
name: destination_port
- description: |-
This specifies a destination port or range of ports to use, without this, the destination port is never altered.
@@ -963,12 +885,9 @@ script:
name: uid_owner
- description: Specifies the GID or group to use in match by owner rule.
name: gid_owner
- - description: 'Specifies the error packet type to return while rejecting. It
- implies "jump: REJECT"'
+ - description: 'Specifies the error packet type to return while rejecting. It implies "jump: REJECT"'
name: reject_with
- - description: This allows specification of the ICMP type, which can be a numeric
- ICMP type, type/code pair, or one of the ICMP type names shown by the command
- 'iptables -p icmp -h'
+ - description: This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command 'iptables -p icmp -h'
name: icmp_type
- description: |-
Flushes the specified table and chain of all rules.
@@ -990,9 +909,7 @@ script:
description: Modify iptables rules
name: linux-iptables
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1045,9 +962,7 @@ script:
description: Uses keytool to import/remove key from java keystore (cacerts)
name: linux-java-cert
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1079,9 +994,7 @@ script:
description: Create or delete a Java keystore in JKS format.
name: linux-java-keystore
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1101,9 +1014,7 @@ script:
description: Block list kernel modules
name: linux-kernel-blacklist
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1136,9 +1047,7 @@ script:
description: Add or remove a host from the C(known_hosts) file
name: linux-known-hosts
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1150,9 +1059,7 @@ script:
- contextPath: Linux.ansible_facts.udp_listen
description: A list of processes that are listening on a UDP port.
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1170,9 +1077,7 @@ script:
description: Creates or removes locales
name: linux-locale-gen
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1193,9 +1098,7 @@ script:
description: Load or unload kernel modules
name: linux-modprobe
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1258,15 +1161,12 @@ script:
name: boot
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
description: Control active and configured mount points
name: linux-mount
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1294,15 +1194,12 @@ script:
Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database.
Keep in mind that `iscsiadm` discovery resets configuration, like `node.startup` to manual, hence combined with `auto_node_startup=yes` will always return a changed state.
name: discover
- - description: Whether the list of nodes in the persistent iSCSI database should
- be returned by the module.
+ - description: Whether the list of nodes in the persistent iSCSI database should be returned by the module.
name: show_nodes
description: Manage iSCSI targets with Open-iSCSI
name: linux-open-iscsi
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1346,20 +1243,15 @@ script:
required: true
- default: false
defaultValue: "no"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- default: false
defaultValue: "no"
- description: If set to `yes`, the minimal value will be used or conserved. If
- the specified value is inferior to the value in the file, file content is
- replaced with the new value, else content is not modified.
+ description: If set to `yes`, the minimal value will be used or conserved. If the specified value is inferior to the value in the file, file content is replaced with the new value, else content is not modified.
name: use_min
- default: false
defaultValue: "no"
- description: If set to `yes`, the maximal value will be used or conserved. If
- the specified value is superior to the value in the file, file content is
- replaced with the new value, else content is not modified.
+ description: If set to `yes`, the maximal value will be used or conserved. If the specified value is superior to the value in the file, file content is replaced with the new value, else content is not modified.
name: use_max
- default: false
defaultValue: /etc/security/limits.conf
@@ -1371,14 +1263,11 @@ script:
description: Modify Linux PAM limits
name: linux-pam-limits
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The name generally refers to the PAM service file to change, for
- example system-auth.
+ - description: The name generally refers to the PAM service file to change, for example system-auth.
name: name
required: true
- auto: PREDEFINED
@@ -1454,15 +1343,12 @@ script:
name: path
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
description: Manage PAM Modules
name: linux-pamd
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1545,8 +1431,7 @@ script:
Where the partition will end as offset from the beginning of the disk, that is, the "distance" from the start of the disk.
The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, e.g. `10GiB`, `15%`.
name: part_end
- - description: Sets the name for the partition number (GPT, Mac, MIPS and PC98
- only).
+ - description: Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
name: name
- description: A list of the flags that has to be set on the partition.
isArray: true
@@ -1570,22 +1455,17 @@ script:
- contextPath: Linux.partition_info.partitions
description: List of device partitions.
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- description: the name of the process you want to get PID for.
name: name
required: true
- description: Retrieves process IDs list if the process is running otherwise return
- empty list
+ description: Retrieves process IDs list if the process is running, otherwise returns an empty list
name: linux-pids
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1595,13 +1475,10 @@ script:
Data to return for the `ping` return value.
If this parameter is set to `crash`, the module will cause an exception.
name: data
- description: Try to connect to host, verify a usable python and return C(pong)
- on success
+ description: Try to connect to host, verify a usable python and return C(pong) on success
name: linux-ping
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1611,9 +1488,7 @@ script:
description: Show python path and assert dependency versions
name: linux-python-requirements-info
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1642,8 +1517,7 @@ script:
name: connect_timeout
- default: false
defaultValue: whoami
- description: Command to run on the rebooted host and expect success from to
- determine the machine is ready for further tasks.
+ description: Command to run on the rebooted host and expect success from to determine the machine is ready for further tasks.
name: test_command
- default: false
defaultValue: Reboot initiated by Ansible
@@ -1659,9 +1533,7 @@ script:
description: Reboot a machine
name: linux-reboot
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1677,15 +1549,12 @@ script:
required: true
- default: false
defaultValue: "False"
- description: Useful for scenarios (chrooted environment) that you can't get
- the real SELinux state.
+ description: Useful for scenarios (chrooted environment) that you can't get the real SELinux state.
name: ignore_selinux_state
description: Toggles SELinux booleans
name: linux-seboolean
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1739,20 +1608,16 @@ script:
name: reload
- default: false
defaultValue: "False"
- description: Useful for scenarios (chrooted environment) that you can't get
- the real SELinux state.
+ description: Useful for scenarios (chrooted environment) that you can't get the real SELinux state.
name: ignore_selinux_state
description: Manages SELinux file context mapping definitions
name: linux-sefcontext
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The name of the SELinux policy to use (e.g. `targeted`) will be
- required if state is not `disabled`.
+ - description: The name of the SELinux policy to use (e.g. `targeted`) will be required if state is not `disabled`.
name: policy
- auto: PREDEFINED
description: The SELinux mode.
@@ -1769,15 +1634,12 @@ script:
description: Change policy and state of SELinux
name: linux-selinux
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
- description: The domain that will be added or removed from the list of permissive
- domains.
+ description: The domain that will be added or removed from the list of permissive domains.
name: domain
required: true
- description: Indicate if the domain should or should not be set as permissive.
@@ -1795,9 +1657,7 @@ script:
description: Change permissive domain in SELinux policy
name: linux-selinux-permissive
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1809,8 +1669,7 @@ script:
required: true
- default: false
defaultValue: s0
- description: MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for
- SELinux login mapping defaults to the SELinux user record range.
+ description: MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range.
name: selevel
- auto: PREDEFINED
default: false
@@ -1832,9 +1691,7 @@ script:
description: Manages linux user to SELinux user mapping
name: linux-selogin
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1873,9 +1730,7 @@ script:
description: Manages SELinux network port type definitions
name: linux-seport
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1925,9 +1780,7 @@ script:
description: Manage services
name: linux-service
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1937,23 +1790,13 @@ script:
- contextPath: Linux.ansible_facts.services
description: States of the services with service name as key.
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: all
- description: 'If supplied, restrict the additional facts collected to the given
- subset. Possible values: `all`, `min`, `hardware`, `network`, `virtual`, `ohai`,
- and `facter`. Can specify a list of values to specify a larger subset. Values
- can also be used with an initial `!` to specify that that specific subset
- should not be collected. For instance: `!hardware,!network,!virtual,!ohai,!facter`.
- If `!all` is specified then only the min subset is collected. To avoid collecting
- even the min subset, specify `!all,!min`. To collect only specific facts,
- use `!all,!min`, and specify the particular fact subsets. Use the filter parameter
- if you do not want to display some collected facts.'
+ description: 'If supplied, restrict the additional facts collected to the given subset. Possible values: `all`, `min`, `hardware`, `network`, `virtual`, `ohai`, and `facter`. Can specify a list of values to specify a larger subset. Values can also be used with an initial `!` to specify that that specific subset should not be collected. For instance: `!hardware,!network,!virtual,!ohai,!facter`. If `!all` is specified then only the min subset is collected. To avoid collecting even the min subset, specify `!all,!min`. To collect only specific facts, use `!all,!min`, and specify the particular fact subsets. Use the filter parameter if you do not want to display some collected facts.'
name: gather_subset
- default: false
defaultValue: "10"
@@ -1961,24 +1804,16 @@ script:
name: gather_timeout
- default: false
defaultValue: '*'
- description: If supplied, only return facts that match this shell-style (fnmatch)
- wildcard.
+ description: If supplied, only return facts that match this shell-style (fnmatch) wildcard.
name: filter
- default: false
defaultValue: /etc/ansible/facts.d
- description: Path used for local ansible facts (`*.fact`) - files in this dir
- will be run (if executable) and their results be added to `ansible_local`
- facts if a file is not executable it is read. Check notes for Windows options.
- (from 2.1 on) File/results format can be JSON or INI-format. The default `fact_path`
- can be specified in `ansible.cfg` for when setup is automatically called as
- part of `gather_facts`.
+ description: Path used for local ansible facts (`*.fact`) - files in this dir will be run (if executable) and their results be added to `ansible_local` facts if a file is not executable it is read. Check notes for Windows options. (from 2.1 on) File/results format can be JSON or INI-format. The default `fact_path` can be specified in `ansible.cfg` for when setup is automatically called as part of `gather_facts`.
name: fact_path
description: Gathers facts about remote hosts
name: linux-setup
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2001,8 +1836,7 @@ script:
name: ignoreerrors
- default: false
defaultValue: "yes"
- description: If `yes`, performs a `/sbin/sysctl -p` if the `sysctl_file` is
- updated. If `no`, does not reload `sysctl` even if the `sysctl_file` is updated.
+ description: If `yes`, performs a `/sbin/sysctl -p` if the `sysctl_file` is updated. If `no`, does not reload `sysctl` even if the `sysctl_file` is updated.
name: reload
- default: false
defaultValue: /etc/sysctl.conf
@@ -2015,9 +1849,7 @@ script:
description: Manage entries in sysctl.conf.
name: linux-sysctl
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2026,22 +1858,18 @@ script:
When using in a chroot environment you always need to specify the full name i.e. (crond.service).
name: name
- auto: PREDEFINED
- description: '`started`/`stopped` are idempotent actions that will not run commands
- unless necessary. `restarted` will always bounce the service. `reloaded` will
- always reload.'
+ description: '`started`/`stopped` are idempotent actions that will not run commands unless necessary. `restarted` will always bounce the service. `reloaded` will always reload.'
name: state
predefined:
- reloaded
- restarted
- started
- stopped
- - description: Whether the service should start on boot. `At least one of state
- and enabled are required.`
+ - description: Whether the service should start on boot. `At least one of state and enabled are required.`
name: enabled
- description: Whether to override existing symlinks.
name: force
- - description: Whether the unit should be masked or not, a masked unit is impossible
- to start.
+ - description: Whether the unit should be masked or not, a masked unit is impossible to start.
name: masked
- default: false
defaultValue: "False"
@@ -2051,8 +1879,7 @@ script:
name: daemon_reload
- default: false
defaultValue: "False"
- description: Run daemon_reexec command before doing any other operations, the
- systemd manager will serialize the manager state.
+ description: Run daemon_reexec command before doing any other operations, the systemd manager will serialize the manager state.
name: daemon_reexec
- default: false
defaultValue: "False"
@@ -2071,15 +1898,12 @@ script:
- global
- default: false
defaultValue: "False"
- description: Do not synchronously wait for the requested operation to finish.
- Enqueued job will continue without Ansible blocking on its completion.
+ description: Do not synchronously wait for the requested operation to finish. Enqueued job will continue without Ansible blocking on its completion.
name: no_block
description: Manage services
name: linux-systemd
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2087,23 +1911,18 @@ script:
name: name
required: true
- auto: PREDEFINED
- description: '`started`/`stopped` are idempotent actions that will not run commands
- unless necessary. Not all init scripts support `restarted` nor `reloaded`
- natively, so these will both trigger a stop and start as needed.'
+ description: '`started`/`stopped` are idempotent actions that will not run commands unless necessary. Not all init scripts support `restarted` nor `reloaded` natively, so these will both trigger a stop and start as needed.'
name: state
predefined:
- started
- stopped
- restarted
- reloaded
- - description: Whether the service should start on boot. `At least one of state
- and enabled are required.`
+ - description: Whether the service should start on boot. `At least one of state and enabled are required.`
name: enabled
- default: false
defaultValue: "1"
- description: If the service is being `restarted` or `reloaded` then sleep this
- many seconds between the stop and start command. This helps to workaround
- badly behaving services.
+ description: If the service is being `restarted` or `reloaded` then sleep this many seconds between the stop and start command. This helps to workaround badly behaving services.
name: sleep
- description: |-
A substring to look for as would be found in the output of the `ps` command as a stand-in for a status result.
@@ -2114,8 +1933,7 @@ script:
The runlevels this script should be enabled/disabled from.
Use this to override the defaults set by the package or init script itself.
name: runlevels
- - description: Additional arguments provided on the command line that some init
- scripts accept.
+ - description: Additional arguments provided on the command line that some init scripts accept.
name: arguments
- default: false
defaultValue: "False"
@@ -2126,9 +1944,7 @@ script:
description: Manage SysV services.
name: linux-sysvinit
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2156,9 +1972,7 @@ script:
- contextPath: Linux.diff.after
description: The values after change
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2268,9 +2082,7 @@ script:
description: Manage firewall with UFW
name: linux-ufw
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2287,8 +2099,7 @@ script:
name: hidden
- default: false
defaultValue: "False"
- description: Optionally when used with the -u option, this option allows to
- change the user ID to a non-unique value.
+ description: Optionally when used with the -u option, this option allows to change the user ID to a non-unique value.
name: non_unique
- description: Optionally sets the seuser type (user_u) on selinux enabled systems.
name: seuser
@@ -2328,8 +2139,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the account should exist or not, taking action if the state
- is different from what is stated.
+ description: Whether the account should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- absent
@@ -2342,9 +2152,7 @@ script:
name: create_home
- default: false
defaultValue: "False"
- description: 'If set to `yes` when used with `home: `, attempt to move the user''s
- old home directory to the specified directory if it isn''t there already and
- the old home exists.'
+ description: 'If set to `yes` when used with `home: `, attempt to move the user''s old home directory to the specified directory if it isn''t there already and the old home exists.'
name: move_home
- default: false
defaultValue: "False"
@@ -2450,9 +2258,7 @@ script:
description: Manage user accounts
name: linux-user
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2464,8 +2270,7 @@ script:
- group
- project
required: true
- - description: The name of the user, group or project to apply the quota to, if
- other than default.
+ - description: The name of the user, group or project to apply the quota to, if other than default.
name: name
- description: The mount point on which to apply the quotas.
name: mountpoint
@@ -2503,9 +2308,7 @@ script:
description: Manage quotas on XFS filesystems
name: linux-xfs-quota
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2522,10 +2325,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: apr_md5_crypt
- description: Encryption scheme to be used. As well as the four choices listed
- here, you can also use any other hash supported by passlib, such as md5_crypt
- and sha256_crypt, which are linux passwd hashes. If you do so the password
- file will not be compatible with Apache or Nginx
+ description: Encryption scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib, such as md5_crypt and sha256_crypt, which are linux passwd hashes. If you do so the password file will not be compatible with Apache or Nginx
name: crypt_scheme
predefined:
- apr_md5_crypt
@@ -2542,9 +2342,7 @@ script:
- absent
- default: false
defaultValue: "yes"
- description: Used with `state=present`. If specified, the file will be created
- if it does not already exist. If set to "no", will fail if the file does not
- exist
+ description: Used with `state=present`. If specified, the file will be created if it does not already exist. If set to "no", will fail if the file does not exist
name: create
- description: |-
The permissions the resulting file or directory should have.
@@ -2554,11 +2352,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -2597,9 +2393,7 @@ script:
description: manage user files for basic authentication
name: linux-htpasswd
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2628,25 +2422,21 @@ script:
- absent
- signalled
required: true
- - description: The signal to send to the program/group, when combined with the
- 'signalled' state. Required when l(state=signalled).
+ - description: The signal to send to the program/group, when combined with the 'signalled' state. Required when l(state=signalled).
name: signal
- description: path to supervisorctl executable
name: supervisorctl_path
description: Manage the state of a program or group of programs running via supervisord
name: linux-supervisorctl
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the host or user certificate should exist or not, taking
- action if the state is different from what is stated.
+ description: Whether the host or user certificate should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- present
@@ -2661,8 +2451,7 @@ script:
- user
- default: false
defaultValue: "False"
- description: Should the certificate be regenerated even if it already exists
- and is valid.
+ description: Should the certificate be regenerated even if it already exists and is valid.
name: force
- description: Path of the file containing the certificate.
name: path
@@ -2683,14 +2472,9 @@ script:
The point in time the certificate is valid to. Time can be specified either as relative time or as absolute timestamp. Time will always be interpreted as UTC. Valid formats are: `[+-]timespec | YYYY-MM-DD | YYYY-MM-DDTHH:MM:SS | YYYY-MM-DD HH:MM:SS | forever` where timespec can be an integer + `[w | d | h | m | s]` (e.g. `+32w1d2h`. Note that if using relative time this module is NOT idempotent.
Required if `state` is `present`.
name: valid_to
- - description: Check if the certificate is valid at a certain point in time. If
- it is not the certificate will be regenerated. Time will always be interpreted
- as UTC. Mainly to be used with relative timespec for `valid_from` and / or
- `valid_to`. Note that if using relative time this module is NOT idempotent.
+ - description: Check if the certificate is valid at a certain point in time. If it is not the certificate will be regenerated. Time will always be interpreted as UTC. Mainly to be used with relative timespec for `valid_from` and / or `valid_to`. Note that if using relative time this module is NOT idempotent.
name: valid_at
- - description: Certificates may be limited to be valid for a set of principal
- (user/host) names. By default, generated certificates are valid for all users
- or hosts.
+ - description: Certificates may be limited to be valid for a set of principal (user/host) names. By default, generated certificates are valid for all users or hosts.
isArray: true
name: principals
- description: |-
@@ -2711,14 +2495,9 @@ script:
At present, no options are valid for host keys.
isArray: true
name: options
- - description: Specify the key identity when signing a public key. The identifier
- that is logged by the server when the certificate is used for authentication.
+ - description: Specify the key identity when signing a public key. The identifier that is logged by the server when the certificate is used for authentication.
name: identifier
- - description: 'Specify the certificate serial number. The serial number is logged
- by the server when the certificate is used for authentication. The certificate
- serial number may be used in a KeyRevocationList. The serial number may be
- omitted for checks, but must be specified again for a new certificate. Note:
- The default value set by ssh-keygen is 0.'
+ - description: 'Specify the certificate serial number. The serial number is logged by the server when the certificate is used for authentication. The certificate serial number may be used in a KeyRevocationList. The serial number may be omitted for checks, but must be specified again for a new certificate. Note: The default value set by ssh-keygen is 0.'
name: serial_number
- description: |-
The permissions the resulting file or directory should have.
@@ -2728,11 +2507,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -2771,36 +2548,24 @@ script:
description: Generate OpenSSH host or user certificates.
name: linux-openssh-cert
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the private and public keys should exist or not, taking
- action if the state is different from what is stated.
+ description: Whether the private and public keys should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- present
- absent
- - description: 'Specifies the number of bits in the private key to create. For
- RSA keys, the minimum size is 1024 bits and the default is 4096 bits. Generally,
- 2048 bits is considered sufficient. DSA keys must be exactly 1024 bits as
- specified by FIPS 186-2. For ECDSA keys, size determines the key length by
- selecting from one of three elliptic curve sizes: 256, 384 or 521 bits. Attempting
- to use bit lengths other than these three values for ECDSA keys will cause
- this module to fail. Ed25519 keys have a fixed length and the size will be
- ignored.'
+ - description: 'Specifies the number of bits in the private key to create. For RSA keys, the minimum size is 1024 bits and the default is 4096 bits. Generally, 2048 bits is considered sufficient. DSA keys must be exactly 1024 bits as specified by FIPS 186-2. For ECDSA keys, size determines the key length by selecting from one of three elliptic curve sizes: 256, 384 or 521 bits. Attempting to use bit lengths other than these three values for ECDSA keys will cause this module to fail. Ed25519 keys have a fixed length and the size will be ignored.'
name: size
- auto: PREDEFINED
default: false
defaultValue: rsa
- description: The algorithm used to generate the SSH private key. `rsa1` is for
- protocol version 1. `rsa1` is deprecated and may not be supported by every
- version of ssh-keygen.
+ description: The algorithm used to generate the SSH private key. `rsa1` is for protocol version 1. `rsa1` is deprecated and may not be supported by every version of ssh-keygen.
name: type
predefined:
- rsa
@@ -2812,12 +2577,10 @@ script:
defaultValue: "False"
description: Should the key be regenerated even if it already exists
name: force
- - description: Name of the files containing the public and private key. The file
- containing the public key will have the extension `.pub`.
+ - description: Name of the files containing the public and private key. The file containing the public key will have the extension `.pub`.
name: path
required: true
- - description: Provides a new comment to the public key. When checking if the
- key is in the correct state this will be ignored.
+ - description: Provides a new comment to the public key. When checking if the key is in the correct state this will be ignored.
name: comment
- description: |-
The permissions the resulting file or directory should have.
@@ -2827,11 +2590,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -2870,9 +2631,7 @@ script:
description: Generate OpenSSH private and public keys.
name: linux-openssh-keypair
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -2900,20 +2659,17 @@ script:
If the target is a directory, setting this to `yes` will make it the default ACL for entities created inside the directory.
Setting `default` to `yes` causes an error if the path is a file.
name: default
- - description: The actual user or group that the ACL applies to when matching
- entity types user or group are selected.
+ - description: The actual user or group that the ACL applies to when matching entity types user or group are selected.
name: entity
- auto: PREDEFINED
- description: The entity type of the ACL to apply, see `setfacl` documentation
- for more info.
+ description: The entity type of the ACL to apply, see `setfacl` documentation for more info.
name: etype
predefined:
- group
- mask
- other
- user
- - description: The permissions to apply/remove can be any combination of `r`,
- `w` and `x` (read, write and execute respectively)
+ - description: The permissions to apply/remove can be any combination of `r`, `w` and `x` (read, write and execute respectively)
name: permissions
- description: |-
DEPRECATED.
@@ -2948,14 +2704,11 @@ script:
description: Set and retrieve file ACL information.
name: linux-acl
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Remote absolute path, glob, or list of paths or globs for the file
- or files to compress or archive.
+ - description: Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
isArray: true
name: path
required: true
@@ -2976,8 +2729,7 @@ script:
The file name of the destination archive.
This is required when `path` refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
name: dest
- - description: Remote absolute path, glob, or list of paths or globs for the file
- or files to exclude from the archive.
+ - description: Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive.
isArray: true
name: exclude_path
- default: false
@@ -2998,11 +2750,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3041,9 +2791,7 @@ script:
description: Creates a compressed archive of one or more files or trees
name: linux-archive
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3055,8 +2803,7 @@ script:
required: true
- default: false
defaultValue: "False"
- description: Create a backup file (if `yes`), including the timestamp information
- so you can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file (if `yes`), including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- description: A delimiter to separate the file contents.
name: delimiter
@@ -3074,8 +2821,7 @@ script:
name: regexp
- default: false
defaultValue: "False"
- description: A boolean that controls if files that start with a '.' will be
- included or not.
+ description: A boolean that controls if files that start with a '.' will be included or not.
name: ignore_hidden
- description: |-
The validation command to run before copying into place.
@@ -3094,11 +2840,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3137,9 +2881,7 @@ script:
description: Assemble configuration files from fragments
name: linux-assemble
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3194,18 +2936,15 @@ script:
name: create
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- default: false
defaultValue: BEGIN
- description: This will be inserted at `{mark}` in the opening ansible block
- marker.
+ description: This will be inserted at `{mark}` in the opening ansible block marker.
name: marker_begin
- default: false
defaultValue: END
- description: This will be inserted at `{mark}` in the closing ansible block
- marker.
+ description: This will be inserted at `{mark}` in the closing ansible block marker.
name: marker_end
- description: |-
The permissions the resulting file or directory should have.
@@ -3215,11 +2954,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3263,9 +3000,7 @@ script:
description: Insert/update/remove a text block surrounded by marker lines
name: linux-blockinfile
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3304,10 +3039,7 @@ script:
name: recurse
- default: false
defaultValue: "False"
- description: 'Force the creation of the symlinks in two cases: the source file
- does not exist (but will appear later); the destination exists and is a file
- (so, we need to unlink the `path` file and create symlink to the `src` file
- in place of it).'
+ description: 'Force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the `path` file and create symlink to the `src` file in place of it).'
name: force
- default: false
defaultValue: "True"
@@ -3345,11 +3077,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3388,9 +3118,7 @@ script:
description: Manage files and file properties
name: linux-file
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3414,11 +3142,9 @@ script:
Items whose basenames match an `excludes` pattern are culled from `patterns` matches. Multiple patterns can be specified using a list.
isArray: true
name: excludes
- - description: A regular expression or pattern which should be matched against
- the file content.
+ - description: A regular expression or pattern which should be matched against the file content.
name: contains
- - description: List of paths of directories to search. All paths must be fully
- qualified.
+ - description: List of paths of directories to search. All paths must be fully qualified.
isArray: true
name: paths
required: true
@@ -3436,8 +3162,7 @@ script:
- link
- default: false
defaultValue: "False"
- description: If target is a directory, recursively descend into the directory
- looking for files.
+ description: If target is a directory, recursively descend into the directory looking for files.
name: recurse
- description: |-
Select files whose size is equal to or greater than the specified size.
@@ -3456,13 +3181,11 @@ script:
- mtime
- default: false
defaultValue: "False"
- description: Set this to `yes` to include hidden files, otherwise they will
- be ignored.
+ description: Set this to `yes` to include hidden files, otherwise they will be ignored.
name: hidden
- default: false
defaultValue: "False"
- description: Set this to `yes` to follow symlinks in path for systems with python
- 2.6+.
+ description: Set this to `yes` to follow symlinks in path for systems with python 2.6+.
name: follow
- default: false
defaultValue: "False"
@@ -3482,9 +3205,7 @@ script:
description: Return a list of files based on specific criteria
name: linux-find
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3509,14 +3230,12 @@ script:
name: value
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- auto: PREDEFINED
default: false
defaultValue: present
- description: If set to `absent` the option or section will be removed if present
- instead of created.
+ description: If set to `absent` the option or section will be removed if present instead of created.
name: state
predefined:
- absent
@@ -3543,11 +3262,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3586,9 +3303,7 @@ script:
description: Tweak settings in INI files
name: linux-ini-file
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3613,15 +3328,12 @@ script:
name: force
- default: false
defaultValue: 7z
- description: The path to the `7z` executable to use for extracting files from
- the ISO.
+ description: The path to the `7z` executable to use for extracting files from the ISO.
name: executable
description: Extract files from an ISO image
name: linux-iso-extract
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3697,8 +3409,7 @@ script:
name: create
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- default: false
defaultValue: "False"
@@ -3716,11 +3427,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3764,9 +3473,7 @@ script:
description: Manage lines in text files
name: linux-lineinfile
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3804,8 +3511,7 @@ script:
name: before
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- description: All arguments accepted by the `file` module also work here.
name: others
@@ -3821,11 +3527,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -3866,13 +3570,10 @@ script:
The path to the file to validate is passed in via '%s' which must be present as in the examples below.
The command is passed securely so shell features like expansion and pipes will not work.
name: validate
- description: Replace all instances of a particular string in a file using a back-referenced
- regular expression
+ description: Replace all instances of a particular string in a file using a back-referenced regular expression
name: linux-replace
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -3953,8 +3654,7 @@ script:
description: Numeric id representing the group of the owner
type: number
- contextPath: Linux.stat.size
- description: Size in bytes for a plain file, amount of data for some special
- files
+ description: Size in bytes for a plain file, amount of data for some special files
type: number
- contextPath: Linux.stat.inode
description: Inode number of the path
@@ -4002,8 +3702,7 @@ script:
description: Tells you if the invoking user's id matches the owner's id
type: boolean
- contextPath: Linux.stat.isgid
- description: Tells you if the invoking user's group id matches the owner's group
- id
+ description: Tells you if the invoking user's group id matches the owner's group id
type: boolean
- contextPath: Linux.stat.lnk_source
description: Target of the symlink normalized for the remote filesystem
@@ -4012,8 +3711,7 @@ script:
description: Target of the symlink. Note that relative paths remain relative
type: string
- contextPath: Linux.stat.md5
- description: md5 hash of the path; this will be removed in Ansible 2.9 in favor
- of the checksum return value
+ description: md5 hash of the path; this will be removed in Ansible 2.9 in favor of the checksum return value
type: string
- contextPath: Linux.stat.checksum
description: hash of the path
@@ -4042,9 +3740,7 @@ script:
- contextPath: Linux.stat.attributes
description: list of file attributes
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4076,14 +3772,11 @@ script:
- push
- default: false
defaultValue: "True"
- description: Mirrors the rsync archive flag, enables recursive, links, perms,
- times, owner, group flags and -D.
+ description: Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D.
name: archive
- default: false
defaultValue: "False"
- description: Skip based on checksum, rather than mod-time & size; Note that
- that "archive" option is still enabled by default - the "checksum" option
- will not disable it.
+ description: Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default - the "checksum" option will not disable it.
name: checksum
- default: false
defaultValue: "True"
@@ -4116,8 +3809,7 @@ script:
name: links
- default: false
defaultValue: "False"
- description: Copy symlinks as the item that they point to (the referent) is
- copied, rather than the symlink.
+ description: Copy symlinks as the item that they point to (the referent) is copied, rather than the symlink.
name: copy_links
- description: |-
Preserve permissions.
@@ -4160,26 +3852,21 @@ script:
name: rsync_opts
- default: false
defaultValue: "False"
- description: Tells rsync to keep the partial file which should make a subsequent
- transfer of the rest of the file much faster.
+ description: Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster.
name: partial
- default: false
defaultValue: "False"
description: Verify destination host key.
name: verify_host
- - description: Specify the private key to use for SSH-based rsync connections
- (e.g. `~/.ssh/id_rsa`).
+ - description: Specify the private key to use for SSH-based rsync connections (e.g. `~/.ssh/id_rsa`).
name: private_key
- description: Add a destination to hard link against during the rsync.
isArray: true
name: link_dest
- description: A wrapper around rsync to make common tasks in your playbooks quick
- and easy
+ description: A wrapper around rsync to make common tasks in your playbooks quick and easy
name: linux-synchronize
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4205,9 +3892,7 @@ script:
description: Creates temporary files and directories
name: linux-tempfile
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4226,22 +3911,18 @@ script:
This option has been deprecated in favor of `remote_src`.
This option is mutually exclusive with `remote_src`.
name: copy
- - description: If the specified absolute path (file or directory) already exists,
- this step will `not` be run.
+ - description: If the specified absolute path (file or directory) already exists, this step will `not` be run.
name: creates
- default: false
defaultValue: "False"
- description: If set to True, return the list of files that are contained in
- the tarball.
+ description: If set to True, return the list of files that are contained in the tarball.
name: list_files
- - description: List the directory and file entries that you would like to exclude
- from the unarchive action.
+ - description: List the directory and file entries that you would like to exclude from the unarchive action.
isArray: true
name: exclude
- default: false
defaultValue: "False"
- description: Do not replace existing files that are newer than files from the
- archive.
+ description: Do not replace existing files that are newer than files from the archive.
name: keep_newer
- default: false
description: |-
@@ -4275,11 +3956,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -4318,9 +3997,7 @@ script:
description: Unpacks an archive after (optionally) copying it from the local machine.
name: linux-unarchive
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4409,8 +4086,7 @@ script:
- yaml
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- default: false
defaultValue: "False"
@@ -4435,9 +4111,7 @@ script:
description: Manage bits and pieces of XML files or strings
name: linux-xml
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4450,16 +4124,13 @@ script:
name: removes
- description: Change into this directory before running the command.
name: chdir
- - description: Mapping of expected string/regex and string to respond with. If
- the response is a list, successive matches return successive responses. List
- functionality is new in 2.1.
+ - description: Mapping of expected string/regex and string to respond with. If the response is a list, successive matches return successive responses. List functionality is new in 2.1.
isArray: true
name: responses
required: true
- default: false
defaultValue: "30"
- description: Amount of time in seconds to wait for the expected strings. Use
- `null` to disable timeout.
+ description: Amount of time in seconds to wait for the expected strings. Use `null` to disable timeout.
name: timeout
- default: false
defaultValue: "False"
@@ -4468,9 +4139,7 @@ script:
description: Executes a command and responds to prompts.
name: linux-expect
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4478,8 +4147,7 @@ script:
name: name
- default: false
defaultValue: "no"
- description: Install packages from local cache, if the packages were installed
- before
+ description: Install packages from local cache, if the packages were installed before
name: offline
- default: false
defaultValue: "no"
@@ -4504,9 +4172,7 @@ script:
description: Manage bower packages with bower
name: linux-bower
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4515,31 +4181,24 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: The desired state of the Gem bundle. `latest` updates gems to the
- most recent, acceptable version
+ description: The desired state of the Gem bundle. `latest` updates gems to the most recent, acceptable version
name: state
predefined:
- present
- latest
- default: false
defaultValue: temporary working directory
- description: The directory to execute the bundler commands from. This directory
- needs to contain a valid Gemfile or .bundle/ directory
+ description: The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory
name: chdir
- - description: A list of Gemfile groups to exclude during operations. This only
- applies when state is `present`. Bundler considers this a 'remembered' property
- for the Gemfile and will automatically exclude groups in future operations
- even if `exclude_groups` is not set
+ - description: A list of Gemfile groups to exclude during operations. This only applies when state is `present`. Bundler considers this a 'remembered' property for the Gemfile and will automatically exclude groups in future operations even if `exclude_groups` is not set
name: exclude_groups
- default: false
defaultValue: "no"
- description: Only applies if state is `present`. If set removes any gems on
- the target host that are not in the gemfile
+ description: Only applies if state is `present`. If set removes any gems on the target host that are not in the gemfile
name: clean
- default: false
defaultValue: Gemfile in current directory
- description: Only applies if state is `present`. The path to the gemfile to
- use to install gems.
+ description: Only applies if state is `present`. The path to the gemfile to use to install gems.
name: gemfile
- default: false
defaultValue: "no"
@@ -4547,34 +4206,24 @@ script:
name: local
- default: false
defaultValue: "no"
- description: Only applies if state is `present`. If set it will install gems
- in ./vendor/bundle instead of the default location. Requires a Gemfile.lock
- file to have been created prior
+ description: Only applies if state is `present`. If set it will install gems in ./vendor/bundle instead of the default location. Requires a Gemfile.lock file to have been created prior
name: deployment_mode
- default: false
defaultValue: "yes"
- description: Only applies if state is `present`. Installs gems in the local
- user's cache or for all users
+ description: Only applies if state is `present`. Installs gems in the local user's cache or for all users
name: user_install
- default: false
defaultValue: RubyGems gem paths
- description: Only applies if state is `present`. Specifies the directory to
- install the gems into. If `chdir` is set then this path is relative to `chdir`
+ description: Only applies if state is `present`. Specifies the directory to install the gems into. If `chdir` is set then this path is relative to `chdir`
name: gem_path
- - description: Only applies if state is `present`. Specifies the directory to
- install any gem bins files to. When executed the bin files will run within
- the context of the Gemfile and fail if any required gem dependencies are not
- installed. If `chdir` is set then this path is relative to `chdir`
+ - description: Only applies if state is `present`. Specifies the directory to install any gem bins files to. When executed the bin files will run within the context of the Gemfile and fail if any required gem dependencies are not installed. If `chdir` is set then this path is relative to `chdir`
name: binstub_directory
- - description: A space separated string of additional commands that can be applied
- to the Bundler command. Refer to the Bundler documentation for more information
+ - description: A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation for more information
name: extra_args
description: Manage Ruby Gem dependencies with Bundler
name: linux-bundler
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4600,8 +4249,7 @@ script:
name: prefer_source
- default: false
defaultValue: "False"
- description: Forces installation from package dist even for dev versions (see
- --prefer-dist).
+ description: Forces installation from package dist even for dev versions (see --prefer-dist).
name: prefer_dist
- default: false
defaultValue: "True"
@@ -4609,8 +4257,7 @@ script:
name: no_dev
- default: false
defaultValue: "False"
- description: Skips the execution of all scripts defined in composer.json (see
- --no-scripts).
+ description: Skips the execution of all scripts defined in composer.json (see --no-scripts).
name: no_scripts
- default: false
defaultValue: "False"
@@ -4636,20 +4283,16 @@ script:
name: apcu_autoloader
- default: false
defaultValue: "False"
- description: Ignore php, hhvm, lib-* and ext-* requirements and force the installation
- even if the local machine does not fulfill these.
+ description: Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
name: ignore_platform_reqs
description: Dependency Manager for PHP
name: linux-composer
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The name of the Perl library to install. You may use the "full
- distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+ - description: The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
name: name
- description: The local directory from where to install
name: from_path
@@ -4682,9 +4325,7 @@ script:
description: Manages Perl library dependencies.
name: linux-cpanm
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4694,8 +4335,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: The desired state of the gem. `latest` ensures that the latest
- version is installed.
+ description: The desired state of the gem. `latest` ensures that the latest version is installed.
name: state
predefined:
- present
@@ -4715,9 +4355,7 @@ script:
name: user_install
- description: Override the path to the gem executable
name: executable
- - description: Install the gems into a specific directory. These gems will be
- independent from the global installed ones. Specifying this requires user_install
- to be false.
+ - description: Install the gems into a specific directory. These gems will be independent from the global installed ones. Specifying this requires user_install to be false.
name: install_dir
- default: false
defaultValue: "no"
@@ -4742,9 +4380,7 @@ script:
description: Manage Ruby gems
name: linux-gem
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4771,11 +4407,9 @@ script:
Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
Use file://... if the repository is local, added in version 2.6
name: repository_url
- - description: The username to authenticate as to the Maven Repository. Use AWS
- secret key of the repository is hosted on S3
+ - description: The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3
name: username
- - description: The password to authenticate with to the Maven Repository. Use
- AWS secret access key of the repository is hosted on S3
+ - description: The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3
name: password
- description: Add custom HTTP headers to a request in hash/dict format.
isArray: true
@@ -4799,8 +4433,7 @@ script:
name: timeout
- default: false
defaultValue: "yes"
- description: If `no`, SSL certificates will not be validated. This should only
- be set to `no` when no other option exists.
+ description: If `no`, SSL certificates will not be validated. This should only be set to `no` when no other option exists.
name: validate_certs
- default: false
defaultValue: "no"
@@ -4830,11 +4463,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -4873,9 +4504,7 @@ script:
description: Downloads an Artifact from a Maven Repository
name: linux-maven-artifact
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4903,8 +4532,7 @@ script:
name: unsafe_perm
- default: false
defaultValue: "False"
- description: Install packages based on package-lock file, same as running npm
- ci
+ description: Install packages based on package-lock file, same as running npm ci
name: ci
- default: false
defaultValue: "False"
@@ -4924,9 +4552,7 @@ script:
description: Manage node.js packages with npm
name: linux-npm
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4947,9 +4573,7 @@ script:
description: Manage pear/pecl packages
name: linux-pear
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -4958,36 +4582,21 @@ script:
This can be a list (since 2.2) and contain version specifiers (since 2.7).
isArray: true
name: name
- - description: The version number to install of the Python library specified in
- the `name` parameter.
+ - description: The version number to install of the Python library specified in the `name` parameter.
name: version
- - description: The path to a pip requirements file, which should be local to the
- remote system. File can be specified as a relative path if using the chdir
- option.
+ - description: The path to a pip requirements file, which should be local to the remote system. File can be specified as a relative path if using the chdir option.
name: requirements
- - description: An optional path to a `virtualenv` directory to install into. It
- cannot be specified together with the 'executable' parameter (added in 2.1).
- If the virtualenv does not exist, it will be created before installing packages.
- The optional virtualenv_site_packages, virtualenv_command, and virtualenv_python
- options affect the creation of the virtualenv.
+ - description: An optional path to a `virtualenv` directory to install into. It cannot be specified together with the 'executable' parameter (added in 2.1). If the virtualenv does not exist, it will be created before installing packages. The optional virtualenv_site_packages, virtualenv_command, and virtualenv_python options affect the creation of the virtualenv.
name: virtualenv
- default: false
defaultValue: "no"
- description: Whether the virtual environment will inherit packages from the
- global site-packages directory. Note that if this setting is changed on an
- already existing virtual environment it will not have any effect, the environment
- must be deleted and newly created.
+ description: Whether the virtual environment will inherit packages from the global site-packages directory. Note that if this setting is changed on an already existing virtual environment it will not have any effect, the environment must be deleted and newly created.
name: virtualenv_site_packages
- default: false
defaultValue: virtualenv
- description: The command or a pathname to the command to create the virtual
- environment with. For example `pyvenv`, `virtualenv`, `virtualenv2`, `~/bin/virtualenv`,
- `/usr/local/bin/virtualenv`.
+ description: The command or a pathname to the command to create the virtual environment with. For example `pyvenv`, `virtualenv`, `virtualenv2`, `~/bin/virtualenv`, `/usr/local/bin/virtualenv`.
name: virtualenv_command
- - description: The Python executable used for creating the virtual environment.
- For example `python3.5`, `python2.7`. When not specified, the Python version
- used to run the ansible module is used. This parameter should not be used
- when `virtualenv_command` is using `pyvenv` or the `-m venv` module.
+ - description: The Python executable used for creating the virtual environment. For example `python3.5`, `python2.7`. When not specified, the Python version used to run the ansible module is used. This parameter should not be used when `virtualenv_command` is using `pyvenv` or the `-m venv` module.
name: virtualenv_python
- auto: PREDEFINED
default: false
@@ -5015,37 +4624,27 @@ script:
Does not affect the Ansible Python interpreter.
The setuptools package must be installed for both the Ansible Python interpreter and for the version of Python specified by this option.
name: executable
- - description: The system umask to apply before installing the pip package. This
- is useful, for example, when installing on systems that have a very restrictive
- umask by default (e.g., "0077") and you want to pip install packages which
- are to be used by all users. Note that this requires you to specify desired
- umask mode as an octal string, (e.g., "0022").
+ - description: The system umask to apply before installing the pip package. This is useful, for example, when installing on systems that have a very restrictive umask by default (e.g., "0077") and you want to pip install packages which are to be used by all users. Note that this requires you to specify desired umask mode as an octal string, (e.g., "0022").
name: umask
description: Manages Python library dependencies
name: linux-pip
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: '[''pip'']'
- description: A list of the pip executables that will be used to get the packages.
- They can be supplied with the full path or just the executable name, i.e `pip3.7`.
+ description: A list of the pip executables that will be used to get the packages. They can be supplied with the full path or just the executable name, i.e `pip3.7`.
isArray: true
name: clients
description: pip package information
name: linux-pip-package-info
outputs:
- contextPath: Linux.packages.python
- description: A dictionary with each pip client which then contains a list of
- dicts with python package information
+ description: A dictionary with each pip client which then contains a list of dicts with python package information
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5093,23 +4692,17 @@ script:
description: Manage node.js packages with Yarn
name: linux-yarn
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: "no"
- description: During upgrade, reset versioned world dependencies and change logic
- to prefer replacing or downgrading packages (instead of holding them) if the
- currently installed package is no longer available from any repository.
+ description: During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them) if the currently installed package is no longer available from any repository.
name: available
- description: A package name, like `foo`, or multiple packages, like `foo, bar`.
name: name
- - description: A package repository or multiple repositories. Unlike with the
- underlying apk command, this list will override the system repositories rather
- than supplement them.
+ - description: A package repository or multiple repositories. Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
name: repository
- auto: PREDEFINED
default: false
@@ -5126,8 +4719,7 @@ script:
- latest
- default: false
defaultValue: "no"
- description: Update repository indexes. Can be run with other steps or on it's
- own.
+ description: Update repository indexes. Can be run with other steps or on it's own.
name: update_cache
- default: false
defaultValue: "no"
@@ -5136,23 +4728,16 @@ script:
description: Manages apk packages
name: linux-apk
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: A list of package names, like `foo`, or package specifier with
- version, like `foo=1.0`. Name wildcards (fnmatch) like `apt*` and version
- wildcards like `foo=1.0*` are also supported.
+ - description: A list of package names, like `foo`, or package specifier with version, like `foo=1.0`. Name wildcards (fnmatch) like `apt*` and version wildcards like `foo=1.0*` are also supported.
name: name
- auto: PREDEFINED
default: false
defaultValue: present
- description: Indicates the desired package state. `latest` ensures that the
- latest version is installed. `build-dep` ensures the package build dependencies
- are installed. `fixed` attempt to correct a system with broken dependencies
- in place.
+ description: Indicates the desired package state. `latest` ensures that the latest version is installed. `build-dep` ensures the package build dependencies are installed. `fixed` attempt to correct a system with broken dependencies in place.
name: state
predefined:
- absent
@@ -5162,8 +4747,7 @@ script:
- fixed
- default: false
defaultValue: "no"
- description: Run the equivalent of `apt-get update` before the operation. Can
- be run as part of the package installation or as a separate step.
+ description: Run the equivalent of `apt-get update` before the operation. Can be run as part of the package installation or as a separate step.
name: update_cache
- default: false
defaultValue: "0"
@@ -5173,15 +4757,11 @@ script:
name: cache_valid_time
- default: false
defaultValue: "no"
- description: Will force purging of configuration files if the module state is
- set to `absent`.
+ description: Will force purging of configuration files if the module state is set to `absent`.
name: purge
- description: Corresponds to the `-t` option for `apt` and sets pin priorities
name: default_release
- - description: Corresponds to the `--no-install-recommends` option for `apt`.
- `yes` installs recommended packages. `no` does not install recommended packages.
- By default, Ansible will use the same defaults as the operating system. Suggested
- packages are never installed.
+ - description: Corresponds to the `--no-install-recommends` option for `apt`. `yes` installs recommended packages. `no` does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
name: install_recommends
- default: false
defaultValue: "no"
@@ -5232,8 +4812,7 @@ script:
name: autoremove
- default: false
defaultValue: "no"
- description: If `yes`, cleans the local repository of retrieved package files
- that can no longer be downloaded.
+ description: If `yes`, cleans the local repository of retrieved package files that can no longer be downloaded.
name: autoclean
- description: |-
Force the exit code of /usr/sbin/policy-rc.d.
@@ -5252,9 +4831,7 @@ script:
description: Manages apt-packages
name: linux-apt
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5284,16 +4861,12 @@ script:
- present
- default: false
defaultValue: "yes"
- description: If `no`, SSL certificates for the target url will not be validated.
- This should only be used on personally controlled sites using self-signed
- certificates.
+ description: If `no`, SSL certificates for the target url will not be validated. This should only be used on personally controlled sites using self-signed certificates.
name: validate_certs
description: Add or remove an apt key
name: linux-apt-key
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5321,9 +4894,7 @@ script:
description: Manage APT repositories via apt-repo
name: linux-apt-repo
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5344,29 +4915,20 @@ script:
name: mode
- default: false
defaultValue: "yes"
- description: Run the equivalent of `apt-get update` when a change occurs. Cache
- updates are run after making changes.
+ description: Run the equivalent of `apt-get update` when a change occurs. Cache updates are run after making changes.
name: update_cache
- default: false
defaultValue: "yes"
- description: If `no`, SSL certificates for the target repo will not be validated.
- This should only be used on personally controlled sites using self-signed
- certificates.
+ description: If `no`, SSL certificates for the target repo will not be validated. This should only be used on personally controlled sites using self-signed certificates.
name: validate_certs
- - description: Sets the name of the source list file in sources.list.d. Defaults
- to a file name based on the repository source url. The .list extension will
- be automatically added.
+ - description: Sets the name of the source list file in sources.list.d. Defaults to a file name based on the repository source url. The .list extension will be automatically added.
name: filename
- - description: Override the distribution codename to use for PPA repositories.
- Should usually only be set when working with a PPA on a non-Ubuntu target
- (e.g. Debian or Mint)
+ - description: Override the distribution codename to use for PPA repositories. Should usually only be set when working with a PPA on a non-Ubuntu target (e.g. Debian or Mint)
name: codename
description: Add and remove APT repositories
name: linux-apt-repository
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5388,9 +4950,7 @@ script:
description: apt_rpm package manager
name: linux-apt-rpm
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5409,9 +4969,7 @@ script:
description: Dpkg package selection selections
name: linux-dpkg-selections
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5457,9 +5015,7 @@ script:
description: Manage flatpaks
name: linux-flatpak
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5502,9 +5058,7 @@ script:
description: Manage flatpak repository remotes
name: linux-flatpak-remote
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5512,11 +5066,7 @@ script:
name: name
- default: false
defaultValue: /usr/local/bin
- description: A ':' separated list of paths to search for 'brew' executable.
- Since a package (`formula` in homebrew parlance) location is prefixed relative
- to the actual path of `brew` command, providing an alternative `brew` path
- enables managing different set of packages in an alternative location in the
- system.
+ description: A ':' separated list of paths to search for 'brew' executable. Since a package (`formula` in homebrew parlance) location is prefixed relative to the actual path of `brew` command, providing an alternative `brew` path enables managing different set of packages in an alternative location in the system.
name: path
- auto: PREDEFINED
default: false
@@ -5543,9 +5093,7 @@ script:
description: Package manager for Homebrew
name: linux-homebrew
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5569,8 +5117,7 @@ script:
name: sudo_password
- default: false
defaultValue: "no"
- description: update homebrew itself first. Note that `brew cask update` is a
- synonym for `brew update`.
+ description: update homebrew itself first. Note that `brew cask update` is a synonym for `brew update`.
name: update_homebrew
- description: options flags to install a package
name: install_options
@@ -5588,15 +5135,12 @@ script:
name: upgrade
- default: false
defaultValue: "no"
- description: upgrade casks that auto update; passes --greedy to brew cask outdated
- when checking if an installed cask has a newer version available
+ description: upgrade casks that auto update; passes --greedy to brew cask outdated when checking if an installed cask has a newer version available
name: greedy
description: Install/uninstall homebrew casks.
name: linux-homebrew-cask
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5618,25 +5162,19 @@ script:
description: Tap a Homebrew repository.
name: linux-homebrew-tap
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: The overlay id to install, synchronize, or uninstall. Use 'ALL'
- to sync all of the installed overlays (can be used only when `state=updated`).
+ - description: The overlay id to install, synchronize, or uninstall. Use 'ALL' to sync all of the installed overlays (can be used only when `state=updated`).
name: name
required: true
- - description: An URL of the alternative overlays list that defines the overlay
- to install. This list will be fetched and saved under `${overlay_defs}`/${name}.xml),
- where `overlay_defs` is readed from the Layman's configuration.
+ - description: A URL of the alternative overlays list that defines the overlay to install. This list will be fetched and saved under `${overlay_defs}`/${name}.xml), where `overlay_defs` is read from the Layman's configuration.
name: list_url
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether to install (`present`), sync (`updated`), or uninstall
- (`absent`) the overlay.
+ description: Whether to install (`present`), sync (`updated`), or uninstall (`absent`) the overlay.
name: state
predefined:
- present
@@ -5644,16 +5182,12 @@ script:
- updated
- default: false
defaultValue: "yes"
- description: If `no`, SSL certificates will not be validated. This should only
- be set to `no` when no other option exists. Prior to 1.9.3 the code defaulted
- to `no`.
+ description: If `no`, SSL certificates will not be validated. This should only be set to `no` when no other option exists. Prior to 1.9.3 the code defaulted to `no`.
name: validate_certs
description: Manage Gentoo overlays
name: linux-layman
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5677,9 +5211,7 @@ script:
description: Generic OS package manager
name: linux-package
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5701,10 +5233,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: first
- description: This option controls how the module queries the package managers
- on the system. `first` means it will return only information for the first
- supported package manager available. `all` will return information for all
- supported and available package managers on the system.
+ description: This option controls how the module queries the package managers on the system. `first` means it will return only information for the first supported package manager available. `all` will return information for all supported and available package managers on the system.
name: strategy
predefined:
- first
@@ -5713,15 +5242,9 @@ script:
name: linux-package-facts
outputs:
- contextPath: Linux.ansible_facts.packages
- description: '[''Maps the package name to a non-empty list of dicts with package
- information.'', ''Every dict in the list corresponds to one installed version
- of the package.'', ''The fields described below are present for all package
- managers. Depending on the package manager, there might be more fields for
- a package.'']'
+ description: '[''Maps the package name to a non-empty list of dicts with package information.'', ''Every dict in the list corresponds to one installed version of the package.'', ''The fields described below are present for all package managers. Depending on the package manager, there might be more fields for a package.'']'
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -5776,18 +5299,15 @@ script:
name: conf_file
- default: false
defaultValue: "no"
- description: Whether to disable the GPG checking of signatures of packages being
- installed. Has an effect only if state is `present` or `latest`.
+ description: Whether to disable the GPG checking of signatures of packages being installed. Has an effect only if state is `present` or `latest`.
name: disable_gpg_check
- default: false
defaultValue: "no"
- description: Skip packages with broken dependencies(devsolve) and are causing
- problems.
+ description: Skip packages with broken dependencies (depsolve) that are causing problems.
name: skip_broken
- default: false
defaultValue: "no"
- description: Force yum to check if cache is out of date and redownload if needed.
- Has an effect only if state is `present` or `latest`.
+ description: Force yum to check if cache is out of date and redownload if needed. Has an effect only if state is `present` or `latest`.
name: update_cache
- default: false
defaultValue: "yes"
@@ -5804,37 +5324,25 @@ script:
name: update_only
- default: false
defaultValue: /
- description: Specifies an alternative installroot, relative to which all packages
- will be installed.
+ description: Specifies an alternative installroot, relative to which all packages will be installed.
name: installroot
- default: false
defaultValue: "no"
- description: If set to `yes`, and `state=latest` then only installs updates
- that have been marked security related.
+ description: If set to `yes`, and `state=latest` then only installs updates that have been marked security related.
name: security
- default: false
defaultValue: "no"
- description: If set to `yes`, and `state=latest` then only installs updates
- that have been marked bugfix related.
+ description: If set to `yes`, and `state=latest` then only installs updates that have been marked bugfix related.
name: bugfix
- default: false
defaultValue: "no"
- description: Specify if the named package and version is allowed to downgrade
- a maybe already installed higher version of that package. Note that setting
- allow_downgrade=True can make this module behave in a non-idempotent way.
- The task could end up with a set of packages that does not match the complete
- list of specified packages to install (because dependencies between the downgraded
- package and others can cause changes to the packages which were in the earlier
- transaction).
+ description: Specify if the named package and version is allowed to downgrade a maybe already installed higher version of that package. Note that setting allow_downgrade=True can make this module behave in a non-idempotent way. The task could end up with a set of packages that does not match the complete list of specified packages to install (because dependencies between the downgraded package and others can cause changes to the packages which were in the earlier transaction).
name: allow_downgrade
- - description: '`Plugin` name to enable for the install/update operation. The
- enabled plugin will not persist beyond the transaction.'
+ - description: '`Plugin` name to enable for the install/update operation. The enabled plugin will not persist beyond the transaction.'
name: enable_plugin
- - description: '`Plugin` name to disable for the install/update operation. The
- disabled plugins will not persist beyond the transaction.'
+ - description: '`Plugin` name to disable for the install/update operation. The disabled plugins will not persist beyond the transaction.'
name: disable_plugin
- - description: Specifies an alternative release from which all packages will be
- installed.
+ - description: Specifies an alternative release from which all packages will be installed.
name: releasever
- default: false
defaultValue: "no"
@@ -5869,16 +5377,13 @@ script:
description: Manages packages with the I(yum) package manager
name: linux-yum
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- default: false
defaultValue: "yes"
- description: If set to `yes` Yum will download packages and metadata from this
- repo in parallel, if possible.
+ description: If set to `yes` Yum will download packages and metadata from this repo in parallel, if possible.
name: async
- default: false
defaultValue: "0"
@@ -5893,22 +5398,15 @@ script:
name: baseurl
- default: false
defaultValue: "1000"
- description: Relative cost of accessing this repository. Useful for weighing
- one repo's packages as greater/less than any other.
+ description: Relative cost of accessing this repository. Useful for weighing one repo's packages as greater/less than any other.
name: cost
- default: false
defaultValue: "100"
- description: When the relative size of deltarpm metadata vs pkgs is larger than
- this, deltarpm metadata is not downloaded from the repo. Note that you can
- give values over `100`, so `200` means that the metadata is required to be
- half the size of the packages. Use `0` to turn off this check, and always
- download metadata.
+ description: When the relative size of deltarpm metadata vs pkgs is larger than this, deltarpm metadata is not downloaded from the repo. Note that you can give values over `100`, so `200` means that the metadata is required to be half the size of the packages. Use `0` to turn off this check, and always download metadata.
name: deltarpm_metadata_percentage
- default: false
defaultValue: "75"
- description: When the relative size of delta vs pkg is larger than this, delta
- is not used. Use `0` to turn off delta rpm processing. Local repositories
- (with file:// `baseurl`) have delta rpms turned off by default.
+ description: When the relative size of delta vs pkg is larger than this, delta is not used. Use `0` to turn off delta rpm processing. Local repositories (with file:// `baseurl`) have delta rpms turned off by default.
name: deltarpm_percentage
- description: |-
A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
@@ -5920,8 +5418,7 @@ script:
name: enabled
- default: false
defaultValue: "yes"
- description: Determines whether yum will allow the use of package groups for
- this repository.
+ description: Determines whether yum will allow the use of package groups for this repository.
name: enablegroups
- description: |-
List of packages to exclude from updates or installs. This should be a space separated list. Shell globs using wildcards (eg. `*` and `?`) are allowed.
@@ -5937,8 +5434,7 @@ script:
predefined:
- roundrobin
- priority
- - description: File name without the `.repo` extension to save the repo in. Defaults
- to the value of `name`.
+ - description: File name without the `.repo` extension to save the repo in. Defaults to the value of `name`.
name: file
- description: A URL pointing to the ASCII-armored CA key file for the repository.
name: gpgcakey
@@ -5963,10 +5459,7 @@ script:
- all
- packages
- none
- - description: Include external configuration file. Both, local path and URL is
- supported. Configuration file will be inserted at the position of the `include=`
- line. Included files may contain further include lines. Yum will abort with
- an error if an inclusion loop is detected.
+ - description: Include external configuration file. Both, local path and URL is supported. Configuration file will be inserted at the position of the `include=` line. Included files may contain further include lines. Yum will abort with an error if an inclusion loop is detected.
name: include
- description: |-
List of packages you want to only use from a repository. This should be a space separated list. Shell globs using wildcards (eg. `*` and `?`) are allowed. Substitution variables (e.g. `$releasever`) are honored here.
@@ -5988,15 +5481,12 @@ script:
- whatever
- default: false
defaultValue: "no"
- description: This tells yum whether or not HTTP/1.1 keepalive should be used
- with this repository. This can improve transfer speeds by using one connection
- when downloading multiple files from a repository.
+ description: This tells yum whether or not HTTP/1.1 keepalive should be used with this repository. This can improve transfer speeds by using one connection when downloading multiple files from a repository.
name: keepalive
- auto: PREDEFINED
default: false
defaultValue: "1"
- description: Either `1` or `0`. Determines whether or not yum keeps the cache
- of headers and packages after successful installation.
+ description: Either `1` or `0`. Determines whether or not yum keeps the cache of headers and packages after successful installation.
name: keepcache
predefined:
- "0"
@@ -6054,8 +5544,7 @@ script:
defaultValue: "no"
description: Protect packages from updates from other repositories.
name: protect
- - description: URL to the proxy server that yum should use. Set to `_none_` to
- disable the global proxy setting.
+ - description: URL to the proxy server that yum should use. Set to `_none_` to disable the global proxy setting.
name: proxy
- description: Password for this proxy.
name: proxy_password
@@ -6063,8 +5552,7 @@ script:
name: proxy_username
- default: false
defaultValue: "no"
- description: This tells yum whether or not it should perform a GPG signature
- check on the repodata from this repository.
+ description: This tells yum whether or not it should perform a GPG signature check on the repodata from this repository.
name: repo_gpgcheck
- default: false
defaultValue: /etc/yum.repos.d
@@ -6072,8 +5560,7 @@ script:
name: reposdir
- default: false
defaultValue: "10"
- description: Set the number of times any attempt to retrieve a file should retry
- before returning an error. Setting this to `0` makes yum try forever.
+ description: Set the number of times any attempt to retrieve a file should retry before returning an error. Setting this to `0` makes yum try forever.
name: retries
- default: false
defaultValue: "no"
@@ -6083,9 +5570,7 @@ script:
name: s3_enabled
- default: false
defaultValue: "no"
- description: If set to `yes` yum will continue running if this repository cannot
- be contacted for any reason. This should be set carefully as all repos are
- consulted for any given command.
+ description: If set to `yes` yum will continue running if this repository cannot be contacted for any reason. This should be set carefully as all repos are consulted for any given command.
name: skip_if_unavailable
- default: false
defaultValue: "no"
@@ -6093,14 +5578,11 @@ script:
Whether yum should check the permissions on the paths for the certificates on the repository (both remote and local).
If we can't read any of the files then yum will force `skip_if_unavailable` to be `yes`. This is most useful for non-root processes which use yum on repos that have client cert files which are readable only by root.
name: ssl_check_cert_permissions
- - description: Path to the directory containing the databases of the certificate
- authorities yum should use to verify SSL certificates.
+ - description: Path to the directory containing the databases of the certificate authorities yum should use to verify SSL certificates.
name: sslcacert
- - description: Path to the SSL client certificate yum should use to connect to
- repos/remote sites.
+ - description: Path to the SSL client certificate yum should use to connect to repos/remote sites.
name: sslclientcert
- - description: Path to the SSL client key yum should use to connect to repos/remote
- sites.
+ - description: Path to the SSL client key yum should use to connect to repos/remote sites.
name: sslclientkey
- default: false
defaultValue: "yes"
@@ -6124,12 +5606,9 @@ script:
name: timeout
- default: false
defaultValue: releasever basearch
- description: When a repository id is displayed, append these yum variables to
- the string if they are used in the `baseurl`/etc. Variables are appended in
- the order listed (and found).
+ description: When a repository id is displayed, append these yum variables to the string if they are used in the `baseurl`/etc. Variables are appended in the order listed (and found).
name: ui_repoid_vars
- - description: Username to use for basic authentication to a repo or really any
- url.
+ - description: Username to use for basic authentication to a repo or really any url.
name: username
- description: |-
The permissions the resulting file or directory should have.
@@ -6139,11 +5618,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -6182,9 +5659,7 @@ script:
description: Add or remove YUM repositories
name: linux-yum-repository
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6225,30 +5700,23 @@ script:
name: extra_args_precommand
- default: false
defaultValue: "no"
- description: Whether to disable to GPG signature checking of the package signature
- being installed. Has an effect only if state is `present` or `latest`.
+ description: Whether to disable the GPG signature checking of the package signature being installed. Has an effect only if state is `present` or `latest`.
name: disable_gpg_check
- default: false
defaultValue: "yes"
- description: Corresponds to the `--no-recommends` option for `zypper`. Default
- behavior (`yes`) modifies zypper's default behavior; `no` does install recommended
- packages.
+ description: Corresponds to the `--no-recommends` option for `zypper`. Default behavior (`yes`) modifies zypper's default behavior; `no` does install recommended packages.
name: disable_recommends
- default: false
defaultValue: "no"
- description: Adds `--force` option to `zypper`. Allows to downgrade packages
- and change vendor or architecture.
+ description: Adds `--force` option to `zypper`. Allows to downgrade packages and change vendor or architecture.
name: force
- default: false
defaultValue: "no"
- description: Run the equivalent of `zypper refresh` before the operation. Disabled
- in check mode.
+ description: Run the equivalent of `zypper refresh` before the operation. Disabled in check mode.
name: update_cache
- default: false
defaultValue: "no"
- description: Adds `--oldpackage` option to `zypper`. Allows to downgrade packages
- with less side-effects than force. This is implied as soon as a version is
- specified as part of the package name.
+ description: Adds `--oldpackage` option to `zypper`. Allows to downgrade packages with less side-effects than force. This is implied as soon as a version is specified as part of the package name.
name: oldpackage
- description: |-
Add additional options to `zypper` command.
@@ -6257,9 +5725,7 @@ script:
description: Manage packages on SUSE and openSUSE
name: linux-zypper
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6293,8 +5759,7 @@ script:
name: priority
- default: false
defaultValue: "no"
- description: Overwrite multiple repository entries, if repositories with both
- name and URL already exist.
+ description: Overwrite multiple repository entries, if repositories with both name and URL already exist.
name: overwrite_multiple
- default: false
defaultValue: "no"
@@ -6317,9 +5782,7 @@ script:
description: Add and remove Zypper repositories
name: linux-zypper-repository
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6336,30 +5799,23 @@ script:
- present
- default: false
defaultValue: "False"
- description: Confinement policy. The classic confinement allows a snap to have
- the same level of access to the system as "classic" packages, like those managed
- by APT. This option corresponds to the --classic argument. This option can
- only be specified if there is a single snap in the task.
+ description: Confinement policy. The classic confinement allows a snap to have the same level of access to the system as "classic" packages, like those managed by APT. This option corresponds to the --classic argument. This option can only be specified if there is a single snap in the task.
name: classic
- default: false
defaultValue: stable
- description: Define which release of a snap is installed and tracked for updates.
- This option can only be specified if there is a single snap in the task.
+ description: Define which release of a snap is installed and tracked for updates. This option can only be specified if there is a single snap in the task.
name: channel
description: Manages snaps
name: linux-snap
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: whether to register and subscribe (`present`), or unregister (`absent`)
- a system
+ description: whether to register and subscribe (`present`), or unregister (`absent`) a system
name: state
predefined:
- present
@@ -6368,11 +5824,9 @@ script:
name: username
- description: access.redhat.com or Sat6 password
name: password
- - description: Specify an alternative Red Hat Subscription Management or Sat6
- server
+ - description: Specify an alternative Red Hat Subscription Management or Sat6 server
name: server_hostname
- - description: Enable or disable https server certificate verification when connecting
- to `server_hostname`
+ - description: Enable or disable https server certificate verification when connecting to `server_hostname`
name: server_insecure
- description: Specify CDN baseurl
name: rhsm_baseurl
@@ -6396,8 +5850,7 @@ script:
name: activationkey
- description: Organization ID to use in conjunction with activationkey
name: org_id
- - description: Register with a specific environment in the destination org. Used
- with Red Hat Satellite 6.x or Katello
+ - description: Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
name: environment
- default: false
defaultValue: ^$
@@ -6432,22 +5885,13 @@ script:
name: release
- default: false
defaultValue: '{}'
- description: Set syspurpose attributes in file `/etc/rhsm/syspurpose/syspurpose.json`
- and synchronize these attributes with RHSM server. Syspurpose attributes help
- attach the most appropriate subscriptions to the system automatically. When
- `syspurpose.json` file already contains some attributes, then new attributes
- overwrite existing attributes. When some attribute is not listed in the new
- list of attributes, the existing attribute will be removed from `syspurpose.json`
- file. Unknown attributes are ignored.
+ description: Set syspurpose attributes in file `/etc/rhsm/syspurpose/syspurpose.json` and synchronize these attributes with RHSM server. Syspurpose attributes help attach the most appropriate subscriptions to the system automatically. When `syspurpose.json` file already contains some attributes, then new attributes overwrite existing attributes. When some attribute is not listed in the new list of attributes, the existing attribute will be removed from `syspurpose.json` file. Unknown attributes are ignored.
isArray: true
name: syspurpose
- description: Manage registration and subscriptions to RHSM using the C(subscription-manager)
- command
+ description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
name: linux-redhat-subscription
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6459,8 +5903,7 @@ script:
required: true
- default: false
defaultValue: present
- description: Whether the channel should be present or not, taking action if
- the state is different from what is stated.
+ description: Whether the channel should be present or not, taking action if the state is different from what is stated.
name: state
- description: The full URL to the RHN/Satellite API.
name: url
@@ -6474,9 +5917,7 @@ script:
description: Adds or removes Red Hat software channels
name: linux-rhn-channel
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6506,8 +5947,7 @@ script:
name: systemorgid
- default: false
defaultValue: '[]'
- description: Optionally specify a list of channels to subscribe to upon successful
- registration.
+ description: Optionally specify a list of channels to subscribe to upon successful registration.
isArray: true
name: channels
- default: false
@@ -6516,15 +5956,12 @@ script:
name: enable_eus
- default: false
defaultValue: "False"
- description: If `yes`, the registered node will not upload its installed packages
- information to Satellite server.
+ description: If `yes`, the registered node will not upload its installed packages information to Satellite server.
name: nopackages
description: Manage Red Hat Network registration using the C(rhnreg_ks) command
name: linux-rhn-register
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6534,17 +5971,14 @@ script:
description: Set or Unset RHSM Release version
name: linux-rhsm-release
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: If state is equal to present or disabled, indicates the desired
- repository state.
+ description: If state is equal to present or disabled, indicates the desired repository state.
name: state
predefined:
- present
@@ -6559,22 +5993,16 @@ script:
required: true
- default: false
defaultValue: "False"
- description: Disable all currently enabled repositories that are not not specified
- in `name`. Only set this to `True` if passing in a list of repositories to
- the `name` field. Using this with `loop` will most likely not have the desired
- result.
+ description: Disable all currently enabled repositories that are not specified in `name`. Only set this to `True` if passing in a list of repositories to the `name` field. Using this with `loop` will most likely not have the desired result.
name: purge
description: Manage RHSM repositories using the subscription-manager command
name: linux-rhsm-repository
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- - description: Key that will be modified. Can be a url, a file on the managed
- node, or a keyid if the key already exists in the database.
+ - description: Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key already exists in the database.
name: key
required: true
- auto: PREDEFINED
@@ -6598,9 +6026,7 @@ script:
description: Adds or removes a gpg key from the rpm db
name: linux-rpm-key
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -6628,14 +6054,10 @@ script:
name: force
- default: false
defaultValue: "False"
- description: Create a backup file including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
+ description: Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
name: backup
- default: false
- description: If a SHA-256 checksum is passed to this parameter, the digest of
- the destination file will be calculated after it is downloaded to ensure its
- integrity and verify that the transfer completed successfully. This option
- is deprecated. Use `checksum` instead.
+ description: If a SHA-256 checksum is passed to this parameter, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. This option is deprecated. Use `checksum` instead.
name: sha256sum
- default: false
description: |-
@@ -6646,8 +6068,7 @@ script:
name: checksum
- default: false
defaultValue: "True"
- description: if `no`, it will not use a proxy, even if one is defined in an
- environment variable on the target hosts.
+ description: if `no`, it will not use a proxy, even if one is defined in an environment variable on the target hosts.
name: use_proxy
- default: false
defaultValue: "True"
@@ -6701,11 +6122,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
diff --git a/Packs/Ansible_Powered_Integrations/Integrations/OpenSSL/OpenSSL.yml b/Packs/Ansible_Powered_Integrations/Integrations/OpenSSL/OpenSSL.yml
index fd0991b73456..761746861163 100644
--- a/Packs/Ansible_Powered_Integrations/Integrations/OpenSSL/OpenSSL.yml
+++ b/Packs/Ansible_Powered_Integrations/Integrations/OpenSSL/OpenSSL.yml
@@ -3,21 +3,18 @@ commonfields:
id: OpenSSL
version: -1
configuration:
-- additionalinfo: The credentials to associate with the instance. SSH keys can be
- configured using the credential manager.
+- additionalinfo: The credentials to associate with the instance. SSH keys can be configured using the credential manager, under the Certificate field.
display: Username
name: creds
required: true
type: 9
-- additionalinfo: The default port to use if one is not specified in the commands
- `host` argument.
+- additionalinfo: The default port to use if one is not specified in the command's `host` argument.
defaultvalue: "22"
display: Default SSH Port
name: port
required: true
type: 0
-- additionalinfo: If multiple hosts are specified in a command, how many hosts should
- be interacted with concurrently.
+- additionalinfo: If multiple hosts are specified in a command, how many hosts should be interacted with concurrently.
defaultvalue: "4"
display: Concurrecy Factor
name: concurrency
@@ -29,23 +26,19 @@ name: OpenSSL
script:
commands:
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the certificate should exist or not, taking action if the
- state is different from what is stated.
+ description: Whether the certificate should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- absent
- present
- - description: Remote absolute path where the generated certificate file should
- be created or is already located.
+ - description: Remote absolute path where the generated certificate file should be created or is already located.
name: path
required: true
- auto: PREDEFINED
@@ -431,11 +424,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -474,9 +465,7 @@ script:
description: Generate and/or check OpenSSL certificates
name: openssl-certificate
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -514,25 +503,21 @@ script:
description: The Base64 encoded value (in DER format) of the extension
type: string
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the certificate signing request should exist or not, taking
- action if the state is different from what is stated.
+ description: Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- absent
- present
- default: false
defaultValue: sha256
- description: The digest used when signing the certificate signing request with
- the private key.
+ description: The digest used when signing the certificate signing request with the private key.
name: digest
- description: |-
The path to the private key to use when signing the certificate signing request.
@@ -550,11 +535,9 @@ script:
name: version
- default: false
defaultValue: "False"
- description: Should the certificate signing request be forced regenerated by
- this ansible module.
+ description: Should the certificate signing request be forced regenerated by this ansible module.
name: force
- - description: The name of the file into which the generated OpenSSL certificate
- signing request will be written.
+ - description: The name of the file into which the generated OpenSSL certificate signing request will be written.
name: path
required: true
- description: |-
@@ -564,15 +547,13 @@ script:
name: subject
- description: The countryName field of the certificate signing request subject.
name: country_name
- - description: The stateOrProvinceName field of the certificate signing request
- subject.
+ - description: The stateOrProvinceName field of the certificate signing request subject.
name: state_or_province_name
- description: The localityName field of the certificate signing request subject.
name: locality_name
- description: The organizationName field of the certificate signing request subject.
name: organization_name
- - description: The organizationalUnitName field of the certificate signing request
- subject.
+ - description: The organizationalUnitName field of the certificate signing request subject.
name: organizational_unit_name
- description: The commonName field of the certificate signing request subject.
name: common_name
@@ -590,17 +571,14 @@ script:
name: subject_alt_name_critical
- default: false
defaultValue: "True"
- description: If set to `yes`, the module will fill the common name in for `subject_alt_name`
- with `DNS:` prefix if no SAN is specified.
+ description: If set to `yes`, the module will fill the common name in for `subject_alt_name` with `DNS:` prefix if no SAN is specified.
name: use_common_name_for_san
- - description: This defines the purpose (e.g. encipherment, signature, certificate
- signing) of the key contained in the certificate.
+ - description: This defines the purpose (e.g. encipherment, signature, certificate signing) of the key contained in the certificate.
isArray: true
name: key_usage
- description: Should the keyUsage extension be considered as critical.
name: key_usage_critical
- - description: Additional restrictions (e.g. client authentication, server authentication)
- on the allowed purposes for which the public key may be used.
+ - description: Additional restrictions (e.g. client authentication, server authentication) on the allowed purposes for which the public key may be used.
isArray: true
name: extended_key_usage
- description: Should the extkeyUsage extension be considered as critical.
@@ -610,8 +588,7 @@ script:
name: basic_constraints
- description: Should the basicConstraints extension be considered as critical.
name: basic_constraints_critical
- - description: Indicates that the certificate should contain the OCSP Must Staple
- extension (`https://tools.ietf.org/html/rfc7633`).
+ - description: Indicates that the certificate should contain the OCSP Must Staple extension (`https://tools.ietf.org/html/rfc7633`).
name: ocsp_must_staple
- description: |-
Should the OCSP Must Staple extension be considered as critical
@@ -633,8 +610,7 @@ script:
- pyopenssl
- default: false
defaultValue: "False"
- description: Create a backup file including a timestamp so you can get the original
- CSR back if you overwrote it with a new one by accident.
+ description: Create a backup file including a timestamp so you can get the original CSR back if you overwrote it with a new one by accident.
name: backup
- default: false
defaultValue: "False"
@@ -682,11 +658,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -725,9 +699,7 @@ script:
description: Generate OpenSSL Certificate Signing Request (CSR)
name: openssl-csr
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -758,17 +730,14 @@ script:
description: The Base64 encoded value (in DER format) of the extension
type: string
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the parameters should exist or not, taking action if the
- state is different from what is stated.
+ description: Whether the parameters should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- absent
@@ -786,8 +755,7 @@ script:
required: true
- default: false
defaultValue: "False"
- description: Create a backup file including a timestamp so you can get the original
- DH params back if you overwrote them with new ones by accident.
+ description: Create a backup file including a timestamp so you can get the original DH params back if you overwrote them with new ones by accident.
name: backup
- description: |-
The permissions the resulting file or directory should have.
@@ -797,11 +765,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -840,9 +806,7 @@ script:
description: Generate OpenSSL Diffie-Hellman Parameters
name: openssl-dhparam
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -854,8 +818,7 @@ script:
predefined:
- export
- parse
- - description: List of other certificates to include. Pre 2.8 this parameter was
- called `ca_certificates`
+ - description: List of other certificates to include. Pre 2.8 this parameter was called `ca_certificates`
isArray: true
name: other_certificates
- description: |-
@@ -888,8 +851,7 @@ script:
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the file should exist or not. All parameters except `path`
- are ignored when state is `absent`.
+ description: Whether the file should exist or not. All parameters except `path` are ignored when state is `absent`.
name: state
predefined:
- absent
@@ -898,8 +860,7 @@ script:
name: src
- default: false
defaultValue: "False"
- description: Create a backup file including a timestamp so you can get the original
- output file back if you overwrote it with a new one by accident.
+ description: Create a backup file including a timestamp so you can get the original output file back if you overwrote it with a new one by accident.
name: backup
- description: |-
The permissions the resulting file or directory should have.
@@ -909,11 +870,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -952,17 +911,14 @@ script:
description: Generate OpenSSL PKCS#12 archive
name: openssl-pkcs12
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the private key should exist or not, taking action if the
- state is different from what is stated.
+ description: Whether the private key should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- absent
@@ -1016,8 +972,7 @@ script:
defaultValue: "False"
description: Should the key be regenerated even if it already exists.
name: force
- - description: Name of the file in which the generated TLS/SSL private key will
- be written. It will have 0600 mode.
+ - description: Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
name: path
required: true
- description: The passphrase for the private key.
@@ -1042,8 +997,7 @@ script:
- pyopenssl
- default: false
defaultValue: "False"
- description: Create a backup file including a timestamp so you can get the original
- private key back if you overwrote it with a new one by accident.
+ description: Create a backup file including a timestamp so you can get the original private key back if you overwrote it with a new one by accident.
name: backup
- description: |-
The permissions the resulting file or directory should have.
@@ -1053,11 +1007,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -1096,9 +1048,7 @@ script:
description: Generate OpenSSL private keys
name: openssl-privatekey
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1131,17 +1081,14 @@ script:
description: Provide information for OpenSSL private keys
name: openssl-privatekey-info
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
- auto: PREDEFINED
default: false
defaultValue: present
- description: Whether the public key should exist or not, taking action if the
- state is different from what is stated.
+ description: Whether the public key should exist or not, taking action if the state is different from what is stated.
name: state
predefined:
- absent
@@ -1158,8 +1105,7 @@ script:
predefined:
- OpenSSH
- PEM
- - description: Name of the file in which the generated TLS/SSL public key will
- be written.
+ - description: Name of the file in which the generated TLS/SSL public key will be written.
name: path
required: true
- description: |-
@@ -1170,8 +1116,7 @@ script:
name: privatekey_passphrase
- default: false
defaultValue: "False"
- description: Create a backup file including a timestamp so you can get the original
- public key back if you overwrote it with a different one by accident.
+ description: Create a backup file including a timestamp so you can get the original public key back if you overwrote it with a different one by accident.
name: backup
- auto: PREDEFINED
default: false
@@ -1194,11 +1139,9 @@ script:
As of Ansible 2.6, the mode may also be the special string `preserve`.
When set to `preserve` the file will be given the same permissions as the source file.
name: mode
- - description: Name of the user that should own the file/directory, as would be
- fed to `chown`.
+ - description: Name of the user that should own the file/directory, as would be fed to `chown`.
name: owner
- - description: Name of the group that should own the file/directory, as would
- be fed to `chown`.
+ - description: Name of the group that should own the file/directory, as would be fed to `chown`.
name: group
- description: |-
The user part of the SELinux file context.
@@ -1237,9 +1180,7 @@ script:
description: Generate an OpenSSL public key from its private key.
name: openssl-publickey
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
@@ -1268,9 +1209,7 @@ script:
description: Complete certificate chain given a set of untrusted and root certificates
name: openssl-certificate-complete-chain
- arguments:
- - description: hostname or IP of target. Optionally the port can be specified
- using :PORT. If multiple targets are specified using an array, the integration
- will use the configured concurrency factor for high performance.
+ - description: hostname or IP of target. Optionally the port can be specified using :PORT. If multiple targets are specified using an array, the integration will use the configured concurrency factor for high performance.
isArray: true
name: host
required: true
diff --git a/Packs/Ansible_Powered_Integrations/ReleaseNotes/2_0_3.md b/Packs/Ansible_Powered_Integrations/ReleaseNotes/2_0_3.md
new file mode 100644
index 000000000000..4429c40aec8c
--- /dev/null
+++ b/Packs/Ansible_Powered_Integrations/ReleaseNotes/2_0_3.md
@@ -0,0 +1,12 @@
+
+#### Integrations
+##### Ansible ACME (Deprecated)
+- Documentation and metadata improvements.
+##### Ansible Cisco NXOS (Deprecated)
+- Documentation and metadata improvements.
+##### Ansible Linux (Deprecated)
+- Documentation and metadata improvements.
+##### Ansible Cisco IOS (Deprecated)
+- Documentation and metadata improvements.
+##### Ansible OpenSSL (Deprecated)
+- Documentation and metadata improvements.
diff --git a/Packs/Ansible_Powered_Integrations/pack_metadata.json b/Packs/Ansible_Powered_Integrations/pack_metadata.json
index cc66f7956d96..8c179c5bd65e 100644
--- a/Packs/Ansible_Powered_Integrations/pack_metadata.json
+++ b/Packs/Ansible_Powered_Integrations/pack_metadata.json
@@ -3,7 +3,7 @@
"description": "Deprecated. Use Ansible Microsoft Windows instead.",
"hidden": true,
"support": "community",
- "currentVersion": "2.0.2",
+ "currentVersion": "2.0.3",
"author": "Serge Bakharev",
"url": "https://github.com/SergeBakharev/Ansible-for-XSOAR",
"email": "serge.bakharev@gmail.com",
@@ -18,4 +18,4 @@
"xsoar",
"marketplacev2"
]
-}
+}
\ No newline at end of file
diff --git a/Packs/ApacheTomcat/ParsingRules/ApacheTomcatParsingRules/ApacheTomcatParsingRules.xif b/Packs/ApacheTomcat/ParsingRules/ApacheTomcatParsingRules/ApacheTomcatParsingRules.xif
index 8195e85e5394..6c1dc64b2715 100644
--- a/Packs/ApacheTomcat/ParsingRules/ApacheTomcatParsingRules/ApacheTomcatParsingRules.xif
+++ b/Packs/ApacheTomcat/ParsingRules/ApacheTomcatParsingRules/ApacheTomcatParsingRules.xif
@@ -1,18 +1,12 @@
-[INGEST:vendor="apache", product="tomcat", target_dataset="apache_tomcat_raw", no_hit=drop]
+[INGEST:vendor="apache", product="tomcat", target_dataset="apache_tomcat_raw", no_hit=keep]
alter
tmp_get_date = arrayindex(regextract(_raw_log, "\[(\d+\/\w{3}\/\d{4})\:"), 0),
tmp_get_time = arrayindex(regextract(_raw_log, "\:(\d{2}\:\d{2}\:\d{2})\s"), 0),
- tmp_zhrs_part = to_integer(arraystring(regextract(_raw_log, "\:\d{2}\:\d{2}\:\d{2}\s[\+|\-](\d{2})"), "")),
- tmp_zmins_part = to_integer(arraystring(regextract(_raw_log, "\:\d{2}\:\d{2}\:\d{2}\s[\+|\-]\d{2}(\d{2})\]"), ""))
+ tmp_get_zone = arrayindex(regextract(_raw_log, "\[\d+\/\w{3}\/\d{4}\:\d{2}\:\d{2}\:\d{2}\s([\+|\-]\d{4})]"), 0)
| alter
- tmp_timestamp = to_epoch(parse_timestamp("%d/%b/%Y %H:%M:%S", arraystring(arraycreate(tmp_get_date, tmp_get_time), " ")), "seconds"),
- tmp_zhrssec_part = if(tmp_zhrs_part <= 24, multiply(tmp_zhrs_part, 3600), 0),
- tmp_zminsec_part = if(tmp_zmins_part <= 60, multiply(tmp_zmins_part, 60), 0)
+ tmp_date_time = arraystring(arraycreate(tmp_get_date, tmp_get_time), " ")
| alter
- tmp_change_seconds = add(tmp_zhrssec_part, tmp_zminsec_part),
- tmp_zone_validate_po_ne = arraystring(regextract(_raw_log, "\:\d{2}\:\d{2}\:\d{2}\s(\+|\-)\d{4}\]"), "")
+ tmp_full_time = arraystring(arraycreate(tmp_date_time, tmp_get_zone), " ")
| alter
- tmp_timeseconds = if(tmp_zone_validate_po_ne = "+", add(tmp_timestamp, tmp_change_seconds), subtract(tmp_timestamp, tmp_change_seconds))
-| alter
- _time = if( tmp_timeseconds != null, to_timestamp(to_integer(tmp_timeseconds), "seconds"), _insert_time)
-| fields -tmp_get_date, tmp_get_time, tmp_zhrs_part, tmp_zmins_part, tmp_timestamp, tmp_zhrssec_part, tmp_zminsec_part, tmp_change_seconds, tmp_zone_validate_po_ne, tmp_timeseconds;
+ _time = parse_timestamp("%d/%b/%Y %H:%M:%S %z", tmp_full_time)
+| fields -tmp_get_date, tmp_get_time, tmp_get_zone, tmp_date_time, tmp_full_time;
\ No newline at end of file
diff --git a/Packs/ApacheTomcat/README.md b/Packs/ApacheTomcat/README.md
index b728a583b190..4792764cc054 100644
--- a/Packs/ApacheTomcat/README.md
+++ b/Packs/ApacheTomcat/README.md
@@ -1,5 +1,5 @@
### Broker VM
-You will need to use the information described [here](https://docs.paloaltonetworks.com/cortex/cortex-xdr/cortex-xdr-pro-admin/broker-vm/set-up-broker-vm/configure-your-broker-vm).\
+You will need to use the information described [here](https://docs-cortex.paloaltonetworks.com/r/Cortex-XDR/Cortex-XDR-Pro-Administrator-Guide/Configure-the-Broker-VM).\
You can configure the specific vendor and product for this instance.
1. Navigate to **Settings** -> **Configuration** -> **Data Broker** -> **Broker VMs**.
2. Right-click, and select **Syslog Collector** -> **Configure**.
diff --git a/Packs/ApacheTomcat/ReleaseNotes/1_0_5.md b/Packs/ApacheTomcat/ReleaseNotes/1_0_5.md
new file mode 100644
index 000000000000..86aef82a491c
--- /dev/null
+++ b/Packs/ApacheTomcat/ReleaseNotes/1_0_5.md
@@ -0,0 +1,4 @@
+
+#### Parsing Rules
+##### Apache Tomcat Parsing Rule
+- Maintenance and stability.
diff --git a/Packs/ApacheTomcat/pack_metadata.json b/Packs/ApacheTomcat/pack_metadata.json
index 976c1d9405c1..a065bfb34e8d 100644
--- a/Packs/ApacheTomcat/pack_metadata.json
+++ b/Packs/ApacheTomcat/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Apache Tomcat",
"description": "Modeling Rules for the Apache Tomcat logs collector",
"support": "xsoar",
- "currentVersion": "1.0.4",
+ "currentVersion": "1.0.5",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/ApacheWebServer/ModelingRules/ApacheWebServerModelingRules_1_3/ApacheWebServerModelingRules_1_3.xif b/Packs/ApacheWebServer/ModelingRules/ApacheWebServerModelingRules_1_3/ApacheWebServerModelingRules_1_3.xif
index d4cbdaaa905c..2148e07d8be7 100644
--- a/Packs/ApacheWebServer/ModelingRules/ApacheWebServerModelingRules_1_3/ApacheWebServerModelingRules_1_3.xif
+++ b/Packs/ApacheWebServer/ModelingRules/ApacheWebServerModelingRules_1_3/ApacheWebServerModelingRules_1_3.xif
@@ -1,8 +1,6 @@
[MODEL: dataset="apache_httpd_raw"]
-filter
- _raw_log contains "emerg" or _raw_log contains "alert" or _raw_log contains "error" or _raw_log contains "warn" or _raw_log contains "notice" or _raw_log contains "info" or _raw_log contains "debug" or _raw_log contains "trace1"
-| alter log_level = arrayindex(regextract(_raw_log,"\[\w+\:(\w+)\]"),0),
- Tid = arrayindex(regextract(_raw_log,"tid\s(\d+)"),0),
+filter _raw_log contains "[info]" or _raw_log contains "[alert]" or _raw_log contains "[crit]" or _raw_log contains "[error]" or _raw_log contains "[warn]" or _raw_log contains "[notice]" or _raw_log contains "[debug]"
+| alter Tid = arrayindex(regextract(_raw_log,"tid\s(\d+)"),0),
sourceipv4 = arrayindex(regextract(_raw_log,"client\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"),0),
sourceipv6 = arrayindex(regextract(_raw_log,"client\s(\w+\:\w+\:\w+\:\w+\:\w+\:\w+\:\w+\:\w+)"),0)
| alter sourceip = coalesce(sourceipv4,sourceipv6)
@@ -12,33 +10,34 @@ filter
message = arrayindex(regextract(_raw_log,"client\s\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\S+[^\:]+\:([^\@]+)"),0)
| alter
xdm.source.process.pid = to_number(pid),
- xdm.alert.severity = log_level,
+ xdm.alert.severity = arrayindex(regextract(_raw_log, "]:\s*\[([^\]]+)\]"),0),
xdm.source.process.thread_id = to_number(tid),
xdm.source.ipv4 = sourceip,
xdm.source.user.identifier = error_status_code,
xdm.target.resource.id = Source_code_file_line,
- xdm.alert.description = message;
-filter
- _raw_log contains "GET" or _raw_log contains "HEAD" or _raw_log contains "POST" or _raw_log contains "DELETE" or _raw_log contains "CONNECT" or _raw_log contains "OPTIONS" or _raw_log contains "TRACE" or _raw_log contains "PATCH"
-| alter
- Username = arrayindex(regextract(_raw_log,"(\S+)\s\[\d+\/"),0),
- requested_line = arrayindex(regextract(_raw_log,"\"[A-Z]+\s(\S+)\sHTTP"),0),
- statuscode = arrayindex(regextract(_raw_log,"\"\s(\d+)\s\d+"),0),
- User_agent = arrayindex(regextract(_raw_log,"\"\s\"([^\"]+)"),0),
- Referrer = arrayindex(regextract(_raw_log,"(http[^\"]+)"),0),
- bytes_size = arrayindex(regextract(_raw_log,"\d\s(\d+)"),0),
- httpMethod=arrayindex(regextract(_raw_log, "\"([A-Z]+)\s\S+\sHTTP"),0),
+ xdm.alert.description = message,
+ xdm.event.type = "Error Logs";
+filter _raw_log contains "\"ACL" or _raw_log contains "\"BASELINE_CONTROL" or _raw_log contains "\"BIND" or _raw_log contains "\"CHECKIN" or _raw_log contains "\"CHECKOUT" or _raw_log contains "\"CONNECT" or _raw_log contains "\"COPY" or _raw_log contains "\"DELETE" or _raw_log contains "\"GET" or _raw_log contains "\"HEAD" or _raw_log contains "\"LABEL" or _raw_log contains "\"LINK" or _raw_log contains "\"LOCK" or _raw_log contains "\"MERGE" or _raw_log contains "\"MKACTIVITY" or _raw_log contains "\"MKCALENDAR" or _raw_log contains "\"MKCOL" or _raw_log contains "\"MKREDIRECTREF" or _raw_log contains "\"MKWORKSPACE" or _raw_log contains "\"MOVE" or _raw_log contains "\"OPTIONS" or _raw_log contains "\"ORDERPATCH" or _raw_log contains "\"PATCH" or _raw_log contains "\"POST" or _raw_log contains "\"PRI" or _raw_log contains "\"PROPFIND" or _raw_log contains "\"PROPPATCH" or _raw_log contains "\"PUT" or _raw_log contains "\"REBIND" or _raw_log contains "\"REPORT" or _raw_log contains "\"SEARCH" or _raw_log contains "\"TRACE" or _raw_log contains "\"UNBIND" or _raw_log contains "\"UNCHECKOUT" or _raw_log contains "\"UNLINK" or _raw_log contains "\"UNLOCK" or _raw_log contains "\"UPDATE" or _raw_log contains "\"UPDATEREDIRECTREF" or _raw_log contains "\"VERSION_CONTROL"
+// Extract fields
+| alter Username = arrayindex(regextract(_raw_log,"(\S+)\s\[\d+\/"),0),
+ http_method = arrayindex(regextract(_raw_log,"]\s*\"([A-Z]+)\s*"),0),
+ http_url = arrayindex(regextract(_raw_log,"]\s*\"[A-Z]+\s*([^\s^\"]+)\s*"),0),
+ http_response_code = arrayindex(regextract(_raw_log,"]\s*\"[^\"]+\"\s*(\d+)"),0),
+ bytes_size = arrayindex(regextract(_raw_log,"]\s*\"[^\"]+\"\s*[\d|-]+\s(\d+)"),0),
+ Referrer = arrayindex(regextract(_raw_log,"]\s*\"[^\"]+\"\s*[\d|-]+\s[\d|-]+\s\"(http[^\"]+)\""),0),
+ User_agent = arrayindex(regextract(_raw_log,"]\s*\"[^\"]+\"\s*[\d|-]+\s[\d|-]+\s\"[^\"]*\"\s\"([^\"]+)\""),0),
// extract source_ip
- sourceipv4 = arrayindex(regextract(_raw_log, "(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s"),0),
- sourceipv6 = arrayindex(regextract(_raw_log,"(\w+\:\w+\:\w+\:\w+\:\w+\:\w+\:\w+\:\w+)"),0)
-| alter sourceip = coalesce(sourceipv4,sourceipv6)
-// end extract source_ip
-| alter
- xdm.source.user.username = username,
- xdm.network.http.url = requested_line,
- xdm.network.http.referrer = Referrer,
- xdm.target.sent_bytes = to_number(bytes_size),
- xdm.source.ipv4 = sourceip,
- xdm.network.http.method=httpMethod,
- xdm.network.http.response_code = statuscode,
- xdm.source.user_agent=User_agent;
\ No newline at end of file
+ sourceipv4 = arrayindex(regextract(_raw_log, "]:\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s"),0),
+ sourceipv6 = arrayindex(regextract(_raw_log, "]:\s*(\w+\:\w+\:\w+\:\w+\:\w+\:\w+\:\w+\:\w+)"),0)
+// // end extract source_ip
+| alter xdm.source.user.username = if(Username = "-", null, Username),
+ xdm.network.http.url = http_url,
+ xdm.network.http.referrer = Referrer,
+ xdm.target.sent_bytes = to_number(bytes_size),
+ xdm.source.ipv4 = sourceipv4,
+ xdm.source.ipv6 = sourceipv6,
+ xdm.network.http.method = http_method,
+ xdm.network.http.response_code = http_response_code,
+ xdm.source.user_agent = if(User_agent = "-", null, User_agent),
+ xdm.observer.name = arrayindex(regextract(_raw_log, "\s\d+:\d+:\d+\s([\S]+)\s"),0),
+ xdm.event.type = "Access Logs";
\ No newline at end of file
diff --git a/Packs/ApacheWebServer/README.md b/Packs/ApacheWebServer/README.md
index 091dab762034..a564787ec278 100644
--- a/Packs/ApacheWebServer/README.md
+++ b/Packs/ApacheWebServer/README.md
@@ -1,5 +1,5 @@
### Broker VM
-You will need to use the information described [here](https://docs.paloaltonetworks.com/cortex/cortex-xdr/cortex-xdr-pro-admin/broker-vm/set-up-broker-vm/configure-your-broker-vm).\
+You will need to use the information described [here](https://docs-cortex.paloaltonetworks.com/r/Cortex-XDR/Cortex-XDR-Pro-Administrator-Guide/Configure-the-Broker-VM).\
You can configure the specific vendor and product for this instance.
1. Navigate to **Settings** -> **Configuration** -> **Data Broker** -> **Broker VMs**.
2. Right-click, and select **Syslog Collector** -> **Configure**.
diff --git a/Packs/ApacheWebServer/ReleaseNotes/1_0_6.md b/Packs/ApacheWebServer/ReleaseNotes/1_0_6.md
new file mode 100644
index 000000000000..f7e6ad8fc6bc
--- /dev/null
+++ b/Packs/ApacheWebServer/ReleaseNotes/1_0_6.md
@@ -0,0 +1,4 @@
+#### Modeling Rules
+##### Apache Web Server
+- Modified the filter of the "Error Logs".
+- Modified the "Access Logs" modeling rule.
diff --git a/Packs/ApacheWebServer/pack_metadata.json b/Packs/ApacheWebServer/pack_metadata.json
index cf2b5ecc475a..d68c495d622c 100644
--- a/Packs/ApacheWebServer/pack_metadata.json
+++ b/Packs/ApacheWebServer/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Apache Web Server",
"description": "Modeling Rules for the Apache Web Server logs collector",
"support": "xsoar",
- "currentVersion": "1.0.5",
+ "currentVersion": "1.0.6",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/ApiModules/.pack-ignore b/Packs/ApiModules/.pack-ignore
index 3354bddf7981..8fab122e3a83 100644
--- a/Packs/ApiModules/.pack-ignore
+++ b/Packs/ApiModules/.pack-ignore
@@ -4,3 +4,5 @@ ignore=RM106
[known_words]
TLP
ansii
+pylint
+linter
\ No newline at end of file
diff --git a/Packs/ApiModules/.secrets-ignore b/Packs/ApiModules/.secrets-ignore
index 6ec373c7db8f..58f36742734e 100644
--- a/Packs/ApiModules/.secrets-ignore
+++ b/Packs/ApiModules/.secrets-ignore
@@ -1756,4 +1756,6 @@ https://ais2.cisa.dhs.gov
10.2.25.44
10.2.25.255
255.255.254.0
-10.2.24.0
\ No newline at end of file
+10.2.24.0
+1111:1111::/28
+https://www.test.com/ipranges/goog.json
\ No newline at end of file
diff --git a/Packs/ApiModules/ReleaseNotes/2_2_12.md b/Packs/ApiModules/ReleaseNotes/2_2_12.md
new file mode 100644
index 000000000000..6dc40892253b
--- /dev/null
+++ b/Packs/ApiModules/ReleaseNotes/2_2_12.md
@@ -0,0 +1,5 @@
+
+#### Scripts
+##### TAXII2ApiModule
+- Added support for limiting the number of fetched indicators.
+- Improved implementation for polling collections.
\ No newline at end of file
diff --git a/Packs/ApiModules/ReleaseNotes/2_2_13.md b/Packs/ApiModules/ReleaseNotes/2_2_13.md
new file mode 100644
index 000000000000..36f227f97165
--- /dev/null
+++ b/Packs/ApiModules/ReleaseNotes/2_2_13.md
@@ -0,0 +1,4 @@
+
+#### Scripts
+##### TAXII2ApiModule
+- Fixed an issue where getting indicators failed to parse empty response.
\ No newline at end of file
diff --git a/Packs/ApiModules/ReleaseNotes/2_2_14.md b/Packs/ApiModules/ReleaseNotes/2_2_14.md
new file mode 100644
index 000000000000..45554b0ab9f5
--- /dev/null
+++ b/Packs/ApiModules/ReleaseNotes/2_2_14.md
@@ -0,0 +1,3 @@
+#### Scripts
+##### JSONFeedApiModule
+- Fixed an issue where in some cases the script would crash due to memory issues.
\ No newline at end of file
diff --git a/Packs/ApiModules/ReleaseNotes/2_2_15.md b/Packs/ApiModules/ReleaseNotes/2_2_15.md
new file mode 100644
index 000000000000..c51a59ce9506
--- /dev/null
+++ b/Packs/ApiModules/ReleaseNotes/2_2_15.md
@@ -0,0 +1,3 @@
+#### Scripts
+##### JSONFeedApiModule
+- Fixed an issue with pylint error coming from xsoar-linter.
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule.py b/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule.py
index 1acd7d1e77af..3a9bfb3b0cf3 100644
--- a/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule.py
+++ b/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule.py
@@ -18,14 +18,14 @@ def validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws
raise DemistoException('Role session name is required when using role ARN.')
-def extract_session_from_access(access_key, session_token):
+def extract_session_from_secret(secret_key, session_token):
"""
- Extract the session token from the access_key field.
+ Extract the session token from the secret_key field.
"""
- if access_key and '@@@' in access_key and not session_token:
- return access_key.split('@@@')[0], access_key.split('@@@')[1]
+ if secret_key and '@@@' in secret_key and not session_token:
+ return secret_key.split('@@@')[0], secret_key.split('@@@')[1]
else:
- return access_key, session_token
+ return secret_key, session_token
class AWSClient:
@@ -40,7 +40,7 @@ def __init__(self, aws_default_region, aws_role_arn, aws_role_session_name, aws_
self.aws_role_session_duration = aws_role_session_duration
self.aws_role_policy = aws_role_policy
self.aws_access_key_id = aws_access_key_id
- self.aws_secret_access_key, self.aws_session_token = extract_session_from_access(aws_secret_access_key, aws_session_token)
+ self.aws_secret_access_key, self.aws_session_token = extract_session_from_secret(aws_secret_access_key, aws_session_token)
self.verify_certificate = verify_certificate
proxies = handle_proxy(proxy_param_name='proxy', checkbox_default_value=False)
@@ -101,7 +101,7 @@ def aws_session(self, service, region=None, role_arn=None, role_session_name=Non
if not self.aws_access_key_id:
sts_client = boto3.client('sts', config=self.config, verify=self.verify_certificate,
- region_name=self.aws_default_region)
+ region_name=region if region else self.aws_default_region)
sts_response = sts_client.assume_role(**kwargs)
client = boto3.client(
service_name=service,
@@ -127,7 +127,7 @@ def aws_session(self, service, region=None, role_arn=None, role_session_name=Non
sts_response = sts_client.assume_role(**kwargs)
client = boto3.client(
service_name=service,
- region_name=self.aws_default_region,
+ region_name=region if region else self.aws_default_region,
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken'],
diff --git a/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule_test.py b/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule_test.py
index b89f0f5c5dd4..be76ae92ec55 100644
--- a/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule_test.py
+++ b/Packs/ApiModules/Scripts/AWSApiModule/AWSApiModule_test.py
@@ -178,9 +178,9 @@ def test_AWSClient_without_session_token():
print('failed to create session:' + Exception)
-@pytest.mark.parametrize('access_key, session_token, expected',
+@pytest.mark.parametrize('secret_key, session_token, expected',
[
- ('access_key@@@session_token', None, ('access_key', 'session_token')),
+ ('secret_key@@@session_token', None, ('secret_key', 'session_token')),
('test1', None, ('test1', None)),
('test1', 'test2', ('test1', 'test2')),
('test1@@@test2', 'test3', ('test1@@@test2', 'test3')),
@@ -188,17 +188,17 @@ def test_AWSClient_without_session_token():
(None, '', (None, '')),
(None, None, (None, None))
])
-def test_extract_session_from_access(access_key, session_token, expected):
+def test_extract_session_from_secret(secret_key, session_token, expected):
"""
Given
- - Access key and session token
+ - Secret key and session token
When
- - Calling the extract_session_from_access function
+ - Calling the extract_session_from_secret function
Then
- - Check that the function returns the expected access key and session token
+ - Check that the function returns the expected secret key and session token
"""
- result = extract_session_from_access(access_key, session_token)
+ result = extract_session_from_secret(secret_key, session_token)
assert result == expected
diff --git a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
index 0a30742b0c84..6df0ed3e2367 100644
--- a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
+++ b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
@@ -10,7 +10,7 @@
urllib3.disable_warnings()
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
-XSOAR_RESOLVED_STATUS = {
+XSOAR_RESOLVED_STATUS_TO_XDR = {
'Other': 'resolved_other',
'Duplicate': 'resolved_duplicate',
'False Positive': 'resolved_false_positive',
@@ -147,42 +147,6 @@ def __init__(self, base_url: str, headers: dict, timeout: int = 120, proxy: bool
super().__init__(base_url=base_url, headers=headers, proxy=proxy, verify=verify)
self.timeout = timeout
- def update_incident(self, incident_id, status=None, assigned_user_mail=None, assigned_user_pretty_name=None, severity=None,
- resolve_comment=None, unassign_user=None):
- update_data = {}
-
- if unassign_user and (assigned_user_mail or assigned_user_pretty_name):
- raise ValueError("Can't provide both assignee_email/assignee_name and unassign_user")
- if unassign_user:
- update_data['assigned_user_mail'] = 'none'
-
- if assigned_user_mail:
- update_data['assigned_user_mail'] = assigned_user_mail
-
- if assigned_user_pretty_name:
- update_data['assigned_user_pretty_name'] = assigned_user_pretty_name
-
- if status:
- update_data['status'] = status
-
- if severity:
- update_data['manual_severity'] = severity
-
- if resolve_comment:
- update_data['resolve_comment'] = resolve_comment
-
- request_data = {
- 'incident_id': incident_id,
- 'update_data': update_data,
- }
-
- self._http_request(
- method='POST',
- url_suffix='/incidents/update_incident/',
- json_data={'request_data': request_data},
- timeout=self.timeout
- )
-
def get_endpoints(self,
endpoint_id_list=None,
dist_name=None,
@@ -248,6 +212,26 @@ def get_endpoints(self,
endpoints = reply.get('reply').get('endpoints', [])
return endpoints
+ def set_endpoints_alias(self, filters: list[dict[str, str]], new_alias_name: str | None) -> dict: # pragma: no cover
+ """
+ This func is used to set the alias name of an endpoint.
+
+ args:
+ filters: list of filters to get the endpoints
+ new_alias_name: the new alias name to set
+
+ returns: dict of the response(True if success else error message)
+ """
+
+ request_data = {'filters': filters, 'alias': new_alias_name}
+
+ return self._http_request(
+ method='POST',
+ url_suffix='/endpoints/update_agent_name/',
+ json_data={'request_data': request_data},
+ timeout=self.timeout,
+ )
+
def isolate_endpoint(self, endpoint_id, incident_id=None):
request_data = {
'endpoint_id': endpoint_id,
@@ -1094,6 +1078,9 @@ def get_script_execution_result_files(self, action_id: str, endpoint_id: str) ->
timeout=self.timeout,
)
link = response.get('reply', {}).get('DATA')
+ # If the link is None, the API call will result in a 'Connection Timeout Error', so we raise an exception
+ if not link:
+ raise DemistoException(f'Failed getting response files for {action_id=}, {endpoint_id=}')
return self._http_request(
method='GET',
full_url=link,
@@ -1308,14 +1295,20 @@ def run_polling_command(client: CoreClient,
polling_value: List,
stop_polling: bool = False) -> CommandResults:
"""
- args: demito args
- cmd: the command to schedule by after the current command
- command_function: the function which is runs the actual command
- command_decision_field: the field in the response based on it what the command status and if the command occurred
- results_function: the function which we are polling on and retrieves the status of the command_function
- polling_field: the field which from the result of the results_function which we are interested in its value
- polling_value: list of values of the polling_field we want to check
- stop_polling: yes - polling_value is stopping, not - polling_value not stopping
+ Arguments:
+ args: args
+ cmd: the scheduled command's name (as appears in the yml file) to run in the following polling.
+ command_function: the pythonic function that executes the command.
+ command_decision_field: the field that is retrieved from the command_function's response that indicates
+ the command_function status.
+ results_function: the pythonic result function which we want to poll on.
+ polling_field: the field that is retrieved from the results_function's response and indicates the polling status.
+ polling_value: list of values of the polling_field we want to check. The list can contain values to stop or
+ continue polling on, not both.
+ stop_polling: True - polling_value stops the polling. False - polling_value does not stop the polling.
+
+ Return:
+ command_results(CommandResults)
"""
ScheduledCommand.raise_error_if_not_supported()
@@ -1799,6 +1792,63 @@ def get_endpoints_command(client, args):
)
+def endpoint_alias_change_command(client: CoreClient, **args) -> CommandResults:
+ # get arguments
+ endpoint_id_list = argToList(args.get('endpoint_id_list'))
+ dist_name_list = argToList(args.get('dist_name'))
+ ip_list = argToList(args.get('ip_list'))
+ group_name_list = argToList(args.get('group_name'))
+ platform_list = argToList(args.get('platform'))
+ alias_name_list = argToList(args.get('alias_name'))
+ isolate = args.get('isolate')
+ hostname_list = argToList(args.get('hostname'))
+ status = args.get('status')
+ scan_status = args.get('scan_status')
+ username_list = argToList(args.get('username'))
+ new_alias_name = args.get('new_alias_name')
+
+ # This is a workaround that is needed because of a specific behaviour of the system
+ # that converts an empty string to a string with double quotes.
+ if new_alias_name == '""':
+ new_alias_name = ""
+
+ first_seen_gte = arg_to_timestamp(
+ arg=args.get('first_seen_gte'),
+ arg_name='first_seen_gte'
+ )
+
+ first_seen_lte = arg_to_timestamp(
+ arg=args.get('first_seen_lte'),
+ arg_name='first_seen_lte'
+ )
+
+ last_seen_gte = arg_to_timestamp(
+ arg=args.get('last_seen_gte'),
+ arg_name='last_seen_gte'
+ )
+
+ last_seen_lte = arg_to_timestamp(
+ arg=args.get('last_seen_lte'),
+ arg_name='last_seen_lte'
+ )
+
+ # create filters
+ filters: list[dict[str, str]] = create_request_filters(
+ status=status, username=username_list, endpoint_id_list=endpoint_id_list, dist_name=dist_name_list,
+ ip_list=ip_list, group_name=group_name_list, platform=platform_list, alias_name=alias_name_list, isolate=isolate,
+ hostname=hostname_list, first_seen_gte=first_seen_gte, first_seen_lte=first_seen_lte,
+ last_seen_gte=last_seen_gte, last_seen_lte=last_seen_lte, scan_status=scan_status
+ )
+ if not filters:
+ raise DemistoException('Please provide at least one filter.')
+ # importent: the API will return True even if the endpoint does not exist, so its a good idea to check
+ # the results by a get_endpoints command
+ client.set_endpoints_alias(filters=filters, new_alias_name=new_alias_name)
+
+ return CommandResults(
+ readable_output="The endpoint alias was changed successfully.")
+
+
def unisolate_endpoint_command(client, args):
endpoint_id = args.get('endpoint_id')
incident_id = arg_to_number(args.get('incident_id'))
@@ -2040,7 +2090,7 @@ def blocklist_files_command(client, args):
headers=['added_hashes'],
headerTransform=pascalToSpace),
outputs={f'{args.get("integration_context_brand", "CoreApiModule")}.'
- f'blocklist.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list},
+ f'{args.get("prefix", "blocklist")}.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list},
raw_response=res
)
@@ -2077,7 +2127,7 @@ def allowlist_files_command(client, args):
if detailed_response:
return CommandResults(
readable_output=tableToMarkdown('Allowlist Files', res),
- outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.blocklist',
+ outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.allowlist',
outputs=res,
raw_response=res
)
@@ -2090,7 +2140,7 @@ def allowlist_files_command(client, args):
headers=['added_hashes'],
headerTransform=pascalToSpace),
outputs={f'{args.get("integration_context_brand", "CoreApiModule")}.'
- f'allowlist.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list},
+ f'{args.get("prefix", "allowlist")}.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list},
raw_response=res
)
@@ -2329,28 +2379,6 @@ def get_indicators_context(incident):
return file_context, process_context, domain_context, ip_context
-def update_incident_command(client, args):
- incident_id = args.get('incident_id')
- assigned_user_mail = args.get('assigned_user_mail')
- assigned_user_pretty_name = args.get('assigned_user_pretty_name')
- status = args.get('status')
- severity = args.get('manual_severity')
- unassign_user = args.get('unassign_user') == 'true'
- resolve_comment = args.get('resolve_comment')
-
- client.update_incident(
- incident_id=incident_id,
- assigned_user_mail=assigned_user_mail,
- assigned_user_pretty_name=assigned_user_pretty_name,
- unassign_user=unassign_user,
- status=status,
- severity=severity,
- resolve_comment=resolve_comment
- )
-
- return f'Incident {incident_id} has been updated', None, None
-
-
def endpoint_command(client, args):
endpoint_id_list = argToList(args.get('id'))
endpoint_ip_list = argToList(args.get('ip'))
@@ -2586,14 +2614,13 @@ def handle_outgoing_issue_closure(remote_args):
current_remote_status = remote_args.data.get('status') if remote_args.data else None
# force closing remote incident only if:
# The XSOAR incident is closed
- # and the closingUserId was changed
# and the remote incident isn't already closed
if remote_args.inc_status == 2 and \
- update_args.get('closingUserId') and \
current_remote_status not in XDR_RESOLVED_STATUS_TO_XSOAR:
- update_args['resolve_comment'] = update_args.get('closeNotes', '')
- update_args['status'] = XSOAR_RESOLVED_STATUS.get(update_args.get('closeReason', 'Other'))
+ if close_notes := update_args.get('closeNotes'):
+ update_args['resolve_comment'] = close_notes
+ update_args['status'] = XSOAR_RESOLVED_STATUS_TO_XDR.get(update_args.get('closeReason', 'Other'))
demisto.debug(f"Closing Remote incident with status {update_args['status']}")
@@ -2606,33 +2633,6 @@ def get_update_args(remote_args):
return remote_args.delta
-def update_remote_system_command(client, args):
- remote_args = UpdateRemoteSystemArgs(args)
-
- if remote_args.delta:
- demisto.debug(f'Got the following delta keys {str(list(remote_args.delta.keys()))} to update'
- f'incident {remote_args.remote_incident_id}')
- try:
- if remote_args.incident_changed:
- update_args = get_update_args(remote_args)
-
- update_args['incident_id'] = remote_args.remote_incident_id
- demisto.debug(f'Sending incident with remote ID [{remote_args.remote_incident_id}]\n')
- update_incident_command(client, update_args)
-
- else:
- demisto.debug(f'Skipping updating remote incident fields [{remote_args.remote_incident_id}] '
- f'as it is not new nor changed')
-
- return remote_args.remote_incident_id
-
- except Exception as e:
- demisto.debug(f"Error in outgoing mirror for incident {remote_args.remote_incident_id} \n"
- f"Error message: {str(e)}")
-
- return remote_args.remote_incident_id
-
-
def get_distribution_versions_command(client, args):
versions = client.get_distribution_versions()
@@ -3347,6 +3347,7 @@ def create_request_filters(
first_seen_lte=None,
last_seen_gte=None,
last_seen_lte=None,
+ scan_status=None,
):
filters = []
@@ -3448,6 +3449,13 @@ def create_request_filters(
'value': last_seen_lte
})
+ if scan_status:
+ filters.append({
+ 'field': 'scan_status',
+ 'operator': 'IN',
+ 'value': [scan_status]
+ })
+
return filters
diff --git a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
index 929c864229df..90a870af0913 100644
--- a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
+++ b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
@@ -16,14 +16,6 @@
)
-def test_client_update_incident():
- with pytest.raises(ValueError, match="Can't provide both assignee_email/assignee_name and unassign_user"):
- test_client.update_incident(incident_id='1',
- status='new',
- unassign_user="user",
- assigned_user_mail="user")
-
-
Core_URL = 'https://api.xdrurl.com'
''' HELPER FUNCTIONS '''
@@ -68,25 +60,6 @@ def return_extra_data_result(*args):
return {}, {}, {"incident": incident_from_extra_data_command}
-def test_update_incident(requests_mock):
- from CoreIRApiModule import update_incident_command, CoreClient
-
- update_incident_response = load_test_data('./test_data/update_incident.json')
- requests_mock.post(f'{Core_URL}/public_api/v1/incidents/update_incident/', json=update_incident_response)
-
- client = CoreClient(
- base_url=f'{Core_URL}/public_api/v1', headers={}
- )
- args = {
- 'incident_id': '1',
- 'status': 'new'
- }
- readable_output, outputs, _ = update_incident_command(client, args)
-
- assert outputs is None
- assert readable_output == 'Incident 1 has been updated'
-
-
def test_get_endpoints(requests_mock):
from CoreIRApiModule import get_endpoints_command, CoreClient
@@ -104,7 +77,7 @@ def test_get_endpoints(requests_mock):
res = get_endpoints_command(client, args)
assert get_endpoints_response.get('reply').get('endpoints') == \
- res.outputs['CoreApiModule.Endpoint(val.endpoint_id == obj.endpoint_id)']
+ res.outputs['CoreApiModule.Endpoint(val.endpoint_id == obj.endpoint_id)']
def test_get_all_endpoints_using_limit(requests_mock):
@@ -2286,49 +2259,6 @@ def test_get_endpoint_properties(endpoint, expected):
assert status == expected
-def test_get_update_args_when_getting_close_reason():
- """
- Given:
- - closingUserId from update_remote_system
- When
- - An incident in XSOAR was closed with "Duplicate" as a close reason.
- Then
- - The status that the incident is getting to be mirrored out is "resolved_duplicate"
- """
- from CoreIRApiModule import get_update_args
- from CommonServerPython import UpdateRemoteSystemArgs
- remote_args = UpdateRemoteSystemArgs({
- 'delta': {
- 'closeReason': 'Duplicate', 'closeNote': 'Closed as Duplicate.',
- 'closingUserId': 'Admin'},
- 'data': {'status': 'new'},
- 'status': 2}
- )
- update_args = get_update_args(remote_args)
- assert update_args.get('status') == 'resolved_duplicate'
- assert update_args.get('closeNote') == 'Closed as Duplicate.'
-
-
-def test_get_update_args_when_not_getting_closing_user_id():
- """
- Given:
- - delta from update_remote_system
- When
- - An incident in XSOAR was closed and update_remote_system has occurred.
- Then
- - Because There is no change in the "closingUserId" value, the status should not change.
- """
- from CoreIRApiModule import get_update_args
- from CommonServerPython import UpdateRemoteSystemArgs
- remote_args = UpdateRemoteSystemArgs({
- 'delta': {'someChange': '1234'},
- 'data': {'status': 'new'},
- 'status': 2}
- )
- update_args = get_update_args(remote_args)
- assert update_args.get('status') is None
-
-
def test_remove_blocklist_files_command(requests_mock):
"""
Given:
@@ -3128,3 +3058,48 @@ def test_add_or_remove_tag_endpoint_command(requests_mock, args, expected_filter
'tag': 'test'
}
}
+
+
+excepted_output_1 = {'filters': [{'field': 'endpoint_status',
+ 'operator': 'IN', 'value': ['connected']}], 'new_alias_name': 'test'}
+excepted_output_2 = {'filters': [{'field': 'endpoint_status',
+ 'operator': 'IN', 'value': ['connected']}], 'new_alias_name': ""}
+
+
+@pytest.mark.parametrize('input, expected_output', [("test", excepted_output_1),
+ ('""', excepted_output_2)])
+def test_endpoint_alias_change_command__diffrent_alias_new_names(mocker, input, expected_output):
+ """
+ Given:
+ - valid new alias name as string - empty new alias name (due to xsoar limitation,
+ represented by a string of double quote)
+
+ When:
+ - executing the endpoint-alias-change command
+
+ Then:
+ - Makes sure the request body is created correctly.
+
+ """
+ client = CoreClient(base_url=f'{Core_URL}/public_api/v1/', headers={})
+ mocker_set = mocker.patch.object(client, 'set_endpoints_alias')
+ from CoreIRApiModule import endpoint_alias_change_command
+ endpoint_alias_change_command(client=client, status="connected", new_alias_name=input)
+ assert mocker_set.call_args[1] == expected_output
+
+
+def test_endpoint_alias_change_command__no_filters(mocker):
+ """
+ Given:
+ - command withot endpoint filters
+ when:
+ - executing the endpoint-alias-change command
+ then:
+ - make sure the correct error message wil raise.
+ """
+ client = CoreClient(base_url=f'{Core_URL}/public_api/v1/', headers={})
+ mocker.patch.object(client, 'set_endpoints_alias')
+ from CoreIRApiModule import endpoint_alias_change_command
+ with pytest.raises(Exception) as e:
+ endpoint_alias_change_command(client=client, new_alias_name='test')
+ assert e.value.message == "Please provide at least one filter."
diff --git a/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule.py b/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule.py
index a030e14e37ce..7d6097bd09b8 100644
--- a/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule.py
+++ b/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule.py
@@ -131,6 +131,7 @@ class IAMUserProfile:
CREATE_INCIDENT_TYPE = 'User Profile - Create'
UPDATE_INCIDENT_TYPE = 'User Profile - Update'
DISABLE_INCIDENT_TYPE = 'User Profile - Disable'
+ ENABLE_INCIDENT_TYPE = 'User Profile - Enable'
def __init__(self, user_profile, mapper: str, incident_type: str, user_profile_delta=None):
self._user_profile = safe_load_json(user_profile)
@@ -241,7 +242,8 @@ def map_object(self, mapper_name, incident_type, map_old_data: bool = False):
return {k: v for k, v in self.mapped_user_profile.items() if k != 'olduserdata'}
return self.mapped_user_profile
if incident_type not in [IAMUserProfile.CREATE_INCIDENT_TYPE, IAMUserProfile.UPDATE_INCIDENT_TYPE,
- IAMUserProfile.DISABLE_INCIDENT_TYPE]:
+ IAMUserProfile.DISABLE_INCIDENT_TYPE,
+ IAMUserProfile.ENABLE_INCIDENT_TYPE]:
raise DemistoException('You must provide a valid incident type to the map_object function.')
if not self._user_profile:
raise DemistoException('You must provide the user profile data.')
@@ -299,6 +301,14 @@ def set_user_is_already_disabled(self, details):
details=details
)
+ def set_user_is_already_enabled(self, details):
+ self.set_result(
+ action=IAMActions.ENABLE_USER,
+ skip=True,
+ skip_reason='User is already enabled.',
+ details=details
+ )
+
class IAMUserAppData:
""" Holds user attributes retrieved from an application.
@@ -448,6 +458,50 @@ def disable_user(self, client, args):
return user_profile
+ def enable_user(self, client, args):
+ """ Enables a user in the application and updates the user profile object with the updated data.
+ If not found, the command will be skipped.
+
+ :param client: (Client) The integration Client object that implements get_user(),
+ enable_user() and handle_exception methods
+ :param args: (dict) The `iam-enable-user` command arguments
+ :return: (IAMUserProfile) The user profile object.
+ """
+ user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=self.mapper_out,
+ incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE)
+ if not self.is_enable_enabled:
+ user_profile.set_result(action=IAMActions.ENABLE_USER,
+ skip=True,
+ skip_reason='Command is disabled.')
+ else:
+ try:
+ iam_attribute, iam_attribute_val = user_profile.get_first_available_iam_user_attr(
+ self.get_user_iam_attrs)
+ user_app_data = client.get_user(iam_attribute, iam_attribute_val)
+ if not user_app_data:
+ _, error_message = IAMErrors.USER_DOES_NOT_EXIST
+ user_profile.set_result(action=IAMActions.ENABLE_USER,
+ skip=True,
+ skip_reason=error_message)
+ else:
+ if not user_app_data.is_active:
+ enabled_user = client.enable_user(user_app_data.id)
+ user_profile.set_result(
+ action=IAMActions.ENABLE_USER,
+ active=True,
+ iden=enabled_user.id,
+ email=user_profile.get_attribute('email') or user_app_data.email,
+ username=enabled_user.username,
+ details=enabled_user.full_data
+ )
+ else:
+ user_profile.set_user_is_already_enabled(user_app_data.full_data)
+
+ except Exception as e:
+ client.handle_exception(user_profile, e, IAMActions.ENABLE_USER)
+
+ return user_profile
+
def create_user(self, client, args):
""" Creates a user in the application and updates the user profile object with the data.
If a user in the app already holds the email in the given user profile, updates
diff --git a/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule_test.py b/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule_test.py
index ff0e3f7dce23..0aaacc2eb753 100644
--- a/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule_test.py
+++ b/Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule_test.py
@@ -1,11 +1,12 @@
from IAMApiModule import *
+import pytest
+from copy import deepcopy
APP_USER_OUTPUT = {
"user_id": "mock_id",
"user_name": "mock_user_name",
"first_name": "mock_first_name",
"last_name": "mock_last_name",
- "active": "true",
"email": "testdemisto2@paloaltonetworks.com"
}
@@ -240,3 +241,87 @@ def test_disable_user_command__non_existing_user(mocker):
assert outputs.get('success') is True
assert outputs.get('skipped') is True
assert outputs.get('reason') == IAMErrors.USER_DOES_NOT_EXIST[1]
+
+
+@pytest.mark.parametrize("not_existing", (" ", "testdemisto2@paloaltonetworks.com"))
+def test_enable_user_command__non_existing_user(mocker, not_existing):
+ """
+ Given:
+ - An app client object
+ - A user-profile argument that contains an email of a user
+ When:
+ - create-if-not-exists parameter is unchecked
+ - The user does not exist in the application
+ - Calling function enable_user_command
+ Then:
+ - Ensure the command is considered successful and skipped
+ """
+ client = MockCLient()
+ args = {'user-profile': {'email': not_existing}}
+
+ mocker.patch.object(client, 'get_user', return_value=None)
+
+ user_profile = IAMCommand().enable_user(client, args)
+ outputs = get_outputs_from_user_profile(user_profile)
+
+ assert outputs.get('action') == IAMActions.ENABLE_USER
+ assert outputs.get('success') is True
+ assert outputs.get('skipped') is True
+ assert outputs.get('reason') == IAMErrors.USER_DOES_NOT_EXIST[1]
+
+
+@pytest.mark.parametrize("given_name, is_correct", [("mock_given_name", True), ("wrong_name", False)])
+def test_enable_user_command__with_wrong_and_correct_given_name(mocker, given_name, is_correct):
+ """
+ Given:
+ - An app client object
+ - A user-profile argument that contains an email of a user and a given name
+ When:
+ - The given name is correct and matches an existing user
+        - The given name is wrong and does not match an existing user
+ Then:
+ - That name will be saved under the givenname section.
+ """
+ client = MockCLient()
+ args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': given_name}}
+ disabled_user_data = IAMUserAppData("mock_userid", "mock_username", False, {"user_id": "mock_id",
+ "user_name": "mock_user_name",
+ "first_name": given_name,
+ "last_name": "mock_last_name",
+ "email": "testdemisto2@paloaltonetworks.com"})
+ enabled_user_data = deepcopy(disabled_user_data)
+ enabled_user_data.is_active = True
+ mocker.patch.object(client, 'get_user', return_value=disabled_user_data)
+ mocker.patch.object(client, 'enable_user', return_value=enabled_user_data)
+
+ user_profile = IAMCommand().enable_user(client, args)
+ outputs = get_outputs_from_user_profile(user_profile)
+
+ assert outputs.get('action') == IAMActions.ENABLE_USER
+ assert outputs.get('details', {}).get('first_name') == given_name
+
+
+@pytest.mark.parametrize("input", [{'user-profile': {'email': ""}}, {'user-profile': {}}])
+def test_enable_user_command__empty_json_as_argument(input):
+ """
+ Given:
+ - An app client object
+ - A user-profile argument that contains an empty json with no user profile
+ When:
+ - Calling function enable_user_command
+ Then:
+ - Ensure the command will return the correct error
+ """
+ class NewMockClient():
+ @staticmethod
+ def handle_exception(user_profile: IAMUserProfile,
+ e: Union[DemistoException, Exception],
+ action: IAMActions):
+ raise e
+
+ client = NewMockClient()
+ iamcommand = IAMCommand(get_user_iam_attrs=['id', 'username', 'email'])
+
+ with pytest.raises(DemistoException) as e:
+ iamcommand.enable_user(client, input)
+ assert e.value.message == ("Your user profile argument must contain at least one attribute that is mapped into one of the following attributes in the outgoing mapper: ['id', 'username', 'email']") # noqa: E501
diff --git a/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule.py b/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule.py
index 3d8c3153c755..e03665d446b8 100644
--- a/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule.py
+++ b/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule.py
@@ -1,3 +1,4 @@
+# pylint: disable=E9010
from CommonServerPython import *
''' IMPORTS '''
@@ -246,11 +247,16 @@ def fetch_indicators_command(client: Client, indicator_type: str, feedTags: list
item = {indicator_field: item}
indicator_value = item.get(indicator_field)
+ if indicator_value is None:
+ continue
if indicator_value not in indicators_values:
indicators_values_indexes[indicator_value] = len(indicators_values)
indicators_values.add(indicator_value)
else:
- indicators[indicators_values_indexes[indicator_value]]['rawJSON']['service'] += f", {service_name}"
+ service = indicators[indicators_values_indexes[indicator_value]].get('rawJSON', {}).get('service', '')
+ if service and service_name not in service.split(','):
+ service_name += f', {service}'
+ indicators[indicators_values_indexes[indicator_value]]['rawJSON']['service'] = service_name
continue
indicators.extend(
diff --git a/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule_test.py b/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule_test.py
index f4d93eddd941..c1a29e84fbf3 100644
--- a/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule_test.py
+++ b/Packs/ApiModules/Scripts/JSONFeedApiModule/JSONFeedApiModule_test.py
@@ -1,5 +1,6 @@
from JSONFeedApiModule import Client, fetch_indicators_command, jmespath, get_no_update_value
from CommonServerPython import *
+import pytest
import requests_mock
import demistomock as demisto
@@ -25,33 +26,62 @@ def test_json_feed_no_config():
assert len(jmespath.search(expression="[].rawJSON.service", data=indicators)) == 1117
-def test_json_feed_with_config():
+CONFIG_PARAMETERS = [
+ (
+ {
+ 'AMAZON': {
+ 'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json',
+ 'extractor': "prefixes[?service=='AMAZON']",
+ 'indicator': 'ip_prefix',
+ 'indicator_type': FeedIndicatorType.CIDR,
+ 'fields': ['region', 'service']
+ }
+ },
+ 1117,
+ 0
+ ),
+ (
+ {
+ 'AMAZON': {
+ 'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json',
+ 'extractor': "prefixes[?service=='AMAZON']",
+ 'indicator': 'ip_prefix',
+ 'indicator_type': FeedIndicatorType.CIDR,
+ 'fields': ['region', 'service']
+ },
+ 'CLOUDFRONT': {
+ 'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json',
+ 'extractor': "prefixes[?service=='CLOUDFRONT']",
+ 'indicator': 'ip_prefix',
+ 'indicator_type': FeedIndicatorType.CIDR,
+ 'fields': ['region', 'service']
+ }
+ },
+ 1148,
+ 36
+ )
+]
+
+
+@pytest.mark.parametrize('config, total_indicators, indicator_with_several_tags', CONFIG_PARAMETERS)
+def test_json_feed_with_config(config, total_indicators, indicator_with_several_tags):
with open('test_data/amazon_ip_ranges.json') as ip_ranges_json:
ip_ranges = json.load(ip_ranges_json)
- feed_name_to_config = {
- 'AMAZON': {
- 'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json',
- 'extractor': "prefixes[?service=='AMAZON']",
- 'indicator': 'ip_prefix',
- 'indicator_type': FeedIndicatorType.CIDR,
- 'fields': ['region', 'service']
- }
- }
-
with requests_mock.Mocker() as m:
m.get('https://ip-ranges.amazonaws.com/ip-ranges.json', json=ip_ranges)
client = Client(
url='https://ip-ranges.amazonaws.com/ip-ranges.json',
credentials={'username': 'test', 'password': 'test'},
- feed_name_to_config=feed_name_to_config,
+ feed_name_to_config=config,
insecure=True
)
indicators, _ = fetch_indicators_command(client=client, indicator_type='CIDR', feedTags=['test'],
auto_detect=False)
- assert len(jmespath.search(expression="[].rawJSON.service", data=indicators)) == 1117
+ assert len(jmespath.search(expression="[].rawJSON.service", data=indicators)) == total_indicators
+ assert len([i for i in indicators if ',' in i.get('rawJSON').get('service', '')]) == indicator_with_several_tags
def test_json_feed_with_config_mapping():
@@ -286,3 +316,37 @@ class MockResponse:
def test_version_6_2_0(mocker):
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
+
+
+def test_fetch_indicators_command_google_ip_ranges(mocker):
+ """
+ Given
+ - indicators response from google ip feed
+
+ When
+ - Running fetch indicators command
+
+ Then
+ - Ensure that all indicators values exist and are not 'None'
+ """
+ from JSONFeedApiModule import fetch_indicators_command
+ client = Client(
+ url='',
+ headers={},
+ feed_name_to_config={
+ 'CIDR': {
+ 'url': 'https://www.test.com/ipranges/goog.json',
+ 'extractor': 'prefixes[]', 'indicator': 'ipv4Prefix', 'indicator_type': 'CIDR'
+ }
+ }
+ )
+
+ mocker.patch.object(
+ client, 'build_iterator', return_value=(
+ [{'ipv4Prefix': '1.1.1.1'}, {'ipv4Prefix': '1.2.3.4'}, {'ipv6Prefix': '1111:1111::/28'}], True
+ ),
+ )
+
+ indicators, _ = fetch_indicators_command(client, indicator_type=None, feedTags=[], auto_detect=None, limit=100)
+ for indicator in indicators:
+ assert indicator.get('value')
diff --git a/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule.py b/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule.py
index 0b52fba9241a..facc4cb62a16 100644
--- a/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule.py
+++ b/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule.py
@@ -1,3 +1,4 @@
+# pylint: disable=E9010, E9011
import traceback
import demistomock as demisto
@@ -17,6 +18,13 @@ class Scopes:
management_azure = 'https://management.azure.com/.default'
+class Resources:
+ graph = 'https://graph.microsoft.com/'
+ security_center = 'https://api.securitycenter.microsoft.com/'
+ management_azure = 'https://management.azure.com/'
+ manage_office = 'https://manage.office.com/'
+
+
# authorization types
OPROXY_AUTH_TYPE = 'oproxy'
SELF_DEPLOYED_AUTH_TYPE = 'self_deployed'
@@ -50,6 +58,10 @@ class Scopes:
'https://microsoftgraph.chinacloudapi.cn': 'cn'
}
+# Azure Managed Identities
+MANAGED_IDENTITIES_TOKEN_URL = 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01'
+MANAGED_IDENTITIES_SYSTEM_ASSIGNED = 'SYSTEM_ASSIGNED'
+
class MicrosoftClient(BaseClient):
def __init__(self, tenant_id: str = '',
@@ -58,6 +70,7 @@ def __init__(self, tenant_id: str = '',
token_retrieval_url: str = '{endpoint}/{tenant_id}/oauth2/v2.0/token',
app_name: str = '',
refresh_token: str = '',
+ refresh_token_param: Optional[str] = '',
auth_code: str = '',
scope: str = '{graph_endpoint}/.default',
grant_type: str = CLIENT_CREDENTIALS,
@@ -73,6 +86,8 @@ def __init__(self, tenant_id: str = '',
certificate_thumbprint: Optional[str] = None,
retry_on_rate_limit: bool = False,
private_key: Optional[str] = None,
+ managed_identities_client_id: Optional[str] = None,
+ managed_identities_resource_uri: Optional[str] = None,
*args, **kwargs):
"""
Microsoft Client class that implements logic to authenticate with oproxy or self deployed applications.
@@ -82,6 +97,8 @@ def __init__(self, tenant_id: str = '',
auth_id: If self deployed it's the client id, otherwise (oproxy) it's the auth id and may also
contain the token url
enc_key: If self deployed it's the client secret, otherwise (oproxy) it's the encryption key
+            refresh_token: The currently used refresh token.
+ refresh_token_param: The refresh token from the integration's parameters (i.e instance configuration).
scope: The scope of the application (only if self deployed)
resource: The resource of the application (only if self deployed)
multi_resource: Where or not module uses a multiple resources (self-deployed, auth_code grant type only)
@@ -90,6 +107,8 @@ def __init__(self, tenant_id: str = '',
self_deployed: Indicates whether the integration mode is self deployed or oproxy
certificate_thumbprint: Certificate's thumbprint that's associated to the app
private_key: Private key of the certificate
+ managed_identities_client_id: The Azure Managed Identities client id
+ managed_identities_resource_uri: The resource uri to get token for by Azure Managed Identities
retry_on_rate_limit: If the http request returns with a 429 - Rate limit reached response,
retry the request using a scheduled command.
"""
@@ -111,6 +130,7 @@ def __init__(self, tenant_id: str = '',
self.enc_key = enc_key
self.tenant_id = tenant_id
self.refresh_token = refresh_token
+ self.refresh_token_param = refresh_token_param
else:
self.token_retrieval_url = token_retrieval_url.format(tenant_id=tenant_id,
@@ -146,10 +166,18 @@ def __init__(self, tenant_id: str = '',
self.resources = resources if resources else []
self.resource_to_access_token: Dict[str, str] = {}
+ # for Azure Managed Identities purpose
+ self.managed_identities_client_id = managed_identities_client_id
+ self.managed_identities_resource_uri = managed_identities_resource_uri
+
def is_command_executed_from_integration(self):
ctx = demisto.callingContext.get('context', {})
- executed_command = ctx.get('ExecutedCommands', [{'moduleBrand': 'Scripts'}])[0]
- return executed_command.get('moduleBrand') != 'Scripts'
+ executed_commands = ctx.get('ExecutedCommands', [{'moduleBrand': 'Scripts'}])
+
+ if executed_commands:
+ return executed_commands[0].get('moduleBrand', "") != 'Scripts'
+
+ return True
def http_request(
self, *args, resp_type='json', headers=None,
@@ -306,18 +334,46 @@ def get_access_token(self, resource: str = '', scope: Optional[str] = None) -> s
return access_token
- def _oproxy_authorize(self, resource: str = '', scope: Optional[str] = None) -> Tuple[str, int, str]:
+ def _raise_authentication_error(self, oproxy_response: requests.Response):
"""
- Gets a token by authorizing with oproxy.
+ Raises an exception for authentication error with the Oproxy server.
+ Args:
+ oproxy_response: Raw response from the Oproxy server to parse.
+ """
+ msg = 'Error in authentication. Try checking the credentials you entered.'
+ try:
+ demisto.info('Authentication failure from server: {} {} {}'.format(
+ oproxy_response.status_code, oproxy_response.reason, oproxy_response.text))
+ err_response = oproxy_response.json()
+ server_msg = err_response.get('message')
+ if not server_msg:
+ title = err_response.get('title')
+ detail = err_response.get('detail')
+ if title:
+ server_msg = f'{title}. {detail}'
+ elif detail:
+ server_msg = detail
+ if server_msg:
+ msg += ' Server message: {}'.format(server_msg)
+ except Exception as ex:
+ demisto.error('Failed parsing error response - Exception: {}'.format(ex))
+ raise Exception(msg)
+
+ def _oproxy_authorize_build_request(self, headers: Dict[str, str], content: str,
+ scope: Optional[str] = None, resource: str = ''
+ ) -> requests.Response:
+ """
+ Build the Post request sent to the Oproxy server.
Args:
+ headers: The headers of the request.
+ content: The content for the request (usually contains the refresh token).
scope: A scope to add to the request. Do not use it.
resource: Resource to get.
- Returns:
- tuple: An access token, its expiry and refresh token.
+
+ Returns: The response from the Oproxy server.
+
"""
- content = self.refresh_token or self.tenant_id
- headers = self._add_info_headers()
- oproxy_response = requests.post(
+ return requests.post(
self.token_retrieval_url,
headers=headers,
json={
@@ -330,25 +386,44 @@ def _oproxy_authorize(self, resource: str = '', scope: Optional[str] = None) ->
verify=self.verify
)
+ def _oproxy_authorize(self, resource: str = '', scope: Optional[str] = None) -> Tuple[str, int, str]:
+ """
+ Gets a token by authorizing with oproxy.
+ Args:
+ scope: A scope to add to the request. Do not use it.
+ resource: Resource to get.
+ Returns:
+ tuple: An access token, its expiry and refresh token.
+ """
+ content = self.refresh_token or self.tenant_id
+ headers = self._add_info_headers()
+ oproxy_response = self._oproxy_authorize_build_request(headers, content, scope, resource)
+
if not oproxy_response.ok:
- msg = 'Error in authentication. Try checking the credentials you entered.'
- try:
- demisto.info('Authentication failure from server: {} {} {}'.format(
- oproxy_response.status_code, oproxy_response.reason, oproxy_response.text))
- err_response = oproxy_response.json()
- server_msg = err_response.get('message')
- if not server_msg:
- title = err_response.get('title')
- detail = err_response.get('detail')
- if title:
- server_msg = f'{title}. {detail}'
- elif detail:
- server_msg = detail
- if server_msg:
- msg += ' Server message: {}'.format(server_msg)
- except Exception as ex:
- demisto.error('Failed parsing error response - Exception: {}'.format(ex))
- raise Exception(msg)
+ # Try to send request to the Oproxy server with the refresh token from the integration parameters
+ # (instance configuration).
+ # Relevant for cases where the user re-generated his credentials therefore the refresh token was updated.
+ if self.refresh_token_param:
+ demisto.error('Error in authentication: Oproxy server returned error, perform a second attempt'
+ ' authorizing with the Oproxy, this time using the refresh token from the integration'
+ ' parameters (instance configuration).')
+ content = self.refresh_token_param
+ oproxy_second_try_response = self._oproxy_authorize_build_request(headers, content, scope, resource)
+
+ if not oproxy_second_try_response.ok:
+ demisto.error('Authentication failure from server (second attempt - using refresh token from the'
+                              ' integration parameters): {} {} {}'.format(oproxy_second_try_response.status_code,
+ oproxy_second_try_response.reason,
+ oproxy_second_try_response.text))
+ self._raise_authentication_error(oproxy_response)
+
+ else: # Second try succeeded
+ oproxy_response = oproxy_second_try_response
+
+ else: # no refresh token for a second auth try
+ self._raise_authentication_error(oproxy_response)
+
+ # Oproxy authentication succeeded
try:
gcloud_function_exec_id = oproxy_response.headers.get('Function-Execution-Id')
demisto.info(f'Google Cloud Function Execution ID: {gcloud_function_exec_id}')
@@ -367,6 +442,17 @@ def _get_self_deployed_token(self,
scope: Optional[str] = None,
integration_context: Optional[dict] = None
) -> Tuple[str, int, str]:
+ if self.managed_identities_client_id:
+
+ if not self.multi_resource:
+ return self._get_managed_identities_token()
+
+ expires_in = -1 # init variable as an int
+ for resource in self.resources:
+ access_token, expires_in, refresh_token = self._get_managed_identities_token(resource=resource)
+ self.resource_to_access_token[resource] = access_token
+ return '', expires_in, refresh_token
+
if self.grant_type == AUTHORIZATION_CODE:
if not self.multi_resource:
return self._get_self_deployed_token_auth_code(refresh_token, scope=scope)
@@ -484,6 +570,35 @@ def _get_self_deployed_token_auth_code(
return access_token, expires_in, refresh_token
+ def _get_managed_identities_token(self, resource=None):
+ """
+ Gets a token based on the Azure Managed Identities mechanism
+ in case user was configured the Azure VM and the other Azure resource correctly
+ """
+ try:
+            # system-assigned identities are restricted to one per resource and are tied to the lifecycle of the Azure resource
+ # see https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
+ use_system_assigned = (self.managed_identities_client_id == MANAGED_IDENTITIES_SYSTEM_ASSIGNED)
+ resource = resource or self.managed_identities_resource_uri
+
+ demisto.debug('try to get Managed Identities token')
+
+ params = {'resource': resource}
+ if not use_system_assigned:
+ params['client_id'] = self.managed_identities_client_id
+
+ response_json = requests.get(MANAGED_IDENTITIES_TOKEN_URL, params=params, headers={'Metadata': 'True'}).json()
+ access_token = response_json.get('access_token')
+ expires_in = int(response_json.get('expires_in', 3595))
+ if access_token:
+ return access_token, expires_in, ''
+
+ err = response_json.get('error_description')
+ except Exception as e:
+ err = f'{str(e)}'
+
+ return_error(f'Error in Microsoft authorization with Azure Managed Identities: {err}')
+
def _get_token_device_code(
self, refresh_token: str = '', scope: Optional[str] = None, integration_context: Optional[dict] = None
) -> Tuple[str, int, str]:
@@ -698,3 +813,43 @@ class NotFoundError(Exception):
def __init__(self, message):
self.message = message
+
+
+def get_azure_managed_identities_client_id(params: dict) -> Optional[str]:
+ """"extract the Azure Managed Identities from the demisto params
+
+ Args:
+ params (dict): the demisto params
+
+ Returns:
+        Optional[str]: if use_managed_identities is True,
+        the managed_identities_client_id or MANAGED_IDENTITIES_SYSTEM_ASSIGNED
+        will be returned; otherwise - None
+
+ """
+ auth_type = params.get('auth_type') or params.get('authentication_type')
+ if params and (argToBoolean(params.get('use_managed_identities') or auth_type == 'Azure Managed Identities')):
+ client_id = params.get('managed_identities_client_id', {}).get('password')
+ return client_id or MANAGED_IDENTITIES_SYSTEM_ASSIGNED
+ return None
+
+
+def generate_login_url(client: MicrosoftClient) -> CommandResults:
+
+ assert client.tenant_id \
+ and client.scope \
+ and client.client_id \
+ and client.redirect_uri, 'Please make sure you entered the Authorization configuration correctly.'
+
+ login_url = f'https://login.microsoftonline.com/{client.tenant_id}/oauth2/v2.0/authorize?' \
+ f'response_type=code&scope=offline_access%20{client.scope.replace(" ", "%20")}' \
+ f'&client_id={client.client_id}&redirect_uri={client.redirect_uri}'
+
+ result_msg = f"""### Authorization instructions
+1. Click on the [login URL]({login_url}) to sign in and grant Cortex XSOAR permissions for your Azure Service Management.
+You will be automatically redirected to a link with the following structure:
+```REDIRECT_URI?code=AUTH_CODE&session_state=SESSION_STATE```
+2. Copy the `AUTH_CODE` (without the `code=` prefix, and the `session_state` parameter)
+and paste it in your instance configuration under the **Authorization code** parameter.
+ """
+ return CommandResults(readable_output=result_msg)
diff --git a/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule_test.py b/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule_test.py
index 20bffb15726f..509bb09a0e7f 100644
--- a/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule_test.py
+++ b/Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule_test.py
@@ -7,6 +7,7 @@
TOKEN = 'dummy_token'
TENANT = 'dummy_tenant'
REFRESH_TOKEN = 'dummy_refresh'
+REFRESH_TOKEN_PARAM = 'dummy_refresh_token_param'
AUTH_ID = 'dummy_auth_id'
ENC_KEY = 'dummy_enc_key'
TOKEN_URL = 'mock://dummy_url'
@@ -49,7 +50,8 @@ def oproxy_client_multi_resource():
def oproxy_client_refresh():
- refresh_token = REFRESH_TOKEN
+ refresh_token = REFRESH_TOKEN # represents the refresh token from the integration context
+ refresh_token_param = REFRESH_TOKEN_PARAM # represents the token from the current instance config
auth_id = f'{AUTH_ID}@{TOKEN_URL}'
enc_key = ENC_KEY
app_name = APP_NAME
@@ -57,7 +59,8 @@ def oproxy_client_refresh():
ok_codes = OK_CODES
return MicrosoftClient(self_deployed=False, auth_id=auth_id, enc_key=enc_key, app_name=app_name,
- refresh_token=refresh_token, base_url=base_url, verify=True, proxy=False, ok_codes=ok_codes)
+ refresh_token=refresh_token, base_url=base_url, verify=True, proxy=False, ok_codes=ok_codes,
+ refresh_token_param=refresh_token_param)
def self_deployed_client():
@@ -281,6 +284,52 @@ def get_encrypted(content, key):
assert req_res == res
+def test_oproxy_auth_first_attempt_failed(mocker, requests_mock):
+ """
+ This test checks the 'two attempts logic' of the authentication with the oproxy server.
+ 'Two attempts logic' - In general we send to the oproxy server a refresh token that was saved in the integration
+ context, If for some reason the authentication request was failed, we will perform a second auth attempt in which
+    we will send the refresh token from the integration parameters - i.e. the token currently configured in the
+ instance.
+
+ In the test, we simulate a case where the oproxy server returns an error when we send an auth request, in this case
+ the 'Two attempts logic' should occur.
+ Given:
+ - A client generated with a refresh_token and a refresh_token_param (represents the token from the integration
+ parameters - i.e current instance config).
+ - An error mock response for the request post command to the oproxy server.
+ When:
+ - running the client._oproxy_authorize() function
+
+ Then:
+ - Verify that the client._oproxy_authorize() function called twice: first attempt with the refresh_token,
+ and second attempt with the refresh_token_param.
+ - Verify that an exception with the expected error message was raised.
+ """
+
+ # Initialize Client
+ client = oproxy_client_refresh()
+
+ # Set Mockers
+ def get_encrypted(content, key):
+ return content + key
+ mocker.patch.object(demisto, 'error')
+ mocker.patch.object(client, '_add_info_headers')
+ mocker.patch.object(client, 'get_encrypted', side_effect=get_encrypted)
+ post_req_mock = requests_mock._adapter.register_uri('POST',
+ TOKEN_URL,
+ json={'error': 'Permission Denied'},
+ status_code=400)
+
+ # Verify results
+ with pytest.raises(Exception) as err:
+ client._oproxy_authorize()
+ assert post_req_mock.call_count == 2
+ assert REFRESH_TOKEN in post_req_mock.request_history[0].text
+ assert REFRESH_TOKEN_PARAM in post_req_mock.request_history[1].text
+ assert err.value.args[0] == 'Error in authentication. Try checking the credentials you entered.'
+
+
def test_self_deployed_request(requests_mock):
import urllib
# Set
@@ -514,3 +563,59 @@ def test_general_error_metrics(requests_mock, mocker):
metric_results = demisto.results.call_args_list[0][0][0]
assert metric_results.get('Contents') == 'Metrics reported successfully.'
assert metric_results.get('APIExecutionMetrics') == [{'Type': 'GeneralError', 'APICallsCount': 1}]
+
+
+@pytest.mark.parametrize(argnames='client_id', argvalues=['test_client_id', None])
+def test_get_token_managed_identities(requests_mock, mocker, client_id):
+ """
+ Given:
+ managed identity client id or None
+ When:
+ get access token
+ Then:
+        Verify that the results are as expected
+ """
+ test_token = 'test_token'
+ import MicrosoftApiModule
+
+ mock_token = {'access_token': test_token, 'expires_in': '86400'}
+
+ get_mock = requests_mock.get(MANAGED_IDENTITIES_TOKEN_URL, json=mock_token)
+ mocker.patch.object(MicrosoftApiModule, 'get_integration_context', return_value={})
+
+ client = self_deployed_client()
+ client.managed_identities_resource_uri = Resources.graph
+ client.managed_identities_client_id = client_id or MANAGED_IDENTITIES_SYSTEM_ASSIGNED
+
+ assert test_token == client.get_access_token()
+ qs = get_mock.last_request.qs
+ assert qs['resource'] == [Resources.graph]
+ assert client_id and qs['client_id'] == [client_id] or 'client_id' not in qs
+
+
+def test_get_token_managed_identities__error(requests_mock, mocker):
+ """
+ Given:
+ managed identity client id
+ When:
+ get access token
+ Then:
+        Verify that the results are as expected
+ """
+
+ import MicrosoftApiModule
+
+ mock_token = {'error_description': 'test_error_description'}
+ requests_mock.get(MANAGED_IDENTITIES_TOKEN_URL, json=mock_token)
+ mocker.patch.object(MicrosoftApiModule, 'return_error', side_effect=Exception())
+ mocker.patch.object(MicrosoftApiModule, 'get_integration_context', return_value={})
+
+ client = self_deployed_client()
+ client.managed_identities_client_id = 'test_client_id'
+ client.managed_identities_resource_uri = Resources.graph
+
+ with pytest.raises(Exception):
+ client.get_access_token()
+
+ err_message = 'Error in Microsoft authorization with Azure Managed Identities'
+ assert err_message in MicrosoftApiModule.return_error.call_args[0][0]
diff --git a/Packs/ApiModules/Scripts/MicrosoftAzureStorageApiModule/MicrosoftAzureStorageApiModule.py b/Packs/ApiModules/Scripts/MicrosoftAzureStorageApiModule/MicrosoftAzureStorageApiModule.py
index f23381d224e9..af25b51eeaa8 100644
--- a/Packs/ApiModules/Scripts/MicrosoftAzureStorageApiModule/MicrosoftAzureStorageApiModule.py
+++ b/Packs/ApiModules/Scripts/MicrosoftAzureStorageApiModule/MicrosoftAzureStorageApiModule.py
@@ -1,6 +1,9 @@
-import demistomock as demisto
-from CommonServerPython import *
-from CommonServerUserPython import *
+from CommonServerPython import * # noqa: F401
+import demistomock as demisto # noqa: F401
+
+MANAGED_IDENTITIES_TOKEN_URL = 'http://169.254.169.254/metadata/identity/oauth2/token?' \
+ 'api-version=2018-02-01&resource=https://storage.azure.com/'
+MANAGED_IDENTITIES_SYSTEM_ASSIGNED = 'SYSTEM_ASSIGNED'
class MicrosoftStorageClient(BaseClient):
@@ -8,13 +11,20 @@ class MicrosoftStorageClient(BaseClient):
Microsoft Azure Storage API Client
"""
- def __init__(self, server_url, verify, proxy, account_sas_token, storage_account_name, api_version):
+ def __init__(self, server_url, verify, proxy,
+ account_sas_token, storage_account_name,
+ api_version, managed_identities_client_id: Optional[str] = None):
super().__init__(base_url=server_url, verify=verify, proxy=proxy)
self._account_sas_token = account_sas_token
self._storage_account_name = storage_account_name
self._api_version = api_version
self._base_url = server_url
+ self._managed_identities_client_id = managed_identities_client_id
+ if self._managed_identities_client_id:
+ token, _ = self._get_managed_identities_token()
+ self._headers = {'Authorization': f'Bearer {token}'}
+
def http_request(
self, *args, url_suffix="", params=None, resp_type='response', headers=None,
return_empty_response=False, full_url="", **kwargs):
@@ -46,7 +56,8 @@ def http_request(
# The updated url_suffix after performing this logic will be:
# url_suffix = 'container?sv=2020-08-04&ss=ay&spr=https&sig=s5&restype=directory&comp=list'
params_query = self.params_dict_to_query_string(params, prefix='')
- url_suffix = f'{url_suffix}{self._account_sas_token}{params_query}'
+ uri_token_part = self._account_sas_token if self._account_sas_token else '?'
+ url_suffix = f'{url_suffix}{uri_token_part}{params_query}'
params = None
default_headers = {'x-ms-version': self._api_version}
@@ -54,6 +65,9 @@ def http_request(
if headers:
default_headers.update(headers)
+ if self._headers:
+ default_headers.update(self._headers)
+
response = super()._http_request( # type: ignore[misc]
*args, url_suffix=url_suffix, params=params, resp_type='response', headers=default_headers,
full_url=full_url, **kwargs)
@@ -88,6 +102,32 @@ def http_request(
except ValueError as exception:
raise DemistoException('Failed to parse json object from response: {}'.format(response.content), exception)
+ def _get_managed_identities_token(self):
+ """
+ Gets a token based on the Azure Managed Identities mechanism
+        in case the user configured the Azure VM and the other Azure resources correctly
+ """
+ try:
+            # system-assigned identities are restricted to one per resource and are tied to the lifecycle of the Azure resource
+ # see https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
+ demisto.debug('try to get token based on the Managed Identities')
+ use_system_assigned = (self._managed_identities_client_id == MANAGED_IDENTITIES_SYSTEM_ASSIGNED)
+ params = {}
+ if not use_system_assigned:
+ params['client_id'] = self._managed_identities_client_id
+ response_json = requests.get(MANAGED_IDENTITIES_TOKEN_URL,
+ params=params, headers={'Metadata': 'True'}).json()
+ access_token = response_json.get('access_token')
+ expires_in = int(response_json.get('expires_in', 3595))
+ if access_token:
+ return access_token, expires_in
+
+ err = response_json.get('error_description')
+ except Exception as e:
+ err = f'{str(e)}'
+
+ return_error(f'Error in Microsoft authorization with Azure Managed Identities: {err}')
+
def params_dict_to_query_string(self, params: dict = None, prefix: str = "") -> str:
"""
Convert request params to string query.
@@ -116,3 +156,22 @@ class NotFoundError(Exception):
def __init__(self, message):
self.message = message
+
+
+def get_azure_managed_identities_client_id(params: dict) -> Optional[str]:
+ """"extract the Azure Managed Identities from the demisto params
+
+ Args:
+ params (dict): the demisto params
+
+ Returns:
+        Optional[str]: if use_managed_identities is True,
+        the managed_identities_client_id or MANAGED_IDENTITIES_SYSTEM_ASSIGNED
+        will be returned; otherwise - None
+
+ """
+ auth_type = params.get('auth_type') or params.get('authentication_type')
+ if params and (argToBoolean(params.get('use_managed_identities') or auth_type == 'Azure Managed Identities')):
+ client_id = params.get('managed_identities_client_id', {}).get('password')
+ return client_id or MANAGED_IDENTITIES_SYSTEM_ASSIGNED
+ return None
diff --git a/Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule.py b/Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule.py
index 384fea18016d..1a6b7408daed 100644
--- a/Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule.py
+++ b/Packs/ApiModules/Scripts/NGINXApiModule/NGINXApiModule.py
@@ -244,7 +244,7 @@ def test_nginx_server(port: int, params: Dict):
nginx_process.terminate()
nginx_process.wait(1.0)
except Exception as ex:
- demisto.error(f'failed stoping test nginx process: {ex}')
+ demisto.error(f'failed stopping test nginx process: {ex}')
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
@@ -314,7 +314,7 @@ def run_long_running(params: Dict = None, is_test: bool = False):
server_process.terminate()
server_process.join(1.0)
except Exception as ex:
- demisto.error(f'failed stoping test wsgi server process: {ex}')
+ demisto.error(f'failed stopping test wsgi server process: {ex}')
else:
nginx_process = start_nginx_server(nginx_port, params)
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py b/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py
index 5a44e13d9acb..016936bf7fa3 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py
@@ -1,8 +1,9 @@
+# pylint: disable=E9010, E9011
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
-from typing import Union, Optional, List, Dict, Tuple
+from typing import Optional, List, Dict, Tuple
from requests.sessions import merge_setting, CaseInsensitiveDict
import re
import copy
@@ -10,6 +11,7 @@
import urllib3
from taxii2client import v20, v21
from taxii2client.common import TokenAuth, _HTTPConnection
+from taxii2client.exceptions import InvalidJSONError
import tempfile
# disable insecure warnings
@@ -25,8 +27,6 @@
ERR_NO_COLL = "No collection is available for this user, please make sure you entered the configuration correctly"
-DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
-
# Pattern Regexes - used to extract indicator type and value
INDICATOR_OPERATOR_VAL_FORMAT_PATTERN = r"(\w.*?{value}{operator})'(.*?)'"
INDICATOR_EQUALS_VAL_PATTERN = INDICATOR_OPERATOR_VAL_FORMAT_PATTERN.format(
@@ -175,6 +175,10 @@
'ZA': 'South Africa', 'ZM': 'Zambia', 'ZW': 'Zimbabwe'}
+def reached_limit(limit: int, element_count: int):
+ return element_count >= limit > -1
+
+
class Taxii2FeedClient:
def __init__(
self,
@@ -314,12 +318,18 @@ def init_roots(self):
logging.disable(logging.NOTSET)
def set_api_root(self):
- roots_to_api = {str(api_root.url).split('/')[-2]: api_root
- for api_root in self.server.api_roots} # type: ignore[attr-defined]
+ roots_to_api = {}
+ for api_root in self.server.api_roots: # type: ignore[attr-defined]
+ # ApiRoots are initialized with wrong _conn because we are not providing auth or cert to Server
+ # closing wrong unused connections
+ api_root_name = str(api_root.url).split('/')[-2]
+ demisto.debug(f'closing api_root._conn for {api_root_name}')
+ api_root._conn.close()
+ roots_to_api[api_root_name] = api_root
if self.default_api_root:
if not roots_to_api.get(self.default_api_root):
- raise DemistoException(f'The given default API root {self.default_api_root} doesn\'t exists.'
+ raise DemistoException(f'The given default API root {self.default_api_root} doesn\'t exist. '
f'Available API roots are {list(roots_to_api.keys())}.')
self.api_root = roots_to_api.get(self.default_api_root)
@@ -1001,7 +1011,6 @@ def build_iterator(self, limit: int = -1, **kwargs) -> List[Dict[str, str]]:
:param limit: max amount of indicators to fetch
:return: Cortex indicators list
"""
-
if not isinstance(self.collection_to_fetch, (v20.Collection, v21.Collection)):
raise DemistoException(
"Could not find a collection to fetch from. "
@@ -1013,12 +1022,18 @@ def build_iterator(self, limit: int = -1, **kwargs) -> List[Dict[str, str]]:
page_size = self.get_page_size(limit, limit)
if page_size <= 0:
return []
- envelopes = self.poll_collection(page_size, **kwargs) # got data from server
- indicators = self.load_stix_objects_from_envelope(envelopes, limit)
+
+ try:
+ envelopes = self.poll_collection(page_size, **kwargs) # got data from server
+ indicators = self.load_stix_objects_from_envelope(envelopes, limit)
+ except InvalidJSONError as e:
+ demisto.debug(f'Excepted InvalidJSONError, continuing with empty result.\nError: {e}')
+ # raised when the response is empty, because {} is parsed into 'ç½'
+ indicators = []
return indicators
- def load_stix_objects_from_envelope(self, envelopes: Dict[str, Any], limit: int = -1):
+ def load_stix_objects_from_envelope(self, envelopes: types.GeneratorType, limit: int = -1):
parse_stix_2_objects = {
"indicator": self.parse_indicator,
@@ -1045,118 +1060,65 @@ def load_stix_objects_from_envelope(self, envelopes: Dict[str, Any], limit: int
"location": self.parse_location,
"vulnerability": self.parse_vulnerability
}
- indicators = []
- # TAXII 2.0
- if isinstance(list(envelopes.values())[0], types.GeneratorType):
- indicators.extend(self.parse_generator_type_envelope(envelopes, parse_stix_2_objects))
- # TAXII 2.1
- else:
- indicators.extend(self.parse_dict_envelope(envelopes, parse_stix_2_objects, limit))
+ indicators, relationships_lst = self.parse_generator_type_envelope(envelopes, parse_stix_2_objects, limit)
+ if relationships_lst:
+ indicators.extend(self.parse_relationships(relationships_lst))
demisto.debug(
f"TAXII 2 Feed has extracted {len(indicators)} indicators"
)
- if limit > -1:
- return indicators[:limit]
+
return indicators
- def parse_generator_type_envelope(self, envelopes: Dict[str, Any],
- parse_objects_func):
+ def parse_generator_type_envelope(self, envelopes: types.GeneratorType, parse_objects_func, limit: int = -1):
indicators = []
relationships_lst = []
- for obj_type, envelope in envelopes.items():
- for sub_envelope in envelope:
- stix_objects = sub_envelope.get("objects")
- if not stix_objects:
- # no fetched objects
- break
- # now we have a list of objects, go over each obj, save id with obj, parse the obj
- if obj_type != "relationship":
- for obj in stix_objects:
- # we currently don't support extension object
- if obj.get('type') == 'extension-definition':
- continue
- self.id_to_object[obj.get('id')] = obj
- result = parse_objects_func[obj_type](obj)
- if not result:
- continue
- indicators.extend(result)
- self.update_last_modified_indicator_date(obj.get("modified"))
- else:
- relationships_lst.extend(stix_objects)
- if relationships_lst:
- indicators.extend(self.parse_relationships(relationships_lst))
+ for envelope in envelopes:
+ stix_objects = envelope.get("objects")
+ if not stix_objects:
+ # no fetched objects
+ break
- return indicators
+ # now we have a list of objects, go over each obj, save id with obj, parse the obj
+ for obj in stix_objects:
+ obj_type = obj.get('type')
+
+ # we currently don't support extension object
+ if obj_type == 'extension-definition':
+ continue
+ elif obj_type == 'relationship':
+ relationships_lst.append(obj)
+ continue
- def parse_dict_envelope(self, envelopes: Dict[str, Any],
- parse_objects_func, limit: int = -1):
- indicators: list = []
- relationships_list: List[Dict[str, Any]] = []
- for obj_type, envelope in envelopes.items():
- cur_limit = limit
- stix_objects = envelope.get("objects", [])
- if obj_type != "relationship":
- for obj in stix_objects:
- # we currently don't support extension object
- if obj.get('type') == 'extension-definition':
- continue
- self.id_to_object[obj.get('id')] = obj
- result = parse_objects_func[obj_type](obj)
- if not result:
- continue
+ self.id_to_object[obj.get('id')] = obj
+ if not parse_objects_func.get(obj_type):
+ demisto.debug(f'There is no parsing function for object type {obj_type}, '
+ f'available parsing functions are for types: {",".join(parse_objects_func.keys())}.')
+ continue
+ if result := parse_objects_func[obj_type](obj):
indicators.extend(result)
self.update_last_modified_indicator_date(obj.get("modified"))
- else:
- relationships_list.extend(stix_objects)
- while envelope.get("more", False):
- page_size = self.get_page_size(limit, cur_limit)
- envelope = self.collection_to_fetch.get_objects(
- limit=page_size, next=envelope.get("next", ""), type=obj_type
- )
- if isinstance(envelope, Dict):
- stix_objects = envelope.get("objects")
- if obj_type != "relationship":
- for obj in stix_objects:
- self.id_to_object[obj.get('id')] = obj
- result = parse_objects_func[obj_type](obj)
- if not result:
- continue
- indicators.extend(result)
- self.update_last_modified_indicator_date(obj.get("modified"))
- else:
- relationships_list.extend(stix_objects)
- else:
- raise DemistoException(
- "Error: TAXII 2 client received the following response while requesting "
- f"indicators: {str(envelope)}\n\nExpected output is json"
- )
+ if reached_limit(limit, len(indicators)):
+ return indicators, relationships_lst
- if relationships_list:
- indicators.extend(self.parse_relationships(relationships_list))
- return indicators
+ return indicators, relationships_lst
def poll_collection(
self, page_size: int, **kwargs
- ) -> Dict[str, Union[types.GeneratorType, Dict[str, str]]]:
+ ) -> types.GeneratorType:
"""
Polls a taxii collection
:param page_size: size of the request page
"""
- types_envelopes = {}
get_objects = self.collection_to_fetch.get_objects
- if len(self.objects_to_fetch) > 1: # when fetching one type no need to fetch relationship
+ if 'relationship' not in self.objects_to_fetch and \
+ len(self.objects_to_fetch) > 1: # when fetching one type no need to fetch relationship
self.objects_to_fetch.append('relationship')
- for obj_type in self.objects_to_fetch:
- kwargs['type'] = obj_type
- if isinstance(self.collection_to_fetch, v20.Collection):
- envelope = v20.as_pages(get_objects, per_request=page_size, **kwargs)
- else:
- envelope = get_objects(limit=page_size, **kwargs)
- if envelope:
- types_envelopes[obj_type] = envelope
- return types_envelopes
+ kwargs['type'] = self.objects_to_fetch
+ if isinstance(self.collection_to_fetch, v20.Collection):
+ return v20.as_pages(get_objects, per_request=page_size, **kwargs)
+ return v21.as_pages(get_objects, per_request=page_size, **kwargs)
def get_page_size(self, max_limit: int, cur_limit: int) -> int:
"""
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule_test.py b/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule_test.py
index 8b2def140cc4..d49606da4b46 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule_test.py
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule_test.py
@@ -1,4 +1,4 @@
-from taxii2client.exceptions import TAXIIServiceException
+from taxii2client.exceptions import TAXIIServiceException, InvalidJSONError
from CommonServerPython import *
from TAXII2ApiModule import Taxii2FeedClient, TAXII_VER_2_1, HEADER_USERNAME
@@ -180,6 +180,26 @@ def test_limit_0_v21(self, mocker):
iocs = mock_client.build_iterator(limit=0)
assert iocs == []
+ def test_handle_json_error(self, mocker):
+ """
+ Scenario: Call build iterator when the collection raises an InvalidJSONError because the response is "ç½"
+
+ Given:
+ - Collection to fetch is of type v21.Collection
+
+ When
+ - Initializing collection to fetch
+
+ Then:
+ - Ensure 0 iocs are returned
+ """
+ mock_client = Taxii2FeedClient(url='', collection_to_fetch=None, proxies=[], verify=False, objects_to_fetch=[])
+ mocker.patch.object(mock_client, 'collection_to_fetch', spec=v21.Collection)
+ mocker.patch.object(mock_client, 'load_stix_objects_from_envelope', side_effect=InvalidJSONError('Invalid JSON'))
+
+ iocs = mock_client.build_iterator()
+ assert iocs == []
+
class TestInitServer:
"""
@@ -458,7 +478,7 @@ def test_21_empty(self):
expected = []
mock_client = Taxii2FeedClient(url='', collection_to_fetch='', proxies=[], verify=False, objects_to_fetch=[])
- actual = mock_client.load_stix_objects_from_envelope({"indicator": STIX_ENVELOPE_NO_IOCS}, -1)
+ actual = mock_client.load_stix_objects_from_envelope(STIX_ENVELOPE_NO_IOCS, -1)
assert len(actual) == 0
assert expected == actual
@@ -481,7 +501,7 @@ def test_21_simple(self):
mock_client = Taxii2FeedClient(url='', collection_to_fetch='', proxies=[], verify=False, tlp_color='GREEN',
objects_to_fetch=[])
- actual = mock_client.load_stix_objects_from_envelope({"indicator": STIX_ENVELOPE_17_IOCS_19_OBJS}, -1)
+ actual = mock_client.load_stix_objects_from_envelope(STIX_ENVELOPE_17_IOCS_19_OBJS, -1)
assert len(actual) == 17
assert expected == actual
@@ -495,7 +515,7 @@ def test_21_complex_not_skipped(self):
- skip is False
When:
- - extract_indicators_from_envelope_and_parse is called
+ - load_stix_objects_from_envelope is called
Then:
- Extract and parse the indicators from the envelope with the complex iocs
@@ -505,7 +525,7 @@ def test_21_complex_not_skipped(self):
mock_client = Taxii2FeedClient(url='', collection_to_fetch='', proxies=[], verify=False, tlp_color='GREEN',
objects_to_fetch=[])
- actual = mock_client.load_stix_objects_from_envelope({"indicator": STIX_ENVELOPE_20_IOCS_19_OBJS}, -1)
+ actual = mock_client.load_stix_objects_from_envelope(STIX_ENVELOPE_20_IOCS_19_OBJS, -1)
assert len(actual) == 20
assert actual == expected
@@ -519,7 +539,7 @@ def test_21_complex_skipped(self):
- skip is True
When:
- - extract_indicators_from_envelope_and_parse is called
+ - load_stix_objects_from_envelope is called
Then:
- Extract and parse the indicators from the envelope with the complex iocs
@@ -529,7 +549,7 @@ def test_21_complex_skipped(self):
mock_client = Taxii2FeedClient(url='', collection_to_fetch='', proxies=[], verify=False, skip_complex_mode=True,
objects_to_fetch=[])
- actual = mock_client.load_stix_objects_from_envelope({"indicator": STIX_ENVELOPE_20_IOCS_19_OBJS}, -1)
+ actual = mock_client.load_stix_objects_from_envelope(STIX_ENVELOPE_20_IOCS_19_OBJS, -1)
assert len(actual) == 14
assert actual == expected
@@ -550,7 +570,6 @@ def test_load_stix_objects_from_envelope_v21(self):
"""
mock_client = Taxii2FeedClient(url='', collection_to_fetch='', proxies=[], verify=False, objects_to_fetch=[])
objects_envelopes = envelopes_v21
- mock_client.id_to_object = id_to_object
result = mock_client.load_stix_objects_from_envelope(objects_envelopes, -1)
assert mock_client.id_to_object == id_to_object
@@ -564,29 +583,15 @@ def test_load_stix_objects_from_envelope_v20(self):
- Envelope with indicators, arranged by object type.
When:
- - parse_generator_type_envelope is called (skipping condition from load_stix_objects_from_envelope).
+ - load_stix_objects_from_envelope is called.
Then: - Load and parse objects from the envelope according to their object type and ignore
extension-definition objects.
"""
mock_client = Taxii2FeedClient(url='', collection_to_fetch='', proxies=[], verify=False, objects_to_fetch=[])
- objects_envelopes = envelopes_v20
- mock_client.id_to_object = id_to_object
-
- parse_stix_2_objects = {
- "indicator": mock_client.parse_indicator,
- "attack-pattern": mock_client.parse_attack_pattern,
- "malware": mock_client.parse_malware,
- "report": mock_client.parse_report,
- "course-of-action": mock_client.parse_course_of_action,
- "campaign": mock_client.parse_campaign,
- "intrusion-set": mock_client.parse_intrusion_set,
- "tool": mock_client.parse_tool,
- "threat-actor": mock_client.parse_threat_actor,
- "infrastructure": mock_client.parse_infrastructure
- }
- result = mock_client.parse_generator_type_envelope(objects_envelopes, parse_stix_2_objects)
+
+ result = mock_client.load_stix_objects_from_envelope(envelopes_v20)
assert mock_client.id_to_object == id_to_object
assert result == parsed_objects
@@ -1083,3 +1088,21 @@ def test_parse_location(self, taxii_2_client, location_object, xsoar_expected_re
- Make sure all the fields are being parsed correctly.
"""
assert taxii_2_client.parse_location(location_object) == xsoar_expected_response
+
+
+@pytest.mark.parametrize('limit, element_count, return_value',
+ [(8, 8, True),
+ (8, 9, True),
+ (8, 0, False),
+ (-1, 10, False)])
+def test_reached_limit(limit, element_count, return_value):
+ """
+ Given:
+ - A limit and element count.
+ When:
+ - Enforcing limit on the elements count.
+ Then:
+ - Assert that the element count is not exceeded.
+ """
+ from TAXII2ApiModule import reached_limit
+ assert reached_limit(limit, element_count) == return_value
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/id_to_object_test.json b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/id_to_object_test.json
index a94aaebc50e8..faedde0f0b98 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/id_to_object_test.json
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/id_to_object_test.json
@@ -201,23 +201,5 @@
"modified":"2016-05-07T11:22:30.000Z",
"name":"Poison Ivy C2",
"infrastructure_types": ["command-and-control"]
- },
- "relationship--01a5a209-b94c-450b-b7f9-946497d91055": {
- "created": "2018-08-03T21:03:51.484Z",
- "id": "relationship--01a5a209-b94c-450b-b7f9-946497d91055",
- "modified": "2018-08-03T21:03:51.484Z",
- "relationship_type": "uses",
- "source_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "target_ref": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
- "type": "relationship"
- },
- "relationship--abc475d9-199c-4623-9e9a-02adf340a415": {
- "created": "2018-08-03T20:31:03.780Z",
- "id": "relationship--abc475d9-199c-4623-9e9a-02adf340a415",
- "modified": "2018-08-22T12:36:32.248Z",
- "relationship_type": "indicates",
- "source_ref": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
- "target_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "type": "relationship"
}
}
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v20.json b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v20.json
index 8d386f0baf97..35599e8a225a 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v20.json
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v20.json
@@ -1,281 +1,315 @@
-{
- "indicator": [{
+[
+ {
"objects": [
- {
- "created": "2018-04-23T17:01:01.248Z",
- "id": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
- "labels": ["malicious-activity"],
- "modified": "2018-04-23T17:01:01.248Z",
- "name": "windows-updates.com",
- "pattern": "[domain-name:value = 'windows-updates.com']",
- "type": "indicator",
- "valid_from": "2018-04-23T17:01:01.248Z"
+ {
+ "created": "2018-04-23T17:01:01.248Z",
+ "id": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
+ "labels": [
+ "malicious-activity"
+ ],
+ "modified": "2018-04-23T17:01:01.248Z",
+ "name": "windows-updates.com",
+ "pattern": "[domain-name:value = 'windows-updates.com']",
+ "type": "indicator",
+ "valid_from": "2018-04-23T17:01:01.248Z"
}
]
- }],
- "attack-pattern": [{
+ },
+ {
"objects": [
+ {
+ "created": "2017-10-25T14:48:11.535Z",
+ "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
+ "description": "An adversary could call standard operating system APIs from a malicious application to gather contact list (i.e., address book) data, or with escalated privileges could directly access files containing contact list data.",
+ "external_references": [
+ {
+ "external_id": "T1432",
+ "source_name": "mitre-mobile-attack",
+ "url": "https://attack.mitre.org/techniques/T1432"
+ },
+ {
+ "external_id": "APP-13",
+ "source_name": "NIST Mobile Threat Catalogue",
+ "url": "https://pages.nist.gov/mobile-threat-catalogue/application-threats/APP-13.html"
+ }
+ ],
+ "id": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
+ "kill_chain_phases": [
+ {
+ "kill_chain_name": "lockheed",
+ "phase_name": "act-on-objectives"
+ },
{
- "created": "2017-10-25T14:48:11.535Z",
- "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
- "description": "An adversary could call standard operating system APIs from a malicious application to gather contact list (i.e., address book) data, or with escalated privileges could directly access files containing contact list data.",
- "external_references": [
- {
- "external_id": "T1432",
- "source_name": "mitre-mobile-attack",
- "url": "https://attack.mitre.org/techniques/T1432"
- },
- {
- "external_id": "APP-13",
- "source_name": "NIST Mobile Threat Catalogue",
- "url": "https://pages.nist.gov/mobile-threat-catalogue/application-threats/APP-13.html"
- }
- ],
- "id": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
- "kill_chain_phases": [
- {
- "kill_chain_name": "lockheed",
- "phase_name": "act-on-objectives"
- },
- {
- "kill_chain_name": "mitre-mobile-attack",
- "phase_name": "collection"
- }
- ],
- "modified": "2018-10-17T00:14:20.652Z",
- "name": "T1432: Access Contact List (Mobile)",
- "object_marking_refs": [
- "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"
- ],
- "type": "attack-pattern",
- "x_mitre_detection": "On both Android (6.0 and up) and iOS, the user can view which applications have permission to access contact list information through the device settings screen, and the user can choose to revoke the permissions.",
- "x_mitre_platforms": ["Android", "iOS"],
- "x_mitre_version": "1.0"
- },
+ "kill_chain_name": "mitre-mobile-attack",
+ "phase_name": "collection"
+ }
+ ],
+ "modified": "2018-10-17T00:14:20.652Z",
+ "name": "T1432: Access Contact List (Mobile)",
+ "object_marking_refs": [
+ "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"
+ ],
+ "type": "attack-pattern",
+ "x_mitre_detection": "On both Android (6.0 and up) and iOS, the user can view which applications have permission to access contact list information through the device settings screen, and the user can choose to revoke the permissions.",
+ "x_mitre_platforms": [
+ "Android",
+ "iOS"
+ ],
+ "x_mitre_version": "1.0"
+ },
{
- "created": "2020-02-05T14:17:46.686Z",
- "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
- "description": "Adversaries may perform software packing or virtual machine software protection to conceal their code. Software packing is a method of compressing or encrypting an executable. Packing an executable changes the file signature in an attempt to avoid signature-based detection. Most decompression techniques decompress the executable code in memory. Virtual machine software protection translates an executable's original code into a special format that only a special virtual machine can run. A virtual machine is then called to run this code.(Citation: ESET FinFisher Jan 2018) \n\nUtilities used to perform software packing are called packers. Example packers are MPRESS and UPX. A more comprehensive list of known packers is available, (Citation: Wikipedia Exe Compression) but adversaries may create their own packing techniques that do not leave the same artifacts as well-known packers to evade defenses. ",
- "external_references": [
- {
- "external_id": "T1027.002",
- "source_name": "mitre-attack",
- "url": "https://attack.mitre.org/techniques/T1027/002"
- },
- {
- "external_id": "CAPEC-570",
- "source_name": "capec",
- "url": "https://example.example.com/data/definitions/570.html"
- },
- {
- "description": "Kafka, F. (2018, January). ESET's Guide to Deobfuscating and Devirtualizing FinFisher. Retrieved August 12, 2019.",
- "source_name": "ESET FinFisher Jan 2018",
- "url": "https://www.welivesecurity.com/wp-content/uploads/2018/01/WP-FinFisher.pdf"
- },
- {
- "description": "Executable compression. (n.d.). Retrieved December 4, 2014.",
- "source_name": "Wikipedia Exe Compression",
- "url": "http://en.wikipedia.org/wiki/Executable_compression"
- }
- ],
- "id": "attack-pattern--deb98323-e13f-4b0c-8d94-175379069062",
- "kill_chain_phases": [
- {
- "kill_chain_name": "lockheed",
- "phase_name": "installation"
- },
- {
- "kill_chain_name": "mitre-attack",
- "phase_name": "defense-evasion"
- }
- ],
- "modified": "2020-02-05T20:05:41.548Z",
- "name": "T1027.002: Software Packing",
- "object_marking_refs": ["marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"],
- "type": "attack-pattern",
- "x_mitre_contributors": ["Filip Kafka, ESET"],
- "x_mitre_data_sources": ["File: File Content", "File: File Metadata"],
- "x_mitre_defense_bypassed": [
- "Anti-virus",
- "Heuristic detection",
- "Signature-based detection"],
- "x_mitre_detection": "Use file scanning to look for known software packers or artifacts of packing techniques. Packing is not a definitive indicator of malicious activity, because legitimate software may use packing techniques to reduce binary size or to protect proprietary code.",
- "x_mitre_is_subtechnique": true,
- "x_mitre_platforms": ["macOS", "Windows"],
- "x_mitre_version": "1.0",
- "x_panw_parent_technique": "Obfuscated Files or Information",
- "x_panw_parent_technique_subtechnique": "Obfuscated Files or Information: Software Packing"
- },
+ "created": "2020-02-05T14:17:46.686Z",
+ "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
+ "description": "Adversaries may perform software packing or virtual machine software protection to conceal their code. Software packing is a method of compressing or encrypting an executable. Packing an executable changes the file signature in an attempt to avoid signature-based detection. Most decompression techniques decompress the executable code in memory. Virtual machine software protection translates an executable's original code into a special format that only a special virtual machine can run. A virtual machine is then called to run this code.(Citation: ESET FinFisher Jan 2018) \n\nUtilities used to perform software packing are called packers. Example packers are MPRESS and UPX. A more comprehensive list of known packers is available, (Citation: Wikipedia Exe Compression) but adversaries may create their own packing techniques that do not leave the same artifacts as well-known packers to evade defenses. ",
+ "external_references": [
+ {
+ "external_id": "T1027.002",
+ "source_name": "mitre-attack",
+ "url": "https://attack.mitre.org/techniques/T1027/002"
+ },
+ {
+ "external_id": "CAPEC-570",
+ "source_name": "capec",
+ "url": "https://example.example.com/data/definitions/570.html"
+ },
+ {
+ "description": "Kafka, F. (2018, January). ESET's Guide to Deobfuscating and Devirtualizing FinFisher. Retrieved August 12, 2019.",
+ "source_name": "ESET FinFisher Jan 2018",
+ "url": "https://www.welivesecurity.com/wp-content/uploads/2018/01/WP-FinFisher.pdf"
+ },
+ {
+ "description": "Executable compression. (n.d.). Retrieved December 4, 2014.",
+ "source_name": "Wikipedia Exe Compression",
+ "url": "http://en.wikipedia.org/wiki/Executable_compression"
+ }
+ ],
+ "id": "attack-pattern--deb98323-e13f-4b0c-8d94-175379069062",
+ "kill_chain_phases": [
+ {
+ "kill_chain_name": "lockheed",
+ "phase_name": "installation"
+ },
+ {
+ "kill_chain_name": "mitre-attack",
+ "phase_name": "defense-evasion"
+ }
+ ],
+ "modified": "2020-02-05T20:05:41.548Z",
+ "name": "T1027.002: Software Packing",
+ "object_marking_refs": [
+ "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"
+ ],
+ "type": "attack-pattern",
+ "x_mitre_contributors": [
+ "Filip Kafka, ESET"
+ ],
+ "x_mitre_data_sources": [
+ "File: File Content",
+ "File: File Metadata"
+ ],
+ "x_mitre_defense_bypassed": [
+ "Anti-virus",
+ "Heuristic detection",
+ "Signature-based detection"
+ ],
+ "x_mitre_detection": "Use file scanning to look for known software packers or artifacts of packing techniques. Packing is not a definitive indicator of malicious activity, because legitimate software may use packing techniques to reduce binary size or to protect proprietary code.",
+ "x_mitre_is_subtechnique": true,
+ "x_mitre_platforms": [
+ "macOS",
+ "Windows"
+ ],
+ "x_mitre_version": "1.0",
+ "x_panw_parent_technique": "Obfuscated Files or Information",
+ "x_panw_parent_technique_subtechnique": "Obfuscated Files or Information: Software Packing"
+ },
{
- "created": "2021-10-21T16:56:05.840605Z",
- "created_by_ref": "identity--749249c0-f7c7-5428-a4ad-ea5e1627a221",
- "description": "This schema adds TIM data to the object",
- "extension_types": [
- "property-extension"
- ],
- "id": "extension-definition--fc572b8c-bbe3-444b-b40b-06d3b890cd79",
- "modified": "2021-10-21T16:56:17.747194Z",
- "name": "Cortex XSOAR TIM Attack Pattern",
- "schema": "https://github.com/demisto/content/tree/master/Packs/TAXIIServer/doc_files/XSOAR_indicator_schema.json",
- "spec_version": "2.0",
- "type": "extension-definition",
- "version": "1.0"
- }
+ "created": "2021-10-21T16:56:05.840605Z",
+ "created_by_ref": "identity--749249c0-f7c7-5428-a4ad-ea5e1627a221",
+ "description": "This schema adds TIM data to the object",
+ "extension_types": [
+ "property-extension"
+ ],
+ "id": "extension-definition--fc572b8c-bbe3-444b-b40b-06d3b890cd79",
+ "modified": "2021-10-21T16:56:17.747194Z",
+ "name": "Cortex XSOAR TIM Attack Pattern",
+ "schema": "https://github.com/demisto/content/tree/master/Packs/TAXIIServer/doc_files/XSOAR_indicator_schema.json",
+ "spec_version": "2.0",
+ "type": "extension-definition",
+ "version": "1.0"
+ }
]
- }],
- "malware": [{
+ },
+ {
"objects": [
- {
- "created": "2019-10-10T14:43:27.256Z",
- "id": "malware--481d22d7-6dd8-4e37-a543-dd21cc1707c4",
- "labels": ["backdoor"],
- "modified": "2019-10-10T20:25:46.596Z",
- "name": "Freenki",
- "type": "malware"
- }
+ {
+ "created": "2019-10-10T14:43:27.256Z",
+ "id": "malware--481d22d7-6dd8-4e37-a543-dd21cc1707c4",
+ "labels": [
+ "backdoor"
+ ],
+ "modified": "2019-10-10T20:25:46.596Z",
+ "name": "Freenki",
+ "type": "malware"
+ }
]
- }],
- "report":[{
+ },
+ {
"objects": [
- {
- "created": "2018-08-03T20:31:05.060Z",
- "description": "Sofacy (also known as Fancy Bear, APT 28, STRONTIUM, Pawn Storm) is a highly active actor with a Russian nexus. They have been active since the mid 2000s, and have been responsible for targeted intrusion campaigns against various industry vertical such as but not limited to Aerospace, Defense, Energy, Government and Media. Extensive observation and research of Sofacy's activities over time indicated a profile closely mirroring the strategic interests of the Russian government. More recently, this group has been attributed to the GRU, Russia's premier military intelligence service as reported by the US intelligence community within several declassified public documents.\n\nSeveral high profile intrusions have been publicly linked to the Sofacy group, such as the German Bundestag, France's TV5Monde TV station, the Democratic National Committee, the World Anti-Doping Agency, and the Ukrainian military.",
- "id": "report--708d589b-8d99-48fd-bbb6-2d47648f807f",
- "labels": ["intrusion-set"],
- "modified": "2020-07-30T16:55:21.569Z",
- "name": "Sofacy",
- "object_refs": [
- "intrusion-set--75ac5bcc-3915-4815-b9a1-bf87277fc343",
- "report--2791a5e0-d65a-43c0-936e-c15a6b7ba9d1",
- "report--40aaefc2-3fe9-423b-a6eb-55abf11639b7",
- "report--2f5734bf-4127-4997-9288-e9c5530aa737",
- "report--ffe94621-b007-4115-ad49-fb7c8a26b66a"
- ],
- "published": "2020-07-30T16:55:21.569Z",
- "type": "report"
- }
+ {
+ "created": "2018-08-03T20:31:05.060Z",
+ "description": "Sofacy (also known as Fancy Bear, APT 28, STRONTIUM, Pawn Storm) is a highly active actor with a Russian nexus. They have been active since the mid 2000s, and have been responsible for targeted intrusion campaigns against various industry vertical such as but not limited to Aerospace, Defense, Energy, Government and Media. Extensive observation and research of Sofacy's activities over time indicated a profile closely mirroring the strategic interests of the Russian government. More recently, this group has been attributed to the GRU, Russia's premier military intelligence service as reported by the US intelligence community within several declassified public documents.\n\nSeveral high profile intrusions have been publicly linked to the Sofacy group, such as the German Bundestag, France's TV5Monde TV station, the Democratic National Committee, the World Anti-Doping Agency, and the Ukrainian military.",
+ "id": "report--708d589b-8d99-48fd-bbb6-2d47648f807f",
+ "labels": [
+ "intrusion-set"
+ ],
+ "modified": "2020-07-30T16:55:21.569Z",
+ "name": "Sofacy",
+ "object_refs": [
+ "intrusion-set--75ac5bcc-3915-4815-b9a1-bf87277fc343",
+ "report--2791a5e0-d65a-43c0-936e-c15a6b7ba9d1",
+ "report--40aaefc2-3fe9-423b-a6eb-55abf11639b7",
+ "report--2f5734bf-4127-4997-9288-e9c5530aa737",
+ "report--ffe94621-b007-4115-ad49-fb7c8a26b66a"
+ ],
+ "published": "2020-07-30T16:55:21.569Z",
+ "type": "report"
+ }
]
- }],
- "course-of-action": [{
+ },
+ {
"objects": [
- {
- "created": "2020-06-23T19:50:31.722Z",
- "description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
- "id": "course-of-action--645c6c45-116e-4265-98d2-e30f56325bc6",
- "modified": "2020-06-26T13:02:55.803Z",
- "name": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'",
- "type": "course-of-action",
- "x_panw_coa_bp_audit_procedure": "Navigate to `Objects > Security Profiles > Antivirus`\n\nVerify that antivirus profiles have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, verify that the `imap` and `pop3` decoders are set to `alert` for both `Action` and `Wildfire Action`.",
- "x_panw_coa_bp_cis_controls": [
- "TITLE:Deploy Network-based Anti-malware Tools CONTROL:v6 8.5 DESCRIPTION:Use network-based anti-malware tools to identify executables in all network traffic and use techniques other than signature-based detection to identify and filter out malicious content before it arrives at the endpoint.;TITLE:Malware Defenses CONTROL:v7 8 DESCRIPTION:Malware Defenses;"
- ],
- "x_panw_coa_bp_description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
- "x_panw_coa_bp_rationale_statement": "Antivirus signatures produce low false positives. By blocking any detected malware through the specified decoders, the threat of malware propagation through the firewall is greatly reduced. It is recommended to mitigate malware found in pop3 and imap through a dedicated antivirus gateway. Due to the nature of the pop3 and imap protocols, the firewall is not able to block only a single email message containing malware. Instead, the entire session would be terminated, potentially affecting benign email messages.",
- "x_panw_coa_bp_recommendation_number": "6.1",
- "x_panw_coa_bp_references": [
- "\u201cThreat Prevention Deployment Tech Note\u201d - https://live.paloaltonetworks.com/docs/DOC-3094:\u201cPAN-OS Administrator's Guide 9.0 (English) - Security Profiles\u201d - https://docs.paloaltonetworks.com/pan-os/9-0/pan-os-admin/policy/security-profiles.html"
- ],
- "x_panw_coa_bp_remediation_procedure": "Navigate to `Objects > Security Profiles > Antivirus.`\n\nSet antivirus profiles to have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, set the `imap` and `pop3` decoders to `alert` for both `Action` and `Wildfire Action`.",
- "x_panw_coa_bp_scoring_status": "full",
- "x_panw_coa_bp_section_number": "6",
- "x_panw_coa_bp_status": "published",
- "x_panw_coa_bp_title": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'"
- }
+ {
+ "created": "2020-06-23T19:50:31.722Z",
+ "description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
+ "id": "course-of-action--645c6c45-116e-4265-98d2-e30f56325bc6",
+ "modified": "2020-06-26T13:02:55.803Z",
+ "name": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'",
+ "type": "course-of-action",
+ "x_panw_coa_bp_audit_procedure": "Navigate to `Objects > Security Profiles > Antivirus`\n\nVerify that antivirus profiles have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, verify that the `imap` and `pop3` decoders are set to `alert` for both `Action` and `Wildfire Action`.",
+ "x_panw_coa_bp_cis_controls": [
+ "TITLE:Deploy Network-based Anti-malware Tools CONTROL:v6 8.5 DESCRIPTION:Use network-based anti-malware tools to identify executables in all network traffic and use techniques other than signature-based detection to identify and filter out malicious content before it arrives at the endpoint.;TITLE:Malware Defenses CONTROL:v7 8 DESCRIPTION:Malware Defenses;"
+ ],
+ "x_panw_coa_bp_description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
+ "x_panw_coa_bp_rationale_statement": "Antivirus signatures produce low false positives. By blocking any detected malware through the specified decoders, the threat of malware propagation through the firewall is greatly reduced. It is recommended to mitigate malware found in pop3 and imap through a dedicated antivirus gateway. Due to the nature of the pop3 and imap protocols, the firewall is not able to block only a single email message containing malware. Instead, the entire session would be terminated, potentially affecting benign email messages.",
+ "x_panw_coa_bp_recommendation_number": "6.1",
+ "x_panw_coa_bp_references": [
+ "\u201cThreat Prevention Deployment Tech Note\u201d - https://live.paloaltonetworks.com/docs/DOC-3094:\u201cPAN-OS Administrator's Guide 9.0 (English) - Security Profiles\u201d - https://docs.paloaltonetworks.com/pan-os/9-0/pan-os-admin/policy/security-profiles.html"
+ ],
+ "x_panw_coa_bp_remediation_procedure": "Navigate to `Objects > Security Profiles > Antivirus.`\n\nSet antivirus profiles to have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, set the `imap` and `pop3` decoders to `alert` for both `Action` and `Wildfire Action`.",
+ "x_panw_coa_bp_scoring_status": "full",
+ "x_panw_coa_bp_section_number": "6",
+ "x_panw_coa_bp_status": "published",
+ "x_panw_coa_bp_title": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'"
+ }
]
- }],
- "campaign": [{
+ },
+ {
"objects": [
- {
- "created": "2018-08-03T21:03:51.484Z",
- "description": "In July 2018, Unit 42 analyzed a targeted attack using a novel file type against at least one government agency in the Middle East. It was carried out by a previously unpublished threat group we track as DarkHydrus. Based on our telemetry, we were able to uncover additional artifacts leading us to believe this adversary group has been in operation with their current playbook since early 2016. This attack diverged from previous attacks we observed from this group as it involved spear-phishing emails sent to targeted organizations with password protected RAR archive attachments that contained malicious Excel Web Query files (.iqy).",
- "first_seen": "2018-07-15T00:00:00.000Z",
- "id": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "last_seen": "2018-07-16T00:00:00.000Z",
- "modified": "2018-08-03T21:03:51.484Z",
- "name": "IQY Attachment E-mails",
- "type": "campaign"
- }
+ {
+ "created": "2018-08-03T21:03:51.484Z",
+ "description": "In July 2018, Unit 42 analyzed a targeted attack using a novel file type against at least one government agency in the Middle East. It was carried out by a previously unpublished threat group we track as DarkHydrus. Based on our telemetry, we were able to uncover additional artifacts leading us to believe this adversary group has been in operation with their current playbook since early 2016. This attack diverged from previous attacks we observed from this group as it involved spear-phishing emails sent to targeted organizations with password protected RAR archive attachments that contained malicious Excel Web Query files (.iqy).",
+ "first_seen": "2018-07-15T00:00:00.000Z",
+ "id": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
+ "last_seen": "2018-07-16T00:00:00.000Z",
+ "modified": "2018-08-03T21:03:51.484Z",
+ "name": "IQY Attachment E-mails",
+ "type": "campaign"
+ }
]
- }],
- "intrusion-set": [{
+ },
+ {
"objects": [
- {
- "created": "2018-08-03T20:30:50.665Z",
- "id": "intrusion-set--8e11eaa4-1964-4b73-85c1-fcfa29159f9b",
- "modified": "2018-08-03T20:30:50.665Z",
- "name": "OilRig",
- "type": "intrusion-set"
- }
+ {
+ "created": "2018-08-03T20:30:50.665Z",
+ "id": "intrusion-set--8e11eaa4-1964-4b73-85c1-fcfa29159f9b",
+ "modified": "2018-08-03T20:30:50.665Z",
+ "name": "OilRig",
+ "type": "intrusion-set"
+ }
]
- }],
- "tool": [{
+ },
+ {
"objects": [
- {
- "type": "tool",
- "spec_version": "2.1",
- "id": "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- "created": "2016-04-06T20:03:48.000Z",
- "modified": "2016-04-06T20:03:48.000Z",
- "tool_types": [ "remote-access"],
- "name": "VNC"
- }
+ {
+ "type": "tool",
+ "spec_version": "2.1",
+ "id": "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
+ "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
+ "created": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48.000Z",
+ "tool_types": [
+ "remote-access"
+ ],
+ "name": "VNC"
+ }
]
- }],
- "threat-actor": [{
+ },
+ {
"objects": [
{
- "type": "threat-actor",
- "spec_version": "2.1",
- "id": "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- "created": "2016-04-06T20:03:48.000Z",
- "modified": "2016-04-06T20:03:48.000Z",
- "threat_actor_types": [ "crime-syndicate"],
- "name": "Evil Org",
- "description": "The Evil Org threat actor group",
- "aliases": ["Syndicate 1", "Evil Syndicate 99"],
- "roles": ["director"],
- "goals": ["Steal bank money", "Steal credit cards"],
- "sophistication": "advanced",
- "resource_level": "team",
- "primary_motivation": "organizational-gain"
- }
+ "type": "threat-actor",
+ "spec_version": "2.1",
+ "id": "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
+ "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
+ "created": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48.000Z",
+ "threat_actor_types": [
+ "crime-syndicate"
+ ],
+ "name": "Evil Org",
+ "description": "The Evil Org threat actor group",
+ "aliases": [
+ "Syndicate 1",
+ "Evil Syndicate 99"
+ ],
+ "roles": [
+ "director"
+ ],
+ "goals": [
+ "Steal bank money",
+ "Steal credit cards"
+ ],
+ "sophistication": "advanced",
+ "resource_level": "team",
+ "primary_motivation": "organizational-gain"
+ }
]
- }],
- "infrastructure": [{
- "objects": [
- {
- "type":"infrastructure",
- "spec_version": "2.1",
- "id":"infrastructure--38c47d93-d984-4fd9-b87b-d69d0841628d",
- "created":"2016-05-07T11:22:30.000Z",
- "modified":"2016-05-07T11:22:30.000Z",
- "name":"Poison Ivy C2",
- "infrastructure_types": ["command-and-control"]
- }
- ]
- }],
- "relationship": [{
- "objects": [
- {
- "created": "2018-08-03T21:03:51.484Z",
- "id": "relationship--01a5a209-b94c-450b-b7f9-946497d91055",
- "modified": "2018-08-03T21:03:51.484Z",
- "relationship_type": "uses",
- "source_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "target_ref": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
- "type": "relationship"
- },
- {
- "created": "2018-08-03T20:31:03.780Z",
- "id": "relationship--abc475d9-199c-4623-9e9a-02adf340a415",
- "modified": "2018-08-22T12:36:32.248Z",
- "relationship_type": "indicates",
- "source_ref": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
- "target_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "type": "relationship"
- }
- ]
- }]
-}
\ No newline at end of file
+ },
+ {
+ "objects": [
+ {
+ "type": "infrastructure",
+ "spec_version": "2.1",
+ "id": "infrastructure--38c47d93-d984-4fd9-b87b-d69d0841628d",
+ "created": "2016-05-07T11:22:30.000Z",
+ "modified": "2016-05-07T11:22:30.000Z",
+ "name": "Poison Ivy C2",
+ "infrastructure_types": [
+ "command-and-control"
+ ]
+ }
+ ]
+ },
+ {
+ "objects": [
+ {
+ "created": "2018-08-03T21:03:51.484Z",
+ "id": "relationship--01a5a209-b94c-450b-b7f9-946497d91055",
+ "modified": "2018-08-03T21:03:51.484Z",
+ "relationship_type": "uses",
+ "source_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
+ "target_ref": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
+ "type": "relationship"
+ },
+ {
+ "created": "2018-08-03T20:31:03.780Z",
+ "id": "relationship--abc475d9-199c-4623-9e9a-02adf340a415",
+ "modified": "2018-08-22T12:36:32.248Z",
+ "relationship_type": "indicates",
+ "source_ref": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
+ "target_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
+ "type": "relationship"
+ }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v21.json b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v21.json
index c6364062233c..bd77afa8790c 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v21.json
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/objects_envelopes_v21.json
@@ -1,281 +1,326 @@
-{
- "indicator": {
+[
+ {
"objects": [
- {
- "created": "2018-04-23T17:01:01.248Z",
- "id": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
- "labels": ["malicious-activity"],
- "modified": "2018-04-23T17:01:01.248Z",
- "name": "windows-updates.com",
- "pattern": "[domain-name:value = 'windows-updates.com']",
- "type": "indicator",
- "valid_from": "2018-04-23T17:01:01.248Z"
+ {
+ "created": "2018-04-23T17:01:01.248Z",
+ "id": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
+ "labels": [
+ "malicious-activity"
+ ],
+ "modified": "2018-04-23T17:01:01.248Z",
+ "name": "windows-updates.com",
+ "pattern": "[domain-name:value = 'windows-updates.com']",
+ "type": "indicator",
+ "valid_from": "2018-04-23T17:01:01.248Z"
}
- ]
+ ],
+ "more": true
},
- "attack-pattern": {
+ {
"objects": [
+ {
+ "created": "2017-10-25T14:48:11.535Z",
+ "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
+ "description": "An adversary could call standard operating system APIs from a malicious application to gather contact list (i.e., address book) data, or with escalated privileges could directly access files containing contact list data.",
+ "external_references": [
+ {
+ "external_id": "T1432",
+ "source_name": "mitre-mobile-attack",
+ "url": "https://attack.mitre.org/techniques/T1432"
+ },
+ {
+ "external_id": "APP-13",
+ "source_name": "NIST Mobile Threat Catalogue",
+ "url": "https://pages.nist.gov/mobile-threat-catalogue/application-threats/APP-13.html"
+ }
+ ],
+ "id": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
+ "kill_chain_phases": [
{
- "created": "2017-10-25T14:48:11.535Z",
- "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
- "description": "An adversary could call standard operating system APIs from a malicious application to gather contact list (i.e., address book) data, or with escalated privileges could directly access files containing contact list data.",
- "external_references": [
- {
- "external_id": "T1432",
- "source_name": "mitre-mobile-attack",
- "url": "https://attack.mitre.org/techniques/T1432"
- },
- {
- "external_id": "APP-13",
- "source_name": "NIST Mobile Threat Catalogue",
- "url": "https://pages.nist.gov/mobile-threat-catalogue/application-threats/APP-13.html"
- }
- ],
- "id": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
- "kill_chain_phases": [
- {
- "kill_chain_name": "lockheed",
- "phase_name": "act-on-objectives"
- },
- {
- "kill_chain_name": "mitre-mobile-attack",
- "phase_name": "collection"
- }
- ],
- "modified": "2018-10-17T00:14:20.652Z",
- "name": "T1432: Access Contact List (Mobile)",
- "object_marking_refs": [
- "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"
- ],
- "type": "attack-pattern",
- "x_mitre_detection": "On both Android (6.0 and up) and iOS, the user can view which applications have permission to access contact list information through the device settings screen, and the user can choose to revoke the permissions.",
- "x_mitre_platforms": ["Android", "iOS"],
- "x_mitre_version": "1.0"
- },
+ "kill_chain_name": "lockheed",
+ "phase_name": "act-on-objectives"
+ },
+ {
+ "kill_chain_name": "mitre-mobile-attack",
+ "phase_name": "collection"
+ }
+ ],
+ "modified": "2018-10-17T00:14:20.652Z",
+ "name": "T1432: Access Contact List (Mobile)",
+ "object_marking_refs": [
+ "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"
+ ],
+ "type": "attack-pattern",
+ "x_mitre_detection": "On both Android (6.0 and up) and iOS, the user can view which applications have permission to access contact list information through the device settings screen, and the user can choose to revoke the permissions.",
+ "x_mitre_platforms": [
+ "Android",
+ "iOS"
+ ],
+ "x_mitre_version": "1.0"
+ },
{
- "created": "2020-02-05T14:17:46.686Z",
- "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
- "description": "Adversaries may perform software packing or virtual machine software protection to conceal their code. Software packing is a method of compressing or encrypting an executable. Packing an executable changes the file signature in an attempt to avoid signature-based detection. Most decompression techniques decompress the executable code in memory. Virtual machine software protection translates an executable's original code into a special format that only a special virtual machine can run. A virtual machine is then called to run this code.(Citation: ESET FinFisher Jan 2018) \n\nUtilities used to perform software packing are called packers. Example packers are MPRESS and UPX. A more comprehensive list of known packers is available, (Citation: Wikipedia Exe Compression) but adversaries may create their own packing techniques that do not leave the same artifacts as well-known packers to evade defenses. ",
- "external_references": [
- {
- "external_id": "T1027.002",
- "source_name": "mitre-attack",
- "url": "https://attack.mitre.org/techniques/T1027/002"
- },
- {
- "external_id": "CAPEC-570",
- "source_name": "capec",
- "url": "https://example.example.com/data/definitions/570.html"
- },
- {
- "description": "Kafka, F. (2018, January). ESET's Guide to Deobfuscating and Devirtualizing FinFisher. Retrieved August 12, 2019.",
- "source_name": "ESET FinFisher Jan 2018",
- "url": "https://www.welivesecurity.com/wp-content/uploads/2018/01/WP-FinFisher.pdf"
- },
- {
- "description": "Executable compression. (n.d.). Retrieved December 4, 2014.",
- "source_name": "Wikipedia Exe Compression",
- "url": "http://en.wikipedia.org/wiki/Executable_compression"
- }
- ],
- "id": "attack-pattern--deb98323-e13f-4b0c-8d94-175379069062",
- "kill_chain_phases": [
- {
- "kill_chain_name": "lockheed",
- "phase_name": "installation"
- },
- {
- "kill_chain_name": "mitre-attack",
- "phase_name": "defense-evasion"
- }
- ],
- "modified": "2020-02-05T20:05:41.548Z",
- "name": "T1027.002: Software Packing",
- "object_marking_refs": ["marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"],
- "type": "attack-pattern",
- "x_mitre_contributors": ["Filip Kafka, ESET"],
- "x_mitre_data_sources": ["File: File Content", "File: File Metadata"],
- "x_mitre_defense_bypassed": [
- "Anti-virus",
- "Heuristic detection",
- "Signature-based detection"],
- "x_mitre_detection": "Use file scanning to look for known software packers or artifacts of packing techniques. Packing is not a definitive indicator of malicious activity, because legitimate software may use packing techniques to reduce binary size or to protect proprietary code.",
- "x_mitre_is_subtechnique": true,
- "x_mitre_platforms": ["macOS", "Windows"],
- "x_mitre_version": "1.0",
- "x_panw_parent_technique": "Obfuscated Files or Information",
- "x_panw_parent_technique_subtechnique": "Obfuscated Files or Information: Software Packing"
- },
+ "created": "2020-02-05T14:17:46.686Z",
+ "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5",
+ "description": "Adversaries may perform software packing or virtual machine software protection to conceal their code. Software packing is a method of compressing or encrypting an executable. Packing an executable changes the file signature in an attempt to avoid signature-based detection. Most decompression techniques decompress the executable code in memory. Virtual machine software protection translates an executable's original code into a special format that only a special virtual machine can run. A virtual machine is then called to run this code.(Citation: ESET FinFisher Jan 2018) \n\nUtilities used to perform software packing are called packers. Example packers are MPRESS and UPX. A more comprehensive list of known packers is available, (Citation: Wikipedia Exe Compression) but adversaries may create their own packing techniques that do not leave the same artifacts as well-known packers to evade defenses. ",
+ "external_references": [
+ {
+ "external_id": "T1027.002",
+ "source_name": "mitre-attack",
+ "url": "https://attack.mitre.org/techniques/T1027/002"
+ },
+ {
+ "external_id": "CAPEC-570",
+ "source_name": "capec",
+ "url": "https://example.example.com/data/definitions/570.html"
+ },
+ {
+ "description": "Kafka, F. (2018, January). ESET's Guide to Deobfuscating and Devirtualizing FinFisher. Retrieved August 12, 2019.",
+ "source_name": "ESET FinFisher Jan 2018",
+ "url": "https://www.welivesecurity.com/wp-content/uploads/2018/01/WP-FinFisher.pdf"
+ },
+ {
+ "description": "Executable compression. (n.d.). Retrieved December 4, 2014.",
+ "source_name": "Wikipedia Exe Compression",
+ "url": "http://en.wikipedia.org/wiki/Executable_compression"
+ }
+ ],
+ "id": "attack-pattern--deb98323-e13f-4b0c-8d94-175379069062",
+ "kill_chain_phases": [
+ {
+ "kill_chain_name": "lockheed",
+ "phase_name": "installation"
+ },
+ {
+ "kill_chain_name": "mitre-attack",
+ "phase_name": "defense-evasion"
+ }
+ ],
+ "modified": "2020-02-05T20:05:41.548Z",
+ "name": "T1027.002: Software Packing",
+ "object_marking_refs": [
+ "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168"
+ ],
+ "type": "attack-pattern",
+ "x_mitre_contributors": [
+ "Filip Kafka, ESET"
+ ],
+ "x_mitre_data_sources": [
+ "File: File Content",
+ "File: File Metadata"
+ ],
+ "x_mitre_defense_bypassed": [
+ "Anti-virus",
+ "Heuristic detection",
+ "Signature-based detection"
+ ],
+ "x_mitre_detection": "Use file scanning to look for known software packers or artifacts of packing techniques. Packing is not a definitive indicator of malicious activity, because legitimate software may use packing techniques to reduce binary size or to protect proprietary code.",
+ "x_mitre_is_subtechnique": true,
+ "x_mitre_platforms": [
+ "macOS",
+ "Windows"
+ ],
+ "x_mitre_version": "1.0",
+ "x_panw_parent_technique": "Obfuscated Files or Information",
+ "x_panw_parent_technique_subtechnique": "Obfuscated Files or Information: Software Packing"
+ },
{
- "created": "2021-10-21T16:56:05.840605Z",
- "created_by_ref": "identity--749249c0-f7c7-5428-a4ad-ea5e1627a221",
- "description": "This schema adds TIM data to the object",
- "extension_types": [
- "property-extension"
- ],
- "id": "extension-definition--fc572b8c-bbe3-444b-b40b-06d3b890cd79",
- "modified": "2021-10-21T16:56:17.747194Z",
- "name": "Cortex XSOAR TIM Attack Pattern",
- "schema": "https://github.com/demisto/content/tree/master/Packs/TAXIIServer/doc_files/XSOAR_indicator_schema.json",
- "spec_version": "2.0",
- "type": "extension-definition",
- "version": "1.0"
- }
- ]
+ "created": "2021-10-21T16:56:05.840605Z",
+ "created_by_ref": "identity--749249c0-f7c7-5428-a4ad-ea5e1627a221",
+ "description": "This schema adds TIM data to the object",
+ "extension_types": [
+ "property-extension"
+ ],
+ "id": "extension-definition--fc572b8c-bbe3-444b-b40b-06d3b890cd79",
+ "modified": "2021-10-21T16:56:17.747194Z",
+ "name": "Cortex XSOAR TIM Attack Pattern",
+ "schema": "https://github.com/demisto/content/tree/master/Packs/TAXIIServer/doc_files/XSOAR_indicator_schema.json",
+ "spec_version": "2.0",
+ "type": "extension-definition",
+ "version": "1.0"
+ }
+ ],
+ "more": true
},
- "malware": {
+ {
"objects": [
- {
- "created": "2019-10-10T14:43:27.256Z",
- "id": "malware--481d22d7-6dd8-4e37-a543-dd21cc1707c4",
- "labels": ["backdoor"],
- "modified": "2019-10-10T20:25:46.596Z",
- "name": "Freenki",
- "type": "malware"
- }
- ]
+ {
+ "created": "2019-10-10T14:43:27.256Z",
+ "id": "malware--481d22d7-6dd8-4e37-a543-dd21cc1707c4",
+ "labels": [
+ "backdoor"
+ ],
+ "modified": "2019-10-10T20:25:46.596Z",
+ "name": "Freenki",
+ "type": "malware"
+ }
+ ],
+ "more": true
},
- "report":{
+ {
"objects": [
- {
- "created": "2018-08-03T20:31:05.060Z",
- "description": "Sofacy (also known as Fancy Bear, APT 28, STRONTIUM, Pawn Storm) is a highly active actor with a Russian nexus. They have been active since the mid 2000s, and have been responsible for targeted intrusion campaigns against various industry vertical such as but not limited to Aerospace, Defense, Energy, Government and Media. Extensive observation and research of Sofacy's activities over time indicated a profile closely mirroring the strategic interests of the Russian government. More recently, this group has been attributed to the GRU, Russia's premier military intelligence service as reported by the US intelligence community within several declassified public documents.\n\nSeveral high profile intrusions have been publicly linked to the Sofacy group, such as the German Bundestag, France's TV5Monde TV station, the Democratic National Committee, the World Anti-Doping Agency, and the Ukrainian military.",
- "id": "report--708d589b-8d99-48fd-bbb6-2d47648f807f",
- "labels": ["intrusion-set"],
- "modified": "2020-07-30T16:55:21.569Z",
- "name": "Sofacy",
- "object_refs": [
- "intrusion-set--75ac5bcc-3915-4815-b9a1-bf87277fc343",
- "report--2791a5e0-d65a-43c0-936e-c15a6b7ba9d1",
- "report--40aaefc2-3fe9-423b-a6eb-55abf11639b7",
- "report--2f5734bf-4127-4997-9288-e9c5530aa737",
- "report--ffe94621-b007-4115-ad49-fb7c8a26b66a"
- ],
- "published": "2020-07-30T16:55:21.569Z",
- "type": "report"
- }
- ]
+ {
+ "created": "2018-08-03T20:31:05.060Z",
+ "description": "Sofacy (also known as Fancy Bear, APT 28, STRONTIUM, Pawn Storm) is a highly active actor with a Russian nexus. They have been active since the mid 2000s, and have been responsible for targeted intrusion campaigns against various industry vertical such as but not limited to Aerospace, Defense, Energy, Government and Media. Extensive observation and research of Sofacy's activities over time indicated a profile closely mirroring the strategic interests of the Russian government. More recently, this group has been attributed to the GRU, Russia's premier military intelligence service as reported by the US intelligence community within several declassified public documents.\n\nSeveral high profile intrusions have been publicly linked to the Sofacy group, such as the German Bundestag, France's TV5Monde TV station, the Democratic National Committee, the World Anti-Doping Agency, and the Ukrainian military.",
+ "id": "report--708d589b-8d99-48fd-bbb6-2d47648f807f",
+ "labels": [
+ "intrusion-set"
+ ],
+ "modified": "2020-07-30T16:55:21.569Z",
+ "name": "Sofacy",
+ "object_refs": [
+ "intrusion-set--75ac5bcc-3915-4815-b9a1-bf87277fc343",
+ "report--2791a5e0-d65a-43c0-936e-c15a6b7ba9d1",
+ "report--40aaefc2-3fe9-423b-a6eb-55abf11639b7",
+ "report--2f5734bf-4127-4997-9288-e9c5530aa737",
+ "report--ffe94621-b007-4115-ad49-fb7c8a26b66a"
+ ],
+ "published": "2020-07-30T16:55:21.569Z",
+ "type": "report"
+ }
+ ],
+ "more": true
},
- "course-of-action": {
+ {
"objects": [
- {
- "created": "2020-06-23T19:50:31.722Z",
- "description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
- "id": "course-of-action--645c6c45-116e-4265-98d2-e30f56325bc6",
- "modified": "2020-06-26T13:02:55.803Z",
- "name": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'",
- "type": "course-of-action",
- "x_panw_coa_bp_audit_procedure": "Navigate to `Objects > Security Profiles > Antivirus`\n\nVerify that antivirus profiles have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, verify that the `imap` and `pop3` decoders are set to `alert` for both `Action` and `Wildfire Action`.",
- "x_panw_coa_bp_cis_controls": [
- "TITLE:Deploy Network-based Anti-malware Tools CONTROL:v6 8.5 DESCRIPTION:Use network-based anti-malware tools to identify executables in all network traffic and use techniques other than signature-based detection to identify and filter out malicious content before it arrives at the endpoint.;TITLE:Malware Defenses CONTROL:v7 8 DESCRIPTION:Malware Defenses;"
- ],
- "x_panw_coa_bp_description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
- "x_panw_coa_bp_rationale_statement": "Antivirus signatures produce low false positives. By blocking any detected malware through the specified decoders, the threat of malware propagation through the firewall is greatly reduced. It is recommended to mitigate malware found in pop3 and imap through a dedicated antivirus gateway. Due to the nature of the pop3 and imap protocols, the firewall is not able to block only a single email message containing malware. Instead, the entire session would be terminated, potentially affecting benign email messages.",
- "x_panw_coa_bp_recommendation_number": "6.1",
- "x_panw_coa_bp_references": [
- "\u201cThreat Prevention Deployment Tech Note\u201d - https://live.paloaltonetworks.com/docs/DOC-3094:\u201cPAN-OS Administrator's Guide 9.0 (English) - Security Profiles\u201d - https://docs.paloaltonetworks.com/pan-os/9-0/pan-os-admin/policy/security-profiles.html"
- ],
- "x_panw_coa_bp_remediation_procedure": "Navigate to `Objects > Security Profiles > Antivirus.`\n\nSet antivirus profiles to have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, set the `imap` and `pop3` decoders to `alert` for both `Action` and `Wildfire Action`.",
- "x_panw_coa_bp_scoring_status": "full",
- "x_panw_coa_bp_section_number": "6",
- "x_panw_coa_bp_status": "published",
- "x_panw_coa_bp_title": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'"
- }
- ]
+ {
+ "created": "2020-06-23T19:50:31.722Z",
+ "description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
+ "id": "course-of-action--645c6c45-116e-4265-98d2-e30f56325bc6",
+ "modified": "2020-06-26T13:02:55.803Z",
+ "name": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'",
+ "type": "course-of-action",
+ "x_panw_coa_bp_audit_procedure": "Navigate to `Objects > Security Profiles > Antivirus`\n\nVerify that antivirus profiles have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, verify that the `imap` and `pop3` decoders are set to `alert` for both `Action` and `Wildfire Action`.",
+ "x_panw_coa_bp_cis_controls": [
+ "TITLE:Deploy Network-based Anti-malware Tools CONTROL:v6 8.5 DESCRIPTION:Use network-based anti-malware tools to identify executables in all network traffic and use techniques other than signature-based detection to identify and filter out malicious content before it arrives at the endpoint.;TITLE:Malware Defenses CONTROL:v7 8 DESCRIPTION:Malware Defenses;"
+ ],
+ "x_panw_coa_bp_description": "Configure antivirus profiles to a value of 'block' for all decoders except imap and pop3 under both Action and WildFire Action. If required by the organization's email implementation, configure imap and pop3 decoders to 'alert' under both Action and WildFire Action.",
+ "x_panw_coa_bp_rationale_statement": "Antivirus signatures produce low false positives. By blocking any detected malware through the specified decoders, the threat of malware propagation through the firewall is greatly reduced. It is recommended to mitigate malware found in pop3 and imap through a dedicated antivirus gateway. Due to the nature of the pop3 and imap protocols, the firewall is not able to block only a single email message containing malware. Instead, the entire session would be terminated, potentially affecting benign email messages.",
+ "x_panw_coa_bp_recommendation_number": "6.1",
+ "x_panw_coa_bp_references": [
+ "\u201cThreat Prevention Deployment Tech Note\u201d - https://live.paloaltonetworks.com/docs/DOC-3094:\u201cPAN-OS Administrator's Guide 9.0 (English) - Security Profiles\u201d - https://docs.paloaltonetworks.com/pan-os/9-0/pan-os-admin/policy/security-profiles.html"
+ ],
+ "x_panw_coa_bp_remediation_procedure": "Navigate to `Objects > Security Profiles > Antivirus.`\n\nSet antivirus profiles to have all decoders set to `block` for both `Action` and `Wildfire Action`. If `imap` and `pop3` are required in the organization, set the `imap` and `pop3` decoders to `alert` for both `Action` and `Wildfire Action`.",
+ "x_panw_coa_bp_scoring_status": "full",
+ "x_panw_coa_bp_section_number": "6",
+ "x_panw_coa_bp_status": "published",
+ "x_panw_coa_bp_title": "Ensure that antivirus profiles are set to block on all decoders except 'imap' and 'pop3'"
+ }
+ ],
+ "more": true
},
- "campaign": {
+ {
"objects": [
- {
- "created": "2018-08-03T21:03:51.484Z",
- "description": "In July 2018, Unit 42 analyzed a targeted attack using a novel file type against at least one government agency in the Middle East. It was carried out by a previously unpublished threat group we track as DarkHydrus. Based on our telemetry, we were able to uncover additional artifacts leading us to believe this adversary group has been in operation with their current playbook since early 2016. This attack diverged from previous attacks we observed from this group as it involved spear-phishing emails sent to targeted organizations with password protected RAR archive attachments that contained malicious Excel Web Query files (.iqy).",
- "first_seen": "2018-07-15T00:00:00.000Z",
- "id": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "last_seen": "2018-07-16T00:00:00.000Z",
- "modified": "2018-08-03T21:03:51.484Z",
- "name": "IQY Attachment E-mails",
- "type": "campaign"
- }
- ]
+ {
+ "created": "2018-08-03T21:03:51.484Z",
+ "description": "In July 2018, Unit 42 analyzed a targeted attack using a novel file type against at least one government agency in the Middle East. It was carried out by a previously unpublished threat group we track as DarkHydrus. Based on our telemetry, we were able to uncover additional artifacts leading us to believe this adversary group has been in operation with their current playbook since early 2016. This attack diverged from previous attacks we observed from this group as it involved spear-phishing emails sent to targeted organizations with password protected RAR archive attachments that contained malicious Excel Web Query files (.iqy).",
+ "first_seen": "2018-07-15T00:00:00.000Z",
+ "id": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
+ "last_seen": "2018-07-16T00:00:00.000Z",
+ "modified": "2018-08-03T21:03:51.484Z",
+ "name": "IQY Attachment E-mails",
+ "type": "campaign"
+ }
+ ],
+ "more": true
},
- "intrusion-set": {
+ {
"objects": [
- {
- "created": "2018-08-03T20:30:50.665Z",
- "id": "intrusion-set--8e11eaa4-1964-4b73-85c1-fcfa29159f9b",
- "modified": "2018-08-03T20:30:50.665Z",
- "name": "OilRig",
- "type": "intrusion-set"
- }
- ]
+ {
+ "created": "2018-08-03T20:30:50.665Z",
+ "id": "intrusion-set--8e11eaa4-1964-4b73-85c1-fcfa29159f9b",
+ "modified": "2018-08-03T20:30:50.665Z",
+ "name": "OilRig",
+ "type": "intrusion-set"
+ }
+ ],
+ "more": true
},
- "tool": {
+ {
"objects": [
- {
- "type": "tool",
- "spec_version": "2.1",
- "id": "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- "created": "2016-04-06T20:03:48.000Z",
- "modified": "2016-04-06T20:03:48.000Z",
- "tool_types": [ "remote-access"],
- "name": "VNC"
- }
- ]
+ {
+ "type": "tool",
+ "spec_version": "2.1",
+ "id": "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
+ "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
+ "created": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48.000Z",
+ "tool_types": [
+ "remote-access"
+ ],
+ "name": "VNC"
+ }
+ ],
+ "more": true
},
- "threat-actor": {
+ {
"objects": [
{
- "type": "threat-actor",
- "spec_version": "2.1",
- "id": "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- "created": "2016-04-06T20:03:48.000Z",
- "modified": "2016-04-06T20:03:48.000Z",
- "threat_actor_types": [ "crime-syndicate"],
- "name": "Evil Org",
- "description": "The Evil Org threat actor group",
- "aliases": ["Syndicate 1", "Evil Syndicate 99"],
- "roles": ["director"],
- "goals": ["Steal bank money", "Steal credit cards"],
- "sophistication": "advanced",
- "resource_level": "team",
- "primary_motivation": "organizational-gain"
- }
- ]
+ "type": "threat-actor",
+ "spec_version": "2.1",
+ "id": "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
+ "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
+ "created": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48.000Z",
+ "threat_actor_types": [
+ "crime-syndicate"
+ ],
+ "name": "Evil Org",
+ "description": "The Evil Org threat actor group",
+ "aliases": [
+ "Syndicate 1",
+ "Evil Syndicate 99"
+ ],
+ "roles": [
+ "director"
+ ],
+ "goals": [
+ "Steal bank money",
+ "Steal credit cards"
+ ],
+ "sophistication": "advanced",
+ "resource_level": "team",
+ "primary_motivation": "organizational-gain"
+ }
+ ],
+ "more": true
},
- "infrastructure": {
- "objects": [
- {
- "type":"infrastructure",
- "spec_version": "2.1",
- "id":"infrastructure--38c47d93-d984-4fd9-b87b-d69d0841628d",
- "created":"2016-05-07T11:22:30.000Z",
- "modified":"2016-05-07T11:22:30.000Z",
- "name":"Poison Ivy C2",
- "infrastructure_types": ["command-and-control"]
- }
- ]
+ {
+ "objects": [
+ {
+ "type": "infrastructure",
+ "spec_version": "2.1",
+ "id": "infrastructure--38c47d93-d984-4fd9-b87b-d69d0841628d",
+ "created": "2016-05-07T11:22:30.000Z",
+ "modified": "2016-05-07T11:22:30.000Z",
+ "name": "Poison Ivy C2",
+ "infrastructure_types": [
+ "command-and-control"
+ ]
+ }
+ ],
+ "more": true
},
- "relationship": {
- "objects": [
- {
- "created": "2018-08-03T21:03:51.484Z",
- "id": "relationship--01a5a209-b94c-450b-b7f9-946497d91055",
- "modified": "2018-08-03T21:03:51.484Z",
- "relationship_type": "uses",
- "source_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "target_ref": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
- "type": "relationship"
- },
- {
- "created": "2018-08-03T20:31:03.780Z",
- "id": "relationship--abc475d9-199c-4623-9e9a-02adf340a415",
- "modified": "2018-08-22T12:36:32.248Z",
- "relationship_type": "indicates",
- "source_ref": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
- "target_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
- "type": "relationship"
- }
- ]
+ {
+ "objects": [
+ {
+ "created": "2018-08-03T21:03:51.484Z",
+ "id": "relationship--01a5a209-b94c-450b-b7f9-946497d91055",
+ "modified": "2018-08-03T21:03:51.484Z",
+ "relationship_type": "uses",
+ "source_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
+ "target_ref": "attack-pattern--4e6620ac-c30c-4f6d-918e-fa20cae7c1ce",
+ "type": "relationship"
+ },
+ {
+ "created": "2018-08-03T20:31:03.780Z",
+ "id": "relationship--abc475d9-199c-4623-9e9a-02adf340a415",
+ "modified": "2018-08-22T12:36:32.248Z",
+ "relationship_type": "indicates",
+ "source_ref": "indicator--545928d9-bfe8-4320-bb98-751f38139892",
+ "target_ref": "campaign--6320584e-3ef0-4a72-aaf8-0a49fa1d477c",
+ "type": "relationship"
+ }
+ ],
+ "more": false
}
-}
\ No newline at end of file
+]
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_17-19.json b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_17-19.json
index 7bf5797fe341..77a5a68ab1e5 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_17-19.json
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_17-19.json
@@ -1,433 +1,435 @@
-{
- "objects": [
- {
- "id": "indicator--86fee2b1-807d-423d-9d0e-1117bab576ce",
- "pattern": "[ipv4-addr:value = '195.123.227.186']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:33.126Z",
- "modified": "2020-06-10T01:14:33.126Z",
- "name": "bot_ip: 195.123.227.186",
- "description": "TS ID: 55694549840; iType: bot_ip; Date First: 2020-06-05T08:42:19.170Z; State: active; Org: Layer6 Networks; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.779852Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--891207b3-bff4-4bc2-8c12-7fd2321c9f38",
- "pattern": "[ipv4-addr:value = '134.209.37.102']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:52.501Z",
- "modified": "2020-06-10T01:14:52.501Z",
- "name": "bot_ip: 134.209.37.102",
- "description": "TS ID: 55682983162; iType: bot_ip; Date First: 2020-06-02T07:26:06.274Z; State: active; Org: Covidien Lp; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.722754Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--8c726d5f-cb6b-45dc-8c2b-2be8596043cf",
- "pattern": "[ipv4-addr:value = '117.141.112.155']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:54.684Z",
- "modified": "2020-06-10T01:14:54.684Z",
- "name": "bot_ip: 117.141.112.155",
- "description": "TS ID: 55694549819; iType: bot_ip; Date First: 2020-06-05T08:42:17.907Z; State: active; Org: China Mobile Guangdong; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.775627Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--8e19a19c-cd66-4278-8bfb-c05c64977d12",
- "pattern": "[ipv4-addr:value = '23.129.64.217']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:19.858Z",
- "modified": "2020-06-10T01:14:19.858Z",
- "name": "bot_ip: 23.129.64.217",
- "description": "TS ID: 55682983514; iType: bot_ip; Date First: 2020-06-02T07:26:46.206Z; State: active; Org: Emerald Onion; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.731573Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--90a4f95d-1e35-4f47-b303-5651c93457f4",
- "pattern": "[ipv4-addr:value = '45.142.213.11']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:10.753Z",
- "modified": "2020-06-10T01:14:10.753Z",
- "name": "bot_ip: 45.142.213.11",
- "description": "TS ID: 55694549856; iType: bot_ip; Date First: 2020-06-05T08:45:37.178Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.808281Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--94f109aa-3ef2-4a8c-a847-dfb4c64f4f29",
- "pattern": "[ipv4-addr:value = '157.245.250.190']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:15.950Z",
- "modified": "2020-06-10T01:14:15.950Z",
- "name": "bot_ip: 157.245.250.190",
- "description": "TS ID: 55697907923; iType: bot_ip; Date First: 2020-06-06T09:32:01.051Z; State: active; Org: Datalogic ADC; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.818576Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--96d1737a-5565-49ac-8a91-52c2c7b38903",
- "pattern": "[ipv4-addr:value = '144.91.106.47']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:15:00.764Z",
- "modified": "2020-06-10T01:15:00.764Z",
- "name": "bot_ip: 144.91.106.47",
- "description": "TS ID: 55694549829; iType: bot_ip; Date First: 2020-06-05T08:44:22.790Z; State: active; Org: Mills College; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.791474Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9c98d81b-b4a5-4b8d-8fd6-4b9beec0f1be",
- "pattern": "[ipv4-addr:value = '141.98.81.208']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:39.995Z",
- "modified": "2020-06-10T01:14:39.995Z",
- "name": "bot_ip: 141.98.81.208",
- "description": "TS ID: 55691320102; iType: bot_ip; Date First: 2020-06-04T10:33:13.398Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.766866Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9cbf82af-8a54-478a-af76-b88a73a33d37",
- "pattern": "[ipv4-addr:value = '51.81.53.159']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:15:01.999Z",
- "modified": "2020-06-10T01:15:01.999Z",
- "name": "bot_ip: 51.81.53.159",
- "description": "TS ID: 55694549861; iType: bot_ip; Date First: 2020-06-05T08:42:44.478Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.781286Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9ee9aecd-89e6-4dd6-9a24-4c610b33ebbb",
- "pattern": "[ipv4-addr:value = '104.168.173.252']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:58.530Z",
- "modified": "2020-06-10T01:14:58.530Z",
- "name": "bot_ip: 104.168.173.252",
- "description": "TS ID: 55691320097; iType: bot_ip; Date First: 2020-06-04T10:32:46.612Z; State: active; Org: Hostwinds LLC.; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.753603Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9febf107-dd82-4727-bcb7-199291ec474c",
- "pattern": "[ipv4-addr:value = '173.212.206.89']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:34.822Z",
- "modified": "2020-06-10T01:14:34.822Z",
- "name": "bot_ip: 173.212.206.89",
- "description": "TS ID: 55697907953; iType: bot_ip; Date First: 2020-06-06T09:31:54.190Z; State: active; Org: Contabo GmbH; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.814015Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a25904c8-0270-4d57-add5-64f5ed1485b5",
- "pattern": "[ipv4-addr:value = '67.207.94.201']",
- "confidence": 15,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:29.751Z",
- "modified": "2020-06-10T01:14:29.751Z",
- "name": "bot_ip: 67.207.94.201",
- "description": "TS ID: 55697908164; iType: bot_ip; Date First: 2020-06-06T09:32:30.450Z; State: active; Org: Digital Ocean; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.837493Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a5a1408d-ff8b-41b2-8c57-6678aa0c8688",
- "pattern": "[ipv4-addr:value = '89.163.242.76']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:35.839Z",
- "modified": "2020-06-10T01:14:35.839Z",
- "name": "bot_ip: 89.163.242.76",
- "description": "TS ID: 55694549874; iType: bot_ip; Date First: 2020-06-05T08:45:20.346Z; State: active; Org: myLoc managed IT AG; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.800264Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a8cc5b11-3bbb-4fb2-970c-31a6f58e1374",
- "pattern": "[ipv4-addr:value = '51.75.71.205']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:41.919Z",
- "modified": "2020-06-10T01:14:41.919Z",
- "name": "bot_ip: 51.75.71.205",
- "description": "TS ID: 55686993979; iType: bot_ip; Date First: 2020-06-03T07:29:11.148Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.73608Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a8ee1e5f-8c08-4135-878c-4973179cbac5",
- "pattern": "[ipv4-addr:value = '140.224.183.58']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:11.651Z",
- "modified": "2020-06-10T01:14:11.651Z",
- "name": "bot_ip: 140.224.183.58",
- "description": "TS ID: 55694549823; iType: bot_ip; Date First: 2020-06-05T08:45:24.055Z; State: active; Org: China Telecom FUJIAN NETWORK; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.801661Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--aa4ec99f-3c54-4e60-ab47-83ff78d76570",
- "pattern": "[ipv4-addr:value = '161.35.22.86']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:49.620Z",
- "modified": "2020-06-10T01:14:49.620Z",
- "name": "bot_ip: 161.35.22.86",
- "description": "TS ID: 55697907934; iType: bot_ip; Date First: 2020-06-06T09:32:22.615Z; State: active; Org: Racal-Redac; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.831549Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--ac4a9ca5-9f6e-4072-b568-46dbb03a3ace",
- "pattern": "[ipv4-addr:value = '45.143.220.246']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:15:10.905Z",
- "modified": "2020-06-10T01:15:10.905Z",
- "name": "bot_ip: 45.143.220.246",
- "description": "TS ID: 55691320117; iType: bot_ip; Date First: 2020-06-04T10:32:46.584Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.752185Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
- "created": "2017-01-20T00:00:00.000Z",
- "definition_type": "tlp",
- "definition": {
- "tlp": "amber"
+[
+ {
+ "objects": [
+ {
+ "id": "indicator--86fee2b1-807d-423d-9d0e-1117bab576ce",
+ "pattern": "[ipv4-addr:value = '195.123.227.186']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:33.126Z",
+ "modified": "2020-06-10T01:14:33.126Z",
+ "name": "bot_ip: 195.123.227.186",
+ "description": "TS ID: 55694549840; iType: bot_ip; Date First: 2020-06-05T08:42:19.170Z; State: active; Org: Layer6 Networks; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.779852Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
},
- "type": "marking-definition",
- "spec_version": "2.1"
- },
- {
- "id": "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
- "created": "2017-01-20T00:00:00.000Z",
- "definition_type": "tlp",
- "definition": {
- "tlp": "green"
+ {
+ "id": "indicator--891207b3-bff4-4bc2-8c12-7fd2321c9f38",
+ "pattern": "[ipv4-addr:value = '134.209.37.102']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:52.501Z",
+ "modified": "2020-06-10T01:14:52.501Z",
+ "name": "bot_ip: 134.209.37.102",
+ "description": "TS ID: 55682983162; iType: bot_ip; Date First: 2020-06-02T07:26:06.274Z; State: active; Org: Covidien Lp; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.722754Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
},
- "type": "marking-definition",
- "spec_version": "2.1"
- }
- ],
- "more": false
-}
+ {
+ "id": "indicator--8c726d5f-cb6b-45dc-8c2b-2be8596043cf",
+ "pattern": "[ipv4-addr:value = '117.141.112.155']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:54.684Z",
+ "modified": "2020-06-10T01:14:54.684Z",
+ "name": "bot_ip: 117.141.112.155",
+ "description": "TS ID: 55694549819; iType: bot_ip; Date First: 2020-06-05T08:42:17.907Z; State: active; Org: China Mobile Guangdong; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.775627Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--8e19a19c-cd66-4278-8bfb-c05c64977d12",
+ "pattern": "[ipv4-addr:value = '23.129.64.217']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:19.858Z",
+ "modified": "2020-06-10T01:14:19.858Z",
+ "name": "bot_ip: 23.129.64.217",
+ "description": "TS ID: 55682983514; iType: bot_ip; Date First: 2020-06-02T07:26:46.206Z; State: active; Org: Emerald Onion; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.731573Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--90a4f95d-1e35-4f47-b303-5651c93457f4",
+ "pattern": "[ipv4-addr:value = '45.142.213.11']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:10.753Z",
+ "modified": "2020-06-10T01:14:10.753Z",
+ "name": "bot_ip: 45.142.213.11",
+ "description": "TS ID: 55694549856; iType: bot_ip; Date First: 2020-06-05T08:45:37.178Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.808281Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--94f109aa-3ef2-4a8c-a847-dfb4c64f4f29",
+ "pattern": "[ipv4-addr:value = '157.245.250.190']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:15.950Z",
+ "modified": "2020-06-10T01:14:15.950Z",
+ "name": "bot_ip: 157.245.250.190",
+ "description": "TS ID: 55697907923; iType: bot_ip; Date First: 2020-06-06T09:32:01.051Z; State: active; Org: Datalogic ADC; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.818576Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--96d1737a-5565-49ac-8a91-52c2c7b38903",
+ "pattern": "[ipv4-addr:value = '144.91.106.47']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:15:00.764Z",
+ "modified": "2020-06-10T01:15:00.764Z",
+ "name": "bot_ip: 144.91.106.47",
+ "description": "TS ID: 55694549829; iType: bot_ip; Date First: 2020-06-05T08:44:22.790Z; State: active; Org: Mills College; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.791474Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9c98d81b-b4a5-4b8d-8fd6-4b9beec0f1be",
+ "pattern": "[ipv4-addr:value = '141.98.81.208']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:39.995Z",
+ "modified": "2020-06-10T01:14:39.995Z",
+ "name": "bot_ip: 141.98.81.208",
+ "description": "TS ID: 55691320102; iType: bot_ip; Date First: 2020-06-04T10:33:13.398Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.766866Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9cbf82af-8a54-478a-af76-b88a73a33d37",
+ "pattern": "[ipv4-addr:value = '51.81.53.159']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:15:01.999Z",
+ "modified": "2020-06-10T01:15:01.999Z",
+ "name": "bot_ip: 51.81.53.159",
+ "description": "TS ID: 55694549861; iType: bot_ip; Date First: 2020-06-05T08:42:44.478Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.781286Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9ee9aecd-89e6-4dd6-9a24-4c610b33ebbb",
+ "pattern": "[ipv4-addr:value = '104.168.173.252']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:58.530Z",
+ "modified": "2020-06-10T01:14:58.530Z",
+ "name": "bot_ip: 104.168.173.252",
+ "description": "TS ID: 55691320097; iType: bot_ip; Date First: 2020-06-04T10:32:46.612Z; State: active; Org: Hostwinds LLC.; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.753603Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9febf107-dd82-4727-bcb7-199291ec474c",
+ "pattern": "[ipv4-addr:value = '173.212.206.89']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:34.822Z",
+ "modified": "2020-06-10T01:14:34.822Z",
+ "name": "bot_ip: 173.212.206.89",
+ "description": "TS ID: 55697907953; iType: bot_ip; Date First: 2020-06-06T09:31:54.190Z; State: active; Org: Contabo GmbH; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.814015Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a25904c8-0270-4d57-add5-64f5ed1485b5",
+ "pattern": "[ipv4-addr:value = '67.207.94.201']",
+ "confidence": 15,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:29.751Z",
+ "modified": "2020-06-10T01:14:29.751Z",
+ "name": "bot_ip: 67.207.94.201",
+ "description": "TS ID: 55697908164; iType: bot_ip; Date First: 2020-06-06T09:32:30.450Z; State: active; Org: Digital Ocean; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.837493Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a5a1408d-ff8b-41b2-8c57-6678aa0c8688",
+ "pattern": "[ipv4-addr:value = '89.163.242.76']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:35.839Z",
+ "modified": "2020-06-10T01:14:35.839Z",
+ "name": "bot_ip: 89.163.242.76",
+ "description": "TS ID: 55694549874; iType: bot_ip; Date First: 2020-06-05T08:45:20.346Z; State: active; Org: myLoc managed IT AG; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.800264Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a8cc5b11-3bbb-4fb2-970c-31a6f58e1374",
+ "pattern": "[ipv4-addr:value = '51.75.71.205']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:41.919Z",
+ "modified": "2020-06-10T01:14:41.919Z",
+ "name": "bot_ip: 51.75.71.205",
+ "description": "TS ID: 55686993979; iType: bot_ip; Date First: 2020-06-03T07:29:11.148Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.73608Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a8ee1e5f-8c08-4135-878c-4973179cbac5",
+ "pattern": "[ipv4-addr:value = '140.224.183.58']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:11.651Z",
+ "modified": "2020-06-10T01:14:11.651Z",
+ "name": "bot_ip: 140.224.183.58",
+ "description": "TS ID: 55694549823; iType: bot_ip; Date First: 2020-06-05T08:45:24.055Z; State: active; Org: China Telecom FUJIAN NETWORK; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.801661Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--aa4ec99f-3c54-4e60-ab47-83ff78d76570",
+ "pattern": "[ipv4-addr:value = '161.35.22.86']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:49.620Z",
+ "modified": "2020-06-10T01:14:49.620Z",
+ "name": "bot_ip: 161.35.22.86",
+ "description": "TS ID: 55697907934; iType: bot_ip; Date First: 2020-06-06T09:32:22.615Z; State: active; Org: Racal-Redac; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.831549Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--ac4a9ca5-9f6e-4072-b568-46dbb03a3ace",
+ "pattern": "[ipv4-addr:value = '45.143.220.246']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:15:10.905Z",
+ "modified": "2020-06-10T01:15:10.905Z",
+ "name": "bot_ip: 45.143.220.246",
+ "description": "TS ID: 55691320117; iType: bot_ip; Date First: 2020-06-04T10:32:46.584Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.752185Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
+ "created": "2017-01-20T00:00:00.000Z",
+ "definition_type": "tlp",
+ "definition": {
+ "tlp": "amber"
+ },
+ "type": "marking-definition",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
+ "created": "2017-01-20T00:00:00.000Z",
+ "definition_type": "tlp",
+ "definition": {
+ "tlp": "green"
+ },
+ "type": "marking-definition",
+ "spec_version": "2.1"
+ }
+ ],
+ "more": false
+ }
+]
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_complex_20-19.json b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_complex_20-19.json
index 5acaa155427a..53d1edab2468 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_complex_20-19.json
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_complex_20-19.json
@@ -1,433 +1,435 @@
-{
- "objects": [
- {
- "id": "indicator--86fee2b1-807d-423d-9d0e-1117bab576ce",
- "pattern": "[ipv4-addr:value = '195.123.227.186' AND ipv4-addr:value = '1.1.1.1']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:33.126Z",
- "modified": "2020-06-10T01:14:33.126Z",
- "name": "bot_ip: 195.123.227.186",
- "description": "TS ID: 55694549840; iType: bot_ip; Date First: 2020-06-05T08:42:19.170Z; State: active; Org: Layer6 Networks; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.779852Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--891207b3-bff4-4bc2-8c12-7fd2321c9f38",
- "pattern": "[ipv4-addr:value = '134.209.37.102' OR ipv4-addr:value = '2.2.2.2']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:52.501Z",
- "modified": "2020-06-10T01:14:52.501Z",
- "name": "bot_ip: 134.209.37.102",
- "description": "TS ID: 55682983162; iType: bot_ip; Date First: 2020-06-02T07:26:06.274Z; State: active; Org: Covidien Lp; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.722754Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--8c726d5f-cb6b-45dc-8c2b-2be8596043cf",
- "pattern": "[ipv4-addr:value = '117.141.112.155' FOLLOWEDBY ipv4-addr:value = '3.3.3.3']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:54.684Z",
- "modified": "2020-06-10T01:14:54.684Z",
- "name": "bot_ip: 117.141.112.155",
- "description": "TS ID: 55694549819; iType: bot_ip; Date First: 2020-06-05T08:42:17.907Z; State: active; Org: China Mobile Guangdong; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.775627Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--8e19a19c-cd66-4278-8bfb-c05c64977d12",
- "pattern": "[ipv4-addr:value = '23.129.64.217']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:19.858Z",
- "modified": "2020-06-10T01:14:19.858Z",
- "name": "bot_ip: 23.129.64.217",
- "description": "TS ID: 55682983514; iType: bot_ip; Date First: 2020-06-02T07:26:46.206Z; State: active; Org: Emerald Onion; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.731573Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--90a4f95d-1e35-4f47-b303-5651c93457f4",
- "pattern": "[ipv4-addr:value = '45.142.213.11']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:10.753Z",
- "modified": "2020-06-10T01:14:10.753Z",
- "name": "bot_ip: 45.142.213.11",
- "description": "TS ID: 55694549856; iType: bot_ip; Date First: 2020-06-05T08:45:37.178Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.808281Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--94f109aa-3ef2-4a8c-a847-dfb4c64f4f29",
- "pattern": "[ipv4-addr:value = '157.245.250.190']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:15.950Z",
- "modified": "2020-06-10T01:14:15.950Z",
- "name": "bot_ip: 157.245.250.190",
- "description": "TS ID: 55697907923; iType: bot_ip; Date First: 2020-06-06T09:32:01.051Z; State: active; Org: Datalogic ADC; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.818576Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--96d1737a-5565-49ac-8a91-52c2c7b38903",
- "pattern": "[ipv4-addr:value = '144.91.106.47']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:15:00.764Z",
- "modified": "2020-06-10T01:15:00.764Z",
- "name": "bot_ip: 144.91.106.47",
- "description": "TS ID: 55694549829; iType: bot_ip; Date First: 2020-06-05T08:44:22.790Z; State: active; Org: Mills College; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.791474Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9c98d81b-b4a5-4b8d-8fd6-4b9beec0f1be",
- "pattern": "[ipv4-addr:value = '141.98.81.208']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:39.995Z",
- "modified": "2020-06-10T01:14:39.995Z",
- "name": "bot_ip: 141.98.81.208",
- "description": "TS ID: 55691320102; iType: bot_ip; Date First: 2020-06-04T10:33:13.398Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.766866Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9cbf82af-8a54-478a-af76-b88a73a33d37",
- "pattern": "[ipv4-addr:value = '51.81.53.159']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:15:01.999Z",
- "modified": "2020-06-10T01:15:01.999Z",
- "name": "bot_ip: 51.81.53.159",
- "description": "TS ID: 55694549861; iType: bot_ip; Date First: 2020-06-05T08:42:44.478Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.781286Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9ee9aecd-89e6-4dd6-9a24-4c610b33ebbb",
- "pattern": "[ipv4-addr:value = '104.168.173.252']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:58.530Z",
- "modified": "2020-06-10T01:14:58.530Z",
- "name": "bot_ip: 104.168.173.252",
- "description": "TS ID: 55691320097; iType: bot_ip; Date First: 2020-06-04T10:32:46.612Z; State: active; Org: Hostwinds LLC.; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.753603Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--9febf107-dd82-4727-bcb7-199291ec474c",
- "pattern": "[ipv4-addr:value = '173.212.206.89']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:34.822Z",
- "modified": "2020-06-10T01:14:34.822Z",
- "name": "bot_ip: 173.212.206.89",
- "description": "TS ID: 55697907953; iType: bot_ip; Date First: 2020-06-06T09:31:54.190Z; State: active; Org: Contabo GmbH; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.814015Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a25904c8-0270-4d57-add5-64f5ed1485b5",
- "pattern": "[ipv4-addr:value = '67.207.94.201']",
- "confidence": 15,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:29.751Z",
- "modified": "2020-06-10T01:14:29.751Z",
- "name": "bot_ip: 67.207.94.201",
- "description": "TS ID: 55697908164; iType: bot_ip; Date First: 2020-06-06T09:32:30.450Z; State: active; Org: Digital Ocean; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.837493Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a5a1408d-ff8b-41b2-8c57-6678aa0c8688",
- "pattern": "[ipv4-addr:value = '89.163.242.76']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:35.839Z",
- "modified": "2020-06-10T01:14:35.839Z",
- "name": "bot_ip: 89.163.242.76",
- "description": "TS ID: 55694549874; iType: bot_ip; Date First: 2020-06-05T08:45:20.346Z; State: active; Org: myLoc managed IT AG; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.800264Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a8cc5b11-3bbb-4fb2-970c-31a6f58e1374",
- "pattern": "[ipv4-addr:value = '51.75.71.205']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:41.919Z",
- "modified": "2020-06-10T01:14:41.919Z",
- "name": "bot_ip: 51.75.71.205",
- "description": "TS ID: 55686993979; iType: bot_ip; Date First: 2020-06-03T07:29:11.148Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.73608Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--a8ee1e5f-8c08-4135-878c-4973179cbac5",
- "pattern": "[ipv4-addr:value = '140.224.183.58']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:11.651Z",
- "modified": "2020-06-10T01:14:11.651Z",
- "name": "bot_ip: 140.224.183.58",
- "description": "TS ID: 55694549823; iType: bot_ip; Date First: 2020-06-05T08:45:24.055Z; State: active; Org: China Telecom FUJIAN NETWORK; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.801661Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--aa4ec99f-3c54-4e60-ab47-83ff78d76570",
- "pattern": "[ipv4-addr:value = '161.35.22.86']",
- "confidence": 85,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:14:49.620Z",
- "modified": "2020-06-10T01:14:49.620Z",
- "name": "bot_ip: 161.35.22.86",
- "description": "TS ID: 55697907934; iType: bot_ip; Date First: 2020-06-06T09:32:22.615Z; State: active; Org: Racal-Redac; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.831549Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "indicator--ac4a9ca5-9f6e-4072-b568-46dbb03a3ace",
- "pattern": "[ipv4-addr:value = '45.143.220.246']",
- "confidence": 50,
- "lang": "en",
- "type": "indicator",
- "created": "2020-06-10T01:15:10.905Z",
- "modified": "2020-06-10T01:15:10.905Z",
- "name": "bot_ip: 45.143.220.246",
- "description": "TS ID: 55691320117; iType: bot_ip; Date First: 2020-06-04T10:32:46.584Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
- "valid_from": "2020-06-10T01:00:33.752185Z",
- "pattern_type": "stix",
- "object_marking_refs": [
- "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
- ],
- "labels": [
- "low"
- ],
- "indicator_types": [
- "anomalous-activity"
- ],
- "pattern_version": "2.1",
- "spec_version": "2.1"
- },
- {
- "id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
- "created": "2017-01-20T00:00:00.000Z",
- "definition_type": "tlp",
- "definition": {
- "tlp": "amber"
+[
+ {
+ "objects": [
+ {
+ "id": "indicator--86fee2b1-807d-423d-9d0e-1117bab576ce",
+ "pattern": "[ipv4-addr:value = '195.123.227.186' AND ipv4-addr:value = '1.1.1.1']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:33.126Z",
+ "modified": "2020-06-10T01:14:33.126Z",
+ "name": "bot_ip: 195.123.227.186",
+ "description": "TS ID: 55694549840; iType: bot_ip; Date First: 2020-06-05T08:42:19.170Z; State: active; Org: Layer6 Networks; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.779852Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
},
- "type": "marking-definition",
- "spec_version": "2.1"
- },
- {
- "id": "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
- "created": "2017-01-20T00:00:00.000Z",
- "definition_type": "tlp",
- "definition": {
- "tlp": "green"
+ {
+ "id": "indicator--891207b3-bff4-4bc2-8c12-7fd2321c9f38",
+ "pattern": "[ipv4-addr:value = '134.209.37.102' OR ipv4-addr:value = '2.2.2.2']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:52.501Z",
+ "modified": "2020-06-10T01:14:52.501Z",
+ "name": "bot_ip: 134.209.37.102",
+ "description": "TS ID: 55682983162; iType: bot_ip; Date First: 2020-06-02T07:26:06.274Z; State: active; Org: Covidien Lp; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.722754Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
},
- "type": "marking-definition",
- "spec_version": "2.1"
- }
- ],
- "more": false
-}
+ {
+ "id": "indicator--8c726d5f-cb6b-45dc-8c2b-2be8596043cf",
+ "pattern": "[ipv4-addr:value = '117.141.112.155' FOLLOWEDBY ipv4-addr:value = '3.3.3.3']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:54.684Z",
+ "modified": "2020-06-10T01:14:54.684Z",
+ "name": "bot_ip: 117.141.112.155",
+ "description": "TS ID: 55694549819; iType: bot_ip; Date First: 2020-06-05T08:42:17.907Z; State: active; Org: China Mobile Guangdong; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.775627Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--8e19a19c-cd66-4278-8bfb-c05c64977d12",
+ "pattern": "[ipv4-addr:value = '23.129.64.217']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:19.858Z",
+ "modified": "2020-06-10T01:14:19.858Z",
+ "name": "bot_ip: 23.129.64.217",
+ "description": "TS ID: 55682983514; iType: bot_ip; Date First: 2020-06-02T07:26:46.206Z; State: active; Org: Emerald Onion; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.731573Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--90a4f95d-1e35-4f47-b303-5651c93457f4",
+ "pattern": "[ipv4-addr:value = '45.142.213.11']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:10.753Z",
+ "modified": "2020-06-10T01:14:10.753Z",
+ "name": "bot_ip: 45.142.213.11",
+ "description": "TS ID: 55694549856; iType: bot_ip; Date First: 2020-06-05T08:45:37.178Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.808281Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--94f109aa-3ef2-4a8c-a847-dfb4c64f4f29",
+ "pattern": "[ipv4-addr:value = '157.245.250.190']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:15.950Z",
+ "modified": "2020-06-10T01:14:15.950Z",
+ "name": "bot_ip: 157.245.250.190",
+ "description": "TS ID: 55697907923; iType: bot_ip; Date First: 2020-06-06T09:32:01.051Z; State: active; Org: Datalogic ADC; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.818576Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--96d1737a-5565-49ac-8a91-52c2c7b38903",
+ "pattern": "[ipv4-addr:value = '144.91.106.47']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:15:00.764Z",
+ "modified": "2020-06-10T01:15:00.764Z",
+ "name": "bot_ip: 144.91.106.47",
+ "description": "TS ID: 55694549829; iType: bot_ip; Date First: 2020-06-05T08:44:22.790Z; State: active; Org: Mills College; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.791474Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9c98d81b-b4a5-4b8d-8fd6-4b9beec0f1be",
+ "pattern": "[ipv4-addr:value = '141.98.81.208']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:39.995Z",
+ "modified": "2020-06-10T01:14:39.995Z",
+ "name": "bot_ip: 141.98.81.208",
+ "description": "TS ID: 55691320102; iType: bot_ip; Date First: 2020-06-04T10:33:13.398Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.766866Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9cbf82af-8a54-478a-af76-b88a73a33d37",
+ "pattern": "[ipv4-addr:value = '51.81.53.159']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:15:01.999Z",
+ "modified": "2020-06-10T01:15:01.999Z",
+ "name": "bot_ip: 51.81.53.159",
+ "description": "TS ID: 55694549861; iType: bot_ip; Date First: 2020-06-05T08:42:44.478Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.781286Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9ee9aecd-89e6-4dd6-9a24-4c610b33ebbb",
+ "pattern": "[ipv4-addr:value = '104.168.173.252']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:58.530Z",
+ "modified": "2020-06-10T01:14:58.530Z",
+ "name": "bot_ip: 104.168.173.252",
+ "description": "TS ID: 55691320097; iType: bot_ip; Date First: 2020-06-04T10:32:46.612Z; State: active; Org: Hostwinds LLC.; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.753603Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--9febf107-dd82-4727-bcb7-199291ec474c",
+ "pattern": "[ipv4-addr:value = '173.212.206.89']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:34.822Z",
+ "modified": "2020-06-10T01:14:34.822Z",
+ "name": "bot_ip: 173.212.206.89",
+ "description": "TS ID: 55697907953; iType: bot_ip; Date First: 2020-06-06T09:31:54.190Z; State: active; Org: Contabo GmbH; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.814015Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a25904c8-0270-4d57-add5-64f5ed1485b5",
+ "pattern": "[ipv4-addr:value = '67.207.94.201']",
+ "confidence": 15,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:29.751Z",
+ "modified": "2020-06-10T01:14:29.751Z",
+ "name": "bot_ip: 67.207.94.201",
+ "description": "TS ID: 55697908164; iType: bot_ip; Date First: 2020-06-06T09:32:30.450Z; State: active; Org: Digital Ocean; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.837493Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a5a1408d-ff8b-41b2-8c57-6678aa0c8688",
+ "pattern": "[ipv4-addr:value = '89.163.242.76']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:35.839Z",
+ "modified": "2020-06-10T01:14:35.839Z",
+ "name": "bot_ip: 89.163.242.76",
+ "description": "TS ID: 55694549874; iType: bot_ip; Date First: 2020-06-05T08:45:20.346Z; State: active; Org: myLoc managed IT AG; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.800264Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a8cc5b11-3bbb-4fb2-970c-31a6f58e1374",
+ "pattern": "[ipv4-addr:value = '51.75.71.205']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:41.919Z",
+ "modified": "2020-06-10T01:14:41.919Z",
+ "name": "bot_ip: 51.75.71.205",
+ "description": "TS ID: 55686993979; iType: bot_ip; Date First: 2020-06-03T07:29:11.148Z; State: active; Org: OVH SAS; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.73608Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--a8ee1e5f-8c08-4135-878c-4973179cbac5",
+ "pattern": "[ipv4-addr:value = '140.224.183.58']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:11.651Z",
+ "modified": "2020-06-10T01:14:11.651Z",
+ "name": "bot_ip: 140.224.183.58",
+ "description": "TS ID: 55694549823; iType: bot_ip; Date First: 2020-06-05T08:45:24.055Z; State: active; Org: China Telecom FUJIAN NETWORK; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.801661Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--aa4ec99f-3c54-4e60-ab47-83ff78d76570",
+ "pattern": "[ipv4-addr:value = '161.35.22.86']",
+ "confidence": 85,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:14:49.620Z",
+ "modified": "2020-06-10T01:14:49.620Z",
+ "name": "bot_ip: 161.35.22.86",
+ "description": "TS ID: 55697907934; iType: bot_ip; Date First: 2020-06-06T09:32:22.615Z; State: active; Org: Racal-Redac; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.831549Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "indicator--ac4a9ca5-9f6e-4072-b568-46dbb03a3ace",
+ "pattern": "[ipv4-addr:value = '45.143.220.246']",
+ "confidence": 50,
+ "lang": "en",
+ "type": "indicator",
+ "created": "2020-06-10T01:15:10.905Z",
+ "modified": "2020-06-10T01:15:10.905Z",
+ "name": "bot_ip: 45.143.220.246",
+ "description": "TS ID: 55691320117; iType: bot_ip; Date First: 2020-06-04T10:32:46.584Z; State: active; Source: Emerging Threats - Compromised; MoreDetail: imported by user 668",
+ "valid_from": "2020-06-10T01:00:33.752185Z",
+ "pattern_type": "stix",
+ "object_marking_refs": [
+ "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da"
+ ],
+ "labels": [
+ "low"
+ ],
+ "indicator_types": [
+ "anomalous-activity"
+ ],
+ "pattern_version": "2.1",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
+ "created": "2017-01-20T00:00:00.000Z",
+ "definition_type": "tlp",
+ "definition": {
+ "tlp": "amber"
+ },
+ "type": "marking-definition",
+ "spec_version": "2.1"
+ },
+ {
+ "id": "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
+ "created": "2017-01-20T00:00:00.000Z",
+ "definition_type": "tlp",
+ "definition": {
+ "tlp": "green"
+ },
+ "type": "marking-definition",
+ "spec_version": "2.1"
+ }
+ ],
+ "more": false
+ }
+]
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_no_indicators.json b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_no_indicators.json
index 880413ab4b21..2106847d2956 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_no_indicators.json
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/test_data/stix_envelope_no_indicators.json
@@ -1,25 +1,27 @@
-{
- "objects": [
- {
- "id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
- "created": "2017-01-20T00:00:00.000Z",
- "definition_type": "tlp",
- "definition": {
- "tlp": "amber"
+[
+ {
+ "objects": [
+ {
+ "id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
+ "created": "2017-01-20T00:00:00.000Z",
+ "definition_type": "tlp",
+ "definition": {
+ "tlp": "amber"
+ },
+ "type": "marking-definition",
+ "spec_version": "2.1"
},
- "type": "marking-definition",
- "spec_version": "2.1"
- },
- {
- "id": "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
- "created": "2017-01-20T00:00:00.000Z",
- "definition_type": "tlp",
- "definition": {
- "tlp": "green"
- },
- "type": "marking-definition",
- "spec_version": "2.1"
- }
- ],
- "more": false
-}
+ {
+ "id": "marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da",
+ "created": "2017-01-20T00:00:00.000Z",
+ "definition_type": "tlp",
+ "definition": {
+ "tlp": "green"
+ },
+ "type": "marking-definition",
+ "spec_version": "2.1"
+ }
+ ],
+ "more": false
+ }
+]
diff --git a/Packs/ApiModules/pack_metadata.json b/Packs/ApiModules/pack_metadata.json
index f4677b94d096..0c75363b0974 100644
--- a/Packs/ApiModules/pack_metadata.json
+++ b/Packs/ApiModules/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "ApiModules",
"description": "API Modules",
"support": "xsoar",
- "currentVersion": "2.2.11",
+ "currentVersion": "2.2.15",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -15,4 +15,4 @@
"xsoar",
"marketplacev2"
]
-}
\ No newline at end of file
+}
diff --git a/Packs/AppNovi/Integrations/appNovi/appNovi.yml b/Packs/AppNovi/Integrations/appNovi/appNovi.yml
index c9fef59d0b6d..196893836fbf 100644
--- a/Packs/AppNovi/Integrations/appNovi/appNovi.yml
+++ b/Packs/AppNovi/Integrations/appNovi/appNovi.yml
@@ -401,7 +401,7 @@ script:
type: textArea
description: Server IP to search
description: Search for servers using IP address
- dockerimage: demisto/python3:3.10.8.37753
+ dockerimage: demisto/python3:3.10.10.48392
tests:
- No tests (auto formatted)
fromversion: 6.5.0
diff --git a/Packs/AppNovi/ReleaseNotes/1_0_1.md b/Packs/AppNovi/ReleaseNotes/1_0_1.md
new file mode 100644
index 000000000000..52dfc8748a7e
--- /dev/null
+++ b/Packs/AppNovi/ReleaseNotes/1_0_1.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### appNovi
+- Updated the Docker image to: *demisto/python3:3.10.9.40422*.
diff --git a/Packs/AppNovi/ReleaseNotes/1_0_2.md b/Packs/AppNovi/ReleaseNotes/1_0_2.md
new file mode 100644
index 000000000000..7f8647f712ec
--- /dev/null
+++ b/Packs/AppNovi/ReleaseNotes/1_0_2.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### appNovi
+- Updated the Docker image to: *demisto/python3:3.10.9.46032*.
diff --git a/Packs/AppNovi/ReleaseNotes/1_0_3.md b/Packs/AppNovi/ReleaseNotes/1_0_3.md
new file mode 100644
index 000000000000..62e95e3dc089
--- /dev/null
+++ b/Packs/AppNovi/ReleaseNotes/1_0_3.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### appNovi
+- Updated the Docker image to: *demisto/python3:3.10.10.48392*.
diff --git a/Packs/AppNovi/pack_metadata.json b/Packs/AppNovi/pack_metadata.json
index 1a40944c0035..1dfe218bc3b8 100644
--- a/Packs/AppNovi/pack_metadata.json
+++ b/Packs/AppNovi/pack_metadata.json
@@ -2,48 +2,13 @@
"name": "AppNovi",
"description": "Search your combined security data in appNovi via simplified search or search via the appNovi security graph.",
"support": "partner",
- "currentVersion": "1.0.0",
+ "currentVersion": "1.0.3",
"author": "appNovi",
"url": "https://appnovi.com/support",
"email": "",
"categories": ["Analytics & SIEM"],
- "tags": [
- "Incident Response",
- "IoT",
- "Malware",
- "Network",
- "Security Analytics",
- "Relationship",
- "Alerts",
- "Attack",
- "Breach",
- "Compliance",
- "Email",
- "IAM",
- "HIPAA",
- "Machine Learning",
- "Threat Intelligence",
- "New",
- "Use Case"
- ],
- "useCases": [
- "Asset Management",
- "Breach Notification",
- "Breach and Attack Simulation",
- "Compliance",
- "GDPR Breach Notification",
- "HIPAA Breach Notification",
- "Hunting",
- "Identity and Access Management",
- "Incident Response",
- "Malware",
- "NIST",
- "Ransomware",
- "Threat Intelligence Management",
- "Vulnerability Management",
- "Network Security",
- "Rapid Breach Response"
- ],
+ "tags": [],
+ "useCases": [],
"keywords": [
"Asset intelligence",
"Asset attribution",
diff --git a/Packs/ArcSightXML/Integrations/README.md b/Packs/ArcSightXML/Integrations/README.md
index 9390f0f2e29f..b33e60db85cf 100644
--- a/Packs/ArcSightXML/Integrations/README.md
+++ b/Packs/ArcSightXML/Integrations/README.md
@@ -1,83 +1,52 @@
-
-
-
-
Overview
-
-
Use the ArcSight XML integration to fetch cases from ArcSight and create incidents Cortex XSOAR using XML files. ArcSight exports cases and security events as XML to a specified folder and Cortex XSOAR fetches the emails from the folder and creates an incident in Cortex XSOAR for each case.
-
Important: The integration should be executed in native Python, not Docker, because the program must have direct access to the folder, otherwise will not be fetched. You can use an engine, but make sure the engine does not use Docker. If the folder is on the Cortex XSOAR server then you can use python.executable=python.
-
Configure ArcSight XML on Cortex XSOAR
-
-
-
Navigate to Settings > Integrations > Servers & Services.
-
Search for ArcSight XML.
-
Click Add instance to create and configure a new integration instance.
-
-
Name: a textual name for the integration instance.
-
Fetch incidents
-
Incident type
-
Directory from which to get XML files and create incidents.
-
Directory to which put command XML files.
-
-
-
Click Test to validate the URLs, token, and connection.
-
-
Fetched Incidents Data
-
-
The integration polls the specified folder every minute. When there is an XML file in the folder, the integration loads that file, parses the Security Events/Cases, and converts the cases to incidents in Cortex XSOAR. The integration will delete those XML files.
-
Commands
-
-
You can execute these commands from the Cortex XSOAR CLI, as part of an automation, or in a playbook. After you successfully execute a command, a DBot message appears in the War Room with the command details.