diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 08a9bd35381..8d774885215 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,14 @@ -* @centreon/owners-cpp +* @centreon/owners-cpp -.github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines -selinux/** @centreon/owners-pipelines +.github/** @centreon/owners-pipelines +packaging/** @centreon/owners-pipelines +selinux/** @centreon/owners-pipelines -tests/** @centreon/owners-robot-e2e +tests/** @centreon/owners-robot-e2e + +gorgone/ @centreon/owners-perl +gorgone/docs/ @centreon/owners-doc + +gorgone/tests/robot/config/ @centreon/owners-perl +*.pm @centreon/owners-perl +*.pl @centreon/owners-perl diff --git a/.github/actions/deb-delivery/action.yml b/.github/actions/deb-delivery/action.yml new file mode 100644 index 00000000000..46b6c5ec189 --- /dev/null +++ b/.github/actions/deb-delivery/action.yml @@ -0,0 +1,80 @@ +name: "deb-delivery" +description: "Package deb packages" +inputs: + module_name: + description: "The package module name" + required: true + distrib: + description: "The distribution used for packaging" + required: true + version: + description: "Centreon packaged major version" + required: true + cache_key: + description: "The cached package key" + required: true + stability: + description: "The package stability (stable, testing, unstable)" + required: true + artifactory_token: + description: "Artifactory token" + required: true + release_type: + description: "Type of release (hotfix, release)" + required: true + release_cloud: + description: "Release context (cloud or not cloud)" + required: true + +runs: + using: "composite" + steps: + - name: Use cache DEB files + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: ./*.deb + key: ${{ inputs.cache_key }} + fail-on-cache-miss: true + + - uses: jfrog/setup-jfrog-cli@0f30b43d62ccad81fba40748d2c671c4665b2d27 # v3.5.3 + env: + JF_URL: https://centreon.jfrog.io + 
JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + + - name: Publish DEBs + run: | + FILES="*.deb" + + # DEBUG + echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" + echo "[DEBUG] - module_name: ${{ inputs.module_name }}" + echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - release_type: ${{ inputs.release_type }}" + echo "[DEBUG] - stability: ${{ inputs.stability }}" + + # Make sure all required inputs are NOT empty + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + echo "Some mandatory inputs are empty, please check the logs." + exit 1 + fi + + # Handle either standard debian or ubuntu repository path + if [[ "${{ inputs.distrib }}" == "jammy" ]]; then + ROOT_REPO_PATH="ubuntu-standard-${{ inputs.version }}-${{ inputs.stability }}" + else + ROOT_REPO_PATH="apt-standard-${{ inputs.version }}-${{ inputs.stability }}" + fi + + for FILE in $FILES; do + echo "[DEBUG] - File: $FILE" + + VERSION=${{ inputs.version }} + DISTRIB=$(echo $FILE | cut -d '_' -f2 | cut -d '-' -f2) + ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' 
-f1) + + echo "[DEBUG] - Version: $VERSION" + + jf rt upload "$FILE" "$ROOT_REPO_PATH/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" --flat + done + shell: bash diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index f762844e143..8cbca5c8073 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -7,7 +7,7 @@ inputs: distrib: description: "The distribution used for packaging" required: true - version: + major_version: description: "Centreon packaged major version" required: true cache_key: @@ -60,7 +60,7 @@ runs: FILES="*.${{ env.extfile }}" # DEBUG - echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" @@ -68,7 +68,7 @@ runs: echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -96,19 +96,19 @@ runs: if [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} == "hotfix" || ${{ inputs.release_type }} == "release" ) ]]; then echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." 
ROOT_REPO_PATHS="rpm-standard-internal" - UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL elif [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} != "hotfix" && ${{ inputs.release_type }} != "release" ) ]]; then echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" - UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." 
ROOT_REPO_PATHS="rpm-standard" - UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" + UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" # NOT VALID, DO NOT DELIVER else @@ -125,7 +125,7 @@ runs: elif [ "${{ inputs.stability }}" == "testing" ]; then jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --sync-deletes="$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --flat else - jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat + jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat fi fi done @@ -138,7 +138,7 @@ runs: FILES="*.${{ env.extfile }}" # DEBUG - echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" @@ -146,7 +146,7 @@ runs: echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then 
echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -154,16 +154,16 @@ runs: for FILE in $FILES; do echo "[DEBUG] - File: $FILE" - VERSION=${{ inputs.version }} + VERSION=${{ inputs.major_version }} DISTRIB=$(echo $FILE | cut -d '_' -f2 | cut -d '-' -f2) ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' -f1) - echo "[DEBUG] - Version: $VERSION" + echo "[DEBUG] - Major version: $VERSION" if [[ "${{ inputs.distrib }}" == "jammy" ]]; then - ROOT_REPO_PATH="ubuntu-standard-${{ inputs.version }}-${{ inputs.stability }}" + ROOT_REPO_PATH="ubuntu-standard-${{ inputs.major_version }}-${{ inputs.stability }}" else - ROOT_REPO_PATH="apt-standard-${{ inputs.version }}-${{ inputs.stability }}" + ROOT_REPO_PATH="apt-standard-${{ inputs.major_version }}-${{ inputs.stability }}" fi jf rt upload "$FILE" "$ROOT_REPO_PATH/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index b51c1ae496e..950b9cb8e27 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -10,8 +10,11 @@ inputs: distrib: description: The package distrib required: true - version: - description: The package version + major_version: + description: The major version + required: false + minor_version: + description: The minor version required: false release: description: The package release number @@ -53,7 +56,8 @@ runs: RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} run: | - export VERSION="${{ inputs.version }}" + export MAJOR_VERSION="${{ inputs.major_version }}" + export VERSION="${{ inputs.major_version }}.${{ inputs.minor_version }}" export RELEASE="${{ inputs.release }}" export ARCH="${{ inputs.arch }}" @@ -68,6 +72,19 @@ runs: fi fi + MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." -f1 ) + MAJOR_RIGHT=$( echo $MAJOR_VERSION | cut -d "-" -f1 | cut -d "." 
-f2 ) + BUMP_MAJOR_RIGHT=$(( MAJOR_RIGHT + 1 )) + if [ "$MAJOR_RIGHT" = "04" ]; then + BUMP_MAJOR_LEFT="$MAJOR_LEFT" + BUMP_MAJOR_RIGHT="10" + else + BUMP_MAJOR_LEFT=$(( $MAJOR_LEFT + 1 )) + BUMP_MAJOR_RIGHT="04" + fi + + export NEXT_MAJOR_VERSION="$BUMP_MAJOR_LEFT.$BUMP_MAJOR_RIGHT" + + export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" + export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" + export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" @@ -91,3 +108,12 @@ runs: with: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} + + # Update if condition to true to get packages as artifacts + - if: ${{ false }} + name: Upload package artifacts + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: ${{ inputs.arch != '' && format('packages-{0}-{1}', inputs.distrib, inputs.arch) || format('packages-{0}', inputs.distrib) }} + path: ./*.${{ inputs.package_extension}} + retention-days: 1 diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml index df267f5acfc..a031cc74d82 100644 --- a/.github/actions/promote-to-stable/action.yml +++ b/.github/actions/promote-to-stable/action.yml @@ -13,9 +13,6 @@ inputs: major_version: description: "Centreon packaged major version" required: true - minor_version: - description: "Centreon package minor version" - required: true stability: description: "The package stability (stable, testing, unstable)" required: true @@ -44,7 +41,6 @@ runs: # DEBUG echo "[DEBUG] - Major version: ${{ inputs.major_version }}" - echo "[DEBUG] - Minor version: ${{ inputs.minor_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" @@ -108,12 +104,11 @@ runs: shell: bash - name: Promote DEB packages to stable - if: ${{ contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }} + if: ${{ !inputs.release_cloud && 
contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }} run: | set -eux echo "[DEBUG] - Major version: ${{ inputs.major_version }}" - echo "[DEBUG] - Minor version: ${{ inputs.minor_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" # Define ROOT_REPO_PATH for debian diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index 205bc442042..2f952b78bab 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -33,7 +33,7 @@ runs: set -eux # Variables - COMPONENTS_COLLECT=("centreon-collect") + COMPONENTS_COLLECT=("centreon-collect" "centreon-gorgone") CURRENT_STABLE_BRANCH_MAJOR_VERSION="" declare -a TMP_STABLE_TAGS=() declare -a NEW_STABLE_TAGS=() diff --git a/.github/actions/rpm-delivery/action.yml b/.github/actions/rpm-delivery/action.yml new file mode 100644 index 00000000000..3174c753300 --- /dev/null +++ b/.github/actions/rpm-delivery/action.yml @@ -0,0 +1,132 @@ +name: "rpm-delivery" +description: "Deliver rpm packages" +inputs: + module_name: + description: "The package module name" + required: true + distrib: + description: "The distribution used for packaging" + required: true + version: + description: "Centreon packaged major version" + required: true + cache_key: + description: "The cached package key" + required: true + stability: + description: "The package stability (stable, testing, unstable)" + required: true + artifactory_token: + description: "Artifactory token" + required: true + release_type: + description: "Type of release (hotfix, release)" + required: true + release_cloud: + description: "Release context (cloud or not cloud)" + required: true + +runs: + using: "composite" + steps: + - name: Use cache RPM files + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: ./*.rpm + key: ${{ inputs.cache_key }} + fail-on-cache-miss: true + + - uses: jfrog/setup-jfrog-cli@26da2259ee7690e63b5410d7451b2938d08ce1f9 # v4.0.0 + env: + 
JF_URL: https://centreon.jfrog.io + JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + + - name: Publish RPMs + run: | + set -eux + + FILES="*.rpm" + + if [ -z "${{ inputs.module_name }}" ]; then + echo "module name is required" + exit 1 + fi + + if [ -z "${{ inputs.distrib }}" ]; then + echo "distrib is required" + exit 1 + fi + + # DEBUG + echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" + echo "[DEBUG] - module_name: ${{ inputs.module_name }}" + echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - release_type: ${{ inputs.release_type }}" + echo "[DEBUG] - stability: ${{ inputs.stability }}" + + # Make sure all required inputs are NOT empty + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + echo "Some mandatory inputs are empty, please check the logs." + exit 1 + fi + + # Create ARCH dirs + mkdir noarch x86_64 + + # Get ARCH target for files to deliver and regroupe them by ARCH + for FILE in $FILES; do + echo "[DEBUG] - File: $FILE" + + ARCH=$(echo $FILE | grep -oP '(x86_64|noarch)') + + echo "[DEBUG] - Arch: $ARCH" + + mv "$FILE" "$ARCH" + done + + # Build upload target path based on release_cloud and release_type values + # if cloud + hotfix or cloud + release, deliver to internal testing- + # if cloud + develop, delivery to internal unstable + # if non-cloud, delivery to onprem testing or unstable + + # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL + if [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_type }} == "release" ]]); then + echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." 
+ ROOT_REPO_PATHS="rpm-standard-internal" + UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + + # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL + elif [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} != "hotfix" ]] && [[ ${{ inputs.release_type }} != "release" ]]); then + echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." + ROOT_REPO_PATHS="rpm-standard-internal" + UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + + # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD + elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." + ROOT_REPO_PATHS="rpm-standard" + UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" + + # ANYTHING ELSE + else + echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and release_cloud [${{ inputs.release_cloud }}]" + exit 1 + fi + + # Deliver based on inputs + for ROOT_REPO_PATH in "$ROOT_REPO_PATHS"; do + for ARCH in "noarch" "x86_64"; do + if [ "$(ls -A $ARCH)" ]; then + if [ "${{ inputs.stability }}" == "stable" ]; then + echo "[DEBUG] - Stability is ${{ inputs.stability }}, not delivering." 
+ elif [ "${{ inputs.stability }}" == "testing" ]; then + jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --sync-deletes="$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --flat + else + jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat + fi + fi + done + done + + shell: bash diff --git a/.github/docker/Dockerfile.centreon-collect-alma8 b/.github/docker/Dockerfile.centreon-collect-alma8 index 7e0de7ac1a7..a5592ac59ef 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma8 +++ b/.github/docker/Dockerfile.centreon-collect-alma8 @@ -19,7 +19,8 @@ baseurl=https://repo.goreleaser.com/yum/ enabled=1 gpgcheck=0' | tee /etc/yum.repos.d/goreleaser.repo -curl -LsS "https://r.mariadb.com/downloads/mariadb_repo_setup" | bash -s -- --os-type=rhel --os-version=8 --mariadb-server-version="mariadb-10.5" +curl -LsS "https://r.mariadb.com/downloads/mariadb_repo_setup" | bash -s -- --skip-maxscale --os-type=rhel --os-version=8 --mariadb-server-version="mariadb-10.5" + dnf install -y cmake \ gcc \ gcc-c++ \ diff --git a/.github/docker/Dockerfile.gorgone-testing-alma8 b/.github/docker/Dockerfile.gorgone-testing-alma8 new file mode 100644 index 00000000000..7fe2db43131 --- /dev/null +++ b/.github/docker/Dockerfile.gorgone-testing-alma8 @@ -0,0 +1,15 @@ +FROM almalinux:8 + +RUN bash -e < /dev/null 2>&1 +apt-get clean +rm -rf /var/lib/apt/lists/* + +EOF + + diff --git a/.github/docker/Dockerfile.gorgone-testing-bullseye b/.github/docker/Dockerfile.gorgone-testing-bullseye new file mode 100644 index 00000000000..0c3cc92a2a8 --- /dev/null +++ b/.github/docker/Dockerfile.gorgone-testing-bullseye @@ -0,0 +1,28 @@ +FROM debian:bullseye + +ENV DEBIAN_FRONTEND noninteractive +# fix locale +ENV LANG en_US.utf8 + +RUN bash -e < /dev/null 2>&1 +apt-get clean 
+rm -rf /var/lib/apt/lists/* + +EOF diff --git a/.github/docker/Dockerfile.gorgone-testing-jammy b/.github/docker/Dockerfile.gorgone-testing-jammy new file mode 100644 index 00000000000..6338489114d --- /dev/null +++ b/.github/docker/Dockerfile.gorgone-testing-jammy @@ -0,0 +1,41 @@ +FROM ubuntu:jammy + +ENV DEBIAN_FRONTEND=noninteractive + +# Set locale +RUN apt-get update && \ + apt-get install -y locales libcurl4-openssl-dev curl wget zstd jq lsb-release mariadb-client iproute2 && \ + apt-get install -y ca-certificates apt-transport-https software-properties-common gnupg2 procps lsof && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 + +ENV LANG=en_US.UTF-8 + +# Add Centreon repositories and their public key +RUN echo "deb https://packages.centreon.com/ubuntu-standard-24.05-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-testing.list && \ + echo "deb https://packages.centreon.com/ubuntu-plugins-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-plugins-testing.list && \ + wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 && \ + apt-get update + +# Install required packages and Robotframework +RUN apt-get update && \ + apt-get install -y \ + python3 \ + python3-dev \ + python3-pip \ + python3-venv + +# Create a virtual environment and install Robot Framework +RUN python3 -m venv /opt/robotframework-env && \ + /opt/robotframework-env/bin/pip install --no-cache-dir \ + robotframework \ + robotframework-examples \ + robotframework-databaselibrary \ + robotframework-requests \ + robotframework-jsonlibrary \ + pymysql + +# Clean up +RUN apt-get clean && rm -rf /var/lib/apt/lists/* + +# Set the PATH to include the virtual environment +ENV PATH="/opt/robotframework-env/bin:$PATH" diff --git a/.github/scripts/collect-prepare-test-robot.sh b/.github/scripts/collect-prepare-test-robot.sh index 25e9f02e5b0..c3cbc047175 100755 --- 
a/.github/scripts/collect-prepare-test-robot.sh +++ b/.github/scripts/collect-prepare-test-robot.sh @@ -67,8 +67,10 @@ fi if [ "$distrib" = "ALMALINUX" ]; then dnf groupinstall -y "Development Tools" dnf install -y python3-devel + dnf clean all else apt-get update apt-get install -y build-essential apt-get install -y python3-dev + apt-get clean fi diff --git a/.github/scripts/collect-unit-tests.sh b/.github/scripts/collect-unit-tests.sh deleted file mode 100755 index 077ff0291b9..00000000000 --- a/.github/scripts/collect-unit-tests.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -set -e - -#Cmake -cd /src -export VCPKG_ROOT=/vcpkg -export PATH=$VCPKG_ROOT:$PATH - -OS_ID=$(awk '{print $1}' /etc/issue) -if [[ "$OS_ID" == "Debian" || "$OS_ID" == "Ubuntu" ]] ; then - CXXFLAGS="-Wall -Wextra" cmake -B build -DVCPKG_OVERLAY_TRIPLETS=/custom-triplets -DVCPKG_TARGET_TRIPLET=x64-linux-release -DVCPKG_OVERLAY_PORTS=/overlays -GNinja -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On -S . 
-else - CXXFLAGS="-Wall -Wextra" cmake -B build -GNinja -DVCPKG_OVERLAY_TRIPLETS=/custom-triplets -DVCPKG_OVERLAY_PORTS=/overlays -DVCPKG_TARGET_TRIPLET=x64-linux-release -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On -S . -fi - -#Build -ninja -Cbuild -j8 -ninja -Cbuild -j8 install - -#Test - -cd build -tests/ut_broker --gtest_output=xml:ut_broker.xml -tests/ut_engine --gtest_output=xml:ut_engine.xml -tests/ut_clib --gtest_output=xml:ut_clib.xml -tests/ut_connector --gtest_output=xml:ut_connector.xml -tests/ut_common --gtest_output=xml:ut_common.xml -tests/ut_agent --gtest_output=xml:ut_agent.xml -echo "---------------------------------------------------------- end of ut tests ------------------------------------------------" diff --git a/.github/scripts/windows-agent-compile.ps1 b/.github/scripts/windows-agent-compile.ps1 new file mode 100644 index 00000000000..831cf31f363 --- /dev/null +++ b/.github/scripts/windows-agent-compile.ps1 @@ -0,0 +1,78 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. 
You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +Write-Host "Work in" $pwd.ToString() + +[System.Environment]::SetEnvironmentVariable("AWS_EC2_METADATA_DISABLED","true") + +Write-Host $env:VCPKG_BINARY_SOURCES + +$current_dir = $pwd.ToString() + +#get cache from s3 +$files_to_hash = "vcpkg.json", "custom-triplets\x64-windows.cmake", "CMakeLists.txt", "CMakeListsWindows.txt" +$files_content = Get-Content -Path $files_to_hash -Raw +$stringAsStream = [System.IO.MemoryStream]::new() +$writer = [System.IO.StreamWriter]::new($stringAsStream) +$writer.write($files_content -join " ") +$writer.Flush() +$stringAsStream.Position = 0 +$vcpkg_hash = Get-FileHash -InputStream $stringAsStream -Algorithm SHA256 | Select-Object Hash +$file_name = "windows-agent-vcpkg-dependencies-cache-" + $vcpkg_hash.Hash +$file_name_extension = "${file_name}.7z" + +#try to get compiled dependenciesfrom s3 +Write-Host "try to download compiled dependencies from s3: $file_name_extension $file_name_extension" +aws --quiet s3 cp s3://centreon-collect-robot-report/$file_name_extension $file_name_extension +if ( $? 
-ne $true ) { + #no => generate + Write-Host "#######################################################################################################################" + Write-Host "compiled dependencies unavailable for this version we will need to build it, it will take a long time" + Write-Host "#######################################################################################################################" + + Write-Host "install vcpkg" + git clone --depth 1 -b 2024.07.12 https://github.com/microsoft/vcpkg.git + cd vcpkg + bootstrap-vcpkg.bat + cd $current_dir + + [System.Environment]::SetEnvironmentVariable("VCPKG_ROOT",$pwd.ToString()+"\vcpkg") + [System.Environment]::SetEnvironmentVariable("PATH",$pwd.ToString()+"\vcpkg;" + $env:PATH) + + Write-Host "compile vcpkg dependencies" + vcpkg install --vcpkg-root $env:VCPKG_ROOT --x-install-root build_windows\vcpkg_installed --x-manifest-root . --overlay-triplets custom-triplets --triplet x64-windows + + Write-Host "Compress binary archive" + 7z a $file_name_extension build_windows\vcpkg_installed + Write-Host "Upload binary archive" + aws s3 cp $file_name_extension s3://centreon-collect-robot-report/$file_name_extension + Write-Host "create CMake files" +} +else { + 7z x $file_name_extension + Write-Host "Create cmake files from binary-cache downloaded without use vcpkg" +} + + + +cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTING=On -DWINDOWS=On -DBUILD_FROM_CACHE=On -S. 
-DVCPKG_CRT_LINKAGE=dynamic -DBUILD_SHARED_LIBS=OFF -Bbuild_windows + +Write-Host "build agent and tests" + +cmake --build build_windows --config Release + diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index 8388e621380..1966692a01a 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -5,6 +5,7 @@ concurrency: cancel-in-progress: true on: + workflow_dispatch: pull_request: branches: - develop @@ -15,38 +16,45 @@ on: - release-* paths: - ".github/**" + - "**/packaging/*.ya?ml" jobs: - actionlint: - runs-on: ubuntu-22.04 + action-lint: + runs-on: ubuntu-24.04 steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Download actionlint id: get_actionlint - run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/v1.7.1/scripts/download-actionlint.bash) shell: bash - name: Check workflow files + env: + SHELLCHECK_OPTS: "--severity=error" run: | ${{ steps.get_actionlint.outputs.executable }} \ - -ignore 'label "(common|collect|collect-arm64)" is unknown' \ - -ignore 'label "veracode" is unknown' \ - -ignore '"github.head_ref" is potentially untrusted' \ - -shellcheck= \ - -pyflakes= \ - -color + -ignore 'label "ubuntu-24.04" is unknown' \ + -ignore 'label "(common|collect|collect-arm64)" is unknown' \ + -ignore 'label "veracode" is unknown' \ + -ignore '"github.head_ref" is potentially untrusted' \ + -pyflakes= \ + -color shell: bash + yaml-lint: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 
+ with: + python-version: '3.12' - name: Install Yaml - run: | - pip install yamllint==1.32.0 + run: pip install yamllint==1.35.1 - name: Add Yaml Lint Rules run: | @@ -73,5 +81,4 @@ jobs: EOF - name: Lint YAML files - run: | - yamllint -c ./yamllint_rules.yml ./.github/actions/ ./.github/workflows/ + run: yamllint -c ./yamllint_rules.yml ./.github/actions/ ./.github/workflows/ ./**/packaging/ diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index b0dc3532ac1..464198dea2a 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -1,4 +1,10 @@ name: Centreon collect +run-name: | + ${{ + (github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.is_nightly == 'true')) + && format('collect nightly {0}', github.ref_name) + || '' + }} concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -6,6 +12,14 @@ concurrency: on: workflow_dispatch: + inputs: + is_nightly: + description: 'Set to true for nightly run' + required: true + default: false + type: boolean + schedule: + - cron: '30 0 * * 1-5' pull_request: paths: - agent/** @@ -21,12 +35,13 @@ on: - cmake.sh - cmake-vcpkg.sh - CMakeLists.txt + - CMakeListsLinux.txt - vcpkg.json - overlays/** - selinux/** - - vcpkg/** - "!.veracode-exclusions" - "!veracode.json" + - "!**/test/**" push: branches: - develop @@ -47,67 +62,268 @@ on: - cmake.sh - cmake-vcpkg.sh - CMakeLists.txt + - CMakeListsLinux.txt - vcpkg.json - overlays/** - selinux/** - - vcpkg/** - "!.veracode-exclusions" - "!veracode.json" + - "!**/test/**" jobs: + dispatch-to-maintained-branches: + if: ${{ github.event_name == 'schedule' && github.ref_name == 'develop' }} + runs-on: ubuntu-24.04 + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - run: | + gh workflow run robot-nightly.yml -r "dev-24.04.x" + gh workflow run robot-nightly.yml -r 
"dev-23.10.x" + gh workflow run robot-nightly.yml -r "dev-23.04.x" + gh workflow run robot-nightly.yml -r "dev-22.10.x" + shell: bash + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + get-version: uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt + + veracode-analysis: + needs: [get-version] + if: ${{ github.event_name == 'schedule' && github.ref_name == 'develop' }} + uses: ./.github/workflows/veracode-analysis.yml + with: + module_name: centreon-collect + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + img_version: ${{ needs.get-version.outputs.img_version }} + secrets: + veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} + veracode_api_key: ${{ secrets.VERACODE_API_KEY_COLL }} + veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} + docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} + docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} unit-test: needs: [get-version] if: ${{ ! 
contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} - runs-on: [self-hosted, collect] strategy: fail-fast: false matrix: - image: [alma8, alma9, debian-bullseye, debian-bookworm] - name: unit test ${{ matrix.image }} + distrib: [alma8, alma9, debian-bullseye, debian-bookworm] + + runs-on: [self-hosted, collect] + + env: + SCCACHE_PATH: "/usr/bin/sccache" + SCCACHE_BUCKET: "centreon-github-sccache" + SCCACHE_REGION: "eu-west-1" + AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.COLLECT_S3_SECRET_KEY }} + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-${{ matrix.distrib }}:${{ needs.get-version.outputs.img_version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: unit test ${{ matrix.distrib }} steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - name: Login to Registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 - with: - registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + - name: Install sccache + run: | + if ! 
command -v wget &> /dev/null; then + if [[ "${{ matrix.distrib }}" == "alma8" || "${{ matrix.distrib }}" == "alma9" ]]; then + dnf install -y wget + else + apt-get update + apt-get install -y wget + fi + fi - - name: Test ${{ matrix.image }} - uses: ./.github/actions/runner-docker - with: - registry_url: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} - script_name: /src/.github/scripts/collect-unit-tests - image_name: centreon-collect-${{ matrix.image }} - image_version: ${{ needs.get-version.outputs.img_version }} + wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz + tar xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz + mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/ + + ${SCCACHE_PATH} --start-server + shell: bash + + - name: Compile centreon-collect + run: | + CMAKE="cmake" + if [[ "${{ matrix.distrib }}" == "alma8" || "${{ matrix.distrib }}" == "alma9" ]]; then + CMAKE="cmake3" + fi + + export VCPKG_ROOT=/vcpkg + export PATH=$VCPKG_ROOT:$PATH + + mv /root/.cache /github/home/ + + CXXFLAGS="-Wall -Wextra" $CMAKE \ + -B build \ + -DVCPKG_OVERLAY_TRIPLETS=/custom-triplets \ + -DVCPKG_TARGET_TRIPLET=x64-linux-release \ + -DVCPKG_OVERLAY_PORTS=/overlays \ + -GNinja \ + -DDEBUG_ROBOT=OFF \ + -DCMAKE_EXPORT_COMPILE_COMMANDS=On \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_PREFIX=/usr \ + -DWITH_PREFIX_BIN=/usr/sbin \ + -DWITH_USER_BROKER=centreon-broker \ + -DWITH_USER_ENGINE=centreon-engine \ + -DWITH_GROUP_BROKER=centreon-broker \ + -DWITH_GROUP_ENGINE=centreon-engine \ + -DWITH_TESTING=On \ + -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker \ + -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker \ + -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios \ + -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine \ + -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine \ + -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ \ + -DWITH_RW_DIR=/var/lib/centreon-engine/rw \ + -DWITH_VAR_DIR=/var/log/centreon-engine \ + 
-DWITH_MODULE_SIMU=On \ + -DCMAKE_C_COMPILER_LAUNCHER=${SCCACHE_PATH} \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${SCCACHE_PATH} \ + -DLEGACY_ENGINE=ON \ + -S . + + ninja -Cbuild + ninja -Cbuild install + shell: bash + + - name: Cache statistics + run: ${SCCACHE_PATH} --show-stats + shell: bash + + - name: Stop sccache server + run: ${SCCACHE_PATH} --stop-server + shell: bash + + - name: Run unit tests + run: | + tests/ut_broker --gtest_output=xml:ut_broker.xml + tests/ut_engine --gtest_output=xml:ut_engine.xml + tests/ut_clib --gtest_output=xml:ut_clib.xml + tests/ut_connector --gtest_output=xml:ut_connector.xml + tests/ut_common --gtest_output=xml:ut_common.xml + tests/ut_agent --gtest_output=xml:ut_agent.xml + working-directory: build + shell: bash package: needs: [get-version] if: ${{ ! contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} uses: ./.github/workflows/package-collect.yml with: - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} stability: ${{ needs.get-version.outputs.stability }} secrets: inherit + robot-test: + needs: [get-version, package] + if: | + (github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.is_nightly == 'true')) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + + strategy: + fail-fast: false + matrix: + include: + - distrib: el9 + image: centreon-collect-alma9-test + package_extension: rpm + arch: amd64 + database_type: mariadb + test_group_name: robot_test-mariadb-el9-amd64 + tests_params: '{}' + - distrib: el9 + image: centreon-collect-mysql-alma9-test + package_extension: rpm + arch: amd64 + database_type: mysql + test_group_name: robot_test-mysql-el9-amd64 + tests_params: '{}' + - distrib: bullseye + image: centreon-collect-debian-bullseye-arm64-test + package_extension: deb + arch: arm64 + database_type: mariadb + test_group_name: robot_test-mariadb-bullseye-arm64 + tests_params: '{}' + - distrib: bookworm + image: centreon-collect-debian-bookworm-arm64-test + package_extension: deb + arch: arm64 + database_type: mariadb + test_group_name: robot_test-mariadb-bookworm-arm64 + tests_params: '{}' + - distrib: bookworm + image: centreon-collect-debian-bookworm-test + package_extension: deb + arch: amd64 + database_type: mariadb + test_group_name: robot_test-mariadb-bookworm-amd64 + tests_params: '{}' + - distrib: el9 + image: centreon-collect-alma9-test + package_extension: rpm + arch: amd64 + database_type: mariadb + test_group_name: robot_test-mariadb-el9-amd64-grpc + tests_params: '{"default_transport":"grpc","default_bbdo_version":"3.1.0"}' + + name: ${{ matrix.test_group_name }} + + uses: ./.github/workflows/robot-test.yml + with: + distrib: ${{ matrix.distrib }} + arch: ${{ matrix.arch }} + image: ${{ matrix.image }} + image_test: ${{ matrix.image }}:${{ needs.get-version.outputs.test_img_version }} + image_version: ${{ needs.get-version.outputs.img_version }} + package_cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ matrix.package_extension }}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} + package_cache_path: ./*.${{ matrix.package_extension}} + database_type: ${{ matrix.database_type }} + tests_params: 
${{matrix.tests_params}} + test_group_name: ${{matrix.test_group_name}} + secrets: + registry_username: ${{ secrets.DOCKER_REGISTRY_ID }} + registry_password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} + collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} + xray_client_id: ${{ secrets.XRAY_CLIENT_ID }} + xray_client_secret: ${{ secrets.XRAY_CLIENT_SECRET }} + deliver-sources: runs-on: [self-hosted, common] needs: [get-version, package] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + if: | + github.event_name != 'workflow_dispatch' && + contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: centreon-collect @@ -117,13 +333,17 @@ jobs: bucket_directory: centreon-collect module_directory: centreon-collect module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} deliver-rpm: - if: ${{ contains(fromJson('["testing", "stable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] + if: | + contains(fromJson('["unstable", "testing"]'), needs.get-version.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + needs: [get-version, robot-test] environment: ${{ needs.get-version.outputs.environment }} runs-on: [self-hosted, common] strategy: @@ -138,14 +358,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} @@ -153,8 +373,12 @@ jobs: release_cloud: ${{ needs.get-version.outputs.release_cloud }} deliver-deb: - if: ${{ contains(fromJson('["testing", "stable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] + if: | + contains(fromJson('["unstable", "testing"]'), needs.get-version.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + needs: [get-version, robot-test] environment: ${{ needs.get-version.outputs.environment }} runs-on: [self-hosted, common] strategy: @@ -173,14 +397,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} @@ -189,7 +413,12 @@ jobs: promote: needs: [get-version] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + if: | + github.event_name != 'workflow_dispatch' && + contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -197,7 +426,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -205,8 +434,7 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} stability: ${{ needs.get-version.outputs.stability }} github_ref_name: ${{ github.ref_name }} release_type: ${{ needs.get-version.outputs.release_type }} diff --git a/.github/workflows/check-status.yml b/.github/workflows/check-status.yml new file mode 100644 index 00000000000..6cfbf4f7884 --- /dev/null +++ b/.github/workflows/check-status.yml @@ -0,0 +1,103 @@ +name: check-status + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + pull_request: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + - hotfix-* + - release-* + +jobs: + check-status: + runs-on: ubuntu-24.04 + steps: + - name: Check workflow statuses and display token usage + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + echo "current rest api rate usage:" + curl -s -H "Accept: application/vnd.github+json" -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/rate_limit | jq .rate + echo "" + echo "" + echo "current graphql rate usage:" + curl -s -H "Accept: application/vnd.github+json" -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/rate_limit | jq .resources.graphql + echo "" + echo "" + + - uses: 
actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.number }} + with: + script: | + await exec.exec("sleep 20s"); + + for (let i = 0; i < 120; i++) { + const failure = []; + const cancelled = []; + const pending = []; + + const result = await github.rest.checks.listSuitesForRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: "${{ github.event.pull_request.head.sha }}" + }); + result.data.check_suites.forEach(({ app: { slug }, conclusion, id}) => { + if (slug === 'github-actions') { + if (conclusion === 'failure' || conclusion === 'cancelled') { + failure.push(id); + } else if (conclusion === null) { + pending.push(id); + } + console.log(`check suite ${id} => ${conclusion === null ? 'pending' : conclusion}`); + } + }); + + if (pending.length === 0) { + core.setFailed("Cannot get pull request check status"); + return; + } + + if (failure.length > 0) { + let failureMessage = ''; + const failedCheckRuns = []; + for await (const suite_id of failure) { + const resultCheckRuns = await github.rest.checks.listForSuite({ + owner: context.repo.owner, + repo: context.repo.repo, + check_suite_id: suite_id + }); + + resultCheckRuns.data.check_runs.forEach(({ conclusion, name, html_url }) => { + if (conclusion === 'failure' || conclusion === 'cancelled') { + failedCheckRuns.push(`${name} (${conclusion})`); + } + }); + } + + core.summary.addRaw(`${failedCheckRuns.length} job(s) failed:`, true) + core.summary.addList(failedCheckRuns); + await core.summary.write() + + core.setFailed(`${failure.length} workflow(s) failed`); + return; + } + + if (pending.length === 1) { + core.info("All workflows are ok"); + return; + } + + core.info(`${pending.length} workflows in progress`); + + await exec.exec("sleep 30s"); + } + + core.setFailed("Timeout: some jobs are still in progress"); diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 
b0f7f768353..2db215693b4 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -11,14 +11,16 @@ on: - develop - dev-[2-9][0-9].[0-9][0-9].x paths: - - '.github/docker/**' + - '.github/docker/Dockerfile.centreon-collect-*' pull_request: paths: - - '.github/docker/**' + - '.github/docker/Dockerfile.centreon-collect-*' jobs: get-version: uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt create-and-push-docker: needs: [get-version] @@ -90,26 +92,26 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Login to Registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - name: Login to Proxy Registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 + - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Build image ${{ matrix.image }}:${{ matrix.tag }} - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: file: .github/docker/Dockerfile.${{ matrix.dockerfile }} context: . 
diff --git a/.github/workflows/docker-gorgone-testing.yml b/.github/workflows/docker-gorgone-testing.yml new file mode 100644 index 00000000000..6b8aa098d81 --- /dev/null +++ b/.github/workflows/docker-gorgone-testing.yml @@ -0,0 +1,51 @@ +name: docker-gorgone-testing + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + paths: + - ".github/docker/Dockerfile.gorgone-testing-*" + - ".github/workflows/docker-gorgone-testing.yml" + pull_request: + paths: + - ".github/docker/Dockerfile.gorgone-testing-*" + - ".github/workflows/docker-gorgone-testing.yml" + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + dockerize: + needs: [get-version] + runs-on: ubuntu-22.04 + + strategy: + matrix: + distrib: [alma8, alma9, bullseye, bookworm, jammy] + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Login to registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 + with: + file: .github/docker/Dockerfile.gorgone-testing-${{ matrix.distrib }} + context: . 
+ pull: true + push: true + tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/gorgone-testing-${{ matrix.distrib }}:${{ needs.get-version.outputs.gorgone_docker_version }} diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml index c48ef8ca58e..2ac04a47b19 100644 --- a/.github/workflows/get-version.yml +++ b/.github/workflows/get-version.yml @@ -1,6 +1,17 @@ on: workflow_call: + inputs: + version_file: + required: false + type: string + default: CMakeLists.txt outputs: + major_version: + description: "major version" + value: ${{ jobs.get-version.outputs.major_version }} + minor_version: + description: "minor version" + value: ${{ jobs.get-version.outputs.minor_version }} img_version: description: "docker image version (vcpkg checksum)" value: ${{ jobs.get-version.outputs.img_version }} @@ -10,9 +21,6 @@ on: version: description: "major version" value: ${{ jobs.get-version.outputs.version }} - patch: - description: "patch version" - value: ${{ jobs.get-version.outputs.patch }} release: description: "release" value: ${{ jobs.get-version.outputs.release }} @@ -28,23 +36,28 @@ on: release_cloud: description: "context of release (cloud or not cloud)" value: ${{ jobs.get-version.outputs.release_cloud }} + gorgone_docker_version: + description: "md5 of gorgone dockerfile" + value: ${{ jobs.get-version.outputs.gorgone_docker_version }} jobs: get-version: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: + major_version: ${{ steps.get_version.outputs.major_version }} + minor_version: ${{ steps.get_version.outputs.minor_version }} img_version: ${{ steps.get_version.outputs.img_version }} test_img_version: ${{ steps.get_version.outputs.test_img_version }} version: ${{ steps.get_version.outputs.version }} - patch: ${{ steps.get_version.outputs.patch }} release: ${{ steps.get_version.outputs.release }} stability: ${{ steps.get_version.outputs.stability }} environment: ${{ steps.get_version.outputs.env }} release_type: ${{ 
steps.get_version.outputs.release_type }} release_cloud: ${{ steps.get_version.outputs.release_cloud}} + gorgone_docker_version: ${{ steps.get_version.outputs.gorgone_docker_version }} steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: install gh cli on self-hosted runner run: | @@ -64,14 +77,41 @@ jobs: - id: get_version run: | set -x + + if [[ "${{ inputs.version_file }}" == */.version ]]; then + . .version + . ${{ inputs.version_file }} + VERSION="$MAJOR.$MINOR" + elif [[ "${{ inputs.version_file }}" == CMakeLists.txt ]]; then + MAJOR=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." min}' CMakeLists.txt) + MINOR=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) + VERSION="$MAJOR.$MINOR" + else + echo "Unable to parse ${{ inputs.version_file }}" + exit 1 + fi + + echo "VERSION=$VERSION" + + if egrep '^[2-9][0-9]\.[0-9][0-9]\.[0-9]+' <<<"$VERSION" >/dev/null 2>&1 ; then + n=${VERSION//[!0-9]/ } + a=(${n//\./ }) + echo "major_version=${a[0]}.${a[1]}" >> $GITHUB_OUTPUT + MAJOR=${a[0]}.${a[1]} + echo "minor_version=${a[2]}" >> $GITHUB_OUTPUT + else + echo "Cannot parse version number from ${{ inputs.version_file }}" + exit 1 + fi + + GORGONE_DOCKER_VERSION=$(cat .github/docker/Dockerfile.gorgone-testing-* | md5sum | cut -c1-8) + echo "gorgone_docker_version=$GORGONE_DOCKER_VERSION" >> $GITHUB_OUTPUT + IMG_VERSION=$( cat `ls .github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}') TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8) - VERSION=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." 
min}' CMakeLists.txt) - PATCH=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT echo "test_img_version=$TEST_IMG_VERSION" >> $GITHUB_OUTPUT echo "version=$VERSION" >> $GITHUB_OUTPUT - echo "patch=$PATCH" >> $GITHUB_OUTPUT if [[ -z "$GITHUB_HEAD_REF" ]]; then BRANCHNAME="$GITHUB_REF_NAME" @@ -152,7 +192,7 @@ jobs: esac case "$BRANCHNAME" in - develop | dev-[2-9][0-9].[0-9][0-9].x) + develop | dev-[2-9][0-9].[0-9][0-9].x | prepare-release-cloud*) STABILITY="unstable" ENV="development" ;; diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml new file mode 100644 index 00000000000..d9ca865018b --- /dev/null +++ b/.github/workflows/gorgone.yml @@ -0,0 +1,337 @@ +name: gorgone + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + types: + - opened + - synchronize + - reopened + - ready_for_review + paths: + - "gorgone/**" + - "!gorgone/tests/**" + - "!gorgone/veracode.json" + - "!gorgone/.veracode-exclusions" + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - "gorgone/**" + - "!gorgone/tests/**" + - "!gorgone/veracode.json" + - "!gorgone/.veracode-exclusions" + +env: + base_directory: gorgone + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + with: + version_file: gorgone/.version + + veracode-analysis: + needs: [get-version] + uses: ./.github/workflows/veracode-analysis.yml + with: + module_directory: gorgone + module_name: centreon-gorgone + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + img_version: ${{ needs.get-version.outputs.img_version }} + secrets: + veracode_api_id: ${{ secrets.VERACODE_API_ID_GORG }} + veracode_api_key: ${{ secrets.VERACODE_API_KEY_GORG }} + veracode_srcclr_token: ${{ 
secrets.VERACODE_SRCCLR_TOKEN }} + docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} + docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + package: + needs: [get-version] + if: ${{ needs.get-version.outputs.stability != 'stable' }} + + strategy: + fail-fast: false + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + include: + - package_extension: rpm + image: packaging-nfpm-alma8 + distrib: el8 + - package_extension: rpm + image: packaging-nfpm-alma9 + distrib: el9 + - package_extension: deb + image: packaging-nfpm-bullseye + distrib: bullseye + - package_extension: deb + image: packaging-nfpm-bookworm + distrib: bookworm + - package_extension: deb + image: packaging-nfpm-jammy + distrib: jammy + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Set package version and paths according to distrib + run: | + if [[ "${{ matrix.package_extension }}" == "deb" ]]; then + PERL_VENDORLIB="/usr/share/perl5" + else + PERL_VENDORLIB="/usr/share/perl5/vendor_perl" + fi + echo "PERL_VENDORLIB=$PERL_VENDORLIB" >> $GITHUB_ENV + shell: bash + + - name: Generate selinux binaries + if: ${{ matrix.package_extension == 'rpm' }} + run: | + cd gorgone/selinux + sed -i "s/@VERSION@/${{ needs.get-version.outputs.major_version }}.${{ needs.get-version.outputs.minor_version }}/g" centreon-gorgoned.te + make -f /usr/share/selinux/devel/Makefile + shell: bash + + - name: Remove selinux packaging files on debian + if: ${{ matrix.package_extension == 'deb' }} + run: rm -f gorgone/packaging/*-selinux.yaml + shell: bash + + - name: Package + uses: ./.github/actions/package + with: + 
nfpm_file_pattern: "gorgone/packaging/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + release: ${{ needs.get-version.outputs.release }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + test-gorgone: + needs: [get-version, package] + + strategy: + fail-fast: false + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + include: + - package_extension: rpm + image: gorgone-testing-alma8 + distrib: el8 + - package_extension: rpm + image: gorgone-testing-alma9 + distrib: el9 + - package_extension: deb + image: gorgone-testing-bullseye + distrib: bullseye + - package_extension: deb + image: gorgone-testing-jammy + distrib: jammy + - package_extension: deb + image: gorgone-testing-bookworm + distrib: bookworm + + runs-on: ubuntu-22.04 + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.gorgone_docker_version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + services: + mariadb: + image: mariadb:latest + ports: + - 3306 + env: + MYSQL_USER: centreon + MYSQL_PASSWORD: password + MYSQL_ROOT_PASSWORD: password + + steps: + - name: Get linked branch of centreon repository + id: centreon_repo_linked_branch + run: | + CENTREON_REPO_LINKED_BRANCH=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/dev-${{ needs.get-version.outputs.major_version }}\.x$" >/dev/null 2>&1 && echo "dev-${{ 
needs.get-version.outputs.major_version }}.x" || echo develop) + + GIT_BRANCH_EXISTS=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/${{ github.head_ref || github.ref_name }}$" >/dev/null 2>&1 && echo yes || echo no) + if [[ "$GIT_BRANCH_EXISTS" == "yes" ]]; then + CENTREON_REPO_LINKED_BRANCH="${{ github.head_ref || github.ref_name }}" + fi + + echo "linked_branch=$CENTREON_REPO_LINKED_BRANCH" >> $GITHUB_OUTPUT + shell: bash + + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + repository: centreon/centreon + path: centreon + ref: ${{ steps.centreon_repo_linked_branch.outputs.linked_branch }} + sparse-checkout: | + centreon/www/install/createTables.sql + centreon/www/install/createTablesCentstorage.sql + + - name: get cached gorgone package + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: ./*.${{ matrix.package_extension }} + key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + fail-on-cache-miss: true + + - name: Install gorgone from just built package + shell: bash + run: | + if [[ "${{ matrix.package_extension }}" == "deb" ]]; then + apt update + apt install -y ./centreon-gorgone*${{ matrix.distrib }}* + else + dnf install -y ./centreon-gorgone*${{ matrix.distrib }}* ./centreon-gorgone-centreon-config*${{ matrix.distrib }}* + # in el8 at least, there is a package for the configuration and a package for the actual code. + # this is not the case for debian, and for now I don't know why it was made any different between the two OSes.
+ fi + + - name: Create databases + run: | + mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon\`" + mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon-storage\`" + mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON centreon.* TO 'centreon'@'%'" + mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON \`centreon-storage\`.* TO 'centreon'@'%'" + mysql -h mariadb -u root -ppassword 'centreon' < centreon/centreon/www/install/createTables.sql + mysql -h mariadb -u root -ppassword 'centreon-storage' < centreon/centreon/www/install/createTablesCentstorage.sql + + - name: Run tests + run: robot -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests + + - name: Upload gorgone and robot debug artifacts + if: failure() + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: gorgone-debug-${{ matrix.distrib }} + path: | + log.html + /var/log/centreon-gorgone + /etc/centreon-gorgone + retention-days: 1 + + deliver-sources: + runs-on: [self-hosted, common] + needs: [get-version, package] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Deliver sources + uses: ./.github/actions/release-sources + with: + bucket_directory: centreon-gorgone + module_directory: gorgone + module_name: centreon-gorgone + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} + + deliver-rpm: + runs-on: [self-hosted, common] + needs: [get-version, package] + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + + strategy: + matrix: + distrib: [el8, el9] + + steps: + - 
name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Delivery + uses: ./.github/actions/rpm-delivery + with: + module_name: gorgone + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + deliver-deb: + runs-on: [self-hosted, common] + needs: [get-version, package] + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + + strategy: + matrix: + distrib: [bullseye, bookworm, jammy] + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Delivery + uses: ./.github/actions/deb-delivery + with: + module_name: gorgone + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + promote: + needs: [get-version] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bullseye, bookworm] + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + 
module_name: gorgone + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.major_version }} + stability: ${{ needs.get-version.outputs.stability }} + github_ref_name: ${{ github.ref_name }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml index f289fa48b1d..ad0adeb625a 100644 --- a/.github/workflows/libzmq.yml +++ b/.github/workflows/libzmq.yml @@ -39,7 +39,7 @@ jobs: runs-on: ubuntu-22.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -99,7 +99,7 @@ jobs: runs-on: ${{ matrix.runner }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -153,14 +153,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: libzmq distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} stability: ${{ needs.get-version.outputs.stability }} @@ -188,14 +188,14 @@ jobs: 
steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: libzmq distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} stability: ${{ needs.get-version.outputs.stability }} @@ -212,7 +212,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -220,8 +220,7 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} stability: ${{ needs.get-version.outputs.stability }} github_ref_name: ${{ github.ref_name }} release_type: ${{ needs.get-version.outputs.release_type }} diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml new file mode 100644 index 00000000000..402cc3bf640 --- /dev/null +++ b/.github/workflows/lua-curl.yml @@ -0,0 +1,227 @@ +name: lua-curl + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - lua-curl/** + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - lua-curl/** + +env: + major_version: 0.3 + minor_version: 13 + release: 21 # 10 for 
openssl 1.1.1 / 20 for openssl system / 21 for openssl system and possible issue with ~ + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + package: + needs: [get-version] + if: ${{ needs.get-version.outputs.stability != 'stable' }} + + strategy: + fail-fast: false + matrix: + include: + - package_extension: rpm + image: centreon-collect-alma8 + distrib: el8 + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: rpm + image: centreon-collect-alma9 + distrib: el9 + lua_version: 5.4 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-debian-bullseye + distrib: bullseye + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-debian-bookworm + distrib: bookworm + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-ubuntu-jammy + distrib: jammy + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-debian-bullseye-arm64 + distrib: bullseye + lua_version: 5.3 + runner: ["self-hosted", "collect-arm64"] + arch: arm64 + - package_extension: deb + image: centreon-collect-debian-bookworm-arm64 + distrib: bookworm + lua_version: 5.3 + runner: ["self-hosted", "collect-arm64"] + arch: arm64 + + runs-on: ${{ matrix.runner }} + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.img_version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Checkout sources of lua-curl + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + repository: Lua-cURL/Lua-cURLv3 + path: lua-curl-src + ref: v${{ env.major_version 
}}.${{ env.minor_version }}
+
+ - name: Compile lua-curl and prepare packaging
+ run: |
+ if [ "${{ matrix.package_extension }}" == "rpm" ]; then
+ dnf install -y dnf-plugins-core
+ if [ "${{ matrix.distrib }}" == "el8" ]; then
+ dnf config-manager --set-enabled powertools
+ else
+ dnf config-manager --set-enabled crb
+ fi
+ dnf install -y make gcc openssl openssl-devel libcurl-devel lua lua-devel
+ cd lua-curl-src
+ make
+ cd ..
+ else
+ apt-get update
+ apt-get install -y make openssl libssl-dev libcurl4-openssl-dev lua${{ matrix.lua_version }} liblua${{ matrix.lua_version }}-0 liblua${{ matrix.lua_version }}-dev
+ cd lua-curl-src
+ make LUA_IMPL=lua${{ matrix.lua_version }} LUA_INC=/usr/include/lua${{ matrix.lua_version }}
+ cd ..
+ fi
+
+ sed -i "s/@luaver@/${{ matrix.lua_version }}/g" lua-curl/packaging/lua-curl.yaml
+ shell: bash
+
+ - name: Package
+ uses: ./.github/actions/package
+ with:
+ nfpm_file_pattern: "lua-curl/packaging/lua-curl.yaml"
+ distrib: ${{ matrix.distrib }}
+ package_extension: ${{ matrix.package_extension }}
+ major_version: ${{ env.major_version }}
+ minor_version: ${{ env.minor_version }}
+ release: ${{ env.release }}
+ arch: ${{ matrix.arch }}
+ commit_hash: ${{ github.sha }}
+ cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }}
+ rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }}
+ rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }}
+ rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }}
+ stability: ${{ needs.get-version.outputs.stability }}
+
+ deliver-rpm:
+ if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }}
+ needs: [get-version, package]
+ runs-on: ubuntu-22.04
+ strategy:
+ matrix:
+ include:
+ - distrib: el8
+ arch: amd64
+ - distrib: el9
+ arch: amd64
+ name: deliver ${{ matrix.distrib }} ${{ matrix.arch }}
+
+ steps:
+ - name: Checkout sources
+ uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Publish RPM packages + uses: ./.github/actions/rpm-delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + deliver-deb: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + include: + - distrib: bullseye + arch: amd64 + - distrib: bullseye + arch: arm64 + - distrib: bookworm + arch: amd64 + - distrib: jammy + arch: amd64 + name: deliver ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Publish DEB packages + uses: ./.github/actions/deb-delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + promote: + needs: [get-version] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bullseye, bookworm] + + steps: + - name: Checkout sources + uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + module_name: lua-curl + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.major_version }} + stability: ${{ needs.get-version.outputs.stability }} + github_ref_name: ${{ github.ref_name }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index 0fd36641a3c..d1b4299692c 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -3,7 +3,10 @@ name: Centreon collect packaging on: workflow_call: inputs: - version: + major_version: + required: true + type: string + minor_version: required: true type: string img_version: @@ -80,7 +83,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install sccache run: | @@ -91,13 +94,13 @@ jobs: dnf install -y wget fi if [ "${{ matrix.arch }}" = "amd64" ]; then - wget https://github.com/mozilla/sccache/releases/download/v0.7.4/sccache-v0.7.4-x86_64-unknown-linux-musl.tar.gz - tar xzf sccache-v0.7.4-x86_64-unknown-linux-musl.tar.gz - mv sccache-v0.7.4-x86_64-unknown-linux-musl/sccache /usr/bin/ + wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz + tar xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz + mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/ elif [ "${{ matrix.arch }}" = "arm64" ]; then - wget https://github.com/mozilla/sccache/releases/download/v0.7.4/sccache-v0.7.4-aarch64-unknown-linux-musl.tar.gz - tar xzf 
sccache-v0.7.4-aarch64-unknown-linux-musl.tar.gz - mv sccache-v0.7.4-aarch64-unknown-linux-musl/sccache /usr/bin/ + wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-aarch64-unknown-linux-musl.tar.gz + tar xzf sccache-v0.8.1-aarch64-unknown-linux-musl.tar.gz + mv sccache-v0.8.1-aarch64-unknown-linux-musl/sccache /usr/bin/ fi ${SCCACHE_PATH} --start-server @@ -107,7 +110,7 @@ jobs: cd selinux for MODULE in "centreon-engine" "centreon-broker" "centreon-monitoring-agent"; do cd $MODULE - sed -i "s/@VERSION@/${{ inputs.version }}/g" $MODULE.te + sed -i "s/@VERSION@/${{ inputs.major_version }}.${{ inputs.minor_version }}/g" $MODULE.te make -f /usr/share/selinux/devel/Makefile cd - done @@ -159,6 +162,7 @@ jobs: -DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DCMAKE_C_COMPILER_LAUNCHER=${SCCACHE_PATH} \ -DCMAKE_CXX_COMPILER_LAUNCHER=${SCCACHE_PATH} \ + -DLEGACY_ENGINE=ON \ -S . ninja -Cbuild @@ -189,7 +193,7 @@ jobs: "build/connectors/ssh/centreon_connector_ssh" "build/ccc/ccc" "build/agent/centagent") - for file in ${exe[@]}; do + for file in "${exe[@]}"; do echo "Making a debug file of $file" objcopy --only-keep-debug $file $file.debug objcopy --strip-debug $file @@ -202,7 +206,8 @@ jobs: nfpm_file_pattern: "packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} - version: ${{ inputs.version }} + major_version: ${{ inputs.major_version }} + minor_version: ${{ inputs.minor_version }} release: ${{ inputs.release }} arch: ${{ matrix.arch }} commit_hash: ${{ inputs.commit_hash }} @@ -219,7 +224,7 @@ jobs: # set condition to true if artifacts are needed - if: ${{ false }} name: Upload package artifacts - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: packages-${{ matrix.distrib }}-${{ matrix.arch }} path: ./*.${{ matrix.package_extension}} diff --git 
a/.github/workflows/rebase-master.yml b/.github/workflows/rebase-master.yml index a49afb39f8d..03520557266 100644 --- a/.github/workflows/rebase-master.yml +++ b/.github/workflows/rebase-master.yml @@ -16,7 +16,7 @@ jobs: if: github.event.pull_request.merged == true steps: - name: git checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 token: ${{ secrets.CENTREON_TECHNIQUE_PAT }} diff --git a/.github/workflows/rebase-version.yml b/.github/workflows/rebase-version.yml index ef93c147a52..c89b3fe98b5 100644 --- a/.github/workflows/rebase-version.yml +++ b/.github/workflows/rebase-version.yml @@ -16,7 +16,7 @@ jobs: if: github.event.pull_request.merged == true steps: - name: git checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 token: ${{ secrets.CENTREON_TECHNIQUE_PAT }} diff --git a/.github/workflows/release-trigger-builds.yml b/.github/workflows/release-trigger-builds.yml index 0fbd70c5989..3769c527747 100644 --- a/.github/workflows/release-trigger-builds.yml +++ b/.github/workflows/release-trigger-builds.yml @@ -19,7 +19,7 @@ jobs: release-trigger-builds: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Github CLI run: | @@ -47,8 +47,8 @@ jobs: #COMPONENTS_OSS_FULL=("awie" "dsm" "gorgone" "ha" "open-tickets" "web") #COMPONENTS_MODULES=("anomaly-detection" "autodiscovery" "bam" "cloud-business-extensions" "cloud-extensions" "it-edition-extensions" "lm" "map" "mbi" "ppm") #COMPONENTS_MODULES_FULL=("anomaly-detection" "autodiscovery" "bam" "cloud-business-extensions" "cloud-extensions" "it-edition-extensions" "lm" "map" "mbi" "ppm" "php-pecl-gnupg" 
"sourceguardian-loader") - COMPONENTS_COLLECT=("Centreon collect") - COMPONENTS_COLLECT_FULL=("Centreon collect") + COMPONENTS_COLLECT=("Centreon collect" "gorgone") + COMPONENTS_COLLECT_FULL=("Centreon collect" "gorgone") RUNS_URL="" # Accept release prefixed or develop branches, nothing else diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d5fde4487e1..594d0392f0a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -37,7 +37,7 @@ jobs: shell: bash - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml deleted file mode 100644 index 3ce8b5df421..00000000000 --- a/.github/workflows/robot-nightly.yml +++ /dev/null @@ -1,187 +0,0 @@ -name: robot-nightly -run-name: nightly robot ${{ github.head_ref || github.ref_name }} - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - workflow_dispatch: - schedule: - - cron: '30 0 * * *' - -jobs: - dispatch-to-maintained-branches: - if: ${{ github.event_name == 'schedule' && github.ref_name == 'develop' }} - runs-on: ubuntu-22.04 - steps: - - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - - run: | - gh workflow run robot-nightly.yml -r "dev-24.04.x" - gh workflow run robot-nightly.yml -r "dev-23.10.x" - gh workflow run robot-nightly.yml -r "dev-23.04.x" - gh workflow run robot-nightly.yml -r "dev-22.10.x" - shell: bash - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - get-version: - uses: ./.github/workflows/get-version.yml - - veracode-analysis: - needs: [get-version] - uses: ./.github/workflows/veracode-analysis.yml - with: - module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.version }} - 
minor_version: ${{ needs.get-version.outputs.patch }} - img_version: ${{ needs.get-version.outputs.img_version }} - secrets: - veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} - veracode_api_key: ${{ secrets.VERACODE_API_KEY_COLL }} - veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} - docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - package: - needs: [get-version] - uses: ./.github/workflows/package-collect.yml - with: - stability: ${{ needs.get-version.outputs.stability }} - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} - img_version: ${{ needs.get-version.outputs.img_version }} - release: ${{ needs.get-version.outputs.release }} - commit_hash: ${{ github.sha }} - secrets: inherit - - robot-test: - needs: [get-version, package] - - strategy: - fail-fast: false - matrix: - include: - - distrib: el9 - image: centreon-collect-alma9-test - package_extension: rpm - arch: amd64 - database_type: mariadb - test_group_name: robot_test-mariadb-el9-amd64 - tests_params: '{}' - - distrib: el9 - image: centreon-collect-mysql-alma9-test - package_extension: rpm - arch: amd64 - database_type: mysql - test_group_name: robot_test-mysql-el9-amd64 - tests_params: '{}' - - distrib: bullseye - image: centreon-collect-debian-bullseye-arm64-test - package_extension: deb - arch: arm64 - database_type: mariadb - test_group_name: robot_test-mariadb-bullseye-arm64 - tests_params: '{}' - - distrib: bookworm - image: centreon-collect-debian-bookworm-arm64-test - package_extension: deb - arch: arm64 - database_type: mariadb - test_group_name: robot_test-mariadb-bookworm-arm64 - tests_params: '{}' - - distrib: bookworm - image: centreon-collect-debian-bookworm-test - package_extension: deb - arch: amd64 - database_type: mariadb - test_group_name: robot_test-mariadb-bookworm-amd64 - tests_params: '{}' - - distrib: el9 - image: centreon-collect-alma9-test - 
package_extension: rpm - arch: amd64 - database_type: mariadb - test_group_name: robot_test-mariadb-el9-amd64-grpc - tests_params: '{"default_transport":"grpc","default_bbdo_version":"3.1.0"}' - - name: ${{ matrix.test_group_name }} - - uses: ./.github/workflows/robot-test.yml - with: - distrib: ${{ matrix.distrib }} - arch: ${{ matrix.arch }} - image: ${{ matrix.image }} - image_test: ${{ matrix.image }}:${{ needs.get-version.outputs.test_img_version }} - image_version: ${{ needs.get-version.outputs.img_version }} - package_cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ matrix.package_extension }}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - package_cache_path: ./*.${{ matrix.package_extension}} - database_type: ${{ matrix.database_type }} - tests_params: ${{matrix.tests_params}} - test_group_name: ${{matrix.test_group_name}} - secrets: - registry_username: ${{ secrets.DOCKER_REGISTRY_ID }} - registry_password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} - collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} - xray_client_id: ${{ secrets.XRAY_CLIENT_ID }} - xray_client_secret: ${{ secrets.XRAY_CLIENT_SECRET }} - - deliver-rpm: - if: ${{ contains(fromJson('["unstable"]'), needs.get-version.outputs.stability) }} - needs: [robot-test, get-version] - runs-on: [self-hosted, common] - strategy: - matrix: - distrib: [el8, el9] - name: deliver ${{ matrix.distrib }} - - steps: - - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - - name: Publish RPM packages - uses: ./.github/actions/delivery - with: - module_name: collect - distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} - artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-amd64-${{ github.head_ref || 
github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} - - deliver-deb: - if: ${{ contains(fromJson('["unstable"]'), needs.get-version.outputs.stability) }} - needs: [robot-test, get-version] - runs-on: [self-hosted, common] - strategy: - matrix: - include: - - distrib: bullseye - arch: amd64 - - distrib: bookworm - arch: amd64 - - distrib: jammy - arch: amd64 - - distrib: bullseye - arch: arm64 - name: deliver ${{ matrix.distrib }} - - steps: - - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - - name: Publish DEB packages - uses: ./.github/actions/delivery - with: - module_name: collect - distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} - artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} diff --git a/.github/workflows/robot-test.yml b/.github/workflows/robot-test.yml index c10e0629e55..d6f8bc622e3 100644 --- a/.github/workflows/robot-test.yml +++ b/.github/workflows/robot-test.yml @@ -50,17 +50,17 @@ jobs: runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-22.04' }} steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Login to Registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ 
vars.DOCKER_INTERNAL_REGISTRY_URL }} username: ${{ secrets.registry_username }} password: ${{ secrets.registry_password }} - name: Login to Proxy Registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }} username: ${{ secrets.registry_username }} @@ -90,7 +90,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: List features id: list-features @@ -111,19 +111,19 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - name: Restore image - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: /tmp/${{inputs.image}} key: ${{inputs.image_test}} fail-on-cache-miss: true - name: Restore packages - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ${{ inputs.package_cache_path }} key: ${{ inputs.package_cache_key }} @@ -134,32 +134,31 @@ jobs: docker load --input /tmp/${{ inputs.image }} - name: Test ${{ matrix.feature }} - run: | - docker run --rm --privileged --ulimit core=-1 --security-opt seccomp=unconfined \ - -v $(pwd):/test_collect \ - --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ - --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ - --env AWS_BUCKET=centreon-collect-robot-report \ - --workdir /test_collect \ - ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{inputs.image_test}} \ - ./.github/scripts/collect-test-robot.sh \ - ${{ matrix.feature }} ${{inputs.database_type}} 
${{inputs.tests_params}} - env: AWS_ACCESS_KEY_ID: ${{ secrets.collect_s3_access_key }} AWS_SECRET_ACCESS_KEY: ${{ secrets.collect_s3_secret_key }} - - - name: Generate Xray Token - id: generate-xray-token run: | - token_response=$(curl -H "Content-Type: application/json" -X POST --data "{\"client_id\": \"${{ secrets.XRAY_CLIENT_ID }}\", \"client_secret\": \"${{ secrets.XRAY_CLIENT_SECRET }}\"}" "https://xray.cloud.getxray.app/api/v1/authenticate") - xray_token=$(echo "$token_response" | sed -n 's/.*"\(.*\)".*/\1/p') - echo "xray_token=$xray_token" >> $GITHUB_OUTPUT - shell: bash - - - name: Send report to xrays - run: | - curl -H "Content-Type: multipart/form-data" -X POST -F info=@tests/issueFields.json -F results=@tests/output.xml -F testInfo=@tests/testIssueFields.json -H "Authorization: Bearer ${{ steps.generate-xray-token.outputs.xray_token }}" https://xray.cloud.getxray.app/api/v2/import/execution/robot/multipart + docker run --rm --privileged --ulimit core=-1 --security-opt seccomp=unconfined \ + -v $(pwd):/test_collect \ + --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ + --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ + --env AWS_BUCKET=centreon-collect-robot-report \ + --workdir /test_collect \ + ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{inputs.image_test}} \ + ./.github/scripts/collect-test-robot.sh \ + ${{ matrix.feature }} ${{inputs.database_type}} ${{inputs.tests_params}} + + # - name: Generate Xray Token + # id: generate-xray-token + # run: | + # token_response=$(curl -H "Content-Type: application/json" -X POST --data "{\"client_id\": \"${{ secrets.XRAY_CLIENT_ID }}\", \"client_secret\": \"${{ secrets.XRAY_CLIENT_SECRET }}\"}" "https://xray.cloud.getxray.app/api/v1/authenticate") + # xray_token=$(echo "$token_response" | sed -n 's/.*"\(.*\)".*/\1/p') + # echo "xray_token=$xray_token" >> $GITHUB_OUTPUT + # shell: bash + + # - name: Send report to xrays + # run: | + # curl -H "Content-Type: multipart/form-data" -X POST -F 
info=@tests/issueFields.json -F results=@tests/output.xml -F testInfo=@tests/testIssueFields.json -H "Authorization: Bearer ${{ steps.generate-xray-token.outputs.xray_token }}" https://xray.cloud.getxray.app/api/v2/import/execution/robot/multipart - name: Move reports if: ${{ failure() }} @@ -183,7 +182,7 @@ jobs: - name: Upload Test Results if: ${{ failure() }} - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: reports-${{inputs.test_group_name}}-${{ steps.feature-path.outputs.feature_name_with_dash }} path: reports @@ -195,17 +194,17 @@ jobs: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Download Artifacts - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: pattern: reports-${{inputs.test_group_name}}-* path: reports merge-multiple: true - name: Upload the regrouped artifact - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: reports-${{inputs.test_group_name}} path: reports/ @@ -244,15 +243,15 @@ jobs: shell: bash # setup-python v5.0.0 relies on node20 which is not supported by el7 distributions - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 if: ${{ inputs.distrib == 'el7'}} with: - python-version: '3.10' + python-version: "3.10" - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 if: ${{ inputs.distrib != 'el7' }} with: - 
python-version: '3.10' + python-version: "3.10" - run: | pip3 install -U robotframework robotframework-databaselibrary pymysql python-dateutil @@ -261,7 +260,7 @@ jobs: shell: bash - name: Send report to commit - uses: joonvena/robotframework-reporter-action@f99583edc5902bd73a61df5c37d1321bc38890ca # v2.4 + uses: joonvena/robotframework-reporter-action@229b6d4248b20be6e54f4fc32c7414130d1bf200 # v2.5 with: gh_access_token: ${{ secrets.GITHUB_TOKEN }} report_path: reports diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index 412319590f9..23361521e81 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -2,6 +2,9 @@ on: workflow_call: inputs: + module_directory: + required: false + type: string module_name: required: true type: string @@ -32,6 +35,7 @@ jobs: runs-on: ubuntu-22.04 outputs: development_stage: ${{ steps.routing-mode.outputs.development_stage }} + skip_analysis: ${{ steps.routing-mode.outputs.skip_analysis }} steps: - name: Set routing mode @@ -46,14 +50,21 @@ jobs: fi done + # skip analysis of draft PR and analysis on development branches using workflow dispatch + SKIP_ANALYSIS="true" + if [[ "${{ github.event_name }}" == "pull_request" && -n "${{ github.event.pull_request.number }}" && -n "${{ github.event.pull_request.draft }}" && "${{ github.event.pull_request.draft }}" == "false" ]] || [[ "$DEVELOPMENT_STAGE" != "Development" ]]; then + SKIP_ANALYSIS="false" + fi + echo "development_stage=$DEVELOPMENT_STAGE" >> $GITHUB_OUTPUT + echo "skip_analysis=$SKIP_ANALYSIS" >> $GITHUB_OUTPUT cat $GITHUB_OUTPUT build: name: Binary preparation runs-on: [self-hosted, collect] needs: [routing] - if: needs.routing.outputs.development_stage != 'Development' + if: needs.routing.outputs.skip_analysis == 'false' container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-alma9:${{ inputs.img_version }} credentials: @@ -61,9 +72,10 @@ jobs: password: ${{ 
secrets.docker_registry_passwd }} steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - name: Compiling Cpp sources + - if: ${{ inputs.module_name == 'centreon-collect' }} + name: Compiling Cpp sources run: | mv /root/.cache /github/home export VCPKG_ROOT=/vcpkg @@ -101,12 +113,18 @@ jobs: echo "[DEBUG] - Build size" du -sh $(find build/{broker,engine,clib,connectors,common} -name "*.so" -type f) | sort -rh - - name: Binary preparation + - if: ${{ inputs.module_name == 'centreon-collect' }} + name: Preserve centreon-collect binaries from cleaning run: | echo "[INFO] - Keeping only compiled files" - # preserve binaries from cleaning find build -type f -not \( -name "*.so" -or -name "cbd" -or -name "centengine" -or -name "cbwd" -or -name "centreon_connector_*" \) -delete + - name: Binary preparation of ${{ inputs.module_name }} + run: | + if [ -n "${{ inputs.module_directory }}" ]; then + cd ${{ inputs.module_directory }} + fi + echo "[INFO] - Removing veracode exclusions" if [[ -f ".veracode-exclusions" ]]; then for LINE in $( cat .veracode-exclusions | sed 's/[^a-zA-Z0-9_./-]//g' | sed -r 's/\.\./\./g' ); do @@ -125,17 +143,26 @@ jobs: else echo "::warning::No '.veracode-exclusions' file found for this module. 
Skipping exclusion step" fi - echo "[INFO] - Keeping only build's non empty folders" - find build -empty -type d -delete - ls -la build - echo "[INFO] - Generating the tarball" - tar cvzf "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" build + if [ "${{ inputs.module_name }}" = "centreon-collect" ]; then + echo "[INFO] - Keeping only build's non empty folders" + find build -empty -type d -delete + ls -la build + echo "[INFO] - Generating the tarball" + tar cvzf "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" build + else + echo "[INFO] - Generating the zip" + zip -rq "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.zip" * + if [ -n "${{ inputs.module_directory }}" ]; then + cd - + mv ${{ inputs.module_directory }}/${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.zip . + fi + fi - name: Cache uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: - path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" + path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.${{ inputs.module_name == 'centreon-collect' && 'tar.gz' || 'zip' }}" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" policy-scan: @@ -165,7 +192,7 @@ jobs: - name: Get build binary uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: - path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" + path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.${{ inputs.module_name == 'centreon-collect' && 'tar.gz' || 'zip' }}" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" - name: Sandbox scan @@ -173,8 +200,8 @@ jobs: continue-on-error: ${{ vars.VERACODE_CONTINUE_ON_ERROR == 'true' }} with: 
appname: "${{ inputs.module_name }}" - version: "${{ inputs.major_version }}.${{ inputs.minor_version }}_runId-${{ github.run_id }}" - filepath: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" + version: "${{ inputs.major_version }}.${{ inputs.minor_version }}_runId-${{ github.run_id }}_attempt-${{ github.run_attempt }}" + filepath: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.${{ inputs.module_name == 'centreon-collect' && 'tar.gz' || 'zip' }}" vid: "vera01ei-${{ secrets.veracode_api_id }}" vkey: "vera01es-${{ secrets.veracode_api_key }}" createprofile: true @@ -182,4 +209,4 @@ jobs: sandboxname: "${{ github.ref_name }}" includenewmodules: true scanallnonfataltoplevelmodules: true - deleteincompletescan: 2 + deleteincompletescan: 1 diff --git a/.github/workflows/windows-agent-robot-test.yml b/.github/workflows/windows-agent-robot-test.yml new file mode 100644 index 00000000000..8d52099a14e --- /dev/null +++ b/.github/workflows/windows-agent-robot-test.yml @@ -0,0 +1,15 @@ +name: Centreon Monitoring Agent Windows robot test + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + +jobs: + build-agent: + runs-on: windows-latest + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 diff --git a/.github/workflows/windows-agent.yml b/.github/workflows/windows-agent.yml new file mode 100644 index 00000000000..3e6b132ab68 --- /dev/null +++ b/.github/workflows/windows-agent.yml @@ -0,0 +1,83 @@ +name: Centreon Monitoring Agent Windows build and packaging + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - agent/** + - custom-triplets/** + - CMakeLists.txt + - CMakeListsWindows.txt + - vcpkg.json + push: + branches: + - develop + - 
dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - agent/** + - custom-triplets/** + - CMakeLists.txt + - CMakeListsWindows.txt + - vcpkg.json + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt + + build-and-test-agent: + needs: [get-version] + runs-on: windows-latest + env: + AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.COLLECT_S3_SECRET_KEY }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Compile Agent + run: .github/scripts/windows-agent-compile.ps1 + shell: powershell + + - name: Common test + run: | + cd build_windows + tests/ut_common + + - name: Agent test + run: | + cd build_windows + tests/ut_agent + + - name: Zip agent + run: | + $files_to_compress = "agent\conf\centagent.reg", "build_windows\agent\Release\centagent.exe" + Compress-Archive -Path $files_to_compress -DestinationPath centreon-monitoring-agent.zip + + - name: Save agent package in cache + uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: centreon-monitoring-agent.zip + key: ${{ github.run_id }}-${{ github.sha }}-CMA-${{ github.head_ref || github.ref_name }} + enableCrossOsArchive: ${{ true }} + + - name: Upload package artifacts + if: | + github.event_name != 'workflow_dispatch' && + contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: packages-centreon-monitoring-agent-windows + path: centreon-monitoring-agent.zip + retention-days: 1 diff --git a/.version b/.version index 4cb5b94d8d2..db9a189ef29 100644 --- a/.version +++ b/.version @@ -1,2 +1,2 @@ -MAJOR=24.07 +MAJOR=24.09 MINOR=0 diff --git a/CMakeLists.txt b/CMakeLists.txt index fc08f5d56c9..51319732e2a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,82 +30,47 @@ set(CMAKE_CXX_STANDARD 17) string(TIMESTAMP CENTREON_CURRENT_YEAR "%Y") add_definitions(-DCENTREON_CURRENT_YEAR="${CENTREON_CURRENT_YEAR}") -if(DEFINED ENV{VCPKG_ROOT}) - set(VCPKG_ROOT "$ENV{VCPKG_ROOT}") - message( - STATUS "TOOLCHAIN set to ${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake") - set(CMAKE_TOOLCHAIN_FILE - "${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake" - CACHE STRING "Vcpkg toolchain file") -else() - message( - STATUS - "TOOLCHAIN set to ${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" - ) +#when we build from cache(CI), we don't use vcpkg because it recompiles often everything +if (NOT BUILD_FROM_CACHE) + if(DEFINED ENV{VCPKG_ROOT}) + set(VCPKG_ROOT "$ENV{VCPKG_ROOT}") + message( + STATUS "TOOLCHAIN set to ${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake") + set(CMAKE_TOOLCHAIN_FILE + "${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake" + CACHE STRING "Vcpkg toolchain file") + else() + message( + STATUS + "TOOLCHAIN set to ${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" + ) + set(CMAKE_TOOLCHAIN_FILE + "${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" + CACHE STRING "Vcpkg toolchain file") + endif() + set(CMAKE_TOOLCHAIN_FILE "${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" CACHE STRING "Vcpkg toolchain file") -endif() - -set(CMAKE_TOOLCHAIN_FILE - "${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" - CACHE STRING "Vcpkg toolchain file") 
- -project("Centreon Collect" C CXX) - -option(WITH_ASAN - "Add the libasan to check memory leaks and other memory issues." OFF) -option(WITH_TSAN - "Add the libtsan to check threads and other multithreading issues." OFF) -if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_ID - STREQUAL "Clang") - message( - FATAL_ERROR "You can build broker with g++ or clang++. CMake will exit.") endif() -option(WITH_MALLOC_TRACE "compile centreon-malloc-trace library." OFF) +project("Centreon Collect" C CXX) # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -stdlib=libc++") # set(CMAKE_CXX_COMPILER "clang++") add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") -option(DEBUG_ROBOT OFF) +add_definitions("-DBOOST_PROCESS_USE_STD_FS=1") set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) -if(WITH_TSAN) - set(CMAKE_CXX_FLAGS_DEBUG - "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") - set(CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") -endif() - -if(WITH_ASAN) - set(CMAKE_BUILD_TYPE Debug) - if(WITH_CLANG) - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address") - set(CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fsanitize=address") - else() - set(CMAKE_CXX_FLAGS_DEBUG - "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") - set(CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address" - ) - endif() -endif() - -set(ALLOW_DUPLICATE_EXECUTABLE TRUE) - -set(BUILD_ARGS "-w" "dupbuild=warn") - # # Get distributions name # if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - file(STRINGS "/etc/os-release" release REGEX "^ID") + file(STRINGS "/etc/os-release" release REGEX "ID") foreach(line ${release}) if(${line} MATCHES "ID_LIKE=.*") @@ -115,6 +80,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux") if(${line} MATCHES "ID=.*") string(REGEX REPLACE "ID=\"(.*)\"" "\\1" id ${line}) endif() + + if(${line} MATCHES 
"VERSION_ID=.*") + string(REGEX REPLACE "VERSION_ID=\"([0-9]+)\..*" "\\1" os_version ${line}) + endif() endforeach() string(TOLOWER "${like}" like) @@ -135,6 +104,11 @@ else() set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") endif() +if(OS_DISTRIBUTOR STREQUAL "CentOS" AND os_version STREQUAL "8") + message(STATUS "Legacy gettimeofday") + add_definitions("-DLEGACY_GETTIMEOFDAY") +endif() + message(STATUS "${id} detected (compatible with ${OS_DISTRIBUTOR})") # set -latomic if OS is Raspbian. @@ -144,133 +118,13 @@ endif() # Version. set(COLLECT_MAJOR 24) -set(COLLECT_MINOR 07) +set(COLLECT_MINOR 09) set(COLLECT_PATCH 0) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") -add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") - -if(DEBUG_ROBOT) - add_definitions(-DDEBUG_ROBOT) -endif() - -# ########### CONSTANTS ########### -set(USER_BROKER centreon-broker) -set(USER_ENGINE centreon-engine) - -find_package(fmt CONFIG REQUIRED) -find_package(spdlog CONFIG REQUIRED) -find_package(gRPC CONFIG REQUIRED) -find_package(Protobuf REQUIRED) -find_package(nlohmann_json CONFIG REQUIRED) -find_package(GTest CONFIG REQUIRED) -find_package(CURL REQUIRED) -find_package(Boost REQUIRED COMPONENTS url) -find_package(ryml CONFIG REQUIRED) -add_definitions("-DSPDLOG_FMT_EXTERNAL") - -include(FindPkgConfig) -pkg_check_modules(MARIADB REQUIRED libmariadb) -pkg_check_modules(LIBSSH2 REQUIRED libssh2) - -# There is a bug with grpc. It is not put in the triplet directory. So we have -# to search for its plugin. 
-file( - GLOB_RECURSE GRPC_CPP_PLUGIN_EXE - RELATIVE ${CMAKE_BINARY_DIR} - grpc_cpp_plugin) -find_program( - GRPC_CPP_PLUGIN - NAMES ${GRPC_CPP_PLUGIN_EXE} - PATHS ${CMAKE_BINARY_DIR} REQUIRED - NO_DEFAULT_PATH) - -set(PROTOBUF_LIB_DIR ${Protobuf_DIR}/../../lib) -set(OTLP_LIB_DIR ${opentelemetry-cpp_DIR}/../../lib) -set(VCPKG_INCLUDE_DIR ${Protobuf_INCLUDE_DIR}) -include(GNUInstallDirs) - -# import opentelemetry-proto -add_custom_command( - OUTPUT - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto - COMMENT "get opentelemetry proto files from git repository" - COMMAND /bin/rm -rf ${CMAKE_SOURCE_DIR}/opentelemetry-proto - COMMAND - git ARGS clone --depth=1 --single-branch - https://github.com/open-telemetry/opentelemetry-proto.git - ${CMAKE_SOURCE_DIR}/opentelemetry-proto - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) - -add_custom_target( - opentelemetry-proto-files - DEPENDS - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto -) - -# var directories. 
-set(BROKER_VAR_LOG_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-broker") -set(BROKER_VAR_LIB_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-broker") -set(ENGINE_VAR_LOG_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine") -set(ENGINE_VAR_LOG_ARCHIVE_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine/archives") -set(ENGINE_VAR_LIB_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-engine") -add_definitions(-DDEFAULT_COMMAND_FILE="${ENGINE_VAR_LIB_DIR}/rw/centengine.cmd" - -DDEFAULT_DEBUG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.debug" - -DDEFAULT_LOG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.log" - -DDEFAULT_RETENTION_FILE="${ENGINE_VAR_LOG_DIR}/retention.dat" - -DDEFAULT_STATUS_FILE="${ENGINE_VAR_LOG_DIR}/status.dat") -set(CMAKE_INSTALL_PREFIX "/usr") -option(WITH_TESTING "Build unit tests." OFF) -option(WITH_CONF "Install configuration files." ON) - -# Code coverage on unit tests -option(WITH_COVERAGE "Add code coverage on unit tests." OFF) - -if(WITH_TESTING AND WITH_COVERAGE) - set(CMAKE_BUILD_TYPE "Debug") - include(cmake/CodeCoverage.cmake) - append_coverage_compiler_flags() -endif() - -set(protobuf_MODULE_COMPATIBLE True) - -include_directories(${CMAKE_SOURCE_DIR} ${VCPKG_INCLUDE_DIR} fmt::fmt - spdlog::spdlog ${CMAKE_SOURCE_DIR}/clib/inc) - -add_subdirectory(clib) -add_subdirectory(common) -add_subdirectory(broker) -add_subdirectory(bbdo) -add_subdirectory(engine) -add_subdirectory(connectors) -add_subdirectory(ccc) -add_subdirectory(agent) - -if(WITH_MALLOC_TRACE) - add_subdirectory(malloc-trace) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + include(CMakeListsLinux.txt) +else() + include(CMakeListsWindows.txt) endif() - -add_custom_target(test-broker COMMAND tests/ut_broker) -add_custom_target(test-engine COMMAND tests/ut_engine) -add_custom_target(test-clib COMMAND tests/ut_clib) -add_custom_target(test-connector COMMAND tests/ut_connector) -add_custom_target(test-common COMMAND tests/ut_common) 
-add_custom_target(test-agent COMMAND tests/ut_agent) - -add_custom_target(test DEPENDS test-broker test-engine test-clib test-connector - test-common test-agent) - -add_custom_target(test-coverage DEPENDS broker-test-coverage - engine-test-coverage clib-test-coverage) diff --git a/CMakeListsLinux.txt b/CMakeListsLinux.txt new file mode 100644 index 00000000000..00360bf7827 --- /dev/null +++ b/CMakeListsLinux.txt @@ -0,0 +1,200 @@ +# +# Copyright 2009-2023 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +# +# Global settings. +# + +option(LEGACY_ENGINE + "Enable this option to compile the old Engine version." OFF) + +option(WITH_ASAN + "Add the libasan to check memory leaks and other memory issues." OFF) + +option(WITH_TSAN + "Add the libtsan to check threads and other multithreading issues." OFF) +if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_ID + STREQUAL "Clang") + message( + FATAL_ERROR "You can build broker with g++ or clang++. CMake will exit.") +endif() + +option(WITH_MALLOC_TRACE "compile centreon-malloc-trace library." 
OFF) + +option(DEBUG_ROBOT OFF) + +if(LEGACY_ENGINE) + add_definitions(-DLEGACY_CONF) +endif() + +if(WITH_TSAN) + set(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") +endif() + +if(WITH_ASAN) + set(CMAKE_BUILD_TYPE Debug) + if(WITH_CLANG) + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fsanitize=address") + else() + set(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address" + ) + endif() +endif() + +set(ALLOW_DUPLICATE_EXECUTABLE TRUE) + +set(BUILD_ARGS "-w" "dupbuild=warn") + + +# Version. +add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") + +if (DEBUG_ROBOT) + add_definitions(-DDEBUG_ROBOT) +endif() + +# ########### CONSTANTS ########### +set(USER_BROKER centreon-broker) +set(USER_ENGINE centreon-engine) + +find_package(fmt CONFIG REQUIRED) +find_package(spdlog CONFIG REQUIRED) +find_package(gRPC CONFIG REQUIRED) +find_package(Protobuf REQUIRED) +find_package(nlohmann_json CONFIG REQUIRED) +find_package(GTest CONFIG REQUIRED) +find_package(CURL REQUIRED) +find_package(Boost REQUIRED COMPONENTS url) +find_package(ryml CONFIG REQUIRED) +add_definitions("-DSPDLOG_FMT_EXTERNAL") + +add_definitions("-DCOLLECT_MAJOR=${COLLECT_MAJOR}") +add_definitions("-DCOLLECT_MINOR=${COLLECT_MINOR}") +add_definitions("-DCOLLECT_PATCH=${COLLECT_PATCH}") + +include(FindPkgConfig) +pkg_check_modules(MARIADB REQUIRED libmariadb) +pkg_check_modules(LIBSSH2 REQUIRED libssh2) + +# There is a bug with grpc. It is not put in the triplet directory. So we have +# to search for its plugin. 
+file(GLOB_RECURSE GRPC_CPP_PLUGIN_EXE + RELATIVE ${CMAKE_BINARY_DIR} grpc_cpp_plugin) +find_program(GRPC_CPP_PLUGIN + NAMES ${GRPC_CPP_PLUGIN_EXE} + PATHS ${CMAKE_BINARY_DIR} + REQUIRED + NO_DEFAULT_PATH) + +set(PROTOBUF_LIB_DIR ${Protobuf_DIR}/../../lib) +set(OTLP_LIB_DIR ${opentelemetry-cpp_DIR}/../../lib) +set(VCPKG_INCLUDE_DIR ${Protobuf_INCLUDE_DIR}) +include(GNUInstallDirs) + +#import opentelemetry-proto +add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto + COMMENT "get opentelemetry proto files from git repository" + COMMAND /bin/rm -rf ${CMAKE_SOURCE_DIR}/opentelemetry-proto + COMMAND git ARGS clone --depth=1 --single-branch https://github.com/open-telemetry/opentelemetry-proto.git ${CMAKE_SOURCE_DIR}/opentelemetry-proto + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) + +add_custom_target(opentelemetry-proto-files DEPENDS ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto +) + +# var directories. 
+set(BROKER_VAR_LOG_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-broker") +set(BROKER_VAR_LIB_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-broker") +set(ENGINE_VAR_LOG_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine") +set(ENGINE_VAR_LOG_ARCHIVE_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine/archives") +set(ENGINE_VAR_LIB_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-engine") +add_definitions(-DDEFAULT_COMMAND_FILE="${ENGINE_VAR_LIB_DIR}/rw/centengine.cmd" + -DDEFAULT_DEBUG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.debug" + -DDEFAULT_LOG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.log" + -DDEFAULT_RETENTION_FILE="${ENGINE_VAR_LOG_DIR}/retention.dat" + -DDEFAULT_STATUS_FILE="${ENGINE_VAR_LOG_DIR}/status.dat") + +set(CMAKE_INSTALL_PREFIX "/usr") +option(WITH_TESTING "Build unit tests." OFF) + +option(WITH_CONF "Install configuration files." ON) + +# Code coverage on unit tests +option(WITH_COVERAGE "Add code coverage on unit tests." OFF) + +if(WITH_TESTING AND WITH_COVERAGE) + set(CMAKE_BUILD_TYPE "Debug") + include(cmake/CodeCoverage.cmake) + append_coverage_compiler_flags() +endif() + +set(protobuf_MODULE_COMPATIBLE True) + +include_directories(${CMAKE_SOURCE_DIR} + ${VCPKG_INCLUDE_DIR} + fmt::fmt + spdlog::spdlog + ${CMAKE_SOURCE_DIR}/clib/inc + ${CMAKE_CURRENT_BINARY_DIR}) + +add_subdirectory(clib) +add_subdirectory(common) +add_subdirectory(broker) +add_subdirectory(bbdo) +add_subdirectory(engine) +add_subdirectory(connectors) +add_subdirectory(ccc) +add_subdirectory(agent) + +if (WITH_MALLOC_TRACE) + add_subdirectory(malloc-trace) +endif() + + +add_custom_target(test-broker COMMAND tests/ut_broker) +add_custom_target(test-engine COMMAND tests/ut_engine) +add_custom_target(test-clib COMMAND tests/ut_clib) +add_custom_target(test-connector COMMAND tests/ut_connector) +add_custom_target(test-common COMMAND tests/ut_common) +add_custom_target(test-agent COMMAND tests/ut_agent) + +add_custom_target(test DEPENDS 
test-broker test-engine test-clib test-connector + test-common test-agent) + +add_custom_target(test-coverage DEPENDS broker-test-coverage + engine-test-coverage clib-test-coverage) diff --git a/CMakeListsWindows.txt b/CMakeListsWindows.txt new file mode 100644 index 00000000000..f3d9d8de57a --- /dev/null +++ b/CMakeListsWindows.txt @@ -0,0 +1,95 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +#in order to make fmt compile +add_definitions("/utf-8") + +# When we build from cache (CI), we don't use vcpkg cmaketool, so we tell to cmake where to find packages info +if (BUILD_FROM_CACHE) + LIST(APPEND CMAKE_PREFIX_PATH "build_windows/vcpkg_installed/x64-windows") +endif() + +find_package(fmt CONFIG REQUIRED) +find_package(spdlog CONFIG REQUIRED) +find_package(gRPC CONFIG REQUIRED) +find_package(Protobuf REQUIRED) +find_package(GTest CONFIG REQUIRED) +find_package(boost_asio CONFIG REQUIRED) +find_package(boost_process CONFIG REQUIRED) +find_package(boost_multi_index CONFIG REQUIRED) +find_package(boost_program_options CONFIG REQUIRED) +find_package(boost_multi_index CONFIG REQUIRED) +add_definitions("-DSPDLOG_FMT_EXTERNAL") + +add_definitions("-DCOLLECT_MAJOR=${COLLECT_MAJOR}") +add_definitions("-DCOLLECT_MINOR=${COLLECT_MINOR}") +add_definitions("-DCOLLECT_PATCH=${COLLECT_PATCH}") + +if (NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + set (CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded") +endif() 
+ +# There is a bug with grpc. It is not put in the triplet directory. So we have +# to search for its plugin. +file(GLOB_RECURSE GRPC_CPP_PLUGIN_EXE + RELATIVE ${CMAKE_BINARY_DIR} grpc_cpp_plugin.exe) +find_program(GRPC_CPP_PLUGIN + NAMES ${GRPC_CPP_PLUGIN_EXE} + PATHS ${CMAKE_BINARY_DIR} + REQUIRED + NO_DEFAULT_PATH) + +set(PROTOBUF_LIB_DIR ${Protobuf_DIR}/../../lib) +set(VCPKG_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR}) +include(GNUInstallDirs) + +option(WITH_TESTING "Build unit tests." OFF) + +set(protobuf_MODULE_COMPATIBLE True) + +include_directories(${CMAKE_SOURCE_DIR} + ${VCPKG_INCLUDE_DIR} + ${CMAKE_SOURCE_DIR}/clib/inc) + +#import opentelemetry-proto +add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto + COMMENT "get opentelemetry proto files from git repository" + COMMAND RMDIR /S /Q \"${CMAKE_SOURCE_DIR}/opentelemetry-proto\" + COMMAND git ARGS clone --depth=1 --single-branch https://github.com/open-telemetry/opentelemetry-proto.git ${CMAKE_SOURCE_DIR}/opentelemetry-proto + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) + +add_custom_target(opentelemetry-proto-files DEPENDS ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto +) + +add_subdirectory(common) +add_subdirectory(agent) + + +add_custom_target(test-common COMMAND tests/ut_common) +add_custom_target(test-agent COMMAND tests/ut_agent) + +add_custom_target(test DEPENDS 
test-common test-agent) + diff --git a/CMakePresets.json b/CMakePresets.json new file mode 100644 index 00000000000..e94d5ad68b9 --- /dev/null +++ b/CMakePresets.json @@ -0,0 +1,27 @@ +{ + "version": 2, + "configurePresets": [ + { + "name": "debug", + "generator": "Ninja", + "binaryDir": "${sourceDir}/build_windows", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug", + "WITH_TESTING": "On", + "CMAKE_TOOLCHAIN_FILE": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake" + } + }, + { + "name": "release", + "generator": "Ninja", + "binaryDir": "${sourceDir}/build_windows", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release", + "WITH_TESTING": "On", + "CMAKE_TOOLCHAIN_FILE": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake", + "VCPKG_OVERLAY_TRIPLETS": "custom-triplets", + "VCPKG_TARGET_TRIPLET": "x64-windows" + } + } + ] +} \ No newline at end of file diff --git a/README.md b/README.md index b22f7400d3a..83970d026d9 100644 --- a/README.md +++ b/README.md @@ -157,6 +157,15 @@ make -Cbuild install These two variables are very important if you want to recompile the project later. +#### Windows compilation +A small part of the project (centreon-monitoring-agent in agent folder) is Windows compatible. +In order to compile it, you need at least msbuild tools and git. +Then you have to: +* Start a x64 command tool console +* Execute centreon_cmake.bat. It first installs vcpkg in your home directory and then tells you to set two environment variables VCPKG_ROOT and PATH. Be careful, the next time you will start x64 command tool console, it will set VCPKG_ROOT to wrong value and you will need to set it again. +* Then install agent\conf\agent.reg in the registry and modify parameters such as server, certificates or logging. + + ### Other distributions If you are on another distribution, then follow the steps below. 
diff --git a/agent/CMakeLists.txt b/agent/CMakeLists.txt index 7a8ec1a1036..510a237da98 100644 --- a/agent/CMakeLists.txt +++ b/agent/CMakeLists.txt @@ -98,18 +98,43 @@ add_custom_command( WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set( SRC_COMMON + ${SRC_DIR}/agent.grpc.pb.cc + ${SRC_DIR}/agent.pb.cc + ${SRC_DIR}/bireactor.cc + ${SRC_DIR}/check.cc + ${SRC_DIR}/check_exec.cc + ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc + ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.cc + ${SRC_DIR}/opentelemetry/proto/metrics/v1/metrics.pb.cc + ${SRC_DIR}/opentelemetry/proto/common/v1/common.pb.cc + ${SRC_DIR}/opentelemetry/proto/resource/v1/resource.pb.cc + ${SRC_DIR}/scheduler.cc + ${SRC_DIR}/streaming_client.cc + ${SRC_DIR}/streaming_server.cc +) + +set( SRC_WINDOWS + ${SRC_DIR}/config_win.cc +) + +set( SRC_LINUX + ${SRC_DIR}/config.cc +) + +configure_file("${INCLUDE_DIR}/version.hh.in" + "${INCLUDE_DIR}/version.hh") + + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + set(SRC ${SRC_COMMON} ${SRC_LINUX}) +else() + set(SRC ${SRC_COMMON} ${SRC_WINDOWS}) +endif() + + add_library(centagent_lib STATIC - ${SRC_DIR}/agent.grpc.pb.cc - ${SRC_DIR}/agent.pb.cc - ${SRC_DIR}/check.cc - ${SRC_DIR}/check_exec.cc - ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc - ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.cc - ${SRC_DIR}/opentelemetry/proto/metrics/v1/metrics.pb.cc - ${SRC_DIR}/opentelemetry/proto/common/v1/common.pb.cc - ${SRC_DIR}/opentelemetry/proto/resource/v1/resource.pb.cc - ${SRC_DIR}/config.cc - ${SRC_DIR}/scheduler.cc + ${SRC} ) include_directories( @@ -117,34 +142,46 @@ include_directories( ${SRC_DIR} ${CMAKE_SOURCE_DIR}/common/inc ${CMAKE_SOURCE_DIR}/common/grpc/inc + ${CMAKE_SOURCE_DIR}/common/process/inc ) target_precompile_headers(centagent_lib PRIVATE precomp_inc/precomp.hh) SET(CENTREON_AGENT centagent) -add_executable(${CENTREON_AGENT} ${SRC_DIR}/main.cc) 
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_executable(${CENTREON_AGENT} ${SRC_DIR}/main.cc) + + target_link_libraries( + ${CENTREON_AGENT} PRIVATE + -L${PROTOBUF_LIB_DIR} + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + centagent_lib + centreon_common + centreon_grpc + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + fmt::fmt + stdc++fs) +else() + add_executable(${CENTREON_AGENT} ${SRC_DIR}/main_win.cc) + + target_link_libraries( + ${CENTREON_AGENT} PRIVATE + centagent_lib + centreon_common + centreon_grpc + centreon_process + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + absl::any absl::log absl::base absl::bits + Boost::program_options + fmt::fmt) +endif() -target_link_libraries( - ${CENTREON_AGENT} PRIVATE - -L${PROTOBUF_LIB_DIR} - gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts - centagent_lib - centreon_common - centreon_grpc - centreon_process - -L${Boost_LIBRARY_DIR_RELEASE} - boost_program_options - fmt::fmt) target_precompile_headers(${CENTREON_AGENT} REUSE_FROM centagent_lib) -target_include_directories(${CENTREON_AGENT} PRIVATE - ${INCLUDE_DIR} - ${SRC_DIR} - ${CMAKE_SOURCE_DIR}/common/inc -) - set(AGENT_VAR_LOG_DIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-monitoring-agent") diff --git a/agent/conf/centagent.reg b/agent/conf/centagent.reg new file mode 100644 index 00000000000..ba43c5406a9 Binary files /dev/null and b/agent/conf/centagent.reg differ diff --git a/agent/inc/com/centreon/agent/bireactor.hh b/agent/inc/com/centreon/agent/bireactor.hh new file mode 100644 index 00000000000..16af5594c81 --- /dev/null +++ b/agent/inc/com/centreon/agent/bireactor.hh @@ -0,0 +1,88 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_BIREACTOR_HH +#define CENTREON_AGENT_BIREACTOR_HH + +#include "agent.grpc.pb.h" + +namespace com::centreon::agent { + +template +class bireactor + : public bireactor_class, + public std::enable_shared_from_this> { + private: + static std::set> _instances; + static std::mutex _instances_m; + + bool _write_pending; + std::deque> _write_queue; + std::shared_ptr _read_current; + + const std::string_view _class_name; + + const std::string _peer; + + protected: + std::shared_ptr _io_context; + std::shared_ptr _logger; + + bool _alive; + /** + * @brief All attributes of this object are protected by this mutex + * + */ + mutable std::mutex _protect; + + public: + bireactor(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& class_name, + const std::string& peer); + + virtual ~bireactor(); + + static void register_stream(const std::shared_ptr& strm); + + void start_read(); + + void start_write(); + void write(const std::shared_ptr& request); + + // bireactor part + void OnReadDone(bool ok) override; + + virtual void on_incomming_request( + const std::shared_ptr& request) = 0; + + virtual void on_error() = 0; + + void OnWriteDone(bool ok) override; + + // server version + void OnDone(); + // client version + void OnDone(const ::grpc::Status& /*s*/); + + virtual void shutdown(); +}; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/check_exec.hh b/agent/inc/com/centreon/agent/check_exec.hh index 
42107040c4a..c458194bb18 100644 --- a/agent/inc/com/centreon/agent/check_exec.hh +++ b/agent/inc/com/centreon/agent/check_exec.hh @@ -20,7 +20,7 @@ #define CENTREON_AGENT_CHECK_EXEC_HH #include "check.hh" -#include "com/centreon/common/process.hh" +#include "com/centreon/common/process/process.hh" namespace com::centreon::agent { @@ -37,7 +37,7 @@ namespace detail { * ensure that completion is called for the right process and not for the * previous one */ -class process : public common::process { +class process : public common::process { bool _process_ended; bool _stdout_eof; std::string _stdout; @@ -54,9 +54,11 @@ class process : public common::process { void start(unsigned running_index); - void kill() { common::process::kill(); } + void kill() { common::process::kill(); } - int get_exit_status() const { return common::process::get_exit_status(); } + int get_exit_status() const { + return common::process::get_exit_status(); + } const std::string& get_stdout() const { return _stdout; } diff --git a/agent/inc/com/centreon/agent/config.hh b/agent/inc/com/centreon/agent/config.hh index d0bd774f97a..0cd7b9d4821 100644 --- a/agent/inc/com/centreon/agent/config.hh +++ b/agent/inc/com/centreon/agent/config.hh @@ -1,4 +1,3 @@ - /** * Copyright 2024 Centreon * Licensed under the Apache License, Version 2.0(the "License"); @@ -15,6 +14,7 @@ * * For more information : contact@centreon.com */ + #ifndef CENTREON_AGENT_CONFIG_HH #define CENTREON_AGENT_CONFIG_HH @@ -24,7 +24,7 @@ namespace com::centreon::agent { class config { public: - enum log_type { to_stdout, to_file }; + enum log_type { to_stdout, to_file, to_event_log }; static const std::string_view config_schema; diff --git a/agent/inc/com/centreon/agent/scheduler.hh b/agent/inc/com/centreon/agent/scheduler.hh index fcd3d71a6fa..b1ed36edfbc 100644 --- a/agent/inc/com/centreon/agent/scheduler.hh +++ b/agent/inc/com/centreon/agent/scheduler.hh @@ -57,13 +57,13 @@ class scheduler : public std::enable_shared_from_this { // 
pointers in this struct point to _current_request struct scope_metric_request { ::opentelemetry::proto::metrics::v1::ScopeMetrics* scope_metric; - absl::flat_hash_map + std::unordered_map metrics; }; // one serv => one scope_metric => several metrics - absl::flat_hash_map _serv_to_scope_metrics; + std::unordered_map _serv_to_scope_metrics; std::shared_ptr _io_context; std::shared_ptr _logger; diff --git a/agent/inc/com/centreon/agent/streaming_client.hh b/agent/inc/com/centreon/agent/streaming_client.hh new file mode 100644 index 00000000000..17fe24ef07b --- /dev/null +++ b/agent/inc/com/centreon/agent/streaming_client.hh @@ -0,0 +1,113 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_STREAMING_CLIENT_HH +#define CENTREON_AGENT_STREAMING_CLIENT_HH + +#include "com/centreon/common/grpc/grpc_client.hh" + +#include "bireactor.hh" +#include "scheduler.hh" + +namespace com::centreon::agent { + +class streaming_client; + +class client_reactor + : public bireactor< + ::grpc::ClientBidiReactor> { + std::weak_ptr _parent; + ::grpc::ClientContext _context; + + public: + client_reactor(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& parent, + const std::string& peer); + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast( + bireactor<::grpc::ClientBidiReactor>:: + shared_from_this()); + } + + ::grpc::ClientContext& get_context() { return _context; } + + void on_incomming_request( + const std::shared_ptr& request) override; + + void on_error() override; + + void shutdown() override; +}; + +/** + * @brief this object not only manages connection to engine, but also embed + * check scheduler + * + */ +class streaming_client : public common::grpc::grpc_client_base, + public std::enable_shared_from_this { + std::shared_ptr _io_context; + std::shared_ptr _logger; + std::string _supervised_host; + + std::unique_ptr _stub; + + std::shared_ptr _reactor; + std::shared_ptr _sched; + + /** + * @brief All attributes of this object are protected by this mutex + * + */ + std::mutex _protect; + + void _create_reactor(); + + void _start(); + + void _send(const std::shared_ptr& request); + + public: + streaming_client(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + void on_incomming_request(const std::shared_ptr& caller, + const std::shared_ptr& request); + void 
on_error(const std::shared_ptr& caller); + + void shutdown(); + + // use only for tests + engine_to_agent_request_ptr get_last_message_to_agent() const { + return _sched->get_last_message_to_agent(); + } +}; + +} // namespace com::centreon::agent + +#endif \ No newline at end of file diff --git a/agent/inc/com/centreon/agent/streaming_server.hh b/agent/inc/com/centreon/agent/streaming_server.hh new file mode 100644 index 00000000000..b88a1cb0c3f --- /dev/null +++ b/agent/inc/com/centreon/agent/streaming_server.hh @@ -0,0 +1,77 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_STREAMING_SERVER_HH +#define CENTREON_AGENT_STREAMING_SERVER_HH + +#include "com/centreon/common/grpc/grpc_server.hh" + +#include "bireactor.hh" +#include "scheduler.hh" + +namespace com::centreon::agent { + +class server_reactor; + +/** + * @brief grpc engine to agent server (reverse connection) + * It accept only one connection at a time + * If another connection occurs, previous connection is shutdown + * This object is both grpc server and grpc service + */ +class streaming_server : public common::grpc::grpc_server_base, + public std::enable_shared_from_this, + public ReversedAgentService::Service { + std::shared_ptr _io_context; + std::shared_ptr _logger; + const std::string _supervised_host; + + /** active engine to agent connection*/ + std::shared_ptr _incoming; + + /** + * @brief All attributes of this object are protected by this mutex + * + */ + mutable std::mutex _protect; + + void _start(); + + public: + streaming_server(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + ~streaming_server(); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + ::grpc::ServerBidiReactor* Import( + ::grpc::CallbackServerContext* context); + + void shutdown(); +}; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/version.hh.in b/agent/inc/com/centreon/agent/version.hh.in new file mode 100644 index 00000000000..f4c2d2e0136 --- /dev/null +++ b/agent/inc/com/centreon/agent/version.hh.in @@ -0,0 +1,28 @@ +/* + * Copyright 2012-2013,2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_VERSION_HH +#define CCE_VERSION_HH + +// Compile-time values. +constexpr unsigned CENTREON_AGENT_VERSION_MAJOR = @COLLECT_MAJOR@; +constexpr unsigned CENTREON_AGENT_VERSION_MINOR = @COLLECT_MINOR@.0; +constexpr unsigned CENTREON_AGENT_VERSION_PATCH = @COLLECT_PATCH@.0; + +#endif // !CCE_VERSION_HH diff --git a/agent/precomp_inc/precomp.hh b/agent/precomp_inc/precomp.hh index 1cc4bcd1c5f..8c9b04fb62a 100644 --- a/agent/precomp_inc/precomp.hh +++ b/agent/precomp_inc/precomp.hh @@ -30,8 +30,9 @@ #include #include -#include +#include #include +#include #include diff --git a/agent/src/bireactor.cc b/agent/src/bireactor.cc new file mode 100644 index 00000000000..e26346be55c --- /dev/null +++ b/agent/src/bireactor.cc @@ -0,0 +1,207 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "bireactor.hh" + +using namespace com::centreon::agent; + +/** + * @brief when BiReactor::OnDone is called by grpc layers, we should delete + * this. But this object is even used by others. + * So it's stored in this container and just removed from this container when + * OnDone is called + * + * @tparam bireactor_class + */ +template +std::set>> + bireactor::_instances; + +template +std::mutex bireactor::_instances_m; + +template +bireactor::bireactor( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& class_name, + const std::string& peer) + : _write_pending(false), + _alive(true), + _class_name(class_name), + _peer(peer), + _io_context(io_context), + _logger(logger) { + SPDLOG_LOGGER_DEBUG(_logger, "create {} this={:p} peer:{}", _class_name, + static_cast(this), _peer); +} + +template +bireactor::~bireactor() { + SPDLOG_LOGGER_DEBUG(_logger, "delete {} this={:p} peer:{}", _class_name, + static_cast(this), _peer); +} + +template +void bireactor::register_stream( + const std::shared_ptr& strm) { + std::lock_guard l(_instances_m); + _instances.insert(strm); +} + +template +void bireactor::start_read() { + std::lock_guard l(_protect); + if (!_alive) { + return; + } + std::shared_ptr to_read; + if (_read_current) { + return; + } + to_read = _read_current = std::make_shared(); + bireactor_class::StartRead(to_read.get()); +} + +template +void bireactor::OnReadDone(bool ok) { + if (ok) { + std::shared_ptr read; + { + std::lock_guard l(_protect); + SPDLOG_LOGGER_TRACE(_logger, "{:p} {} peer {} receive: {}", + static_cast(this), _class_name, _peer, + _read_current->ShortDebugString()); + read = _read_current; + _read_current.reset(); + } + start_read(); + if (read->has_config()) { + on_incomming_request(read); + } + } else { + SPDLOG_LOGGER_ERROR(_logger, "{:p} {} peer:{} fail read from stream", + static_cast(this), _class_name, _peer); + on_error(); + 
shutdown(); + } +} + +template +void bireactor::write( + const std::shared_ptr& request) { + { + std::lock_guard l(_protect); + if (!_alive) { + return; + } + _write_queue.push_back(request); + } + start_write(); +} + +template +void bireactor::start_write() { + std::shared_ptr to_send; + { + std::lock_guard l(_protect); + if (!_alive || _write_pending || _write_queue.empty()) { + return; + } + to_send = _write_queue.front(); + _write_pending = true; + } + bireactor_class::StartWrite(to_send.get()); +} + +template +void bireactor::OnWriteDone(bool ok) { + if (ok) { + { + std::lock_guard l(_protect); + _write_pending = false; + SPDLOG_LOGGER_TRACE(_logger, "{:p} {} {} sent", + static_cast(this), _class_name, + (*_write_queue.begin())->ShortDebugString()); + _write_queue.pop_front(); + } + start_write(); + } else { + SPDLOG_LOGGER_ERROR(_logger, "{:p} {} peer {} fail write to stream", + static_cast(this), _class_name, _peer); + on_error(); + shutdown(); + } +} + +template +void bireactor::OnDone() { + /**grpc has a bug, sometimes if we delete this class in this handler as it is + * described in examples, it also deletes used channel and does a pthread_join + * of the current thread witch go to a EDEADLOCK error and call grpc::Crash. + * So we uses asio thread to do the job + */ + _io_context->post([me = std::enable_shared_from_this< + bireactor>::shared_from_this(), + &peer = _peer, logger = _logger]() { + std::lock_guard l(_instances_m); + SPDLOG_LOGGER_DEBUG(logger, "{:p} server::OnDone() to {}", + static_cast(me.get()), peer); + _instances.erase(std::static_pointer_cast>(me)); + }); +} + +template +void bireactor::OnDone(const ::grpc::Status& status) { + /**grpc has a bug, sometimes if we delete this class in this handler as it is + * described in examples, it also deletes used channel and does a + * pthread_join of the current thread witch go to a EDEADLOCK error and call + * grpc::Crash. 
So we uses asio thread to do the job + */ + _io_context->post([me = std::enable_shared_from_this< + bireactor>::shared_from_this(), + status, &peer = _peer, logger = _logger]() { + std::lock_guard l(_instances_m); + if (status.ok()) { + SPDLOG_LOGGER_DEBUG(logger, "{:p} peer: {} client::OnDone({}) {}", + static_cast(me.get()), peer, + status.error_message(), status.error_details()); + } else { + SPDLOG_LOGGER_ERROR(logger, "{:p} peer:{} client::OnDone({}) {}", + static_cast(me.get()), peer, + status.error_message(), status.error_details()); + } + _instances.erase(std::static_pointer_cast>(me)); + }); +} + +template +void bireactor::shutdown() { + SPDLOG_LOGGER_DEBUG(_logger, "{:p} {}::shutdown", static_cast(this), + _class_name); +} + +namespace com::centreon::agent { + +template class bireactor< + ::grpc::ClientBidiReactor>; + +template class bireactor< + ::grpc::ServerBidiReactor>; + +} // namespace com::centreon::agent \ No newline at end of file diff --git a/agent/src/check.cc b/agent/src/check.cc index 562fd0329b2..27c29701f16 100644 --- a/agent/src/check.cc +++ b/agent/src/check.cc @@ -109,7 +109,8 @@ void check::_timeout_timer_handler(const boost::system::error_code& err, return; } if (start_check_index == _running_check_index) { - SPDLOG_LOGGER_ERROR(_logger, "check timeout for service {}", _service); + SPDLOG_LOGGER_ERROR(_logger, "check timeout for service {} cmd: {}", + _service, _command_name); on_completion(start_check_index, 3 /*unknown*/, std::list(), {"Timeout at execution of " + _command_line}); @@ -132,7 +133,8 @@ void check::on_completion( const std::list& perfdata, const std::list& outputs) { if (start_check_index == _running_check_index) { - SPDLOG_LOGGER_TRACE(_logger, "end check for service {}", _service); + SPDLOG_LOGGER_TRACE(_logger, "end check for service {} cmd: {}", _service, + _command_name); _time_out_timer.cancel(); _running_check = false; ++_running_check_index; diff --git a/agent/src/check_exec.cc b/agent/src/check_exec.cc index 
d38d0deeac9..bd475ef5d08 100644 --- a/agent/src/check_exec.cc +++ b/agent/src/check_exec.cc @@ -32,7 +32,7 @@ detail::process::process(const std::shared_ptr& io_context, const std::shared_ptr& logger, const std::string& cmd_line, const std::shared_ptr& parent) - : common::process(io_context, logger, cmd_line), _parent(parent) {} + : common::process(io_context, logger, cmd_line), _parent(parent) {} /** * @brief start a new process, if a previous one is already running, it's killed @@ -44,7 +44,7 @@ void detail::process::start(unsigned running_index) { _stdout_eof = false; _running_index = running_index; _stdout.clear(); - common::process::start_process(); + common::process::start_process(false); } /** @@ -57,11 +57,11 @@ void detail::process::on_stdout_read(const boost::system::error_code& err, size_t nb_read) { if (!err && nb_read > 0) { _stdout.append(_stdout_read_buffer, nb_read); - } else if (err == asio::error::eof) { + } else if (err) { _stdout_eof = true; _on_completion(); } - common::process::on_stdout_read(err, nb_read); + common::process::on_stdout_read(err, nb_read); } /** @@ -76,7 +76,7 @@ void detail::process::on_stderr_read(const boost::system::error_code& err, SPDLOG_LOGGER_ERROR(_logger, "process error: {}", std::string_view(_stderr_read_buffer, nb_read)); } - common::process::on_stderr_read(err, nb_read); + common::process::on_stderr_read(err, nb_read); } /** @@ -91,7 +91,7 @@ void detail::process::on_process_end(const boost::system::error_code& err, _stdout += fmt::format("fail to execute process {} : {}", get_exe_path(), err.message()); } - common::process::on_process_end(err, raw_exit_status); + common::process::on_process_end(err, raw_exit_status); _process_ended = true; _on_completion(); } @@ -174,6 +174,7 @@ void check_exec::_init() { } catch (const std::exception& e) { SPDLOG_LOGGER_ERROR(_logger, "fail to create process of cmd_line '{}' : {}", get_command_line(), e.what()); + throw; } } @@ -231,8 +232,11 @@ void 
check_exec::_timeout_timer_handler(const boost::system::error_code& err, return; } if (start_check_index == _get_running_check_index()) { - check::_timeout_timer_handler(err, start_check_index); _process->kill(); + check::_timeout_timer_handler(err, start_check_index); + } else { + SPDLOG_LOGGER_ERROR(_logger, "start_check_index={}, running_index={}", + start_check_index, _get_running_check_index()); } } @@ -243,6 +247,8 @@ void check_exec::_timeout_timer_handler(const boost::system::error_code& err, */ void check_exec::on_completion(unsigned running_index) { if (running_index != _get_running_check_index()) { + SPDLOG_LOGGER_ERROR(_logger, "running_index={}, running_index={}", + running_index, _get_running_check_index()); return; } diff --git a/agent/src/config.cc b/agent/src/config.cc index cd46ce23742..d15de69aead 100644 --- a/agent/src/config.cc +++ b/agent/src/config.cc @@ -121,10 +121,10 @@ config::config(const std::string& path) { _endpoint = json_config.get_string("endpoint"); // pattern schema doesn't work so we do it ourselves - if (!RE2::FullMatch(_endpoint, "[\\w\\.:]+:\\w+")) { + if (!RE2::FullMatch(_endpoint, "[\\w\\.\\-:]+:\\w+")) { throw exceptions::msg_fmt( - "bad format for endpoint {}, it must match to the regex: " - "[\\w\\.:]+:\\w+", + "bad format for endpoint {}, it must match the regex: " + "[\\w\\.\\-:]+:\\w+", _endpoint); } _log_level = diff --git a/agent/src/config_win.cc b/agent/src/config_win.cc new file mode 100644 index 00000000000..9fe35068904 --- /dev/null +++ b/agent/src/config_win.cc @@ -0,0 +1,109 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "config.hh" + +using namespace com::centreon::agent; + +/** + * @brief Construct a new config::config object + * + * @param registry_key registry path as + * HKEY_LOCAL_MACHINE\SOFTWARE\Centreon\CentreonMonitoringAgent + */ +config::config(const std::string& registry_key) { + HKEY h_key; + LSTATUS res = RegOpenKeyExA(HKEY_LOCAL_MACHINE, registry_key.c_str(), 0, + KEY_READ, &h_key); + if (res != ERROR_SUCCESS) { + if (res == ERROR_FILE_NOT_FOUND) { + throw exceptions::msg_fmt("{} not found", registry_key); + } else { + throw exceptions::msg_fmt("unable to read {}", registry_key); + } + } + + char str_buffer[4096]; + + auto get_sz_reg_or_default = [&](const char* value_name, + const char* default_value) { + DWORD size = sizeof(str_buffer); + LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_REG_SZ, + nullptr, str_buffer, &size); + return (result == ERROR_SUCCESS) ? str_buffer : default_value; + }; + + auto get_bool = [&](const char* value_name) -> bool { + int32_t value; + DWORD size = sizeof(value); + LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_DWORD, + nullptr, &value, &size); + return result == ERROR_SUCCESS && value; + }; + + auto get_unsigned = [&](const char* value_name) -> uint32_t { + uint32_t value; + DWORD size = sizeof(value); + LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_DWORD, + nullptr, &value, &size); + return result == ERROR_SUCCESS ? 
value : 0; + }; + + _endpoint = get_sz_reg_or_default("endpoint", ""); + + // pattern schema doesn't work so we do it ourselves + if (!RE2::FullMatch(_endpoint, "[\\w\\.\\-:]+:\\w+")) { + RegCloseKey(h_key); + throw exceptions::msg_fmt( + "bad format for endpoint {}, it must match the regex: " + "[\\w\\.\\-:]+:\\w+", + _endpoint); + } + _log_level = + spdlog::level::from_str(get_sz_reg_or_default("log_level", "info")); + + const char* log_type = get_sz_reg_or_default("log_type", "event-log"); + if (!strcmp(log_type, "file")) { + _log_type = to_file; + } else if (!strcmp(log_type, "stdout")) { + _log_type = to_stdout; + } else { + _log_type = to_event_log; + } + + _log_file = get_sz_reg_or_default("log_file", ""); + _log_files_max_size = get_unsigned("log_files_max_size"); + _log_files_max_number = get_unsigned("log_files_max_number"); + _encryption = get_bool("encryption"); + _public_cert_file = get_sz_reg_or_default("public_cert", ""); + _private_key_file = get_sz_reg_or_default("private_key", ""); + _ca_certificate_file = get_sz_reg_or_default("ca_certificate", ""); + _ca_name = get_sz_reg_or_default("ca_name", ""); + _host = get_sz_reg_or_default("host", ""); + if (_host.empty()) { + _host = boost::asio::ip::host_name(); + } + _reverse_connection = get_bool("reverse_connection"); + + RegCloseKey(h_key); +} diff --git a/agent/src/main.cc b/agent/src/main.cc index 562a1f05e46..34d11ab1874 100644 --- a/agent/src/main.cc +++ b/agent/src/main.cc @@ -21,6 +21,8 @@ #include #include "config.hh" +#include "streaming_client.hh" +#include "streaming_server.hh" using namespace com::centreon::agent; @@ -28,6 +30,9 @@ std::shared_ptr g_io_context = std::make_shared(); std::shared_ptr g_logger; +static std::shared_ptr _streaming_client; + +static std::shared_ptr _streaming_server; static asio::signal_set _signals(*g_io_context, SIGTERM, SIGUSR1, SIGUSR2); @@ -36,9 +41,16 @@ static void signal_handler(const boost::system::error_code& error, if (!error) { switch (signal_number) 
{ case SIGTERM: - SPDLOG_LOGGER_INFO(g_logger, "SIGTERM received"); - g_io_context->stop(); - break; + case SIGINT: + SPDLOG_LOGGER_INFO(g_logger, "SIGTERM or SIGINT received"); + if (_streaming_client) { + _streaming_client->shutdown(); + } + if (_streaming_server) { + _streaming_server->shutdown(); + } + g_io_context->post([]() { g_io_context->stop(); }); + return; case SIGUSR2: SPDLOG_LOGGER_INFO(g_logger, "SIGUSR2 received"); if (g_logger->level()) { @@ -69,6 +81,8 @@ static std::string read_file(const std::string& file_path) { ss << file.rdbuf(); file.close(); return ss.str(); + } else { + SPDLOG_LOGGER_ERROR(g_logger, "fail to open {}", file_path); } } catch (const std::exception& e) { SPDLOG_LOGGER_ERROR(g_logger, "fail to read {}: {}", file_path, e.what()); @@ -151,6 +165,7 @@ int main(int argc, char* argv[]) { try { // ignored but mandatory because of forks _signals.add(SIGPIPE); + _signals.add(SIGINT); _signals.async_wait(signal_handler); @@ -166,6 +181,14 @@ int main(int argc, char* argv[]) { return -1; } + if (conf->use_reverse_connection()) { + _streaming_server = streaming_server::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } else { + _streaming_client = streaming_client::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } + try { g_io_context->run(); } catch (const std::exception& e) { diff --git a/agent/src/main_win.cc b/agent/src/main_win.cc new file mode 100644 index 00000000000..05ba6276b17 --- /dev/null +++ b/agent/src/main_win.cc @@ -0,0 +1,172 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include +#include +#include + +#include "config.hh" +#include "streaming_client.hh" +#include "streaming_server.hh" + +using namespace com::centreon::agent; + +std::shared_ptr g_io_context = + std::make_shared(); + +std::shared_ptr g_logger; +static std::shared_ptr _streaming_client; + +static std::shared_ptr _streaming_server; + +static asio::signal_set _signals(*g_io_context, SIGTERM, SIGINT); + +static void signal_handler(const boost::system::error_code& error, + int signal_number) { + if (!error) { + switch (signal_number) { + case SIGINT: + case SIGTERM: + SPDLOG_LOGGER_INFO(g_logger, "SIGTERM or SIGINT received"); + if (_streaming_client) { + _streaming_client->shutdown(); + } + if (_streaming_server) { + _streaming_server->shutdown(); + } + g_io_context->post([]() { g_io_context->stop(); }); + return; + } + _signals.async_wait(signal_handler); + } +} + +static std::string read_file(const std::string& file_path) { + if (file_path.empty()) { + return {}; + } + try { + std::ifstream file(file_path); + if (file.is_open()) { + std::stringstream ss; + ss << file.rdbuf(); + file.close(); + return ss.str(); + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(g_logger, "fail to read {}: {}", file_path, e.what()); + } + return ""; +} + +int main(int argc, char* argv[]) { + const char* registry_path = "SOFTWARE\\Centreon\\CentreonMonitoringAgent"; + + std::unique_ptr conf; + try { + conf = std::make_unique(registry_path); + } catch (const std::exception& e) { + 
SPDLOG_ERROR("fail to read conf from registry {}: {}", registry_path, + e.what()); + return 1; + } + + SPDLOG_INFO("centreon-monitoring-agent start"); + + const std::string logger_name = "centreon-monitoring-agent"; + + auto create_event_logger = []() { + auto sink = std::make_shared( + "CentreonMonitoringAgent"); + g_logger = std::make_shared("", sink); + }; + + try { + if (conf->get_log_type() == config::to_file) { + if (!conf->get_log_file().empty()) { + if (conf->get_log_files_max_size() > 0 && + conf->get_log_files_max_number() > 0) { + g_logger = spdlog::rotating_logger_mt( + logger_name, conf->get_log_file(), + conf->get_log_files_max_size() * 0x100000, + conf->get_log_files_max_number()); + } else { + SPDLOG_INFO( + "no log-max-file-size option or no log-max-files option provided " + "=> logs will not be rotated by centagent"); + g_logger = spdlog::basic_logger_mt(logger_name, conf->get_log_file()); + } + } else { + SPDLOG_ERROR( + "log-type=file needs the option log-file => log to event log"); + create_event_logger(); + } + } else if (conf->get_log_type() == config::to_stdout) { + g_logger = spdlog::stdout_color_mt(logger_name); + } else { + create_event_logger(); + } + } catch (const std::exception& e) { + SPDLOG_CRITICAL("Can't log to {}: {}", conf->get_log_file(), e.what()); + return 2; + } + + g_logger->set_level(conf->get_log_level()); + + g_logger->flush_on(spdlog::level::warn); + + spdlog::flush_every(std::chrono::seconds(1)); + + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent start"); + std::shared_ptr grpc_conf; + + try { + _signals.async_wait(signal_handler); + + grpc_conf = std::make_shared( + conf->get_endpoint(), conf->use_encryption(), + read_file(conf->get_public_cert_file()), + read_file(conf->get_private_key_file()), + read_file(conf->get_ca_certificate_file()), conf->get_ca_name(), true, + 30); + + } catch (const std::exception& e) { + SPDLOG_CRITICAL("fail to parse input params: {}", e.what()); + return -1; + } + + if 
(conf->use_reverse_connection()) { + _streaming_server = streaming_server::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } else { + _streaming_client = streaming_client::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } + + try { + g_io_context->run(); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(g_logger, "unhandled exception: {}", e.what()); + return -1; + } + + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent end"); + + return 0; +} diff --git a/agent/src/scheduler.cc b/agent/src/scheduler.cc index 890f9d62dff..a08749884c2 100644 --- a/agent/src/scheduler.cc +++ b/agent/src/scheduler.cc @@ -17,6 +17,7 @@ */ #include "scheduler.hh" +#include "com/centreon/common/utf8.hh" using namespace com::centreon::agent; @@ -174,16 +175,23 @@ void scheduler::update(const engine_to_agent_request_ptr& conf) { "check expected to start at {} for service {}", next, serv.service_description()); } - _check_queue.emplace(_check_builder( - _io_context, _logger, next, serv.service_description(), - serv.command_name(), serv.command_line(), conf, - [me = shared_from_this()]( - const std::shared_ptr& check, unsigned status, - const std::list& perfdata, - const std::list& outputs) { - me->_check_handler(check, status, perfdata, outputs); - })); - next += check_interval; + try { + auto check_to_schedule = _check_builder( + _io_context, _logger, next, serv.service_description(), + serv.command_name(), serv.command_line(), conf, + [me = shared_from_this()]( + const std::shared_ptr& check, unsigned status, + const std::list& perfdata, + const std::list& outputs) { + me->_check_handler(check, status, perfdata, outputs); + }); + _check_queue.emplace(check_to_schedule); + next += check_interval; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, + "service: {} command:{} won't be scheduled", + serv.service_description(), serv.command_name()); + } } } @@ -317,9 +325,9 @@ void 
scheduler::_store_result_in_metrics_and_exemplars( if (!outputs.empty()) { const std::string& first_line = *outputs.begin(); size_t pipe_pos = first_line.find('|'); - state_metrics->set_description(pipe_pos != std::string::npos - ? first_line.substr(0, pipe_pos) - : first_line); + state_metrics->set_description(common::check_string_utf8( + pipe_pos != std::string::npos ? first_line.substr(0, pipe_pos) + : first_line)); } auto data_point = state_metrics->mutable_gauge()->add_data_points(); data_point->set_time_unix_nano(now); diff --git a/agent/src/streaming_client.cc b/agent/src/streaming_client.cc new file mode 100644 index 00000000000..764f9be5769 --- /dev/null +++ b/agent/src/streaming_client.cc @@ -0,0 +1,226 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "streaming_client.hh" +#include "check_exec.hh" +#include "com/centreon/common/defer.hh" +#include "version.hh" + +using namespace com::centreon::agent; + +/** + * @brief Construct a new client reactor::client reactor object + * + * @param io_context + * @param parent we will keep a weak_ptr on streaming_client object + */ +client_reactor::client_reactor( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + + const std::shared_ptr& parent, + const std::string& peer) + : bireactor<::grpc::ClientBidiReactor>( + io_context, + logger, + "client", + peer), + _parent(parent) {} + +/** + * @brief pass request to streaming_client parent + * + * @param request + */ +void client_reactor::on_incomming_request( + const std::shared_ptr& request) { + std::shared_ptr parent = _parent.lock(); + if (!parent) { + shutdown(); + } else { + parent->on_incomming_request(shared_from_this(), request); + } +} + +/** + * @brief called whe OnReadDone or OnWriteDone ok parameter is false + * + */ +void client_reactor::on_error() { + std::shared_ptr parent = _parent.lock(); + if (parent) { + parent->on_error(shared_from_this()); + } +} + +/** + * @brief shutdown connection to engine if not yet done + * + */ +void client_reactor::shutdown() { + std::lock_guard l(_protect); + if (_alive) { + _alive = false; + bireactor<::grpc::ClientBidiReactor>::shutdown(); + RemoveHold(); + _context.TryCancel(); + } +} + +/** + * @brief Construct a new streaming client::streaming client object + * not use it, use load instead + * + * @param io_context + * @param conf + * @param supervised_hosts + */ +streaming_client::streaming_client( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) + : com::centreon::common::grpc::grpc_client_base(conf, logger), + _io_context(io_context), + _logger(logger), + _supervised_host(supervised_host) { + 
_stub = std::move(AgentService::NewStub(_channel)); +} + +/** + * @brief to call after construction + * + */ +void streaming_client::_start() { + std::weak_ptr weak_this = shared_from_this(); + + _sched = scheduler::load( + _io_context, _logger, _supervised_host, scheduler::default_config(), + [sender = std::move(weak_this)]( + const std::shared_ptr& request) { + auto parent = sender.lock(); + if (parent) { + parent->_send(request); + } + }, + check_exec::load); + _create_reactor(); +} + +/** + * @brief create reactor on current grpc channel + * and send agent infos (hostname, supervised hosts, collect version) + * + */ +void streaming_client::_create_reactor() { + std::lock_guard l(_protect); + if (_reactor) { + _reactor->shutdown(); + } + _reactor = std::make_shared( + _io_context, _logger, shared_from_this(), get_conf()->get_hostport()); + client_reactor::register_stream(_reactor); + _stub->async()->Export(&_reactor->get_context(), _reactor.get()); + _reactor->start_read(); + _reactor->AddHold(); + _reactor->StartCall(); + + // identifies to engine + std::shared_ptr who_i_am = + std::make_shared(); + auto infos = who_i_am->mutable_init(); + + infos->mutable_centreon_version()->set_major(CENTREON_AGENT_VERSION_MAJOR); + infos->mutable_centreon_version()->set_minor(CENTREON_AGENT_VERSION_MINOR); + infos->mutable_centreon_version()->set_patch(CENTREON_AGENT_VERSION_PATCH); + infos->set_host(_supervised_host); + + _reactor->write(who_i_am); +} + +/** + * @brief construct a new streaming_client + * + * @param io_context + * @param conf + * @param supervised_hosts list of host to supervise (match to engine config) + * @return std::shared_ptr + */ +std::shared_ptr streaming_client::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) { + std::shared_ptr ret = std::make_shared( + io_context, logger, conf, supervised_host); + ret->_start(); + return ret; +} + +/** + * @brief send 
a request to engine + * + * @param request + */ +void streaming_client::_send(const std::shared_ptr& request) { + std::lock_guard l(_protect); + if (_reactor) + _reactor->write(request); +} + +/** + * @brief + * + * @param caller + * @param request + */ +void streaming_client::on_incomming_request( + const std::shared_ptr& caller, + const std::shared_ptr& request) { + // incoming request is used in main thread + _io_context->post([request, sched = _sched]() { sched->update(request); }); +} + +/** + * @brief called by _reactor when something was wrong + * Then we wait 10s to reconnect to engine + * + * @param caller + */ +void streaming_client::on_error(const std::shared_ptr& caller) { + std::lock_guard l(_protect); + if (caller == _reactor) { + _reactor.reset(); + common::defer(_io_context, std::chrono::seconds(10), + [me = shared_from_this()] { me->_create_reactor(); }); + } +} + +/** + * @brief stop and shutdown scheduler and connection + * After, this object is dead and must be deleted + * + */ +void streaming_client::shutdown() { + std::lock_guard l(_protect); + _sched->stop(); + if (_reactor) { + _reactor->shutdown(); + } +} diff --git a/agent/src/streaming_server.cc b/agent/src/streaming_server.cc new file mode 100644 index 00000000000..06a637e8b94 --- /dev/null +++ b/agent/src/streaming_server.cc @@ -0,0 +1,234 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "streaming_server.hh" +#include "check_exec.hh" +#include "scheduler.hh" +#include "version.hh" + +using namespace com::centreon::agent; + +namespace com::centreon::agent { + +class server_reactor + : public bireactor< + ::grpc::ServerBidiReactor> { + std::shared_ptr _sched; + std::string _supervised_host; + + void _start(); + + public: + server_reactor(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_hosts, + const std::string& peer); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_hosts, + const std::string& peer); + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast( + bireactor<::grpc::ServerBidiReactor>:: + shared_from_this()); + } + + void on_incomming_request( + const std::shared_ptr& request) override; + + void on_error() override; + + void shutdown() override; +}; + +server_reactor::server_reactor( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::string& peer) + : bireactor<::grpc::ServerBidiReactor>( + io_context, + logger, + "server", + peer), + _supervised_host(supervised_host) {} + +void server_reactor::_start() { + std::weak_ptr weak_this(shared_from_this()); + + _sched = scheduler::load( + _io_context, _logger, _supervised_host, scheduler::default_config(), + [sender = std::move(weak_this)]( + const std::shared_ptr& request) { + auto parent = sender.lock(); + if (parent) { + parent->write(request); + } + }, + check_exec::load); + + // identifies to engine + std::shared_ptr who_i_am = + std::make_shared(); + auto infos = who_i_am->mutable_init(); + + infos->mutable_centreon_version()->set_major(CENTREON_AGENT_VERSION_MAJOR); + infos->mutable_centreon_version()->set_minor(CENTREON_AGENT_VERSION_MINOR); + 
infos->mutable_centreon_version()->set_patch(CENTREON_AGENT_VERSION_PATCH); + infos->set_host(_supervised_host); + + write(who_i_am); +} + +std::shared_ptr server_reactor::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::string& peer) { + std::shared_ptr ret = std::make_shared( + io_context, logger, supervised_host, peer); + ret->_start(); + return ret; +} + +void server_reactor::on_incomming_request( + const std::shared_ptr& request) { + _io_context->post([sched = _sched, request]() { sched->update(request); }); +} + +void server_reactor::on_error() { + shutdown(); +} + +void server_reactor::shutdown() { + std::lock_guard l(_protect); + if (_alive) { + _alive = false; + _sched->stop(); + bireactor<::grpc::ServerBidiReactor>::shutdown(); + Finish(::grpc::Status::CANCELLED); + } +} + +} // namespace com::centreon::agent + +/** + * @brief Construct a new streaming server::streaming server object + * Not use it, use load instead + * @param io_context + * @param conf + * @param supervised_hosts list of supervised hosts that will be sent to engine + * in order to have checks configuration + */ +streaming_server::streaming_server( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) + : com::centreon::common::grpc::grpc_server_base(conf, logger), + _io_context(io_context), + _logger(logger), + _supervised_host(supervised_host) { + SPDLOG_LOGGER_INFO(_logger, "create grpc server listening on {}", + conf->get_hostport()); +} + +streaming_server::~streaming_server() { + SPDLOG_LOGGER_INFO(_logger, "delete grpc server listening on {}", + get_conf()->get_hostport()); +} + +/** + * @brief register service and start grpc server + * + */ +void streaming_server::_start() { + ::grpc::Service::MarkMethodCallback( + 0, new ::grpc::internal::CallbackBidiHandler< + ::com::centreon::agent::MessageToAgent, + 
::com::centreon::agent::MessageFromAgent>( + [me = shared_from_this()](::grpc::CallbackServerContext* context) { + return me->Import(context); + })); + + _init([this](::grpc::ServerBuilder& builder) { + builder.RegisterService(this); + }); +} + +/** + * @brief construct and start a new streaming_server + * + * @param io_context + * @param conf + * @param supervised_hosts list of supervised hosts that will be sent to engine + * in order to have checks configuration + * @return std::shared_ptr + */ +std::shared_ptr streaming_server::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) { + std::shared_ptr ret = std::make_shared( + io_context, logger, conf, supervised_host); + ret->_start(); + return ret; +} + +/** + * @brief shutdown server and incoming connection + * + */ +void streaming_server::shutdown() { + SPDLOG_LOGGER_INFO(_logger, "shutdown grpc server listening on {}", + get_conf()->get_hostport()); + { + std::lock_guard l(_protect); + if (_incoming) { + _incoming->shutdown(); + _incoming.reset(); + } + } + common::grpc::grpc_server_base::shutdown(std::chrono::seconds(10)); +} + +/** + * @brief callback called on incoming connection + * + * @param context + * @return ::grpc::ServerBidiReactor* = + * _incoming + */ +::grpc::ServerBidiReactor* +streaming_server::Import(::grpc::CallbackServerContext* context) { + SPDLOG_LOGGER_INFO(_logger, "incoming connection from {}", context->peer()); + std::lock_guard l(_protect); + if (_incoming) { + _incoming->shutdown(); + } + _incoming = server_reactor::load(_io_context, _logger, _supervised_host, + context->peer()); + server_reactor::register_stream(_incoming); + _incoming->start_read(); + return _incoming.get(); +} diff --git a/agent/test/CMakeLists.txt b/agent/test/CMakeLists.txt index c677ecc9c93..897aea3b643 100644 --- a/agent/test/CMakeLists.txt +++ b/agent/test/CMakeLists.txt @@ -16,15 +16,21 @@ # For more information : 
contact@centreon.com # - - -add_executable(ut_agent - config_test.cc +set( SRC_COMMON check_test.cc check_exec_test.cc scheduler_test.cc test_main.cc - ${TESTS_SOURCES}) +) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + set(SRC ${SRC_COMMON} config_test.cc) +else() + set(SRC ${SRC_COMMON}) +endif() + + +add_executable(ut_agent ${SRC}) add_test(NAME tests COMMAND ut_agent) @@ -36,8 +42,25 @@ set_target_properties( RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${CMAKE_BINARY_DIR}/tests RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests) - -target_link_libraries(ut_agent PRIVATE +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + target_link_libraries(ut_agent PRIVATE + centagent_lib + centreon_common + centreon_process + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + stdc++fs + -L${PROTOBUF_LIB_DIR} + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + fmt::fmt pthread + crypto ssl + ) +else() + target_link_libraries(ut_agent PRIVATE centagent_lib centreon_common centreon_process @@ -45,14 +68,11 @@ target_link_libraries(ut_agent PRIVATE GTest::gtest_main GTest::gmock GTest::gmock_main - -L${Boost_LIBRARY_DIR_RELEASE} - boost_program_options - stdc++fs - -L${PROTOBUF_LIB_DIR} + Boost::program_options gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts - fmt::fmt pthread - crypto ssl + fmt::fmt ) +endif() add_dependencies(ut_agent centreon_common centagent_lib) @@ -60,3 +80,6 @@ set_property(TARGET ut_agent PROPERTY POSITION_INDEPENDENT_CODE ON) target_precompile_headers(ut_agent PRIVATE ${PROJECT_SOURCE_DIR}/precomp_inc/precomp.hh) +file(COPY ${PROJECT_SOURCE_DIR}/test/scripts/sleep.bat + DESTINATION ${CMAKE_BINARY_DIR}/tests) + diff --git a/agent/test/check_exec_test.cc b/agent/test/check_exec_test.cc index 34c050f48e0..b3b547cfd13 100644 --- a/agent/test/check_exec_test.cc +++ b/agent/test/check_exec_test.cc @@ -22,6 +22,16 @@ using namespace com::centreon::agent; +#ifdef _WINDOWS 
+#define ECHO_PATH "tests\\echo.bat" +#define SLEEP_PATH "tests\\sleep.bat" +#define END_OF_LINE "\r\n" +#else +#define ECHO_PATH "/bin/echo" +#define SLEEP_PATH "/bin/sleep" +#define END_OF_LINE "\n" +#endif + extern std::shared_ptr g_io_context; static const std::string serv("serv"); @@ -29,9 +39,10 @@ static const std::string cmd_name("command"); static std::string command_line; TEST(check_exec_test, echo) { - command_line = "/bin/echo hello toto"; + command_line = ECHO_PATH " hello toto"; int status; std::list outputs; + std::mutex mut; std::condition_variable cond; std::shared_ptr check = check_exec::load( g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name, @@ -40,22 +51,24 @@ TEST(check_exec_test, echo) { int statuss, const std::list& perfdata, const std::list& output) { - status = statuss; - outputs = output; + { + std::lock_guard l(mut); + status = statuss; + outputs = output; + } cond.notify_one(); }); check->start_check(std::chrono::seconds(1)); - std::mutex mut; std::unique_lock l(mut); cond.wait(l); ASSERT_EQ(status, 0); ASSERT_EQ(outputs.size(), 1); - ASSERT_EQ(*outputs.begin(), "hello toto"); + ASSERT_EQ(outputs.begin()->substr(0, 10), "hello toto"); } TEST(check_exec_test, timeout) { - command_line = "/bin/sleep 5"; + command_line = SLEEP_PATH " 5"; int status; std::list outputs; std::condition_variable cond; @@ -75,9 +88,10 @@ TEST(check_exec_test, timeout) { std::mutex mut; std::unique_lock l(mut); cond.wait(l); - ASSERT_EQ(status, 3); + ASSERT_NE(status, 0); ASSERT_EQ(outputs.size(), 1); - ASSERT_EQ(*outputs.begin(), "Timeout at execution of /bin/sleep 5"); + + ASSERT_EQ(*outputs.begin(), "Timeout at execution of " SLEEP_PATH " 5"); } TEST(check_exec_test, bad_command) { @@ -98,7 +112,8 @@ TEST(check_exec_test, bad_command) { status = statuss; outputs = output; } - std::this_thread::sleep_for(std::chrono::milliseconds(10)); + SPDLOG_INFO("end of {}", command_line); + std::this_thread::sleep_for(std::chrono::milliseconds(50)); 
cond.notify_one(); }); check->start_check(std::chrono::seconds(1)); @@ -107,7 +122,35 @@ TEST(check_exec_test, bad_command) { cond.wait(l); ASSERT_EQ(status, 3); ASSERT_EQ(outputs.size(), 1); +#ifdef _WINDOWS + // message is language dependant + ASSERT_GE(outputs.begin()->size(), 20); +#else ASSERT_EQ(*outputs.begin(), "Fail to execute /usr/bad_path/turlututu titi toto : No such file " "or directory"); +#endif +} + +TEST(check_exec_test, recurse_not_lock) { + command_line = ECHO_PATH " hello toto"; + std::condition_variable cond; + unsigned cpt = 0; + std::shared_ptr check = check_exec::load( + g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name, + command_line, engine_to_agent_request_ptr(), + [&](const std::shared_ptr& caller, int, + const std::list& perfdata, + const std::list& output) { + if (!cpt) { + ++cpt; + caller->start_check(std::chrono::seconds(1)); + } else + cond.notify_one(); + }); + check->start_check(std::chrono::seconds(1)); + + std::mutex mut; + std::unique_lock l(mut); + cond.wait(l); } diff --git a/agent/test/scheduler_test.cc b/agent/test/scheduler_test.cc index ccd9f47a7fc..5af1a86f4dd 100644 --- a/agent/test/scheduler_test.cc +++ b/agent/test/scheduler_test.cc @@ -59,6 +59,7 @@ class tempo_check : public check { void start_check(const duration& timeout) override { { std::lock_guard l(check_starts_m); + SPDLOG_INFO("start tempo check"); check_starts.emplace_back(this, std::chrono::system_clock::now()); } check::start_check(timeout); @@ -93,6 +94,11 @@ class scheduler_test : public ::testing::Test { spdlog::default_logger()->set_level(spdlog::level::trace); } + void TearDown() override { + // let time to async check to end + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + } + std::shared_ptr create_conf( unsigned nb_serv, unsigned second_check_period, @@ -148,10 +154,23 @@ TEST_F(scheduler_test, no_config) { ASSERT_FALSE(weak_shed.lock()); } +static bool tempo_check_assert_pred(const time_point& after, + const 
time_point& before) { + if ((after - before) <= std::chrono::milliseconds(400)) { + SPDLOG_ERROR("after={}, before={}", after, before); + return false; + } + if ((after - before) >= std::chrono::milliseconds(600)) { + SPDLOG_ERROR("after={}, before={}", after, before); + return false; + } + return true; +} + TEST_F(scheduler_test, correct_schedule) { std::shared_ptr sched = scheduler::load( g_io_context, spdlog::default_logger(), "my_host", - create_conf(20, 1, 1, 50, 1), + create_conf(20, 10, 1, 50, 1), [](const std::shared_ptr&) {}, [](const std::shared_ptr& io_context, const std::shared_ptr& logger, @@ -170,10 +189,10 @@ TEST_F(scheduler_test, correct_schedule) { tempo_check::check_starts.clear(); } - std::this_thread::sleep_for(std::chrono::milliseconds(1100)); + std::this_thread::sleep_for(std::chrono::milliseconds(10100)); - // we have 2 * 10 = 20 checks spread over 1 second - duration expected_interval = std::chrono::milliseconds(50); + // we have 2 * 10 = 20 checks spread over 10 second + duration expected_interval = std::chrono::milliseconds(1000); { std::lock_guard l(tempo_check::check_starts_m); @@ -185,16 +204,14 @@ TEST_F(scheduler_test, correct_schedule) { first = false; } else { ASSERT_NE(previous.first, check_time.first); - ASSERT_GT((check_time.second - previous.second), - expected_interval - std::chrono::milliseconds(1)); - ASSERT_LT((check_time.second - previous.second), - expected_interval + std::chrono::milliseconds(1)); + ASSERT_PRED2(tempo_check_assert_pred, check_time.second, + previous.second); } previous = check_time; } } - std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + std::this_thread::sleep_for(std::chrono::milliseconds(10000)); { std::lock_guard l(tempo_check::check_starts_m); @@ -206,10 +223,8 @@ TEST_F(scheduler_test, correct_schedule) { first = false; } else { ASSERT_NE(previous.first, check_time.first); - ASSERT_TRUE((check_time.second - previous.second) > - expected_interval - std::chrono::milliseconds(1)); - 
ASSERT_TRUE((check_time.second - previous.second) < - expected_interval + std::chrono::milliseconds(1)); + ASSERT_PRED2(tempo_check_assert_pred, check_time.second, + previous.second); } previous = check_time; } @@ -306,7 +321,7 @@ TEST_F(scheduler_test, correct_output_examplar) { ASSERT_TRUE(exported_request); - SPDLOG_INFO("export:{}", exported_request->otel_request().DebugString()); + SPDLOG_INFO("export:{}", exported_request->otel_request().ShortDebugString()); ASSERT_EQ(exported_request->otel_request().resource_metrics_size(), 2); const ::opentelemetry::proto::metrics::v1::ResourceMetrics& res = @@ -422,7 +437,7 @@ unsigned concurent_check::max_active_check; TEST_F(scheduler_test, max_concurent) { std::shared_ptr sched = scheduler::load( g_io_context, spdlog::default_logger(), "my_host", - create_conf(200, 1, 1, 10, 1), + create_conf(200, 10, 1, 10, 1), [&](const std::shared_ptr& req) {}, [](const std::shared_ptr& io_context, const std::shared_ptr& logger, @@ -432,19 +447,19 @@ TEST_F(scheduler_test, max_concurent) { check::completion_handler&& handler) { return std::make_shared( io_context, logger, start_expected, service, cmd_name, cmd_line, - engine_to_agent_request, 0, std::chrono::milliseconds(75), + engine_to_agent_request, 0, std::chrono::milliseconds(750), std::move(handler)); }); - // to many tests to be completed in one second - std::this_thread::sleep_for(std::chrono::milliseconds(1100)); + // to many tests to be completed in eleven second + std::this_thread::sleep_for(std::chrono::milliseconds(11000)); ASSERT_LT(concurent_check::checked.size(), 200); ASSERT_EQ(concurent_check::max_active_check, 10); - // all tests must be completed in 1.5s - std::this_thread::sleep_for(std::chrono::milliseconds(500)); + // all tests must be completed in 16s + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); ASSERT_EQ(concurent_check::max_active_check, 10); ASSERT_EQ(concurent_check::checked.size(), 200); sched->stop(); -} \ No newline at end of file +} 
diff --git a/agent/test/scripts/sleep.bat b/agent/test/scripts/sleep.bat new file mode 100644 index 00000000000..9b178637c61 --- /dev/null +++ b/agent/test/scripts/sleep.bat @@ -0,0 +1,2 @@ +@echo off +ping 127.0.0.1 -n1 %~1 \ No newline at end of file diff --git a/agent/test/test_main.cc b/agent/test/test_main.cc index 919a087af50..21d63bb5a22 100644 --- a/agent/test/test_main.cc +++ b/agent/test/test_main.cc @@ -23,12 +23,13 @@ std::shared_ptr g_io_context( class CentreonEngineEnvironment : public testing::Environment { public: +#ifndef _WINDOWS void SetUp() override { setenv("TZ", ":Europe/Paris", 1); return; } +#endif - void TearDown() override { return; } }; /** diff --git a/bbdo/storage.proto b/bbdo/storage.proto index 9c097a3a089..a0544da87bd 100644 --- a/bbdo/storage.proto +++ b/bbdo/storage.proto @@ -1,20 +1,20 @@ -/* -** Copyright 2022 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2022 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ syntax = "proto3"; diff --git a/broker/bam/src/reporting_stream.cc b/broker/bam/src/reporting_stream.cc index 6fd5a8e8903..e159484af9e 100644 --- a/broker/bam/src/reporting_stream.cc +++ b/broker/bam/src/reporting_stream.cc @@ -34,9 +34,9 @@ #include "com/centreon/broker/bam/ba.hh" #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/time/timezone_manager.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -543,25 +543,25 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_i32(0, dk.kpi_id); binder.set_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name))); binder.set_value_as_i32(2, dk.ba_id); binder.set_value_as_str( 3, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name))); binder.set_value_as_i32(4, dk.host_id); binder.set_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( dk.host_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name))); binder.set_value_as_i32(6, dk.service_id); binder.set_value_as_str( 7, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description))); @@ -570,14 +570,14 @@ struct bulk_dimension_kpi_binder { else binder.set_null_i32(8); 
binder.set_value_as_str( - 9, misc::string::truncate( + 9, com::centreon::common::truncate_utf8( dk.kpi_ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name))); binder.set_value_as_i32(10, dk.meta_service_id); binder.set_value_as_str( 11, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name))); @@ -586,7 +586,7 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_f32(14, dk.impact_unknown); binder.set_value_as_i32(15, dk.boolean_id); binder.set_value_as_str( - 16, misc::string::truncate( + 16, com::centreon::common::truncate_utf8( dk.boolean_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -612,25 +612,25 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_i32(0, dk.kpi_id()); binder.set_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name))); binder.set_value_as_i32(2, dk.ba_id()); binder.set_value_as_str( - 3, misc::string::truncate( + 3, com::centreon::common::truncate_utf8( dk.ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name))); binder.set_value_as_i32(4, dk.host_id()); binder.set_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( dk.host_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name))); binder.set_value_as_i32(6, dk.service_id()); binder.set_value_as_str( 7, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description))); @@ -639,14 +639,14 @@ 
struct bulk_dimension_kpi_binder { else binder.set_null_i32(8); binder.set_value_as_str( - 9, misc::string::truncate( + 9, com::centreon::common::truncate_utf8( dk.kpi_ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name))); binder.set_value_as_i32(10, dk.meta_service_id()); binder.set_value_as_str( 11, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name))); @@ -655,7 +655,7 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_f32(14, dk.impact_unknown()); binder.set_value_as_i32(15, dk.boolean_id()); binder.set_value_as_str( - 16, misc::string::truncate( + 16, com::centreon::common::truncate_utf8( dk.boolean_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -691,36 +691,36 @@ struct dimension_kpi_binder { return fmt::format( "({},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},{},{},{},'{}')", dk.kpi_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name)), dk.ba_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name)), dk.host_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.host_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name)), dk.service_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description)), sz_kpi_ba_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.kpi_ba_name, 
get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name)), dk.meta_service_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name)), dk.impact_warning, dk.impact_critical, dk.impact_unknown, dk.boolean_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.boolean_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -747,37 +747,37 @@ struct dimension_kpi_binder { return fmt::format( "({},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},{},{},{},'{}')", dk.kpi_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name)), dk.ba_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name)), dk.host_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.host_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name)), dk.service_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description)), sz_kpi_ba_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.kpi_ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name)), dk.meta_service_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name)), dk.impact_warning(), 
dk.impact_critical(), dk.impact_unknown(), dk.boolean_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.boolean_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -1455,11 +1455,11 @@ void reporting_stream::_process_dimension_ba( dba.ba_id, dba.ba_description); _dimension_ba_insert.bind_value_as_i32(0, dba.ba_id); _dimension_ba_insert.bind_value_as_str( - 1, misc::string::truncate( + 1, com::centreon::common::truncate_utf8( dba.ba_name, get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_name))); _dimension_ba_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dba.ba_description, get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_description))); @@ -1485,11 +1485,11 @@ void reporting_stream::_process_pb_dimension_ba( _dimension_ba_insert.bind_value_as_i32(0, dba.ba_id()); _dimension_ba_insert.bind_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( dba.ba_name(), get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_name))); _dimension_ba_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dba.ba_description(), get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_description))); @@ -1514,11 +1514,11 @@ void reporting_stream::_process_dimension_bv( _dimension_bv_insert.bind_value_as_i32(0, dbv.bv_id); _dimension_bv_insert.bind_value_as_str( - 1, misc::string::truncate( + 1, com::centreon::common::truncate_utf8( dbv.bv_name, get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_name))); _dimension_bv_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dbv.bv_description, 
get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_description))); @@ -1541,11 +1541,11 @@ void reporting_stream::_process_pb_dimension_bv( _dimension_bv_insert.bind_value_as_i32(0, dbv.bv_id()); _dimension_bv_insert.bind_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( dbv.bv_name(), get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_name))); _dimension_bv_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dbv.bv_description(), get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_description))); @@ -1896,42 +1896,42 @@ void reporting_stream::_process_pb_dimension_timeperiod( tp.id(), tp.name()); _dimension_timeperiod_insert.bind_value_as_i32(0, tp.id()); _dimension_timeperiod_insert.bind_value_as_str( - 1, misc::string::truncate( + 1, com::centreon::common::truncate_utf8( tp.name(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_name))); _dimension_timeperiod_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( tp.sunday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_sunday))); _dimension_timeperiod_insert.bind_value_as_str( - 3, misc::string::truncate( + 3, com::centreon::common::truncate_utf8( tp.monday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_monday))); _dimension_timeperiod_insert.bind_value_as_str( - 4, misc::string::truncate( + 4, com::centreon::common::truncate_utf8( tp.tuesday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_tuesday))); _dimension_timeperiod_insert.bind_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( 
tp.wednesday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_wednesday))); _dimension_timeperiod_insert.bind_value_as_str( - 6, misc::string::truncate( + 6, com::centreon::common::truncate_utf8( tp.thursday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_thursday))); _dimension_timeperiod_insert.bind_value_as_str( - 7, misc::string::truncate( + 7, com::centreon::common::truncate_utf8( tp.friday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_friday))); _dimension_timeperiod_insert.bind_value_as_str( - 8, misc::string::truncate( + 8, com::centreon::common::truncate_utf8( tp.saturday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_saturday))); @@ -1958,41 +1958,41 @@ void reporting_stream::_process_dimension_timeperiod( _dimension_timeperiod_insert.bind_value_as_i32(0, tp.id); _dimension_timeperiod_insert.bind_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( tp.name, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_name))); _dimension_timeperiod_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( tp.sunday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_sunday))); _dimension_timeperiod_insert.bind_value_as_str( - 3, misc::string::truncate( + 3, com::centreon::common::truncate_utf8( tp.monday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_monday))); _dimension_timeperiod_insert.bind_value_as_str( - 4, misc::string::truncate( + 4, com::centreon::common::truncate_utf8( tp.tuesday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( 
centreon_storage_mod_bam_reporting_timeperiods_tuesday))); _dimension_timeperiod_insert.bind_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( tp.wednesday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_wednesday))); _dimension_timeperiod_insert.bind_value_as_str( - 6, misc::string::truncate( + 6, com::centreon::common::truncate_utf8( tp.thursday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_thursday))); _dimension_timeperiod_insert.bind_value_as_str( - 7, misc::string::truncate( + 7, com::centreon::common::truncate_utf8( tp.friday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_friday))); _dimension_timeperiod_insert.bind_value_as_str( - 8, misc::string::truncate( + 8, com::centreon::common::truncate_utf8( tp.saturday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_saturday))); diff --git a/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh b/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh index 30099ecac64..1e44b023ba8 100644 --- a/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh +++ b/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh @@ -84,7 +84,8 @@ class endpoint { static bool loaded(); static multiplexing::muxer_filter parse_filters( - const std::set& str_filters); + const std::set& str_filters, + const multiplexing::muxer_filter& forbidden_filter); }; } // namespace applier } // namespace config diff --git a/broker/core/inc/com/centreon/broker/misc/string.hh b/broker/core/inc/com/centreon/broker/misc/string.hh index 2ee2db16d8e..03c234bdcaf 100644 --- a/broker/core/inc/com/centreon/broker/misc/string.hh +++ b/broker/core/inc/com/centreon/broker/misc/string.hh @@ -24,7 +24,8 @@ #include namespace 
com::centreon::broker::misc::string { -inline std::string& replace(std::string& str, std::string const& old_str, +inline std::string& replace(std::string& str, + std::string const& old_str, std::string const& new_str) { std::size_t pos(str.find(old_str, 0)); while (pos != std::string::npos) { @@ -37,28 +38,7 @@ inline std::string& replace(std::string& str, std::string const& old_str, std::string& trim(std::string& str) throw(); std::string base64_encode(std::string const& str); bool is_number(const std::string& s); -std::string check_string_utf8(const std::string& str) noexcept; -/** - * @brief This function works almost like the resize method but takes care - * of the UTF-8 encoding and avoids to cut a string in the middle of a - * character. This function assumes the string to be UTF-8 encoded. - * - * @param str A string to truncate. - * @param s The desired size, maybe the resulting string will contain less - * characters. - * - * @return a reference to the string str. - */ -template -fmt::string_view truncate(const T& str, size_t s) { - if (s >= str.size()) return fmt::string_view(str); - if (s > 0) - while ((str[s] & 0xc0) == 0x80) s--; - return fmt::string_view(str.data(), s); -} - -size_t adjust_size_utf8(const std::string& str, size_t s); std::string escape(const std::string& str, size_t s); std::string debug_buf(const char* data, int32_t size, int max_len = 10); diff --git a/broker/core/inc/com/centreon/broker/processing/feeder.hh b/broker/core/inc/com/centreon/broker/processing/feeder.hh index 71e6636b11c..4ccfcd90ea7 100644 --- a/broker/core/inc/com/centreon/broker/processing/feeder.hh +++ b/broker/core/inc/com/centreon/broker/processing/feeder.hh @@ -39,6 +39,7 @@ namespace processing { * Take events from a source and send them to a destination. 
*/ class feeder : public stat_visitable, + public multiplexing::muxer::data_handler, public std::enable_shared_from_this { enum class state : unsigned { running, finished }; // Condition variable used when waiting for the thread to finish @@ -63,6 +64,8 @@ class feeder : public stat_visitable, const multiplexing::muxer_filter& read_filters, const multiplexing::muxer_filter& write_filters); + void init(); + const std::string& _get_read_filters() const override; const std::string& _get_write_filters() const override; void _forward_statistic(nlohmann::json& tree) override; @@ -74,9 +77,6 @@ class feeder : public stat_visitable, void _start_read_from_stream_timer(); void _read_from_stream_timer_handler(const boost::system::error_code& err); - unsigned _write_to_client( - const std::vector>& events); - void _stop_no_lock(); void _ack_events_on_muxer(uint32_t count) noexcept; @@ -98,6 +98,9 @@ class feeder : public stat_visitable, bool is_finished() const noexcept; bool wait_for_all_events_written(unsigned ms_timeout); + + uint32_t on_events( + const std::vector>& events) override; }; } // namespace processing diff --git a/broker/core/inc/com/centreon/broker/time/timeperiod.hh b/broker/core/inc/com/centreon/broker/time/timeperiod.hh index 5523ac9bb02..a83fd4e2d91 100644 --- a/broker/core/inc/com/centreon/broker/time/timeperiod.hh +++ b/broker/core/inc/com/centreon/broker/time/timeperiod.hh @@ -97,7 +97,7 @@ class timeperiod { bool set_timerange(std::string const& timerange_text, int day); std::list const& get_timeranges_by_day(int day) const throw(); - std::string const& get_timezone() const throw(); + std::string const& get_timezone() const noexcept; void set_timezone(std::string const& tz); bool is_valid(time_t preferred_time) const; @@ -120,6 +120,6 @@ class timeperiod { }; } // namespace time -} +} // namespace com::centreon::broker #endif // !CCB_CORE_TIME_TIMEPERIOD_HH diff --git a/broker/core/inc/com/centreon/broker/version.hh.in 
b/broker/core/inc/com/centreon/broker/version.hh.in index 62198225fdd..7c5fa84888e 100644 --- a/broker/core/inc/com/centreon/broker/version.hh.in +++ b/broker/core/inc/com/centreon/broker/version.hh.in @@ -23,9 +23,9 @@ namespace com::centreon::broker::version { // Compile-time values. - unsigned int const major = @COLLECT_MAJOR@; - unsigned int const minor = @COLLECT_MINOR@; - unsigned int const patch = @COLLECT_PATCH@; + constexpr unsigned major = @COLLECT_MAJOR@; + constexpr unsigned minor = @COLLECT_MINOR@.0; + constexpr unsigned patch = @COLLECT_PATCH@.0; char const* const string = "@CENTREON_BROKER_VERSION@"; } diff --git a/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh b/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh index bc7b6d959a2..82887bce2af 100644 --- a/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh +++ b/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh @@ -51,6 +51,14 @@ namespace com::centreon::broker::multiplexing { * @see engine */ class muxer : public io::stream, public std::enable_shared_from_this { + public: + class data_handler { + public: + virtual ~data_handler() = default; + virtual uint32_t on_events( + const std::vector>& events) = 0; + }; + private: static uint32_t _event_queue_max_size; @@ -63,7 +71,7 @@ class muxer : public io::stream, public std::enable_shared_from_this { std::string _write_filters_str; const bool _persistent; - std::function>&)> _data_handler; + std::shared_ptr _data_handler; std::atomic_bool _reader_running = false; /** Events are stacked into _events or into _file. 
Because several threads @@ -139,9 +147,8 @@ class muxer : public io::stream, public std::enable_shared_from_this { void set_write_filter(const muxer_filter& w_filter); void clear_read_handler(); void unsubscribe(); - void set_action_on_new_data( - std::function>)>&& - data_handler) ABSL_LOCKS_EXCLUDED(_events_m); + void set_action_on_new_data(const std::shared_ptr& handler) + ABSL_LOCKS_EXCLUDED(_events_m); void clear_action_on_new_data() ABSL_LOCKS_EXCLUDED(_events_m); }; diff --git a/broker/core/multiplexing/src/muxer.cc b/broker/core/multiplexing/src/muxer.cc index c81a955206e..2c3250ea32a 100644 --- a/broker/core/multiplexing/src/muxer.cc +++ b/broker/core/multiplexing/src/muxer.cc @@ -311,28 +311,34 @@ uint32_t muxer::event_queue_max_size() noexcept { * execute the data handler. */ void muxer::_execute_reader_if_needed() { - _logger->debug("muxer '{}' execute reader if needed data_handler: {}", _name, - static_cast(_data_handler)); - if (_data_handler) { - bool expected = false; - if (_reader_running.compare_exchange_strong(expected, true)) { - com::centreon::common::pool::io_context_ptr()->post( - [me = shared_from_this()] { + SPDLOG_LOGGER_DEBUG( + _logger, "muxer '{}' execute reader if needed data_handler", _name); + bool expected = false; + if (_reader_running.compare_exchange_strong(expected, true)) { + com::centreon::common::pool::io_context_ptr()->post( + [me = shared_from_this(), this] { + std::shared_ptr to_call; + { + absl::MutexLock lck(&_events_m); + to_call = _data_handler; + } + if (to_call) { std::vector> to_fill; - to_fill.reserve(me->_events_size); - bool still_events_to_read = me->read(to_fill, me->_events_size); - uint32_t written = me->_data_handler(to_fill); + to_fill.reserve(_events_size); + bool still_events_to_read = read(to_fill, _events_size); + uint32_t written = to_call->on_events(to_fill); if (written > 0) - me->ack_events(written); + ack_events(written); if (written != to_fill.size()) { - me->_logger->error( + SPDLOG_LOGGER_ERROR( 
+ _logger, "Unable to handle all the incoming events in muxer '{}'", - me->_name); - me->clear_action_on_new_data(); + _name); + clear_action_on_new_data(); } - me->_reader_running.store(false); - }); - } + _reader_running.store(false); + } + }); } } @@ -784,13 +790,12 @@ void muxer::unsubscribe() { } void muxer::set_action_on_new_data( - std::function>)>&& - data_handler) { + const std::shared_ptr& handler) { absl::MutexLock lck(&_events_m); - _data_handler = data_handler; + _data_handler = handler; } void muxer::clear_action_on_new_data() { absl::MutexLock lck(&_events_m); - _data_handler = nullptr; + _data_handler.reset(); } diff --git a/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh b/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh index 0afb7938210..d7af4120bc1 100644 --- a/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh +++ b/broker/core/sql/inc/com/centreon/broker/sql/mysql_error.hh @@ -33,87 +33,83 @@ namespace database { class mysql_error { public: enum code { - empty, - clean_hosts_services, - clean_hostgroup_members, - clean_servicegroup_members, - clean_empty_hostgroups, - clean_empty_servicegroups, - clean_host_dependencies, - clean_service_dependencies, - clean_host_parents, - clean_modules, - clean_downtimes, - clean_comments, - clean_customvariables, - restore_instances, - update_customvariables, - update_logs, - update_metrics, - insert_data, - delete_metric, - delete_index, - flag_index_data, - delete_hosts, - delete_modules, - update_index_state, - delete_availabilities, - insert_availability, - rebuild_ba, - close_event, - close_ba_events, - close_kpi_events, - delete_ba_durations, - store_host_state, - store_acknowledgement, - store_comment, - remove_customvariable, - store_customvariable, - store_downtime, - store_eventhandler, - store_flapping, - store_host_check, - store_host_dependency, - store_host_group, - store_host_group_member, - delete_host_group_member, - store_host, - store_host_parentship, - 
store_host_status, - store_poller, - update_poller, - store_module, - store_service_check_command, - store_service_dependency, - store_service_group, - store_service_group_member, - delete_service_group_member, - store_service, - store_service_status, - update_ba, - update_kpi, - update_kpi_event, - insert_kpi_event, - insert_ba, - insert_bv, - insert_dimension_ba_bv, - truncate_dimension_table, - insert_dimension_kpi, - insert_timeperiod, - insert_timeperiod_exception, - insert_exclusion_timeperiod, - insert_relation_ba_timeperiod, - store_severity, - clean_severities, - store_tag, - clean_resources_tags, - update_index_data, - update_resources, - store_host_resources, - store_tags_resources_tags, - delete_resources_tags, - clean_resources, - delete_poller, + empty = 0, + clean_hosts_services = 1, + clean_hostgroup_members = 2, + clean_servicegroup_members = 3, + clean_empty_hostgroups = 4, + clean_empty_servicegroups = 5, + clean_host_parents = 6, + clean_modules = 7, + clean_downtimes = 8, + clean_comments = 9, + clean_customvariables = 10, + restore_instances = 11, + update_customvariables = 12, + update_logs = 13, + update_metrics = 14, + insert_data = 15, + delete_metric = 16, + delete_index = 17, + flag_index_data = 18, + delete_hosts = 19, + delete_modules = 20, + update_index_state = 21, + delete_availabilities = 22, + insert_availability = 23, + rebuild_ba = 24, + close_event = 25, + close_ba_events = 26, + close_kpi_events = 27, + delete_ba_durations = 28, + store_host_state = 29, + store_acknowledgement = 30, + store_comment = 31, + remove_customvariable = 32, + store_customvariable = 33, + store_downtime = 34, + store_eventhandler = 35, + store_flapping = 36, + store_host_check = 37, + store_host_group = 38, + store_host_group_member = 39, + delete_host_group_member = 40, + store_host = 41, + store_host_parentship = 42, + store_host_status = 43, + store_poller = 44, + update_poller = 45, + store_module = 46, + store_service_check_command = 47, + 
store_service_group = 48, + store_service_group_member = 49, + delete_service_group_member = 50, + store_service = 51, + store_service_status = 52, + update_ba = 53, + update_kpi = 54, + update_kpi_event = 55, + insert_kpi_event = 56, + insert_ba = 57, + insert_bv = 58, + insert_dimension_ba_bv = 59, + truncate_dimension_table = 60, + insert_dimension_kpi = 61, + insert_timeperiod = 62, + insert_timeperiod_exception = 63, + insert_exclusion_timeperiod = 64, + insert_relation_ba_timeperiod = 65, + store_severity = 66, + clean_severities = 67, + store_tag = 68, + clean_resources_tags = 69, + update_index_data = 70, + update_resources = 71, + store_host_resources = 72, + store_tags_resources_tags = 73, + delete_resources_tags = 74, + clean_resources = 75, + delete_poller = 76, }; static constexpr const char* msg[]{ @@ -123,8 +119,6 @@ class mysql_error { "could not clean service groups memberships table: ", "could not remove empty host groups: ", "could not remove empty service groups: ", - "could not clean host dependencies table: ", - "could not clean service dependencies table: ", "could not clean host parents table: ", "could not clean modules table: ", "could not clean downtimes table: ", @@ -158,7 +152,6 @@ class mysql_error { "could not store event handler: ", "could not store flapping status: ", "could not store host check: ", - "could not store host dependency: ", "could not store host group: ", "could not store host group membership: ", "could not delete membership of host to host group: ", @@ -169,7 +162,6 @@ class mysql_error { "could not update poller: ", "could not store module: ", "could not store service check command: ", - "could not store service dependency: ", "could not store service group: ", "could not store service group membership: ", "could not delete membersjip of service to service group: ", diff --git a/broker/core/sql/src/mysql_stmt.cc b/broker/core/sql/src/mysql_stmt.cc index c3222c0510f..728e8d4473d 100644 --- 
a/broker/core/sql/src/mysql_stmt.cc +++ b/broker/core/sql/src/mysql_stmt.cc @@ -24,7 +24,7 @@ #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/io/protobuf.hh" #include "com/centreon/broker/mapping/entry.hh" -#include "com/centreon/broker/misc/string.hh" +#include "com/centreon/common/utf8.hh" using namespace com::centreon::exceptions; using namespace com::centreon::broker; @@ -166,7 +166,7 @@ void mysql_stmt::operator<<(io::data const& d) { "column '{}' should admit a longer string, it is cut to {} " "characters to be stored anyway.", current_entry->get_name_v2(), max_len); - max_len = misc::string::adjust_size_utf8(v, max_len); + max_len = common::adjust_size_utf8(v, max_len); sv = fmt::string_view(v.data(), max_len); } else sv = fmt::string_view(v); @@ -283,7 +283,7 @@ void mysql_stmt::operator<<(io::data const& d) { "column '{}' should admit a longer string, it is cut to {} " "characters to be stored anyway.", field, max_len); - max_len = misc::string::adjust_size_utf8(v, max_len); + max_len = common::adjust_size_utf8(v, max_len); sv = fmt::string_view(v.data(), max_len); } else sv = fmt::string_view(v); diff --git a/broker/core/src/broker_impl.cc b/broker/core/src/broker_impl.cc index 21f817e07d9..bff46898aaf 100644 --- a/broker/core/src/broker_impl.cc +++ b/broker/core/src/broker_impl.cc @@ -336,8 +336,10 @@ grpc::Status broker_impl::GetLogInfo(grpc::ServerContext* context return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, msg); } } else { - for (auto& p : lvs) - map[p.first] = p.second; + for (auto& p : lvs) { + auto level = to_string_view(p.second); + map[p.first] = std::string(level.data(), level.size()); + } return grpc::Status::OK; } } diff --git a/broker/core/src/config/applier/endpoint.cc b/broker/core/src/config/applier/endpoint.cc index 60edff28267..02c456e3ae7 100644 --- a/broker/core/src/config/applier/endpoint.cc +++ b/broker/core/src/config/applier/endpoint.cc @@ -102,14 +102,15 @@ endpoint::~endpoint() { */ void 
endpoint::apply(std::list const& endpoints) { // Log messages. - _logger->info("endpoint applier: loading configuration"); + SPDLOG_LOGGER_INFO(_logger, "endpoint applier: loading configuration"); - { + if (_logger->level() <= spdlog::level::debug) { std::vector eps; for (auto& ep : endpoints) eps.push_back(ep.name); - _logger->debug("endpoint applier: {} endpoints to apply: {}", - endpoints.size(), fmt::format("{}", fmt::join(eps, ", "))); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: {} endpoints to apply: {}", + endpoints.size(), + fmt::format("{}", fmt::join(eps, ", "))); } // Copy endpoint configurations and apply eventual modifications. @@ -129,8 +130,9 @@ void endpoint::apply(std::list const& endpoints) { // resources that might be used by other endpoints. auto it = _endpoints.find(ep); if (it != _endpoints.end()) { - _logger->debug("endpoint applier: removing old endpoint {}", - it->first.name); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: removing old endpoint {}", + it->first.name); /* failover::exit() is called. */ it->second->exit(); delete it->second; @@ -141,13 +143,14 @@ void endpoint::apply(std::list const& endpoints) { // Update existing endpoints. for (auto it = _endpoints.begin(), end = _endpoints.end(); it != end; ++it) { - _logger->debug("endpoint applier: updating endpoint {}", it->first.name); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: updating endpoint {}", + it->first.name); it->second->update(); } // Debug message. - _logger->debug("endpoint applier: {} endpoints to create", - endp_to_create.size()); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: {} endpoints to create", + endp_to_create.size()); // Create new endpoints. 
for (config::endpoint& ep : endp_to_create) { @@ -156,7 +159,8 @@ void endpoint::apply(std::list const& endpoints) { if (ep.name.empty() || std::find_if(endp_to_create.begin(), endp_to_create.end(), name_match_failover(ep.name)) == endp_to_create.end()) { - _logger->debug("endpoint applier: creating endpoint {}", ep.name); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: creating endpoint {}", + ep.name); bool is_acceptor; std::shared_ptr e{_create_endpoint(ep, is_acceptor)}; std::unique_ptr endp; @@ -173,15 +177,18 @@ void endpoint::apply(std::list const& endpoints) { * if broker sends data to map. This is needed because a failover needs * its peer to ack events to release them (and a failover is also able * to write data). */ - multiplexing::muxer_filter r_filter = parse_filters(ep.read_filters); - multiplexing::muxer_filter w_filter = parse_filters(ep.write_filters); + multiplexing::muxer_filter r_filter = + parse_filters(ep.read_filters, e->get_stream_forbidden_filter()); + multiplexing::muxer_filter w_filter = + parse_filters(ep.write_filters, e->get_stream_forbidden_filter()); if (is_acceptor) { w_filter -= e->get_stream_forbidden_filter(); r_filter -= e->get_stream_forbidden_filter(); std::unique_ptr acceptr( std::make_unique(e, ep.name, r_filter, w_filter)); - _logger->debug( + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: acceptor '{}' configured with write filters: {} " "and read filters: {}", ep.name, w_filter.get_allowed_categories(), @@ -193,7 +200,8 @@ void endpoint::apply(std::list const& endpoints) { /* Are there missing events in the w_filter ? */ if (!e->get_stream_mandatory_filter().is_in(w_filter)) { w_filter |= e->get_stream_mandatory_filter(); - _logger->debug( + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: The configured write filters for the endpoint " "'{}' are too restrictive. 
Mandatory categories added to them", ep.name); @@ -201,7 +209,8 @@ void endpoint::apply(std::list const& endpoints) { /* Are there events in w_filter that are forbidden ? */ if (w_filter.contains_some_of(e->get_stream_forbidden_filter())) { w_filter -= e->get_stream_forbidden_filter(); - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: The configured write filters for the endpoint " "'{}' contain forbidden filters. These ones are removed", ep.name); @@ -210,13 +219,14 @@ void endpoint::apply(std::list const& endpoints) { /* Are there events in r_filter that are forbidden ? */ if (r_filter.contains_some_of(e->get_stream_forbidden_filter())) { r_filter -= e->get_stream_forbidden_filter(); - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: The configured read filters for the endpoint " "'{}' contain forbidden filters. These ones are removed", ep.name); } - _logger->debug( - "endpoint applier: filters {} for endpoint '{}' applied.", + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: filters {} for endpoint '{}' applied.", w_filter.get_allowed_categories(), ep.name); auto mux = multiplexing::muxer::create( @@ -230,7 +240,8 @@ void endpoint::apply(std::list const& endpoints) { } // Run thread. - _logger->debug( + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: endpoint thread {} of '{}' is registered and " "ready to run", static_cast(endp.get()), ep.name); @@ -245,13 +256,14 @@ void endpoint::apply(std::list const& endpoints) { */ void endpoint::_discard() { _discarding = true; - _logger->debug("endpoint applier: destruction"); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: destruction"); // wait for failover and feeder to push endloop event ::usleep(processing::idle_microsec_wait_idle_thread_delay + 100000); // Exit threads. 
{ - _logger->debug("endpoint applier: requesting threads termination"); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: requesting threads termination"); std::unique_lock lock(_endpointsm); // Send termination requests. @@ -259,8 +271,9 @@ void endpoint::_discard() { for (auto it = _endpoints.begin(); it != _endpoints.end();) { if (it->second->is_feeder()) { it->second->wait_for_all_events_written(5000); - _logger->trace("endpoint applier: send exit signal to endpoint '{}'", - it->second->get_name()); + SPDLOG_LOGGER_TRACE( + _logger, "endpoint applier: send exit signal to endpoint '{}'", + it->second->get_name()); delete it->second; it = _endpoints.erase(it); } else @@ -270,19 +283,22 @@ void endpoint::_discard() { // Exit threads. { - _logger->debug("endpoint applier: requesting threads termination"); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: requesting threads termination"); std::unique_lock lock(_endpointsm); // We continue with failovers for (auto it = _endpoints.begin(); it != _endpoints.end();) { it->second->wait_for_all_events_written(5000); - _logger->trace("endpoint applier: send exit signal on endpoint '{}'", - it->second->get_name()); + SPDLOG_LOGGER_TRACE(_logger, + "endpoint applier: send exit signal on endpoint '{}'", + it->second->get_name()); delete it->second; it = _endpoints.erase(it); } - _logger->debug("endpoint applier: all threads are terminated"); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: all threads are terminated"); } // Stop multiplexing: we must stop the engine after failovers otherwise @@ -373,7 +389,8 @@ processing::failover* endpoint::_create_failover( std::shared_ptr endp, std::list& l) { // Debug message. - _logger->info("endpoint applier: creating new failover '{}'", cfg.name); + SPDLOG_LOGGER_INFO(_logger, "endpoint applier: creating new failover '{}'", + cfg.name); // Check that failover is configured. 
std::shared_ptr failovr; @@ -382,7 +399,8 @@ processing::failover* endpoint::_create_failover( std::list::iterator it = std::find_if(l.begin(), l.end(), failover_match_name(front_failover)); if (it == l.end()) - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: could not find failover '{}' for endpoint '{}'", front_failover, cfg.name); else { @@ -411,7 +429,8 @@ processing::failover* endpoint::_create_failover( bool is_acceptor{false}; std::shared_ptr endp(_create_endpoint(*it, is_acceptor)); if (is_acceptor) { - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: secondary failover '{}' is an acceptor and " "cannot therefore be instantiated for endpoint '{}'", *failover_it, cfg.name); @@ -462,8 +481,8 @@ std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, endp = std::shared_ptr( it->second.endpntfactry->new_endpoint(cfg, is_acceptor, cache)); - _logger->info(" create endpoint {} for endpoint '{}'", it->first, - cfg.name); + SPDLOG_LOGGER_INFO(_logger, " create endpoint {} for endpoint '{}'", + it->first, cfg.name); level = it->second.osi_to + 1; break; } @@ -484,8 +503,8 @@ std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, (it->second.endpntfactry->has_endpoint(cfg, nullptr))) { std::shared_ptr current( it->second.endpntfactry->new_endpoint(cfg, is_acceptor)); - _logger->info(" create endpoint {} for endpoint '{}'", it->first, - cfg.name); + SPDLOG_LOGGER_INFO(_logger, " create endpoint {} for endpoint '{}'", + it->first, cfg.name); current->from(endp); endp = current; level = it->second.osi_to; @@ -545,7 +564,8 @@ void endpoint::_diff_endpoints( list_it = std::find_if(new_ep.begin(), new_ep.end(), failover_match_name(failover)); if (list_it == new_ep.end()) - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: could not find failover '{}' for endpoint " "'{}'", failover, entry.name); @@ -570,11 +590,14 @@ void endpoint::_diff_endpoints( * Create filters from a set of categories. 
* * @param[in] cfg Endpoint configuration. + * @param[in] forbidden_filter forbidden filter applied in case of default + * filter config * * @return Filters. */ multiplexing::muxer_filter endpoint::parse_filters( - const std::set& str_filters) { + const std::set& str_filters, + const multiplexing::muxer_filter& forbidden_filter) { auto logger = log_v2::instance().get(log_v2::CONFIG); multiplexing::muxer_filter elements({}); std::forward_list applied_filters; @@ -595,6 +618,7 @@ multiplexing::muxer_filter endpoint::parse_filters( if (str_filters.size() == 1 && *str_filters.begin() == "all") { elements = multiplexing::muxer_filter(); + elements -= forbidden_filter; applied_filters.emplace_front("all"); } else { for (auto& str : str_filters) { @@ -610,10 +634,11 @@ multiplexing::muxer_filter endpoint::parse_filters( } if (applied_filters.empty() && !str_filters.empty()) { fill_elements("all"); + elements -= forbidden_filter; applied_filters.emplace_front("all"); } } - logger->info("Filters applied on endpoint:{}", - fmt::join(applied_filters, ", ")); + SPDLOG_LOGGER_INFO(logger, "Filters applied on endpoint:{}", + fmt::join(applied_filters, ", ")); return elements; } diff --git a/broker/core/src/misc/string.cc b/broker/core/src/misc/string.cc index 263f6cbe9fd..354669c0fcc 100644 --- a/broker/core/src/misc/string.cc +++ b/broker/core/src/misc/string.cc @@ -1,22 +1,23 @@ /** -* Copyright 2011-2013 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2013 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/misc/string.hh" +#include "com/centreon/common/utf8.hh" #include @@ -74,259 +75,6 @@ bool string::is_number(const std::string& s) { }) == s.end(); } -/** - * @brief Checks if the string given as parameter is a real UTF-8 string. - * If it is not, it tries to convert it to UTF-8. Encodings correctly changed - * are ISO-8859-15 and CP-1252. - * - * @param str The string to check - * - * @return The string itself or a new string converted to UTF-8. The output - * string should always be an UTF-8 string. 
- */ -std::string string::check_string_utf8(std::string const& str) noexcept { - std::string::const_iterator it; - for (it = str.begin(); it != str.end();) { - uint32_t val = (*it & 0xff); - if ((val & 0x80) == 0) { - ++it; - continue; - } - val = (val << 8) | (*(it + 1) & 0xff); - if ((val & 0xe0c0) == 0xc080) { - val &= 0x1e00; - if (val == 0) - break; - it += 2; - continue; - } - - val = (val << 8) | (*(it + 2) & 0xff); - if ((val & 0xf0c0c0) == 0xe08080) { - val &= 0xf2000; - if (val == 0 || val == 0xd2000) - break; - it += 3; - continue; - } - - val = (val << 8) | (*(it + 3) & 0xff); - if ((val & 0xf8c0c0c0) == 0xF0808080) { - val &= 0x7300000; - if (val == 0 || val > 0x4000000) - break; - it += 4; - continue; - } - break; - } - - if (it == str.end()) - return str; - - /* Not an UTF-8 string */ - bool is_cp1252 = true, is_iso8859 = true; - auto itt = it; - - auto iso8859_to_utf8 = [&str, &it]() -> std::string { - /* Strings are both cp1252 and iso8859-15 */ - std::string out; - std::size_t d = it - str.begin(); - out.reserve(d + 2 * (str.size() - d)); - out = str.substr(0, d); - while (it != str.end()) { - uint8_t c = static_cast(*it); - if (c < 128) - out.push_back(c); - else if (c <= 160) - out.push_back('_'); - else { - switch (c) { - case 0xa4: - out.append("€"); - break; - case 0xa6: - out.append("Š"); - break; - case 0xa8: - out.append("š"); - break; - case 0xb4: - out.append("Ž"); - break; - case 0xb8: - out.append("ž"); - break; - case 0xbc: - out.append("Œ"); - break; - case 0xbd: - out.append("œ"); - break; - case 0xbe: - out.append("Ÿ"); - break; - default: - out.push_back(0xc0 | c >> 6); - out.push_back((c & 0x3f) | 0x80); - break; - } - } - ++it; - } - return out; - }; - do { - uint8_t c = *itt; - /* not ISO-8859-15 */ - if (c > 126 && c < 160) - is_iso8859 = false; - /* not cp1252 */ - if (c & 128) - if (c == 129 || c == 141 || c == 143 || c == 144 || c == 155) - is_cp1252 = false; - if (!is_cp1252) - return iso8859_to_utf8(); - else if 
(!is_iso8859) { - std::string out; - std::size_t d = it - str.begin(); - out.reserve(d + 3 * (str.size() - d)); - out = str.substr(0, d); - while (it != str.end()) { - c = *it; - if (c < 128) - out.push_back(c); - else { - switch (c) { - case 128: - out.append("€"); - break; - case 129: - case 141: - case 143: - case 144: - case 157: - out.append("_"); - break; - case 130: - out.append("‚"); - break; - case 131: - out.append("ƒ"); - break; - case 132: - out.append("„"); - break; - case 133: - out.append("…"); - break; - case 134: - out.append("†"); - break; - case 135: - out.append("‡"); - break; - case 136: - out.append("ˆ"); - break; - case 137: - out.append("‰"); - break; - case 138: - out.append("Š"); - break; - case 139: - out.append("‹"); - break; - case 140: - out.append("Œ"); - break; - case 142: - out.append("Ž"); - break; - case 145: - out.append("‘"); - break; - case 146: - out.append("’"); - break; - case 147: - out.append("“"); - break; - case 148: - out.append("”"); - break; - case 149: - out.append("•"); - break; - case 150: - out.append("–"); - break; - case 151: - out.append("—"); - break; - case 152: - out.append("˜"); - break; - case 153: - out.append("™"); - break; - case 154: - out.append("š"); - break; - case 155: - out.append("›"); - break; - case 156: - out.append("œ"); - break; - case 158: - out.append("ž"); - break; - case 159: - out.append("Ÿ"); - break; - default: - out.push_back(0xc0 | c >> 6); - out.push_back((c & 0x3f) | 0x80); - break; - } - } - ++it; - } - return out; - } - ++itt; - } while (itt != str.end()); - assert(is_cp1252 == is_iso8859); - return iso8859_to_utf8(); -} - -/** - * @brief This function adjusts the given integer s so that the str string may - * be cut at this length and still be a UTF-8 string (we don't want to cut it - * in a middle of a character). - * - * This function assumes the string to be UTF-8 encoded. - * - * @param str A string to truncate. 
- * @param s The desired size, maybe the resulting string will contain less - * characters. - * - * @return The newly computed size. - */ -size_t string::adjust_size_utf8(const std::string& str, size_t s) { - if (s >= str.size()) - return str.size(); - if (s == 0) - return s; - else { - while ((str[s] & 0xc0) == 0x80) - s--; - return s; - } -} - /** * @brief Escape the given string so that it can be directly inserted into the * database. Essntially, characters \ and ' are prefixed with \. The function @@ -340,7 +88,7 @@ size_t string::adjust_size_utf8(const std::string& str, size_t s) { std::string string::escape(const std::string& str, size_t s) { size_t found = str.find_first_of("'\\"); if (found == std::string::npos) - return str.substr(0, adjust_size_utf8(str, s)); + return str.substr(0, common::adjust_size_utf8(str, s)); else { std::string ret; /* ret is reserved with the worst size */ @@ -362,7 +110,7 @@ std::string string::escape(const std::string& str, size_t s) { ret += str[ffound]; found = ffound; } while (found < s); - ret.resize(adjust_size_utf8(ret, s)); + ret.resize(common::adjust_size_utf8(ret, s)); if (ret.size() > 1) { auto it = --ret.end(); size_t nb{0}; diff --git a/broker/core/src/processing/feeder.cc b/broker/core/src/processing/feeder.cc index a433cfcb232..a032eebcf40 100644 --- a/broker/core/src/processing/feeder.cc +++ b/broker/core/src/processing/feeder.cc @@ -56,12 +56,21 @@ std::shared_ptr feeder::create( std::shared_ptr ret( new feeder(name, parent, client, read_filters, write_filters)); - ret->_start_stat_timer(); - - ret->_start_read_from_stream_timer(); + ret->init(); return ret; } +/** + * @brief to call after object construction + * + */ +void feeder::init() { + _start_stat_timer(); + _muxer->set_action_on_new_data(shared_from_this()); + + _start_read_from_stream_timer(); +} + /** * Constructor. 
* @@ -91,10 +100,6 @@ feeder::feeder(const std::string& name, if (!_client) throw msg_fmt("could not process '{}' with no client stream", _name); - _muxer->set_action_on_new_data( - [this](const std::vector>& events) -> uint32_t { - return _write_to_client(events); - }); set_last_connection_attempt(timestamp::now()); set_last_connection_success(timestamp::now()); set_state("connected"); @@ -146,11 +151,10 @@ void feeder::_forward_statistic(nlohmann::json& tree) { /** * @brief write event to client stream - * _protect must be locked * @param event * @return number of events written */ -unsigned feeder::_write_to_client( +uint32_t feeder::on_events( const std::vector>& events) { unsigned written = 0; try { @@ -242,11 +246,6 @@ void feeder::_stop_no_lock() { _name); _muxer->remove_queue_files(); SPDLOG_LOGGER_INFO(_logger, "feeder: {} terminated", _name); - - /* The muxer is in a shared_ptr. When the feeder is destroyed, we must be - * sure the muxer won't write data anymore otherwise we will have a segfault. - */ - _muxer->clear_action_on_new_data(); } /** diff --git a/broker/core/test/misc/string.cc b/broker/core/test/misc/string.cc index 947157ba219..cf18b6edf3f 100644 --- a/broker/core/test/misc/string.cc +++ b/broker/core/test/misc/string.cc @@ -23,6 +23,7 @@ #include #include "com/centreon/broker/misc/misc.hh" +#include "com/centreon/common/utf8.hh" using namespace com::centreon::broker::misc; @@ -56,201 +57,6 @@ TEST(StringBase64, Encode) { ASSERT_EQ(string::base64_encode("ABC"), "QUJD"); } -/* - * Given a string encoded in ISO-8859-15 and CP-1252 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, simple) { - std::string txt("L'acc\350s \340 l'h\364tel est encombr\351"); - ASSERT_EQ(string::check_string_utf8(txt), "L'accès à l'hôtel est encombré"); -} - -/* - * Given a string encoded in UTF-8 - * Then the check_string_utf8 function returns itself. 
- */ -TEST(string_check_utf8, utf8) { - std::string txt("L'accès à l'hôtel est encombré"); - ASSERT_EQ(string::check_string_utf8(txt), "L'accès à l'hôtel est encombré"); -} - -/* - * Given a string encoded in CP-1252 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, cp1252) { - std::string txt("Le ticket co\xfbte 12\x80\n"); - ASSERT_EQ(string::check_string_utf8(txt), "Le ticket coûte 12€\n"); -} - -/* - * Given a string encoded in ISO-8859-15 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, iso8859) { - std::string txt("Le ticket co\xfbte 12\xa4\n"); - ASSERT_EQ(string::check_string_utf8(txt), "Le ticket coûte 12€\n"); -} - -/* - * Given a string encoded in ISO-8859-15 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, iso8859_cpx) { - std::string txt("\xa4\xa6\xa8\xb4\xb8\xbc\xbd\xbe"); - ASSERT_EQ(string::check_string_utf8(txt), "€ŠšŽžŒœŸ"); -} - -/* - * Given a string encoded in CP-1252 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, cp1252_cpx) { - std::string txt("\x80\x95\x82\x89\x8a"); - ASSERT_EQ(string::check_string_utf8(txt), "€•‚‰Š"); -} - -/* - * Given a string badly encoded in CP-1252 - * Then the check_string_utf8 function converts it to UTF-8 and replaces bad - * characters into '_'. 
- */ -TEST(string_check_utf8, whatever_as_cp1252) { - std::string txt; - for (uint8_t c = 32; c < 255; c++) - if (c != 127) - txt.push_back(c); - std::string result( - " !\"#$%&'()*+,-./" - "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" - "abcdefghijklmnopqrstuvwxyz{|}~€_‚ƒ„…†‡ˆ‰Š‹Œ_Ž__‘’“”•–—˜™š›œ_" - "žŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäå" - "æçèéêëìíîïðñòóôõö÷øùúûüýþ"); - ASSERT_EQ(string::check_string_utf8(txt), result); -} - -/* - * Given a string badly encoded in ISO-8859-15 - * Then the check_string_utf8 function converts it to UTF-8 and replaces bad - * characters into '_'. - */ -TEST(string_check_utf8, whatever_as_iso8859) { - /* Construction of a string that is not cp1252 so it should be considered as - * iso8859-15 */ - std::string txt; - for (uint8_t c = 32; c < 255; c++) { - if (c == 32) - txt.push_back(0x81); - if (c != 127) - txt.push_back(c); - } - std::string result( - "_ " - "!\"#$%&'()*+,-./" - "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" - "abcdefghijklmnopqrstuvwxyz{|}~_________________________________" - "¡¢£€¥Š§š©ª«¬­®¯°±²³Žµ¶·ž¹º»ŒœŸ¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçè" - "éêëìíîïðñòóôõö÷øùúûüýþ"); - ASSERT_EQ(string::check_string_utf8(txt), result); -} - -/* - * In case of a string containing multiple encoding, the resulting string should - * be an UTF-8 string. Here we have a string beginning with UTF-8 and finishing - * with cp1252. The resulting string is good and is UTF-8 only encoded. 
- */ -TEST(string_check_utf8, utf8_and_cp1252) { - std::string txt( - "\xc3\xa9\xc3\xa7\xc3\xa8\xc3\xa0\xc3\xb9\xc3\xaf\xc3\xab\x7e\x23\x0a\xe9" - "\xe7\xe8\xe0\xf9\xef\xeb\x7e\x23\x0a"); - std::string result("éçèàùïë~#\néçèàùïë~#\n"); - ASSERT_EQ(string::check_string_utf8(txt), result); -} - -/* A check coming from windows with characters from the cmd console */ -TEST(string_check_utf8, strange_string) { - std::string txt( - "WARNING - [Triggered by _ItemCount>0] - 1 event(s) of Severity Level: " - "\"Error\", were recorded in the last 24 hours from the Application " - "Event Log. (List is on next line. Fields shown are - " - "Logfile:TimeGenerated:EventId:EventCode:SeverityLevel:Type:SourceName:" - "Message)|'Event " - "Count'=1;0;50;\nApplication:20200806000001.000000-000:3221243278:17806:" - "Erreur:MSSQLSERVER:╔chec de la nÚgociation SSPI avec le code " - "d'erreurá0x8009030c lors de l'Útablissement d'une connexion avec une " - "sÚcuritÚ intÚgrÚeá; la connexion a ÚtÚ fermÚe. [CLIENTá: X.X.X.X]"); - ASSERT_EQ(string::check_string_utf8(txt), txt); -} - -/* A check coming from windows with characters from the cmd console */ -TEST(string_check_utf8, chinese) { - std::string txt("超级杀手死亡检查"); - ASSERT_EQ(string::check_string_utf8(txt), txt); -} - -/* A check coming from windows with characters from the cmd console */ -TEST(string_check_utf8, vietnam) { - std::string txt( - "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" - "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong " - "chinese 告警数量 output puté! | '告警数量'=42\navé dé long ouput oçi " - "还有中国人! 
Hái yǒu zhòng guó rén!"); - ASSERT_EQ(string::check_string_utf8(txt), txt); -} - -TEST(truncate, nominal1) { - std::string str("foobar"); - ASSERT_EQ(string::truncate(str, 3), "foo"); -} - -TEST(truncate, nominal2) { - std::string str("foobar"); - ASSERT_EQ(string::truncate(str, 0), ""); -} - -TEST(truncate, nominal3) { - std::string str("foobar 超级杀手死亡检查"); - ASSERT_EQ(string::truncate(str, 1000), "foobar 超级杀手死亡检查"); -} - -TEST(truncate, utf8_1) { - std::string str("告警数量"); - for (size_t i = 0; i <= str.size(); i++) { - fmt::string_view tmp(str); - fmt::string_view res(string::truncate(tmp, i)); - std::string tmp1( - string::check_string_utf8(std::string(res.data(), res.size()))); - ASSERT_EQ(res, tmp1); - } -} - -TEST(adjust_size_utf8, nominal1) { - std::string str("foobar"); - ASSERT_EQ(fmt::string_view(str.data(), string::adjust_size_utf8(str, 3)), - fmt::string_view("foo")); -} - -TEST(adjust_size_utf8, nominal2) { - std::string str("foobar"); - ASSERT_EQ(fmt::string_view(str.data(), string::adjust_size_utf8(str, 0)), ""); -} - -TEST(adjust_size_utf8, nominal3) { - std::string str("foobar 超级杀手死亡检查"); - ASSERT_EQ(fmt::string_view(str.data(), string::adjust_size_utf8(str, 1000)), - str); -} - -TEST(adjust_size_utf8, utf8_1) { - std::string str("告警数量"); - for (size_t i = 0; i <= str.size(); i++) { - fmt::string_view sv(str.data(), string::adjust_size_utf8(str, i)); - std::string tmp(string::check_string_utf8( - std::string(sv.data(), sv.data() + sv.size()))); - ASSERT_EQ(sv.size(), tmp.size()); - } -} - TEST(escape, simple) { ASSERT_EQ("Hello", string::escape("Hello", 10)); ASSERT_EQ("Hello", string::escape("Hello", 5)); @@ -261,7 +67,7 @@ TEST(escape, utf8) { std::string str("告'警'数\\量"); std::string res("告\\'警\\'数\\\\量"); std::string res1(res); - res1.resize(string::adjust_size_utf8(res, 10)); + res1.resize(com::centreon::common::adjust_size_utf8(res, 10)); ASSERT_EQ(res, string::escape(str, 20)); ASSERT_EQ(res1, string::escape(str, 10)); } diff --git 
a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh index 616f2afe377..d9f04f48fe2 100644 --- a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh +++ b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh @@ -27,6 +27,7 @@ #include "com/centreon/broker/neb/host_group.hh" #include "com/centreon/broker/neb/host_group_member.hh" #include "com/centreon/broker/neb/instance.hh" +#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service.hh" #include "com/centreon/broker/neb/service_group.hh" #include "com/centreon/broker/neb/service_group_member.hh" @@ -42,14 +43,26 @@ class macro_cache { std::shared_ptr _cache; absl::flat_hash_map> _instances; absl::flat_hash_map> _hosts; - absl::flat_hash_map> _host_groups; + /* The host groups cache stores also a set with the pollers telling they need + * the cache. So if no more poller needs a host group, we can remove it from + * the cache. */ + absl::flat_hash_map, + absl::flat_hash_set>> + _host_groups; absl::btree_map, std::shared_ptr> _host_group_members; absl::flat_hash_map, std::shared_ptr> _custom_vars; absl::flat_hash_map, std::shared_ptr> _services; - absl::flat_hash_map> _service_groups; + /* The service groups cache stores also a set with the pollers telling they + * need the cache. So if no more poller needs a service group, we can remove + * it from the cache. 
*/ + absl::flat_hash_map, + absl::flat_hash_set>> + _service_groups; absl::btree_map, std::shared_ptr> _service_group_members; diff --git a/broker/lua/src/broker_utils.cc b/broker/lua/src/broker_utils.cc index e5a7f9792af..1040267507b 100644 --- a/broker/lua/src/broker_utils.cc +++ b/broker/lua/src/broker_utils.cc @@ -24,7 +24,7 @@ #include "absl/strings/string_view.h" #include "com/centreon/broker/config/applier/state.hh" -#include +#include #include #include #include @@ -40,6 +40,7 @@ #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/common/hex_dump.hh" #include "com/centreon/common/perfdata.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -654,10 +655,10 @@ static int l_broker_parse_perfdata(lua_State* L) { com::centreon::common::perfdata::parse_perfdata(0, 0, perf_data, logger)}; lua_createtable(L, 0, pds.size()); for (auto& pd : pds) { - pd.resize_name(misc::string::adjust_size_utf8( + pd.resize_name(com::centreon::common::adjust_size_utf8( pd.name(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_metric_name))); - pd.resize_unit(misc::string::adjust_size_utf8( + pd.resize_unit(com::centreon::common::adjust_size_utf8( pd.unit(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_unit_name))); @@ -809,6 +810,31 @@ static int l_broker_stat(lua_State* L) { } } +static void md5_message(const unsigned char* message, + size_t message_len, + unsigned char** digest, + unsigned int* digest_len) { + EVP_MD_CTX* mdctx; + auto logger = log_v2::instance().get(log_v2::LUA); + if ((mdctx = EVP_MD_CTX_new()) == nullptr) { + logger->error("lua: fail to call MD5 (EVP_MD_CTX_new call)"); + } + if (1 != EVP_DigestInit_ex(mdctx, EVP_md5(), nullptr)) { + logger->error("lua: fail to call MD5 (EVP_DigestInit_ex call)"); + } + if (1 != EVP_DigestUpdate(mdctx, message, message_len)) { + logger->error("lua: fail to call MD5 (EVP_DigestUpdate call)"); + } 
+ if ((*digest = (unsigned char*)OPENSSL_malloc(EVP_MD_size(EVP_md5()))) == + nullptr) { + logger->error("lua: fail to call MD5 (OPENSSL_malloc call)"); + } + if (1 != EVP_DigestFinal_ex(mdctx, *digest, digest_len)) { + logger->error("lua: fail to call MD5 (EVP_DigestFinal_ex call)"); + } + EVP_MD_CTX_free(mdctx); +} + static int l_broker_md5(lua_State* L) { auto digit = [](unsigned char d) -> char { if (d < 10) @@ -819,11 +845,12 @@ static int l_broker_md5(lua_State* L) { size_t len; const unsigned char* str = reinterpret_cast(lua_tolstring(L, -1, &len)); - unsigned char md5[MD5_DIGEST_LENGTH]; - MD5(str, len, md5); - char result[2 * MD5_DIGEST_LENGTH + 1]; + unsigned char* md5; + uint32_t md5_len; + md5_message(str, len, &md5, &md5_len); + char result[2 * md5_len + 1]; char* tmp = result; - for (int i = 0; i < MD5_DIGEST_LENGTH; i++) { + for (uint32_t i = 0; i < md5_len; i++) { *tmp = digit(md5[i] >> 4); ++tmp; *tmp = digit(md5[i] & 0xf); @@ -831,6 +858,7 @@ static int l_broker_md5(lua_State* L) { } *tmp = 0; lua_pushstring(L, result); + OPENSSL_free(md5); return 1; } diff --git a/broker/lua/src/macro_cache.cc b/broker/lua/src/macro_cache.cc index 0c848b5d092..d043a51c218 100644 --- a/broker/lua/src/macro_cache.cc +++ b/broker/lua/src/macro_cache.cc @@ -17,11 +17,14 @@ */ #include "com/centreon/broker/lua/macro_cache.hh" +#include +#include #include "bbdo/bam/dimension_ba_bv_relation_event.hh" #include "bbdo/bam/dimension_ba_event.hh" #include "bbdo/bam/dimension_bv_event.hh" #include "bbdo/storage/index_mapping.hh" #include "bbdo/storage/metric_mapping.hh" +#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -367,17 +370,15 @@ macro_cache::get_host_group_members() const { * * @return The name of the host group. 
*/ -std::string const& macro_cache::get_host_group_name(uint64_t id) const { - auto const found = _host_groups.find(id); +const std::string& macro_cache::get_host_group_name(uint64_t id) const { + const auto found = _host_groups.find(id); - if (found == _host_groups.end()) + if (found == _host_groups.end()) { + _cache->logger()->error("lua: could not find information on host group {}", + id); throw msg_fmt("lua: could not find information on host group {}", id); - if (found->second->type() == neb::host_group::static_type()) - return std::static_pointer_cast(found->second)->name; - else - return std::static_pointer_cast(found->second) - ->obj() - .name(); + } + return found->second.first->obj().name(); } /** @@ -428,14 +429,12 @@ macro_cache::get_service_group_members() const { std::string const& macro_cache::get_service_group_name(uint64_t id) const { auto found = _service_groups.find(id); - if (found == _service_groups.end()) + if (found == _service_groups.end()) { + _cache->logger()->error( + "lua: could not find information on service group {}", id); throw msg_fmt("lua: could not find information on service group {}", id); - if (found->second->type() == neb::service_group::static_type()) - return std::static_pointer_cast(found->second)->name; - else - return std::static_pointer_cast(found->second) - ->obj() - .name(); + } + return found->second.first->obj().name(); } /** @@ -830,14 +829,42 @@ void macro_cache::_process_pb_adaptive_host( * @param data The event. 
*/ void macro_cache::_process_host_group(std::shared_ptr const& data) { - std::shared_ptr const& hg = + const std::shared_ptr& hg = std::static_pointer_cast(data); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing host group '{}' of id {} enabled: {}", hg->name, hg->id, hg->enabled); - if (hg->enabled) - _host_groups[hg->id] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (hg->enabled) { + auto found = _host_groups.find(hg->id); + if (found != _host_groups.end()) { + /* here, we complete the set of pollers */ + found->second.second.insert(hg->poller_id); + found->second.first->mut_obj().set_name(hg->name); + } else { + /* Here, we add the hostgroup and the first poller that needs it */ + absl::flat_hash_set pollers{hg->poller_id}; + auto pb_hg = std::make_shared(); + auto& obj = pb_hg->mut_obj(); + obj.set_enabled(hg->enabled); + obj.set_hostgroup_id(hg->id); + obj.set_name(hg->name); + obj.set_poller_id(hg->poller_id); + _host_groups[hg->id] = std::make_pair(std::move(pb_hg), pollers); + } + } else { + /* We check that no more pollers need this host group. So if the set is + * empty, we can also remove the host group. */ + auto found = _host_groups.find(hg->id); + if (found != _host_groups.end()) { + auto f = found->second.second.find(hg->poller_id); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _host_groups.erase(found); + } + } + } + } } /** @@ -846,15 +873,39 @@ void macro_cache::_process_host_group(std::shared_ptr const& data) { * @param data The event. 
*/ void macro_cache::_process_pb_host_group( - std::shared_ptr const& data) { - const HostGroup& hg = - std::static_pointer_cast(data)->obj(); + const std::shared_ptr& data) { + auto pb_hg = std::static_pointer_cast(data); + const HostGroup& hg = pb_hg->obj(); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing pb host group '{}' of id {}, enabled {}", hg.name(), hg.hostgroup_id(), hg.enabled()); - if (hg.enabled()) - _host_groups[hg.hostgroup_id()] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (hg.enabled()) { + auto found = _host_groups.find(hg.hostgroup_id()); + if (found != _host_groups.end()) { + found->second.second.insert(hg.poller_id()); + HostGroup& current_hg = + std::static_pointer_cast(found->second.first) + ->mut_obj(); + current_hg.set_name(hg.name()); + } else { + absl::flat_hash_set pollers{hg.poller_id()}; + _host_groups[hg.hostgroup_id()] = + std::make_pair(std::move(pb_hg), pollers); + } + } else { + /* We check that no more pollers need this host group. So if the set is + * empty, we can also remove the host group. 
*/ + auto found = _host_groups.find(hg.hostgroup_id()); + if (found != _host_groups.end()) { + auto f = found->second.second.find(hg.poller_id()); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _host_groups.erase(found); + } + } + } + } } /** @@ -1113,9 +1164,37 @@ void macro_cache::_process_service_group( SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing service group '{}' of id {}", sg->name, sg->id); - if (sg->enabled) - _service_groups[sg->id] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (sg->enabled) { + auto found = _service_groups.find(sg->id); + if (found != _service_groups.end()) { + /* here, we complete the set of pollers */ + found->second.second.insert(sg->poller_id); + found->second.first->mut_obj().set_name(sg->name); + } else { + /* Here, we add the servicegroup and the first poller that needs it */ + absl::flat_hash_set pollers{sg->poller_id}; + auto pb_sg = std::make_shared(); + auto& obj = pb_sg->mut_obj(); + obj.set_servicegroup_id(sg->id); + obj.set_enabled(sg->enabled); + obj.set_name(sg->name); + obj.set_poller_id(sg->poller_id); + _service_groups[sg->id] = std::make_pair(std::move(pb_sg), pollers); + } + } else { + /* We check that no more pollers need this service group. So if the set is + * empty, we can also remove the service group. */ + auto found = _service_groups.find(sg->id); + if (found != _service_groups.end()) { + auto f = found->second.second.find(sg->poller_id); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _service_groups.erase(found); + } + } + } + } } /** @@ -1124,15 +1203,38 @@ void macro_cache::_process_service_group( * @param sg The event. 
*/ void macro_cache::_process_pb_service_group( - std::shared_ptr const& data) { - const ServiceGroup& sg = - std::static_pointer_cast(data)->obj(); + const std::shared_ptr& data) { + auto pb_sg = std::static_pointer_cast(data); + const ServiceGroup& sg = pb_sg->obj(); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing pb service group '{}' of id {}", sg.name(), sg.servicegroup_id()); - if (sg.enabled()) - _service_groups[sg.servicegroup_id()] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (sg.enabled()) { + auto found = _service_groups.find(sg.servicegroup_id()); + if (found != _service_groups.end()) { + found->second.second.insert(sg.poller_id()); + ServiceGroup& current_sg = found->second.first->mut_obj(); + current_sg.set_name(sg.name()); + } else { + /* Here, we add the servicegroup and the first poller that needs it */ + absl::flat_hash_set pollers{sg.poller_id()}; + _service_groups[sg.servicegroup_id()] = + std::make_pair(std::move(pb_sg), pollers); + } + } else { + /* We check that no more pollers need this service group. So if the set is + * empty, we can also remove the service group. 
*/ + auto found = _service_groups.find(sg.servicegroup_id()); + if (found != _service_groups.end()) { + auto f = found->second.second.find(sg.poller_id()); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _service_groups.erase(found); + } + } + } + } } /** @@ -1166,12 +1268,12 @@ void macro_cache::_process_pb_service_group_member( std::shared_ptr const& data) { const ServiceGroupMember& sgm = std::static_pointer_cast(data)->obj(); - SPDLOG_LOGGER_DEBUG( - _cache->logger(), - "lua: processing pb service group member (group_name: {}, group_id: {}, " - "host_id: {}, service_id: {} enabled: {}", - sgm.name(), sgm.servicegroup_id(), sgm.host_id(), sgm.service_id(), - sgm.enabled()); + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing pb service group member (group_name: " + "{}, group_id: {}, " + "host_id: {}, service_id: {} enabled: {}", + sgm.name(), sgm.servicegroup_id(), sgm.host_id(), + sgm.service_id(), sgm.enabled()); if (sgm.enabled()) _service_group_members[std::make_tuple(sgm.host_id(), sgm.service_id(), sgm.servicegroup_id())] = data; @@ -1290,10 +1392,10 @@ void macro_cache::_process_dimension_ba_bv_relation_event( } else { auto const& rel = std::static_pointer_cast(data); - SPDLOG_LOGGER_DEBUG( - _cache->logger(), - "lua: processing dimension ba bv relation event (ba_id: {}, bv_id: {})", - rel->ba_id, rel->bv_id); + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing dimension ba bv relation event " + "(ba_id: {}, bv_id: {})", + rel->ba_id, rel->bv_id); auto pb_data(std::make_shared()); pb_data->mut_obj().set_ba_id(rel->ba_id); pb_data->mut_obj().set_bv_id(rel->bv_id); @@ -1372,11 +1474,11 @@ void macro_cache::_process_custom_variable( std::shared_ptr const& data) { auto const& cv = std::static_pointer_cast(data); if (cv->name == "CRITICALITY_LEVEL") { - SPDLOG_LOGGER_DEBUG( - _cache->logger(), - "lua: processing custom variable representing a criticality level for " - "host_id {} 
and service_id {} and level {}", - cv->host_id, cv->service_id, cv->value); + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing custom variable representing a " + "criticality level for " + "host_id {} and service_id {} and level {}", + cv->host_id, cv->service_id, cv->value); int32_t value = std::atoi(cv->value.c_str()); if (value) _custom_vars[{cv->host_id, cv->service_id}] = cv; @@ -1427,8 +1529,13 @@ void macro_cache::_save_to_disk() { for (auto it(_hosts.begin()), end(_hosts.end()); it != end; ++it) _cache->add(it->second); - for (auto it(_host_groups.begin()), end(_host_groups.end()); it != end; ++it) - _cache->add(it->second); + for (auto it = _host_groups.begin(), end = _host_groups.end(); it != end; + ++it) { + for (auto poller_id : it->second.second) { + it->second.first->mut_obj().set_poller_id(poller_id); + _cache->add(it->second.first); + } + } for (auto it(_host_group_members.begin()), end(_host_group_members.end()); it != end; ++it) @@ -1437,9 +1544,13 @@ void macro_cache::_save_to_disk() { for (auto it(_services.begin()), end(_services.end()); it != end; ++it) _cache->add(it->second); - for (auto it(_service_groups.begin()), end(_service_groups.end()); it != end; - ++it) - _cache->add(it->second); + for (auto it = _service_groups.begin(), end = _service_groups.end(); + it != end; ++it) { + for (auto poller_id : it->second.second) { + it->second.first->mut_obj().set_poller_id(poller_id); + _cache->add(it->second.first); + } + } for (auto it = _service_group_members.begin(), end = _service_group_members.end(); diff --git a/broker/lua/test/lua.cc b/broker/lua/test/lua.cc index d002cf5956d..8268c693b3e 100644 --- a/broker/lua/test/lua.cc +++ b/broker/lua/test/lua.cc @@ -1442,6 +1442,7 @@ TEST_F(LuaTest, ServiceGroupCacheTestName) { auto sg{std::make_shared()}; sg->id = 28; sg->name = "centreon"; + sg->enabled = true; _cache->write(sg); CreateScript(filename, diff --git a/broker/neb/CMakeLists.txt b/broker/neb/CMakeLists.txt index 
50fa3d67cca..8fd3e496809 100644 --- a/broker/neb/CMakeLists.txt +++ b/broker/neb/CMakeLists.txt @@ -31,13 +31,11 @@ set(NEB_SOURCES ${SRC_DIR}/comment.cc ${SRC_DIR}/custom_variable.cc ${SRC_DIR}/custom_variable_status.cc - ${SRC_DIR}/dependency.cc ${SRC_DIR}/downtime.cc ${SRC_DIR}/group.cc ${SRC_DIR}/group_member.cc ${SRC_DIR}/host.cc ${SRC_DIR}/host_check.cc - ${SRC_DIR}/host_dependency.cc ${SRC_DIR}/host_group.cc ${SRC_DIR}/host_group_member.cc ${SRC_DIR}/host_parent.cc @@ -51,7 +49,6 @@ set(NEB_SOURCES ${SRC_DIR}/responsive_instance.cc ${SRC_DIR}/service.cc ${SRC_DIR}/service_check.cc - ${SRC_DIR}/service_dependency.cc ${SRC_DIR}/service_group.cc ${SRC_DIR}/service_group_member.cc ${SRC_DIR}/service_status.cc @@ -61,14 +58,12 @@ set(NEB_SOURCES ${INC_DIR}/com/centreon/broker/neb/comment.hh ${INC_DIR}/com/centreon/broker/neb/custom_variable.hh ${INC_DIR}/com/centreon/broker/neb/custom_variable_status.hh - ${INC_DIR}/com/centreon/broker/neb/dependency.hh ${INC_DIR}/com/centreon/broker/neb/downtime.hh ${INC_DIR}/com/centreon/broker/neb/events.hh ${INC_DIR}/com/centreon/broker/neb/group.hh ${INC_DIR}/com/centreon/broker/neb/group_member.hh ${INC_DIR}/com/centreon/broker/neb/host.hh ${INC_DIR}/com/centreon/broker/neb/host_check.hh - ${INC_DIR}/com/centreon/broker/neb/host_dependency.hh ${INC_DIR}/com/centreon/broker/neb/host_group.hh ${INC_DIR}/com/centreon/broker/neb/host_group_member.hh ${INC_DIR}/com/centreon/broker/neb/host_parent.hh @@ -83,7 +78,6 @@ set(NEB_SOURCES ${INC_DIR}/com/centreon/broker/neb/responsive_instance.hh ${INC_DIR}/com/centreon/broker/neb/service.hh ${INC_DIR}/com/centreon/broker/neb/service_check.hh - ${INC_DIR}/com/centreon/broker/neb/service_dependency.hh ${INC_DIR}/com/centreon/broker/neb/service_group.hh ${INC_DIR}/com/centreon/broker/neb/service_group_member.hh ${INC_DIR}/com/centreon/broker/neb/service_status.hh @@ -160,12 +154,9 @@ if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") "-Wl,--whole-archive" rokerbase 
multiplexing - "-Wl,--no-whole-archive" - nlohmann_json::nlohmann_json - spdlog::spdlog) + "-Wl,--no-whole-archive") else() - target_link_libraries("${CBMOD}" -L${PROTOBUF_LIB_DIR} "rokerbase" - nlohmann_json::nlohmann_json spdlog::spdlog) + target_link_libraries("${CBMOD}" -L${PROTOBUF_LIB_DIR} "rokerbase") endif() set_target_properties("${CBMOD}" PROPERTIES PREFIX "") @@ -183,7 +174,6 @@ if(WITH_TESTING) ${TEST_DIR}/custom_variable_status.cc ${TEST_DIR}/host.cc ${TEST_DIR}/host_check.cc - ${TEST_DIR}/host_dependency.cc ${TEST_DIR}/host_parent.cc ${TEST_DIR}/host_status.cc ${TEST_DIR}/instance.cc @@ -193,7 +183,6 @@ if(WITH_TESTING) ${TEST_DIR}/randomize.hh ${TEST_DIR}/service.cc ${TEST_DIR}/service_check.cc - ${TEST_DIR}/service_dependency.cc ${TEST_DIR}/service_status.cc ${TEST_DIR}/set_log_data.cc PARENT_SCOPE) diff --git a/broker/neb/inc/com/centreon/broker/neb/callbacks.hh b/broker/neb/inc/com/centreon/broker/neb/callbacks.hh index 4b226171c5b..f9ee10a1cb1 100644 --- a/broker/neb/inc/com/centreon/broker/neb/callbacks.hh +++ b/broker/neb/inc/com/centreon/broker/neb/callbacks.hh @@ -34,8 +34,6 @@ int callback_comment(int callback_type, void* data); int callback_pb_comment(int callback_type, void* data); int callback_custom_variable(int callback_type, void* data); int callback_pb_custom_variable(int callback_type, void* data); -int callback_dependency(int callback_type, void* data); -int callback_pb_dependency(int callback_type, void* data); int callback_downtime(int callback_type, void* data); int callback_pb_downtime(int callback_type, void* data); int callback_external_command(int callback_type, void* data); diff --git a/broker/neb/inc/com/centreon/broker/neb/dependency.hh b/broker/neb/inc/com/centreon/broker/neb/dependency.hh deleted file mode 100644 index 8b165e7a3ee..00000000000 --- a/broker/neb/inc/com/centreon/broker/neb/dependency.hh +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2009-2013,2015 Centreon - * - * Licensed under the Apache License, 
Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#ifndef CCB_NEB_DEPENDENCY_HH -#define CCB_NEB_DEPENDENCY_HH - -#include "com/centreon/broker/io/data.hh" - -namespace com::centreon::broker { - -namespace neb { -/** - * @class dependency dependency.hh "com/centreon/broker/neb/dependency.hh" - * @brief Dependency relationship. - * - * Defines a dependency between two objects. - * - * @see host_dependency - * @see service_dependency - */ -class dependency : public io::data { - public: - dependency() = delete; - dependency(uint32_t type); - dependency(dependency const& dep); - virtual ~dependency(); - dependency& operator=(dependency const& dep); - - std::string dependency_period; - uint32_t dependent_host_id; - bool enabled; - std::string execution_failure_options; - uint32_t host_id; - bool inherits_parent; - std::string notification_failure_options; - - private: - void _internal_copy(dependency const& dep); -}; -} // namespace neb - -} - -#endif // !CCB_NEB_DEPENDENCY_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/events.hh b/broker/neb/inc/com/centreon/broker/neb/events.hh index 25b292c5e75..d364d16fc23 100644 --- a/broker/neb/inc/com/centreon/broker/neb/events.hh +++ b/broker/neb/inc/com/centreon/broker/neb/events.hh @@ -26,7 +26,6 @@ #include "com/centreon/broker/neb/downtime.hh" #include "com/centreon/broker/neb/host.hh" #include "com/centreon/broker/neb/host_check.hh" -#include "com/centreon/broker/neb/host_dependency.hh" 
#include "com/centreon/broker/neb/host_group.hh" #include "com/centreon/broker/neb/host_group_member.hh" #include "com/centreon/broker/neb/host_parent.hh" @@ -38,7 +37,6 @@ #include "com/centreon/broker/neb/responsive_instance.hh" #include "com/centreon/broker/neb/service.hh" #include "com/centreon/broker/neb/service_check.hh" -#include "com/centreon/broker/neb/service_dependency.hh" #include "com/centreon/broker/neb/service_group.hh" #include "com/centreon/broker/neb/service_group_member.hh" #include "com/centreon/broker/neb/service_status.hh" diff --git a/broker/neb/inc/com/centreon/broker/neb/host_dependency.hh b/broker/neb/inc/com/centreon/broker/neb/host_dependency.hh deleted file mode 100644 index 5ab87f8131c..00000000000 --- a/broker/neb/inc/com/centreon/broker/neb/host_dependency.hh +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2009-2012 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * For more information : contact@centreon.com - */ - -#ifndef CCB_NEB_HOST_DEPENDENCY_HH -#define CCB_NEB_HOST_DEPENDENCY_HH - -#include "com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/mapping/entry.hh" -#include "com/centreon/broker/neb/dependency.hh" -#include "com/centreon/broker/neb/internal.hh" - -namespace com::centreon::broker { - -namespace neb { -/** - * @class host_dependency host_dependency.hh - * "com/centreon/broker/neb/host_dependency.hh" - * @brief Host dependency relationship. - * - * Defines a dependency between two hosts. - */ -class host_dependency : public dependency { - public: - host_dependency(); - host_dependency(host_dependency const& other); - ~host_dependency(); - host_dependency& operator=(host_dependency const& other); - constexpr static uint32_t static_type() { - return io::events::data_type::value; - } - - static mapping::entry const entries[]; - static io::event_info::event_operations const operations; -}; -} // namespace neb - -} - -#endif // !CCB_NEB_HOST_DEPENDENCY_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/initial.hh b/broker/neb/inc/com/centreon/broker/neb/initial.hh index 4556e4b699a..730beaff290 100644 --- a/broker/neb/inc/com/centreon/broker/neb/initial.hh +++ b/broker/neb/inc/com/centreon/broker/neb/initial.hh @@ -19,15 +19,9 @@ #ifndef CCB_NEB_INITIAL_HH_ #define CCB_NEB_INITIAL_HH_ -namespace com { -namespace centreon { -namespace broker { -namespace neb { +namespace com::centreon::broker::neb { void send_initial_configuration(); void send_initial_pb_configuration(); -} // namespace neb -} // namespace broker -} // namespace centreon -} // namespace com +} // namespace com::centreon::broker::neb #endif /* !CCB_NEB_INITIAL_HH_ */ diff --git a/broker/neb/inc/com/centreon/broker/neb/internal.hh b/broker/neb/inc/com/centreon/broker/neb/internal.hh index b3fba5da256..c12f0b660d3 100644 --- a/broker/neb/inc/com/centreon/broker/neb/internal.hh 
+++ b/broker/neb/inc/com/centreon/broker/neb/internal.hh @@ -99,14 +99,6 @@ using pb_acknowledgement = io::protobuf; -using pb_host_dependency = - io::protobuf; - -using pb_service_dependency = - io::protobuf; - using pb_host_group = io::protobuf; diff --git a/broker/neb/inc/com/centreon/broker/neb/service_dependency.hh b/broker/neb/inc/com/centreon/broker/neb/service_dependency.hh deleted file mode 100644 index adf3e7665ae..00000000000 --- a/broker/neb/inc/com/centreon/broker/neb/service_dependency.hh +++ /dev/null @@ -1,61 +0,0 @@ -/* -** Copyright 2009-2012 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ - -#ifndef CCB_NEB_SERVICE_DEPENDENCY_HH -#define CCB_NEB_SERVICE_DEPENDENCY_HH - -#include "com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/mapping/entry.hh" -#include "com/centreon/broker/neb/dependency.hh" -#include "com/centreon/broker/neb/internal.hh" - -namespace com::centreon::broker { - -namespace neb { -/** - * @class service_dependency service_dependency.hh - * "com/centreon/broker/neb/service_dependency.hh" - * @brief Service dependency relationship. - * - * Defines a dependency between two services. 
- */ -class service_dependency : public dependency { - public: - service_dependency(); - service_dependency(service_dependency const& other); - ~service_dependency(); - service_dependency& operator=(service_dependency const& other); - constexpr static uint32_t static_type() { - return io::events::data_type::value; - } - - uint32_t dependent_service_id; - uint32_t service_id; - - static mapping::entry const entries[]; - static io::event_info::event_operations const operations; - - private: - void _internal_copy(service_dependency const& other); -}; -} // namespace neb - -} - -#endif // !CCB_NEB_SERVICE_DEPENDENCY_HH diff --git a/broker/neb/src/broker.cc b/broker/neb/src/broker.cc index d8645e48670..3a7c8e2f291 100644 --- a/broker/neb/src/broker.cc +++ b/broker/neb/src/broker.cc @@ -94,10 +94,6 @@ void broker_module_init(void const* arg) { e.register_event(make_type(io::neb, neb::de_host_check), "host_check", &neb::host_check::operations, neb::host_check::entries, "hosts"); - e.register_event(make_type(io::neb, neb::de_host_dependency), - "host_dependency", &neb::host_dependency::operations, - neb::host_dependency::entries, - "hosts_hosts_dependencies"); e.register_event(make_type(io::neb, neb::de_host), "host", &neb::host::operations, neb::host::entries, "hosts"); e.register_event(make_type(io::neb, neb::de_host_group), "host_group", @@ -124,10 +120,6 @@ void broker_module_init(void const* arg) { e.register_event(make_type(io::neb, neb::de_service_check), "service_check", &neb::service_check::operations, neb::service_check::entries, "services"); - e.register_event( - make_type(io::neb, neb::de_service_dependency), "service_dependency", - &neb::service_dependency::operations, - neb::service_dependency::entries, "services_services_dependencies"); e.register_event(make_type(io::neb, neb::de_service), "service", &neb::service::operations, neb::service::entries, "services"); @@ -211,13 +203,6 @@ void broker_module_init(void const* arg) { 
e.register_event(make_type(io::neb, neb::de_pb_acknowledgement), "Acknowledgement", &neb::pb_acknowledgement::operations, "acknowledgements"); - e.register_event(neb::pb_host_dependency::static_type(), "HostDependency", - &neb::pb_host_dependency::operations, - "hosts_hosts_dependencies"); - e.register_event(neb::pb_service_dependency::static_type(), - "ServiceDependency", - &neb::pb_service_dependency::operations, - "services_services_dependencies"); e.register_event(neb::pb_host_group::static_type(), "HostGroup", &neb::pb_host_group::operations, "hostgroups"); e.register_event( diff --git a/broker/neb/src/callbacks.cc b/broker/neb/src/callbacks.cc index eca357ed58e..243e31b106f 100644 --- a/broker/neb/src/callbacks.cc +++ b/broker/neb/src/callbacks.cc @@ -28,23 +28,21 @@ #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/config/state.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/callback.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/neb/initial.hh" #include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/set_log_data.hh" #include "com/centreon/common/time.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/comment.hh" #include "com/centreon/engine/events/loop.hh" #include "com/centreon/engine/globals.hh" -#include "com/centreon/engine/hostdependency.hh" #include "com/centreon/engine/hostgroup.hh" #include "com/centreon/engine/nebcallbacks.hh" #include "com/centreon/engine/nebstructs.hh" -#include "com/centreon/engine/servicedependency.hh" #include "com/centreon/engine/servicegroup.hh" #include "com/centreon/engine/severity.hh" #include "com/centreon/engine/tag.hh" @@ -53,7 +51,6 @@ using namespace com::centreon::broker; using namespace com::centreon::exceptions; -using 
com::centreon::common::log_v2::log_v2; // List of Nagios modules. extern nebmodule* neb_module_list; @@ -120,7 +117,6 @@ static struct { uint32_t macro; int (*callback)(int, void*); } const gl_engine_callbacks[] = { - {NEBCALLBACK_ADAPTIVE_DEPENDENCY_DATA, &neb::callback_dependency}, {NEBCALLBACK_ADAPTIVE_HOST_DATA, &neb::callback_host}, {NEBCALLBACK_ADAPTIVE_SERVICE_DATA, &neb::callback_service}, {NEBCALLBACK_CUSTOM_VARIABLE_DATA, &neb::callback_custom_variable}, @@ -133,7 +129,6 @@ static struct { uint32_t macro; int (*callback)(int, void*); } const gl_pb_engine_callbacks[] = { - {NEBCALLBACK_ADAPTIVE_DEPENDENCY_DATA, &neb::callback_pb_dependency}, {NEBCALLBACK_ADAPTIVE_HOST_DATA, &neb::callback_pb_host}, {NEBCALLBACK_ADAPTIVE_SERVICE_DATA, &neb::callback_pb_service}, {NEBCALLBACK_CUSTOM_VARIABLE_DATA, &neb::callback_pb_custom_variable}, @@ -165,7 +160,8 @@ char const* get_program_version(); */ int neb::callback_acknowledgement(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating acknowledgement event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating acknowledgement event"); (void)callback_type; try { @@ -177,9 +173,9 @@ int neb::callback_acknowledgement(int callback_type, void* data) { ack_data = static_cast(data); ack->acknowledgement_type = short(ack_data->acknowledgement_type); if (ack_data->author_name) - ack->author = misc::string::check_string_utf8(ack_data->author_name); + ack->author = common::check_string_utf8(ack_data->author_name); if (ack_data->comment_data) - ack->comment = misc::string::check_string_utf8(ack_data->comment_data); + ack->comment = common::check_string_utf8(ack_data->comment_data); ack->entry_time = time(nullptr); if (!ack_data->host_id) throw msg_fmt("unnamed host"); @@ -232,8 +228,8 @@ int neb::callback_acknowledgement(int callback_type, void* data) { int neb::callback_pb_acknowledgement(int callback_type [[maybe_unused]], void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating pb acknowledgement event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating pb acknowledgement event"); // In/Out variables. auto ack{std::make_shared()}; @@ -245,10 +241,9 @@ int neb::callback_pb_acknowledgement(int callback_type [[maybe_unused]], ack_obj.set_type(static_cast( ack_data->acknowledgement_type)); if (ack_data->author_name) - ack_obj.set_author(misc::string::check_string_utf8(ack_data->author_name)); + ack_obj.set_author(common::check_string_utf8(ack_data->author_name)); if (ack_data->comment_data) - ack_obj.set_comment_data( - misc::string::check_string_utf8(ack_data->comment_data)); + ack_obj.set_comment_data(common::check_string_utf8(ack_data->comment_data)); ack_obj.set_entry_time(time(nullptr)); if (!ack_data->host_id) { SPDLOG_LOGGER_ERROR(neb_logger, @@ -289,7 +284,7 @@ int neb::callback_pb_acknowledgement(int callback_type [[maybe_unused]], */ int neb::callback_comment(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating comment event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating comment event"); (void)callback_type; try { @@ -300,11 +295,9 @@ int neb::callback_comment(int callback_type, void* data) { // Fill output var. comment_data = static_cast(data); if (comment_data->author_name) - comment->author = - misc::string::check_string_utf8(comment_data->author_name); + comment->author = common::check_string_utf8(comment_data->author_name); if (comment_data->comment_data) - comment->data = - misc::string::check_string_utf8(comment_data->comment_data); + comment->data = common::check_string_utf8(comment_data->comment_data); comment->comment_type = comment_data->comment_type; if (NEBTYPE_COMMENT_DELETE == comment_data->type) comment->deletion_time = time(nullptr); @@ -361,7 +354,7 @@ int neb::callback_comment(int callback_type, void* data) { */ int neb::callback_pb_comment(int, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating pb comment event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating pb comment event"); const nebstruct_comment_data* comment_data = static_cast(data); @@ -372,11 +365,9 @@ int neb::callback_pb_comment(int, void* data) { // Fill output var. if (comment_data->author_name) - comment.set_author( - misc::string::check_string_utf8(comment_data->author_name)); + comment.set_author(common::check_string_utf8(comment_data->author_name)); if (comment_data->comment_data) - comment.set_data( - misc::string::check_string_utf8(comment_data->comment_data)); + comment.set_data(common::check_string_utf8(comment_data->comment_data)); comment.set_type( (comment_data->comment_type == com::centreon::engine::comment::type::host) ? com::centreon::broker::Comment_Type_HOST @@ -458,20 +449,15 @@ int neb::callback_pb_custom_variable(int, void* data) { const nebstruct_custom_variable_data* cvar( static_cast(data)); - if (neb_logger->level() <= spdlog::level::debug) { - SPDLOG_LOGGER_DEBUG( - neb_logger, "callbacks: generating custom variable event {} value:{}", - cvar->var_name, cvar->var_value); - } else { - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating custom variable event"); - } + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating custom variable event {} value:{}", + cvar->var_name, cvar->var_value); neb::pb_custom_variable::shared_ptr cv = std::make_shared(); neb::pb_custom_variable::pb_type& obj = cv->mut_obj(); bool ok_to_send = false; - if (cvar && cvar->var_name && cvar->var_value) { + if (cvar && !cvar->var_name.empty() && !cvar->var_value.empty()) { // Host custom variable. 
if (NEBTYPE_HOSTCUSTOMVARIABLE_ADD == cvar->type || NEBTYPE_HOSTCUSTOMVARIABLE_DELETE == cvar->type) { @@ -479,7 +465,7 @@ int neb::callback_pb_custom_variable(int, void* data) { if (hst && !hst->name().empty()) { uint64_t host_id = engine::get_host_id(hst->name()); if (host_id != 0) { - std::string name(misc::string::check_string_utf8(cvar->var_name)); + std::string name(common::check_string_utf8(cvar->var_name)); bool add = NEBTYPE_HOSTCUSTOMVARIABLE_ADD == cvar->type; obj.set_enabled(add); obj.set_host_id(host_id); @@ -488,14 +474,15 @@ int neb::callback_pb_custom_variable(int, void* data) { obj.set_type(com::centreon::broker::CustomVariable_VarType_HOST); obj.set_update_time(cvar->timestamp.tv_sec); if (add) { - std::string value(misc::string::check_string_utf8(cvar->var_value)); + std::string value(common::check_string_utf8(cvar->var_value)); obj.set_value(value); obj.set_default_value(value); - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new custom variable '{}' on host {}", - name, host_id); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new custom variable '{}' with " + "value '{}' on host {}", + name, value, host_id); } else { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: deleted custom variable '{}' on host {}", name, host_id); @@ -514,7 +501,7 @@ int neb::callback_pb_custom_variable(int, void* data) { p = engine::get_host_and_service_id(svc->get_hostname(), svc->description()); if (p.first && p.second) { - std::string name(misc::string::check_string_utf8(cvar->var_name)); + std::string name(common::check_string_utf8(cvar->var_name)); bool add = NEBTYPE_SERVICECUSTOMVARIABLE_ADD == cvar->type; obj.set_enabled(add); obj.set_host_id(p.first); @@ -524,16 +511,16 @@ int neb::callback_pb_custom_variable(int, void* data) { obj.set_type(com::centreon::broker::CustomVariable_VarType_SERVICE); obj.set_update_time(cvar->timestamp.tv_sec); if (add) { - std::string value(misc::string::check_string_utf8(cvar->var_value)); + std::string 
value(common::check_string_utf8(cvar->var_value)); obj.set_value(value); obj.set_default_value(value); - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: new custom variable '{}' on service ({}, {})", name, p.first, p.second); } else { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: deleted custom variable '{}' on service ({},{})", name, p.first, p.second); @@ -567,14 +554,15 @@ int neb::callback_pb_custom_variable(int, void* data) { int neb::callback_custom_variable(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating custom variable event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating custom variable event"); (void)callback_type; try { // Input variable. nebstruct_custom_variable_data const* cvar( static_cast(data)); - if (cvar && cvar->var_name && cvar->var_value) { + if (cvar && !cvar->var_name.empty() && !cvar->var_value.empty()) { // Host custom variable. if (NEBTYPE_HOSTCUSTOMVARIABLE_ADD == cvar->type) { engine::host* hst(static_cast(cvar->object_ptr)); @@ -586,17 +574,17 @@ int neb::callback_custom_variable(int callback_type, void* data) { new_cvar->enabled = true; new_cvar->host_id = host_id; new_cvar->modified = false; - new_cvar->name = misc::string::check_string_utf8(cvar->var_name); + new_cvar->name = common::check_string_utf8(cvar->var_name); new_cvar->var_type = 0; new_cvar->update_time = cvar->timestamp.tv_sec; - new_cvar->value = misc::string::check_string_utf8(cvar->var_value); + new_cvar->value = common::check_string_utf8(cvar->var_value); new_cvar->default_value = - misc::string::check_string_utf8(cvar->var_value); + common::check_string_utf8(cvar->var_value); // Send custom variable event. 
- SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new custom variable '{}' on host {}", - new_cvar->name, new_cvar->host_id); + SPDLOG_LOGGER_DEBUG( + neb_logger, "callbacks: new custom variable '{}' on host {}", + new_cvar->name, new_cvar->host_id); neb::gl_publisher.write(new_cvar); } } @@ -608,12 +596,12 @@ int neb::callback_custom_variable(int callback_type, void* data) { auto old_cvar{std::make_shared()}; old_cvar->enabled = false; old_cvar->host_id = host_id; - old_cvar->name = misc::string::check_string_utf8(cvar->var_name); + old_cvar->name = common::check_string_utf8(cvar->var_name); old_cvar->var_type = 0; old_cvar->update_time = cvar->timestamp.tv_sec; // Send custom variable event. - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: deleted custom variable '{}' on host {}", old_cvar->name, old_cvar->host_id); @@ -635,16 +623,16 @@ int neb::callback_custom_variable(int callback_type, void* data) { new_cvar->enabled = true; new_cvar->host_id = p.first; new_cvar->modified = false; - new_cvar->name = misc::string::check_string_utf8(cvar->var_name); + new_cvar->name = common::check_string_utf8(cvar->var_name); new_cvar->service_id = p.second; new_cvar->var_type = 1; new_cvar->update_time = cvar->timestamp.tv_sec; - new_cvar->value = misc::string::check_string_utf8(cvar->var_value); + new_cvar->value = common::check_string_utf8(cvar->var_value); new_cvar->default_value = - misc::string::check_string_utf8(cvar->var_value); + common::check_string_utf8(cvar->var_value); // Send custom variable event. 
- SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: new custom variable '{}' on service ({}, {})", new_cvar->name, new_cvar->host_id, new_cvar->service_id); @@ -662,13 +650,13 @@ int neb::callback_custom_variable(int callback_type, void* data) { old_cvar->enabled = false; old_cvar->host_id = p.first; old_cvar->modified = true; - old_cvar->name = misc::string::check_string_utf8(cvar->var_name); + old_cvar->name = common::check_string_utf8(cvar->var_name); old_cvar->service_id = p.second; old_cvar->var_type = 1; old_cvar->update_time = cvar->timestamp.tv_sec; // Send custom variable event. - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: deleted custom variable '{}' on service ({},{})", old_cvar->name, old_cvar->host_id, old_cvar->service_id); @@ -684,315 +672,6 @@ int neb::callback_custom_variable(int callback_type, void* data) { return 0; } -/** - * @brief Function that process dependency data. - * - * This function is called by Centreon Engine when some dependency data - * is available. - * - * @param[in] callback_type Type of the callback - * (NEBCALLBACK_ADAPTIVE_DEPENDENCY_DATA). - * @param[in] data A pointer to a - * nebstruct_adaptive_dependency_data - * containing the dependency data. - * - * @return 0 on success. - */ -int neb::callback_dependency(int callback_type, void* data) { - // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating dependency event"); - (void)callback_type; - - try { - // Input variables. - nebstruct_adaptive_dependency_data* nsadd( - static_cast(data)); - - // Host dependency. - if ((NEBTYPE_HOSTDEPENDENCY_ADD == nsadd->type) || - (NEBTYPE_HOSTDEPENDENCY_UPDATE == nsadd->type) || - (NEBTYPE_HOSTDEPENDENCY_DELETE == nsadd->type)) { - // Find IDs. 
- uint64_t host_id; - uint64_t dep_host_id; - engine::hostdependency* dep( - static_cast(nsadd->object_ptr)); - if (!dep->get_hostname().empty()) { - host_id = engine::get_host_id(dep->get_hostname()); - } else { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: dependency callback called without valid host"); - host_id = 0; - } - if (!dep->get_dependent_hostname().empty()) { - dep_host_id = engine::get_host_id(dep->get_dependent_hostname()); - } else { - SPDLOG_LOGGER_INFO( - neb_logger, - "callbacks: dependency callback called without valid dependent " - "host"); - dep_host_id = 0; - } - - // Generate service dependency event. - auto hst_dep{std::make_shared()}; - hst_dep->host_id = host_id; - hst_dep->dependent_host_id = dep_host_id; - hst_dep->enabled = (nsadd->type != NEBTYPE_HOSTDEPENDENCY_DELETE); - if (!dep->get_dependency_period().empty()) - hst_dep->dependency_period = dep->get_dependency_period(); - { - std::string options; - if (dep->get_fail_on_down()) - options.append("d"); - if (dep->get_fail_on_up()) - options.append("o"); - if (dep->get_fail_on_pending()) - options.append("p"); - if (dep->get_fail_on_unreachable()) - options.append("u"); - if (dep->get_dependency_type() == engine::dependency::notification) - hst_dep->notification_failure_options = options; - else if (dep->get_dependency_type() == engine::dependency::execution) - hst_dep->execution_failure_options = options; - } - hst_dep->inherits_parent = dep->get_inherits_parent(); - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: host {} depends on host {}", - dep_host_id, host_id); - - // Publish dependency event. - neb::gl_publisher.write(hst_dep); - } - // Service dependency. - else if ((NEBTYPE_SERVICEDEPENDENCY_ADD == nsadd->type) || - (NEBTYPE_SERVICEDEPENDENCY_UPDATE == nsadd->type) || - (NEBTYPE_SERVICEDEPENDENCY_DELETE == nsadd->type)) { - // Find IDs. 
- std::pair ids; - std::pair dep_ids; - engine::servicedependency* dep( - static_cast(nsadd->object_ptr)); - if (!dep->get_hostname().empty() && - !dep->get_service_description().empty()) { - ids = engine::get_host_and_service_id(dep->get_hostname(), - dep->get_service_description()); - } else { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: dependency callback called without valid service"); - ids.first = 0; - ids.second = 0; - } - if (!dep->get_dependent_hostname().empty() && - !dep->get_dependent_service_description().empty()) { - dep_ids = engine::get_host_and_service_id( - dep->get_dependent_hostname(), - dep->get_dependent_service_description()); - } else { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: dependency callback called without valid dependent " - "service"); - dep_ids.first = 0; - dep_ids.second = 0; - } - - // Generate service dependency event. - auto svc_dep{std::make_shared()}; - svc_dep->host_id = ids.first; - svc_dep->service_id = ids.second; - svc_dep->dependent_host_id = dep_ids.first; - svc_dep->dependent_service_id = dep_ids.second; - svc_dep->enabled = (nsadd->type != NEBTYPE_SERVICEDEPENDENCY_DELETE); - if (!dep->get_dependency_period().empty()) - svc_dep->dependency_period = dep->get_dependency_period(); - { - std::string options; - if (dep->get_fail_on_critical()) - options.append("c"); - if (dep->get_fail_on_ok()) - options.append("o"); - if (dep->get_fail_on_pending()) - options.append("p"); - if (dep->get_fail_on_unknown()) - options.append("u"); - if (dep->get_fail_on_warning()) - options.append("w"); - if (dep->get_dependency_type() == engine::dependency::notification) - svc_dep->notification_failure_options = options; - else if (dep->get_dependency_type() == engine::dependency::execution) - svc_dep->execution_failure_options = options; - } - svc_dep->inherits_parent = dep->get_inherits_parent(); - SPDLOG_LOGGER_INFO( - neb_logger, "callbacks: service ({}, {}) depends on service ({}, {})", - dep_ids.first, dep_ids.second, 
ids.first, ids.second); - - // Publish dependency event. - neb::gl_publisher.write(svc_dep); - } - } - // Avoid exception propagation to C code. - catch (...) { - } - - return 0; -} - -/** - * @brief Function that process dependency data. - * - * This function is called by Centreon Engine when some dependency data - * is available. - * - * @param[in] callback_type Type of the callback - * (NEBCALLBACK_ADAPTIVE_DEPENDENCY_DATA). - * @param[in] data A pointer to a - * nebstruct_adaptive_dependency_data - * containing the dependency data. - * - * @return 0 on success. - */ -int neb::callback_pb_dependency(int, void* data) { - // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating dependency event"); - - // Input variables. - nebstruct_adaptive_dependency_data* nsadd( - static_cast(data)); - - // Host dependency. - if ((NEBTYPE_HOSTDEPENDENCY_ADD == nsadd->type) || - (NEBTYPE_HOSTDEPENDENCY_UPDATE == nsadd->type) || - (NEBTYPE_HOSTDEPENDENCY_DELETE == nsadd->type)) { - // Find IDs. - uint64_t host_id; - uint64_t dep_host_id; - - engine::hostdependency* dep( - static_cast(nsadd->object_ptr)); - if (!dep->get_hostname().empty()) { - host_id = engine::get_host_id(dep->get_hostname()); - } else { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: dependency callback called without valid host"); - host_id = 0; - } - if (!dep->get_dependent_hostname().empty()) { - dep_host_id = engine::get_host_id(dep->get_dependent_hostname()); - } else { - SPDLOG_LOGGER_INFO( - neb_logger, - "callbacks: dependency callback called without valid dependent " - "host"); - dep_host_id = 0; - } - - // Generate service dependency event. 
- auto hd{std::make_shared()}; - HostDependency& hst_dep = hd->mut_obj(); - hst_dep.set_host_id(host_id); - hst_dep.set_dependent_host_id(dep_host_id); - hst_dep.set_enabled(nsadd->type != NEBTYPE_HOSTDEPENDENCY_DELETE); - if (!dep->get_dependency_period().empty()) - hst_dep.set_dependency_period(dep->get_dependency_period()); - { - std::string options; - if (dep->get_fail_on_down()) - options.append("d"); - if (dep->get_fail_on_up()) - options.append("o"); - if (dep->get_fail_on_pending()) - options.append("p"); - if (dep->get_fail_on_unreachable()) - options.append("u"); - if (dep->get_dependency_type() == engine::dependency::notification) - hst_dep.set_notification_failure_options(options); - else if (dep->get_dependency_type() == engine::dependency::execution) - hst_dep.set_execution_failure_options(options); - } - hst_dep.set_inherits_parent(dep->get_inherits_parent()); - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: host {} depends on host {}", - dep_host_id, host_id); - - // Publish dependency event. - neb::gl_publisher.write(hd); - } - // Service dependency. - else if ((NEBTYPE_SERVICEDEPENDENCY_ADD == nsadd->type) || - (NEBTYPE_SERVICEDEPENDENCY_UPDATE == nsadd->type) || - (NEBTYPE_SERVICEDEPENDENCY_DELETE == nsadd->type)) { - // Find IDs. 
- std::pair ids; - std::pair dep_ids; - engine::servicedependency* dep( - static_cast(nsadd->object_ptr)); - if (!dep->get_hostname().empty() && - !dep->get_service_description().empty()) { - ids = engine::get_host_and_service_id(dep->get_hostname(), - dep->get_service_description()); - } else { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: dependency callback called without valid service"); - ids.first = 0; - ids.second = 0; - } - if (!dep->get_dependent_hostname().empty() && - !dep->get_dependent_service_description().empty()) { - dep_ids = engine::get_host_and_service_id( - dep->get_dependent_hostname(), - dep->get_dependent_service_description()); - } else { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: dependency callback called without valid dependent " - "service"); - dep_ids.first = 0; - dep_ids.second = 0; - } - - // Generate service dependency event. - auto sd{std::make_shared()}; - ServiceDependency& svc_dep = sd->mut_obj(); - svc_dep.set_host_id(ids.first); - svc_dep.set_service_id(ids.second); - svc_dep.set_dependent_host_id(dep_ids.first); - svc_dep.set_dependent_service_id(dep_ids.second); - svc_dep.set_enabled(nsadd->type != NEBTYPE_SERVICEDEPENDENCY_DELETE); - if (!dep->get_dependency_period().empty()) - svc_dep.set_dependency_period(dep->get_dependency_period()); - { - std::string options; - if (dep->get_fail_on_critical()) - options.append("c"); - if (dep->get_fail_on_ok()) - options.append("o"); - if (dep->get_fail_on_pending()) - options.append("p"); - if (dep->get_fail_on_unknown()) - options.append("u"); - if (dep->get_fail_on_warning()) - options.append("w"); - if (dep->get_dependency_type() == engine::dependency::notification) - svc_dep.set_notification_failure_options(options); - else if (dep->get_dependency_type() == engine::dependency::execution) - svc_dep.set_execution_failure_options(options); - } - svc_dep.set_inherits_parent(dep->get_inherits_parent()); - SPDLOG_LOGGER_INFO( - neb_logger, "callbacks: service ({}, {}) 
depends on service ({}, {})", - dep_ids.first, dep_ids.second, ids.first, ids.second); - - // Publish dependency event. - neb::gl_publisher.write(sd); - } - - return 0; -} - /** * @brief Function that process downtime data. * @@ -1006,7 +685,7 @@ int neb::callback_pb_dependency(int, void* data) { */ int neb::callback_downtime(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating downtime event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating downtime event"); (void)callback_type; const nebstruct_downtime_data* downtime_data{ static_cast(data)}; @@ -1019,11 +698,10 @@ int neb::callback_downtime(int callback_type, void* data) { // Fill output var. if (downtime_data->author_name) - downtime->author = - misc::string::check_string_utf8(downtime_data->author_name); + downtime->author = common::check_string_utf8(downtime_data->author_name); if (downtime_data->comment_data) downtime->comment = - misc::string::check_string_utf8(downtime_data->comment_data); + common::check_string_utf8(downtime_data->comment_data); downtime->downtime_type = downtime_data->downtime_type; downtime->duration = downtime_data->duration; downtime->end_time = downtime_data->end_time; @@ -1097,7 +775,7 @@ int neb::callback_downtime(int callback_type, void* data) { */ int neb::callback_pb_downtime(int callback_type, void* data) { // Log message. - neb_logger->info("callbacks: generating pb downtime event"); + neb_logger->debug("callbacks: generating pb downtime event"); (void)callback_type; const nebstruct_downtime_data* downtime_data = @@ -1111,11 +789,10 @@ int neb::callback_pb_downtime(int callback_type, void* data) { // Fill output var. 
if (downtime_data->author_name) - downtime.set_author( - misc::string::check_string_utf8(downtime_data->author_name)); + downtime.set_author(common::check_string_utf8(downtime_data->author_name)); if (downtime_data->comment_data) downtime.set_comment_data( - misc::string::check_string_utf8(downtime_data->comment_data)); + common::check_string_utf8(downtime_data->comment_data)); downtime.set_id(downtime_data->downtime_id); downtime.set_type( static_cast(downtime_data->downtime_type)); @@ -1194,14 +871,14 @@ int neb::callback_external_command(int callback_type, void* data) { if (necd && (necd->type == NEBTYPE_EXTERNALCOMMAND_START)) { try { if (necd->command_type == CMD_CHANGE_CUSTOM_HOST_VAR) { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating host custom variable update event"); // Split argument string. if (necd->command_args) { std::list l{absl::StrSplit( - misc::string::check_string_utf8(necd->command_args), ';')}; + common::check_string_utf8(necd->command_args), ';')}; if (l.size() != 3) SPDLOG_LOGGER_ERROR( neb_logger, "callbacks: invalid host custom variable command"); @@ -1231,14 +908,14 @@ int neb::callback_external_command(int callback_type, void* data) { } } } else if (necd->command_type == CMD_CHANGE_CUSTOM_SVC_VAR) { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating service custom variable update event"); // Split argument string. 
if (necd->command_args) { std::list l{absl::StrSplit( - misc::string::check_string_utf8(necd->command_args), ';')}; + common::check_string_utf8(necd->command_args), ';')}; if (l.size() != 4) SPDLOG_LOGGER_ERROR( neb_logger, @@ -1300,12 +977,12 @@ int neb::callback_pb_external_command(int, void* data) { nebstruct_external_command_data* necd( static_cast(data)); if (necd && (necd->type == NEBTYPE_EXTERNALCOMMAND_START)) { - auto args = absl::StrSplit( - misc::string::check_string_utf8(necd->command_args), ';'); + auto args = + absl::StrSplit(common::check_string_utf8(necd->command_args), ';'); size_t args_size = std::distance(args.begin(), args.end()); auto split_iter = args.begin(); if (necd->command_type == CMD_CHANGE_CUSTOM_HOST_VAR) { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating host custom variable update event"); @@ -1340,7 +1017,7 @@ int neb::callback_pb_external_command(int, void* data) { } } } else if (necd->command_type == CMD_CHANGE_CUSTOM_SVC_VAR) { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating service custom variable update event"); @@ -1398,7 +1075,7 @@ int neb::callback_pb_external_command(int, void* data) { */ int neb::callback_group(int callback_type, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating group event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating group event"); (void)callback_type; try { @@ -1416,20 +1093,20 @@ int neb::callback_group(int callback_type, void* data) { auto new_hg{std::make_shared()}; new_hg->poller_id = config::applier::state::instance().poller_id(); new_hg->id = host_group->get_id(); - new_hg->enabled = (group_data->type != NEBTYPE_HOSTGROUP_DELETE && + new_hg->enabled = group_data->type == NEBTYPE_HOSTGROUP_ADD || + (group_data->type == NEBTYPE_HOSTGROUP_UPDATE && !host_group->members.empty()); - new_hg->name = - misc::string::check_string_utf8(host_group->get_group_name()); + new_hg->name = common::check_string_utf8(host_group->get_group_name()); // Send host group event. if (new_hg->id) { if (new_hg->enabled) - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: new host group {} ('{}') on instance {}", new_hg->id, new_hg->name, new_hg->poller_id); else - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: disable host group {} ('{}') on instance {}", new_hg->id, new_hg->name, new_hg->poller_id); @@ -1447,20 +1124,21 @@ int neb::callback_group(int callback_type, void* data) { auto new_sg{std::make_shared()}; new_sg->poller_id = config::applier::state::instance().poller_id(); new_sg->id = service_group->get_id(); - new_sg->enabled = (group_data->type != NEBTYPE_SERVICEGROUP_DELETE && + new_sg->enabled = group_data->type == NEBTYPE_SERVICEGROUP_ADD || + (group_data->type == NEBTYPE_SERVICEGROUP_UPDATE && !service_group->members.empty()); new_sg->name = - misc::string::check_string_utf8(service_group->get_group_name()); + common::check_string_utf8(service_group->get_group_name()); // Send service group event. 
if (new_sg->id) { if (new_sg->enabled) - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks:: new service group {} ('{}) on instance {}", new_sg->id, new_sg->name, new_sg->poller_id); else - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks:: disable service group {} ('{}) on instance {}", new_sg->id, new_sg->name, new_sg->poller_id); @@ -1495,42 +1173,43 @@ int neb::callback_pb_group(int callback_type, void* data) { nebstruct_group_data const* group_data( static_cast(data)); - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating pb group event type:{}", - group_data->type); - // Host group. if ((NEBTYPE_HOSTGROUP_ADD == group_data->type) || (NEBTYPE_HOSTGROUP_UPDATE == group_data->type) || (NEBTYPE_HOSTGROUP_DELETE == group_data->type)) { engine::hostgroup const* host_group( static_cast(group_data->object_ptr)); + SPDLOG_LOGGER_DEBUG( + neb_logger, + "callbacks: generating pb host group {} (id: {}) event type:{}", + host_group->get_group_name(), host_group->get_id(), group_data->type); + if (!host_group->get_group_name().empty()) { auto new_hg{std::make_shared()}; - new_hg->mut_obj().set_poller_id( - config::applier::state::instance().poller_id()); - new_hg->mut_obj().set_hostgroup_id(host_group->get_id()); - new_hg->mut_obj().set_enabled(group_data->type != - NEBTYPE_HOSTGROUP_DELETE && - !host_group->members.empty()); - new_hg->mut_obj().set_name( - misc::string::check_string_utf8(host_group->get_group_name())); + auto& obj = new_hg->mut_obj(); + obj.set_poller_id(config::applier::state::instance().poller_id()); + obj.set_hostgroup_id(host_group->get_id()); + obj.set_enabled(group_data->type == NEBTYPE_HOSTGROUP_ADD || + (group_data->type == NEBTYPE_HOSTGROUP_UPDATE && + !host_group->members.empty())); + obj.set_name(common::check_string_utf8(host_group->get_group_name())); // Send host group event. 
if (host_group->get_id()) { if (new_hg->obj().enabled()) - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new pb host group {} ('{}' {} " - "members) on instance {}", - host_group->get_id(), new_hg->obj().name(), - host_group->members.size(), - new_hg->obj().poller_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new pb host group {} ('{}' {} " + "members) on instance {}", + host_group->get_id(), new_hg->obj().name(), + host_group->members.size(), + new_hg->obj().poller_id()); else - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: disable pb host group {} ('{}' {} " - "members) on instance {}", - host_group->get_id(), new_hg->obj().name(), - host_group->members.size(), - new_hg->obj().poller_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: disable pb host group {} ('{}' {} " + "members) on instance {}", + host_group->get_id(), new_hg->obj().name(), + host_group->members.size(), + new_hg->obj().poller_id()); neb::gl_publisher.write(new_hg); } @@ -1542,27 +1221,32 @@ int neb::callback_pb_group(int callback_type, void* data) { (NEBTYPE_SERVICEGROUP_DELETE == group_data->type)) { engine::servicegroup const* service_group( static_cast(group_data->object_ptr)); + SPDLOG_LOGGER_DEBUG( + neb_logger, + "callbacks: generating pb service group {} (id: {}) event type:{}", + service_group->get_group_name(), service_group->get_id(), + group_data->type); + if (!service_group->get_group_name().empty()) { auto new_sg{std::make_shared()}; - new_sg->mut_obj().set_poller_id( - config::applier::state::instance().poller_id()); - new_sg->mut_obj().set_servicegroup_id(service_group->get_id()); - new_sg->mut_obj().set_enabled(group_data->type != - NEBTYPE_SERVICEGROUP_DELETE && - !service_group->members.empty()); - new_sg->mut_obj().set_name( - misc::string::check_string_utf8(service_group->get_group_name())); + auto& obj = new_sg->mut_obj(); + obj.set_poller_id(config::applier::state::instance().poller_id()); + obj.set_servicegroup_id(service_group->get_id()); + 
obj.set_enabled(group_data->type == NEBTYPE_SERVICEGROUP_ADD || + (group_data->type == NEBTYPE_SERVICEGROUP_UPDATE && + !service_group->members.empty())); + obj.set_name(common::check_string_utf8(service_group->get_group_name())); // Send service group event. if (service_group->get_id()) { if (new_sg->obj().enabled()) - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks:: new pb service group {} ('{}) on instance {}", service_group->get_id(), new_sg->obj().name(), new_sg->obj().poller_id()); else - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks:: disable pb service group {} ('{}) on instance {}", service_group->get_id(), new_sg->obj().name(), @@ -1590,7 +1274,7 @@ int neb::callback_pb_group(int callback_type, void* data) { */ int neb::callback_group_member(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating group member event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating group member event"); (void)callback_type; try { @@ -1609,20 +1293,20 @@ int neb::callback_group_member(int callback_type, void* data) { // Output variable. 
auto hgm{std::make_shared()}; hgm->group_id = hg->get_id(); - hgm->group_name = misc::string::check_string_utf8(hg->get_group_name()); + hgm->group_name = common::check_string_utf8(hg->get_group_name()); hgm->poller_id = config::applier::state::instance().poller_id(); uint32_t host_id = engine::get_host_id(hst->name()); if (host_id != 0 && hgm->group_id != 0) { hgm->host_id = host_id; if (member_data->type == NEBTYPE_HOSTGROUPMEMBER_DELETE) { - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: host {} is not a member of group " - "{} on instance {} " - "anymore", - hgm->host_id, hgm->group_id, hgm->poller_id); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: host {} is not a member of group " + "{} on instance {} " + "anymore", + hgm->host_id, hgm->group_id, hgm->poller_id); hgm->enabled = false; } else { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: host {} is a member of group {} on instance {}", hgm->host_id, hgm->group_id, hgm->poller_id); @@ -1647,7 +1331,7 @@ int neb::callback_group_member(int callback_type, void* data) { // Output variable. 
auto sgm{std::make_shared()}; sgm->group_id = sg->get_id(); - sgm->group_name = misc::string::check_string_utf8(sg->get_group_name()); + sgm->group_name = common::check_string_utf8(sg->get_group_name()); sgm->poller_id = config::applier::state::instance().poller_id(); std::pair p; p = engine::get_host_and_service_id(svc->get_hostname(), @@ -1656,14 +1340,14 @@ int neb::callback_group_member(int callback_type, void* data) { sgm->service_id = p.second; if (sgm->host_id && sgm->service_id && sgm->group_id) { if (member_data->type == NEBTYPE_SERVICEGROUPMEMBER_DELETE) { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: service ({},{}) is not a member of group {} on " "instance {} anymore", sgm->host_id, sgm->service_id, sgm->group_id, sgm->poller_id); sgm->enabled = false; } else { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: service ({}, {}) is a member of group {} on " "instance {}", @@ -1699,7 +1383,8 @@ int neb::callback_group_member(int callback_type, void* data) { */ int neb::callback_pb_group_member(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating pb group member event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating pb group member event"); (void)callback_type; // Input variable. @@ -1707,8 +1392,8 @@ int neb::callback_pb_group_member(int callback_type, void* data) { static_cast(data)); // Host group member. 
- if ((member_data->type == NEBTYPE_HOSTGROUPMEMBER_ADD) || - (member_data->type == NEBTYPE_HOSTGROUPMEMBER_DELETE)) { + if (member_data->type == NEBTYPE_HOSTGROUPMEMBER_ADD || + member_data->type == NEBTYPE_HOSTGROUPMEMBER_DELETE) { engine::host const* hst( static_cast(member_data->object_ptr)); engine::hostgroup const* hg( @@ -1718,21 +1403,21 @@ int neb::callback_pb_group_member(int callback_type, void* data) { auto hgmp{std::make_shared()}; HostGroupMember& hgm = hgmp->mut_obj(); hgm.set_hostgroup_id(hg->get_id()); - hgm.set_name(misc::string::check_string_utf8(hg->get_group_name())); + hgm.set_name(common::check_string_utf8(hg->get_group_name())); hgm.set_poller_id(config::applier::state::instance().poller_id()); uint32_t host_id = engine::get_host_id(hst->name()); if (host_id != 0 && hgm.hostgroup_id() != 0) { hgm.set_host_id(host_id); if (member_data->type == NEBTYPE_HOSTGROUPMEMBER_DELETE) { - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: host {} is not a member of group " - "{} on instance {} " - "anymore", - hgm.host_id(), hgm.hostgroup_id(), - hgm.poller_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: host {} is not a member of group " + "{} on instance {} " + "anymore", + hgm.host_id(), hgm.hostgroup_id(), + hgm.poller_id()); hgm.set_enabled(false); } else { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: host {} is a member of group {} on instance {}", hgm.host_id(), hgm.hostgroup_id(), hgm.poller_id()); @@ -1758,7 +1443,7 @@ int neb::callback_pb_group_member(int callback_type, void* data) { auto sgmp{std::make_shared()}; ServiceGroupMember& sgm = sgmp->mut_obj(); sgm.set_servicegroup_id(sg->get_id()); - sgm.set_name(misc::string::check_string_utf8(sg->get_group_name())); + sgm.set_name(common::check_string_utf8(sg->get_group_name())); sgm.set_poller_id(config::applier::state::instance().poller_id()); std::pair p; p = engine::get_host_and_service_id(svc->get_hostname(), @@ -1767,7 +1452,7 @@ int 
neb::callback_pb_group_member(int callback_type, void* data) { sgm.set_service_id(p.second); if (sgm.host_id() && sgm.service_id() && sgm.servicegroup_id()) { if (member_data->type == NEBTYPE_SERVICEGROUPMEMBER_DELETE) { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: service ({},{}) is not a member of group {} on " "instance {} anymore", @@ -1775,7 +1460,7 @@ int neb::callback_pb_group_member(int callback_type, void* data) { sgm.poller_id()); sgm.set_enabled(false); } else { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: service ({}, {}) is a member of group {} on " "instance {}", @@ -1807,7 +1492,7 @@ int neb::callback_pb_group_member(int callback_type, void* data) { */ int neb::callback_host(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating host event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating host event"); (void)callback_type; try { @@ -1823,17 +1508,15 @@ int neb::callback_host(int callback_type, void* data) { my_host->acknowledged = h->problem_has_been_acknowledged(); my_host->acknowledgement_type = h->get_acknowledgement(); if (!h->get_action_url().empty()) - my_host->action_url = - misc::string::check_string_utf8(h->get_action_url()); + my_host->action_url = common::check_string_utf8(h->get_action_url()); my_host->active_checks_enabled = h->active_checks_enabled(); if (!h->get_address().empty()) - my_host->address = misc::string::check_string_utf8(h->get_address()); + my_host->address = common::check_string_utf8(h->get_address()); if (!h->get_alias().empty()) - my_host->alias = misc::string::check_string_utf8(h->get_alias()); + my_host->alias = common::check_string_utf8(h->get_alias()); my_host->check_freshness = h->check_freshness_enabled(); if (!h->check_command().empty()) - my_host->check_command = - misc::string::check_string_utf8(h->check_command()); + my_host->check_command = common::check_string_utf8(h->check_command()); 
my_host->check_interval = h->check_interval(); if (!h->check_period().empty()) my_host->check_period = h->check_period(); @@ -1848,12 +1531,10 @@ int neb::callback_host(int callback_type, void* data) { my_host->default_passive_checks_enabled = h->passive_checks_enabled(); my_host->downtime_depth = h->get_scheduled_downtime_depth(); if (!h->get_display_name().empty()) - my_host->display_name = - misc::string::check_string_utf8(h->get_display_name()); + my_host->display_name = common::check_string_utf8(h->get_display_name()); my_host->enabled = (host_data->type != NEBTYPE_HOST_DELETE); if (!h->event_handler().empty()) - my_host->event_handler = - misc::string::check_string_utf8(h->event_handler()); + my_host->event_handler = common::check_string_utf8(h->event_handler()); my_host->event_handler_enabled = h->event_handler_enabled(); my_host->execution_time = h->get_execution_time(); my_host->first_notification_delay = h->get_first_notification_delay(); @@ -1869,13 +1550,12 @@ int neb::callback_host(int callback_type, void* data) { my_host->has_been_checked = h->has_been_checked(); my_host->high_flap_threshold = h->get_high_flap_threshold(); if (!h->name().empty()) - my_host->host_name = misc::string::check_string_utf8(h->name()); + my_host->host_name = common::check_string_utf8(h->name()); if (!h->get_icon_image().empty()) - my_host->icon_image = - misc::string::check_string_utf8(h->get_icon_image()); + my_host->icon_image = common::check_string_utf8(h->get_icon_image()); if (!h->get_icon_image_alt().empty()) my_host->icon_image_alt = - misc::string::check_string_utf8(h->get_icon_image_alt()); + common::check_string_utf8(h->get_icon_image_alt()); my_host->is_flapping = h->get_is_flapping(); my_host->last_check = h->get_last_check(); my_host->last_hard_state = h->get_last_hard_state(); @@ -1893,9 +1573,9 @@ int neb::callback_host(int callback_type, void* data) { my_host->next_notification = h->get_next_notification(); my_host->no_more_notifications = 
h->get_no_more_notifications(); if (!h->get_notes().empty()) - my_host->notes = misc::string::check_string_utf8(h->get_notes()); + my_host->notes = common::check_string_utf8(h->get_notes()); if (!h->get_notes_url().empty()) - my_host->notes_url = misc::string::check_string_utf8(h->get_notes_url()); + my_host->notes_url = common::check_string_utf8(h->get_notes_url()); my_host->notifications_enabled = h->get_notifications_enabled(); my_host->notification_interval = h->get_notification_interval(); if (!h->notification_period().empty()) @@ -1909,16 +1589,16 @@ int neb::callback_host(int callback_type, void* data) { h->get_notify_on(engine::notifier::unreachable); my_host->obsess_over = h->obsess_over(); if (!h->get_plugin_output().empty()) { - my_host->output = misc::string::check_string_utf8(h->get_plugin_output()); + my_host->output = common::check_string_utf8(h->get_plugin_output()); my_host->output.append("\n"); } if (!h->get_long_plugin_output().empty()) my_host->output.append( - misc::string::check_string_utf8(h->get_long_plugin_output())); + common::check_string_utf8(h->get_long_plugin_output())); my_host->passive_checks_enabled = h->passive_checks_enabled(); my_host->percent_state_change = h->get_percent_state_change(); if (!h->get_perf_data().empty()) - my_host->perf_data = misc::string::check_string_utf8(h->get_perf_data()); + my_host->perf_data = common::check_string_utf8(h->get_perf_data()); my_host->poller_id = config::applier::state::instance().poller_id(); my_host->retain_nonstatus_information = h->get_retain_nonstatus_information(); @@ -1933,7 +1613,7 @@ int neb::callback_host(int callback_type, void* data) { (h->has_been_checked() ? h->get_state_type() : engine::notifier::hard); if (!h->get_statusmap_image().empty()) my_host->statusmap_image = - misc::string::check_string_utf8(h->get_statusmap_image()); + common::check_string_utf8(h->get_statusmap_image()); my_host->timezone = h->get_timezone(); // Find host ID. 
@@ -1942,7 +1622,7 @@ int neb::callback_host(int callback_type, void* data) { my_host->host_id = host_id; // Send host event. - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: new host {} ('{}') on instance {}", my_host->host_id, my_host->host_name, my_host->poller_id); neb::gl_publisher.write(my_host); @@ -1974,8 +1654,8 @@ int neb::callback_host(int callback_type, void* data) { */ int neb::callback_pb_host(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating pb host event protobuf"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating pb host event protobuf"); (void)callback_type; nebstruct_adaptive_host_data* dh = @@ -2000,11 +1680,9 @@ int neb::callback_pb_host(int callback_type, void* data) { else if (dh->modified_attribute & MODATTR_OBSESSIVE_HANDLER_ENABLED) hst.set_obsess_over_host(eh->obsess_over()); else if (dh->modified_attribute & MODATTR_EVENT_HANDLER_COMMAND) - hst.set_event_handler( - misc::string::check_string_utf8(eh->event_handler())); + hst.set_event_handler(common::check_string_utf8(eh->event_handler())); else if (dh->modified_attribute & MODATTR_CHECK_COMMAND) - hst.set_check_command( - misc::string::check_string_utf8(eh->check_command())); + hst.set_check_command(common::check_string_utf8(eh->check_command())); else if (dh->modified_attribute & MODATTR_NORMAL_CHECK_INTERVAL) hst.set_check_interval(eh->check_interval()); else if (dh->modified_attribute & MODATTR_RETRY_CHECK_INTERVAL) @@ -2028,10 +1706,10 @@ int neb::callback_pb_host(int callback_type, void* data) { hst.set_host_id(host_id); // Send host event. 
- SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new host {} ('{}') on instance {}", - hst.host_id(), eh->name(), - config::applier::state::instance().poller_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new host {} ('{}') on instance {}", + hst.host_id(), eh->name(), + config::applier::state::instance().poller_id()); neb::gl_publisher.write(h); } else SPDLOG_LOGGER_ERROR(neb_logger, @@ -2045,17 +1723,15 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_acknowledged(eh->problem_has_been_acknowledged()); host.set_acknowledgement_type(eh->get_acknowledgement()); if (!eh->get_action_url().empty()) - host.set_action_url( - misc::string::check_string_utf8(eh->get_action_url())); + host.set_action_url(common::check_string_utf8(eh->get_action_url())); host.set_active_checks(eh->active_checks_enabled()); if (!eh->get_address().empty()) - host.set_address(misc::string::check_string_utf8(eh->get_address())); + host.set_address(common::check_string_utf8(eh->get_address())); if (!eh->get_alias().empty()) - host.set_alias(misc::string::check_string_utf8(eh->get_alias())); + host.set_alias(common::check_string_utf8(eh->get_alias())); host.set_check_freshness(eh->check_freshness_enabled()); if (!eh->check_command().empty()) - host.set_check_command( - misc::string::check_string_utf8(eh->check_command())); + host.set_check_command(common::check_string_utf8(eh->check_command())); host.set_check_interval(eh->check_interval()); if (!eh->check_period().empty()) host.set_check_period(eh->check_period()); @@ -2071,13 +1747,11 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_default_passive_checks(eh->passive_checks_enabled()); host.set_scheduled_downtime_depth(eh->get_scheduled_downtime_depth()); if (!eh->get_display_name().empty()) - host.set_display_name( - misc::string::check_string_utf8(eh->get_display_name())); + host.set_display_name(common::check_string_utf8(eh->get_display_name())); host.set_enabled(static_cast(data)->type != 
NEBTYPE_HOST_DELETE); if (!eh->event_handler().empty()) - host.set_event_handler( - misc::string::check_string_utf8(eh->event_handler())); + host.set_event_handler(common::check_string_utf8(eh->event_handler())); host.set_event_handler_enabled(eh->event_handler_enabled()); host.set_execution_time(eh->get_execution_time()); host.set_first_notification_delay(eh->get_first_notification_delay()); @@ -2093,13 +1767,12 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_checked(eh->has_been_checked()); host.set_high_flap_threshold(eh->get_high_flap_threshold()); if (!eh->name().empty()) - host.set_name(misc::string::check_string_utf8(eh->name())); + host.set_name(common::check_string_utf8(eh->name())); if (!eh->get_icon_image().empty()) - host.set_icon_image( - misc::string::check_string_utf8(eh->get_icon_image())); + host.set_icon_image(common::check_string_utf8(eh->get_icon_image())); if (!eh->get_icon_image_alt().empty()) host.set_icon_image_alt( - misc::string::check_string_utf8(eh->get_icon_image_alt())); + common::check_string_utf8(eh->get_icon_image_alt())); host.set_flapping(eh->get_is_flapping()); host.set_last_check(eh->get_last_check()); host.set_last_hard_state( @@ -2118,9 +1791,9 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_next_host_notification(eh->get_next_notification()); host.set_no_more_notifications(eh->get_no_more_notifications()); if (!eh->get_notes().empty()) - host.set_notes(misc::string::check_string_utf8(eh->get_notes())); + host.set_notes(common::check_string_utf8(eh->get_notes())); if (!eh->get_notes_url().empty()) - host.set_notes_url(misc::string::check_string_utf8(eh->get_notes_url())); + host.set_notes_url(common::check_string_utf8(eh->get_notes_url())); host.set_notify(eh->get_notifications_enabled()); host.set_notification_interval(eh->get_notification_interval()); if (!eh->notification_period().empty()) @@ -2134,15 +1807,14 @@ int neb::callback_pb_host(int callback_type, void* data) { 
eh->get_notify_on(engine::notifier::unreachable)); host.set_obsess_over_host(eh->obsess_over()); if (!eh->get_plugin_output().empty()) { - host.set_output(misc::string::check_string_utf8(eh->get_plugin_output())); + host.set_output(common::check_string_utf8(eh->get_plugin_output())); } if (!eh->get_long_plugin_output().empty()) - host.set_output( - misc::string::check_string_utf8(eh->get_long_plugin_output())); + host.set_output(common::check_string_utf8(eh->get_long_plugin_output())); host.set_passive_checks(eh->passive_checks_enabled()); host.set_percent_state_change(eh->get_percent_state_change()); if (!eh->get_perf_data().empty()) - host.set_perfdata(misc::string::check_string_utf8(eh->get_perf_data())); + host.set_perfdata(common::check_string_utf8(eh->get_perf_data())); host.set_instance_id(config::applier::state::instance().poller_id()); host.set_retain_nonstatus_information( eh->get_retain_nonstatus_information()); @@ -2158,7 +1830,7 @@ int neb::callback_pb_host(int callback_type, void* data) { : engine::notifier::hard)); if (!eh->get_statusmap_image().empty()) host.set_statusmap_image( - misc::string::check_string_utf8(eh->get_statusmap_image())); + common::check_string_utf8(eh->get_statusmap_image())); host.set_timezone(eh->get_timezone()); host.set_severity_id(eh->get_severity() ? eh->get_severity()->id() : 0); host.set_icon_id(eh->get_icon_id()); @@ -2174,9 +1846,9 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_host_id(host_id); // Send host event. - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new host {} ('{}') on instance {}", - host.host_id(), host.name(), host.instance_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new host {} ('{}') on instance {}", + host.host_id(), host.name(), host.instance_id()); neb::gl_publisher.write(h); /* No need to send this service custom variables changes, custom @@ -2215,7 +1887,7 @@ int neb::callback_host_check(int callback_type, void* data) { return 0; // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating host check event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating host check event"); try { auto host_check{std::make_shared()}; @@ -2226,7 +1898,7 @@ int neb::callback_host_check(int callback_type, void* data) { host_check->active_checks_enabled = h->active_checks_enabled(); host_check->check_type = hcdata->check_type; host_check->command_line = - misc::string::check_string_utf8(hcdata->command_line); + common::check_string_utf8(hcdata->command_line); if (!hcdata->host_name) throw msg_fmt("unnamed host"); host_check->host_id = engine::get_host_id(hcdata->host_name); @@ -2281,7 +1953,7 @@ int neb::callback_pb_host_check(int callback_type, void* data) { "callbacks: generating host check event for {} command_line={}", hcdata->host_name, hcdata->command_line); } else { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating host check event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating host check event"); } std::shared_ptr host_check{ @@ -2297,7 +1969,7 @@ int neb::callback_pb_host_check(int callback_type, void* data) { ? com::centreon::broker::CheckActive : com::centreon::broker::CheckPassive); host_check->mut_obj().set_command_line( - misc::string::check_string_utf8(hcdata->command_line)); + common::check_string_utf8(hcdata->command_line)); host_check->mut_obj().set_host_id(h->host_id()); host_check->mut_obj().set_next_check(h->get_next_check()); @@ -2322,7 +1994,7 @@ int neb::callback_pb_host_check(int callback_type, void* data) { */ int neb::callback_host_status(int callback_type, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating host status event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating host status event"); (void)callback_type; try { @@ -2337,7 +2009,7 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->active_checks_enabled = h->active_checks_enabled(); if (!h->check_command().empty()) host_status->check_command = - misc::string::check_string_utf8(h->check_command()); + common::check_string_utf8(h->check_command()); host_status->check_interval = h->check_interval(); if (!h->check_period().empty()) host_status->check_period = h->check_period(); @@ -2348,7 +2020,7 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->downtime_depth = h->get_scheduled_downtime_depth(); if (!h->event_handler().empty()) host_status->event_handler = - misc::string::check_string_utf8(h->event_handler()); + common::check_string_utf8(h->event_handler()); host_status->event_handler_enabled = h->event_handler_enabled(); host_status->execution_time = h->get_execution_time(); host_status->flap_detection_enabled = h->flap_detection_enabled(); @@ -2379,18 +2051,16 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->notifications_enabled = h->get_notifications_enabled(); host_status->obsess_over = h->obsess_over(); if (!h->get_plugin_output().empty()) { - host_status->output = - misc::string::check_string_utf8(h->get_plugin_output()); + host_status->output = common::check_string_utf8(h->get_plugin_output()); host_status->output.append("\n"); } if (!h->get_long_plugin_output().empty()) host_status->output.append( - misc::string::check_string_utf8(h->get_long_plugin_output())); + common::check_string_utf8(h->get_long_plugin_output())); host_status->passive_checks_enabled = h->passive_checks_enabled(); host_status->percent_state_change = h->get_percent_state_change(); if (!h->get_perf_data().empty()) - host_status->perf_data = - 
misc::string::check_string_utf8(h->get_perf_data()); + host_status->perf_data = common::check_string_utf8(h->get_perf_data()); host_status->retry_interval = h->retry_interval(); host_status->should_be_scheduled = h->get_should_be_scheduled(); host_status->state_type = @@ -2453,7 +2123,7 @@ int neb::callback_host_status(int callback_type, void* data) { */ int neb::callback_pb_host_status(int callback_type, void* data) noexcept { // Log message. - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating pb host status check result event protobuf"); (void)callback_type; @@ -2496,14 +2166,13 @@ int neb::callback_pb_host_status(int callback_type, void* data) noexcept { hscr.set_next_host_notification(eh->get_next_notification()); hscr.set_no_more_notifications(eh->get_no_more_notifications()); if (!eh->get_plugin_output().empty()) - hscr.set_output(misc::string::check_string_utf8(eh->get_plugin_output())); + hscr.set_output(common::check_string_utf8(eh->get_plugin_output())); if (!eh->get_long_plugin_output().empty()) - hscr.set_output( - misc::string::check_string_utf8(eh->get_long_plugin_output())); + hscr.set_output(common::check_string_utf8(eh->get_long_plugin_output())); hscr.set_percent_state_change(eh->get_percent_state_change()); if (!eh->get_perf_data().empty()) - hscr.set_perfdata(misc::string::check_string_utf8(eh->get_perf_data())); + hscr.set_perfdata(common::check_string_utf8(eh->get_perf_data())); hscr.set_should_be_scheduled(eh->get_should_be_scheduled()); hscr.set_state_type(static_cast( eh->has_been_checked() ? eh->get_state_type() : engine::notifier::hard)); @@ -2555,7 +2224,7 @@ int neb::callback_pb_host_status(int callback_type, void* data) noexcept { */ int neb::callback_log(int callback_type, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating log event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating log event"); (void)callback_type; try { @@ -2568,7 +2237,7 @@ int neb::callback_log(int callback_type, void* data) { le->c_time = log_data->entry_time; le->poller_name = config::applier::state::instance().poller_name(); if (log_data->data) { - le->output = misc::string::check_string_utf8(log_data->data); + le->output = common::check_string_utf8(log_data->data); set_log_data(*le, le->output.c_str()); } @@ -2594,7 +2263,7 @@ int neb::callback_log(int callback_type, void* data) { */ int neb::callback_pb_log(int callback_type [[maybe_unused]], void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating pb log event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating pb log event"); try { // In/Out variables. @@ -2607,7 +2276,7 @@ int neb::callback_pb_log(int callback_type [[maybe_unused]], void* data) { le_obj.set_ctime(log_data->entry_time); le_obj.set_instance_name(config::applier::state::instance().poller_name()); if (log_data->data) { - std::string output = misc::string::check_string_utf8(log_data->data); + std::string output = common::check_string_utf8(log_data->data); le_obj.set_output(output); set_pb_log_data(*le, output); } @@ -2642,7 +2311,8 @@ int neb::callback_process(int, void* data) { // Check process event type. process_data = static_cast(data); if (NEBTYPE_PROCESS_EVENTLOOPSTART == process_data->type) { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating process start event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating process start event"); // Register callbacks. 
SPDLOG_LOGGER_DEBUG( @@ -2680,7 +2350,7 @@ int neb::callback_process(int, void* data) { gl_publisher.write(instance); send_initial_configuration(); } else if (NEBTYPE_PROCESS_EVENTLOOPEND == process_data->type) { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating process end event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating process end event"); // Output variable. auto instance{std::make_shared()}; @@ -2731,7 +2401,8 @@ int neb::callback_pb_process(int callback_type, void* data) { // Check process event type. process_data = static_cast(data); if (NEBTYPE_PROCESS_EVENTLOOPSTART == process_data->type) { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating process start event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating process start event"); // Register callbacks. SPDLOG_LOGGER_DEBUG( @@ -2766,7 +2437,7 @@ int neb::callback_pb_process(int callback_type, void* data) { gl_publisher.write(inst_obj); send_initial_pb_configuration(); } else if (NEBTYPE_PROCESS_EVENTLOOPEND == process_data->type) { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating process end event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating process end event"); // Fill output var. inst.set_instance_id(config::applier::state::instance().poller_id()); inst.set_running(false); @@ -2797,7 +2468,8 @@ int neb::callback_pb_process(int callback_type, void* data) { */ int neb::callback_program_status(int callback_type, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating instance status event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating instance status event"); (void)callback_type; try { @@ -2817,10 +2489,10 @@ int neb::callback_program_status(int callback_type, void* data) { is->event_handler_enabled = program_status_data->event_handlers_enabled; is->flap_detection_enabled = program_status_data->flap_detection_enabled; if (!program_status_data->global_host_event_handler.empty()) - is->global_host_event_handler = misc::string::check_string_utf8( + is->global_host_event_handler = common::check_string_utf8( program_status_data->global_host_event_handler); if (!program_status_data->global_service_event_handler.empty()) - is->global_service_event_handler = misc::string::check_string_utf8( + is->global_service_event_handler = common::check_string_utf8( program_status_data->global_service_event_handler); is->last_alive = time(nullptr); is->last_command_check = program_status_data->last_command_check; @@ -2856,8 +2528,8 @@ int neb::callback_program_status(int callback_type, void* data) { */ int neb::callback_pb_program_status(int, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating pb instance status event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating pb instance status event"); // In/Out variables. 
std::shared_ptr is_obj{ @@ -2868,10 +2540,10 @@ int neb::callback_pb_program_status(int, void* data) { const nebstruct_program_status_data& program_status_data = *static_cast(data); - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating pb instance status event " - "global_service_event_handler={}", - program_status_data.global_host_event_handler); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating pb instance status event " + "global_service_event_handler={}", + program_status_data.global_host_event_handler); is.set_instance_id(config::applier::state::instance().poller_id()); is.set_active_host_checks(program_status_data.active_host_checks_enabled); @@ -2882,10 +2554,10 @@ int neb::callback_pb_program_status(int, void* data) { is.set_event_handlers(program_status_data.event_handlers_enabled); is.set_flap_detection(program_status_data.flap_detection_enabled); if (!program_status_data.global_host_event_handler.empty()) - is.set_global_host_event_handler(misc::string::check_string_utf8( + is.set_global_host_event_handler(common::check_string_utf8( program_status_data.global_host_event_handler)); if (!program_status_data.global_service_event_handler.empty()) - is.set_global_service_event_handler(misc::string::check_string_utf8( + is.set_global_service_event_handler(common::check_string_utf8( program_status_data.global_service_event_handler)); is.set_last_alive(time(nullptr)); is.set_last_command_check(program_status_data.last_command_check); @@ -2916,7 +2588,7 @@ int neb::callback_pb_program_status(int, void* data) { */ int neb::callback_relation(int callback_type, void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating relation event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating relation event"); (void)callback_type; try { @@ -2944,7 +2616,7 @@ int neb::callback_relation(int callback_type, void* data) { new_host_parent->parent_id = parent_id; // Send event. 
- SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: host {} is parent of host {}", new_host_parent->parent_id, new_host_parent->host_id); neb::gl_publisher.write(new_host_parent); @@ -2973,7 +2645,7 @@ int neb::callback_relation(int callback_type, void* data) { */ int neb::callback_pb_relation(int callback_type [[maybe_unused]], void* data) { // Log message. - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating pb relation event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating pb relation event"); try { // Input variable. @@ -2999,9 +2671,9 @@ int neb::callback_pb_relation(int callback_type [[maybe_unused]], void* data) { new_host_parent->mut_obj().set_parent_id(parent_id); // Send event. - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: pb host {} is parent of host {}", - parent_id, host_id); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: pb host {} is parent of host {}", + parent_id, host_id); neb::gl_publisher.write(new_host_parent); } } @@ -3029,7 +2701,7 @@ int neb::callback_pb_relation(int callback_type [[maybe_unused]], void* data) { */ int neb::callback_service(int callback_type, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating service event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating service event"); (void)callback_type; try { @@ -3046,12 +2718,10 @@ int neb::callback_service(int callback_type, void* data) { my_service->acknowledged = s->problem_has_been_acknowledged(); my_service->acknowledgement_type = s->get_acknowledgement(); if (!s->get_action_url().empty()) - my_service->action_url = - misc::string::check_string_utf8(s->get_action_url()); + my_service->action_url = common::check_string_utf8(s->get_action_url()); my_service->active_checks_enabled = s->active_checks_enabled(); if (!s->check_command().empty()) - my_service->check_command = - misc::string::check_string_utf8(s->check_command()); + my_service->check_command = common::check_string_utf8(s->check_command()); my_service->check_freshness = s->check_freshness_enabled(); my_service->check_interval = s->check_interval(); if (!s->check_period().empty()) @@ -3068,11 +2738,10 @@ int neb::callback_service(int callback_type, void* data) { my_service->downtime_depth = s->get_scheduled_downtime_depth(); if (!s->get_display_name().empty()) my_service->display_name = - misc::string::check_string_utf8(s->get_display_name()); + common::check_string_utf8(s->get_display_name()); my_service->enabled = (service_data->type != NEBTYPE_SERVICE_DELETE); if (!s->event_handler().empty()) - my_service->event_handler = - misc::string::check_string_utf8(s->event_handler()); + my_service->event_handler = common::check_string_utf8(s->event_handler()); my_service->event_handler_enabled = s->event_handler_enabled(); my_service->execution_time = s->get_execution_time(); my_service->first_notification_delay = s->get_first_notification_delay(); @@ -3090,14 +2759,12 @@ int neb::callback_service(int callback_type, void* data) { my_service->has_been_checked = s->has_been_checked(); my_service->high_flap_threshold = s->get_high_flap_threshold(); if (!s->get_hostname().empty()) - 
my_service->host_name = - misc::string::check_string_utf8(s->get_hostname()); + my_service->host_name = common::check_string_utf8(s->get_hostname()); if (!s->get_icon_image().empty()) - my_service->icon_image = - misc::string::check_string_utf8(s->get_icon_image()); + my_service->icon_image = common::check_string_utf8(s->get_icon_image()); if (!s->get_icon_image_alt().empty()) my_service->icon_image_alt = - misc::string::check_string_utf8(s->get_icon_image_alt()); + common::check_string_utf8(s->get_icon_image_alt()); my_service->is_flapping = s->get_is_flapping(); my_service->is_volatile = s->get_is_volatile(); my_service->last_check = s->get_last_check(); @@ -3117,10 +2784,9 @@ int neb::callback_service(int callback_type, void* data) { my_service->next_notification = s->get_next_notification(); my_service->no_more_notifications = s->get_no_more_notifications(); if (!s->get_notes().empty()) - my_service->notes = misc::string::check_string_utf8(s->get_notes()); + my_service->notes = common::check_string_utf8(s->get_notes()); if (!s->get_notes_url().empty()) - my_service->notes_url = - misc::string::check_string_utf8(s->get_notes_url()); + my_service->notes_url = common::check_string_utf8(s->get_notes_url()); my_service->notifications_enabled = s->get_notifications_enabled(); my_service->notification_interval = s->get_notification_interval(); if (!s->notification_period().empty()) @@ -3136,25 +2802,23 @@ int neb::callback_service(int callback_type, void* data) { my_service->notify_on_warning = s->get_notify_on(engine::notifier::warning); my_service->obsess_over = s->obsess_over(); if (!s->get_plugin_output().empty()) { - my_service->output = - misc::string::check_string_utf8(s->get_plugin_output()); + my_service->output = common::check_string_utf8(s->get_plugin_output()); my_service->output.append("\n"); } if (!s->get_long_plugin_output().empty()) my_service->output.append( - misc::string::check_string_utf8(s->get_long_plugin_output())); + 
common::check_string_utf8(s->get_long_plugin_output())); my_service->passive_checks_enabled = s->passive_checks_enabled(); my_service->percent_state_change = s->get_percent_state_change(); if (!s->get_perf_data().empty()) - my_service->perf_data = - misc::string::check_string_utf8(s->get_perf_data()); + my_service->perf_data = common::check_string_utf8(s->get_perf_data()); my_service->retain_nonstatus_information = s->get_retain_nonstatus_information(); my_service->retain_status_information = s->get_retain_status_information(); my_service->retry_interval = s->retry_interval(); if (!s->description().empty()) my_service->service_description = - misc::string::check_string_utf8(s->description()); + common::check_string_utf8(s->description()); my_service->should_be_scheduled = s->get_should_be_scheduled(); my_service->stalk_on_critical = s->get_stalk_on(engine::notifier::critical); my_service->stalk_on_ok = s->get_stalk_on(engine::notifier::ok); @@ -3170,10 +2834,10 @@ int neb::callback_service(int callback_type, void* data) { my_service->service_id = p.second; if (my_service->host_id && my_service->service_id) { // Send service event. - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new service {} ('{}') on host {}", - my_service->service_id, - my_service->service_description, my_service->host_id); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new service {} ('{}') on host {}", + my_service->service_id, + my_service->service_description, my_service->host_id); neb::gl_publisher.write(my_service); /* No need to send this service custom variables changes, custom @@ -3208,8 +2872,8 @@ int neb::callback_service(int callback_type, void* data) { * @return 0 on success. 
*/ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating pb service event protobuf"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating pb service event protobuf"); nebstruct_adaptive_service_data* ds = static_cast(data); @@ -3235,11 +2899,9 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { else if (ds->modified_attribute & MODATTR_OBSESSIVE_HANDLER_ENABLED) srv.set_obsess_over_service(es->obsess_over()); else if (ds->modified_attribute & MODATTR_EVENT_HANDLER_COMMAND) - srv.set_event_handler( - misc::string::check_string_utf8(es->event_handler())); + srv.set_event_handler(common::check_string_utf8(es->event_handler())); else if (ds->modified_attribute & MODATTR_CHECK_COMMAND) - srv.set_check_command( - misc::string::check_string_utf8(es->check_command())); + srv.set_check_command(common::check_string_utf8(es->check_command())); else if (ds->modified_attribute & MODATTR_NORMAL_CHECK_INTERVAL) srv.set_check_interval(es->check_interval()); else if (ds->modified_attribute & MODATTR_RETRY_CHECK_INTERVAL) @@ -3263,9 +2925,9 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_host_id(p.first); srv.set_service_id(p.second); // Send service event. 
- SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new service {} ('{}') on host {}", - srv.service_id(), es->description(), srv.host_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new service {} ('{}') on host {}", + srv.service_id(), es->description(), srv.host_id()); neb::gl_publisher.write(s); /* No need to send this service custom variables changes, custom @@ -3285,11 +2947,10 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_acknowledged(es->problem_has_been_acknowledged()); srv.set_acknowledgement_type(es->get_acknowledgement()); if (!es->get_action_url().empty()) - srv.set_action_url(misc::string::check_string_utf8(es->get_action_url())); + srv.set_action_url(common::check_string_utf8(es->get_action_url())); srv.set_active_checks(es->active_checks_enabled()); if (!es->check_command().empty()) - srv.set_check_command( - misc::string::check_string_utf8(es->check_command())); + srv.set_check_command(common::check_string_utf8(es->check_command())); srv.set_check_freshness(es->check_freshness_enabled()); srv.set_check_interval(es->check_interval()); if (!es->check_period().empty()) @@ -3306,13 +2967,11 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_default_passive_checks(es->passive_checks_enabled()); srv.set_scheduled_downtime_depth(es->get_scheduled_downtime_depth()); if (!es->get_display_name().empty()) - srv.set_display_name( - misc::string::check_string_utf8(es->get_display_name())); + srv.set_display_name(common::check_string_utf8(es->get_display_name())); srv.set_enabled(static_cast(data)->type != NEBTYPE_SERVICE_DELETE); if (!es->event_handler().empty()) - srv.set_event_handler( - misc::string::check_string_utf8(es->event_handler())); + srv.set_event_handler(common::check_string_utf8(es->event_handler())); srv.set_event_handler_enabled(es->event_handler_enabled()); srv.set_execution_time(es->get_execution_time()); 
srv.set_first_notification_delay(es->get_first_notification_delay()); @@ -3330,10 +2989,10 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_checked(es->has_been_checked()); srv.set_high_flap_threshold(es->get_high_flap_threshold()); if (!es->description().empty()) - srv.set_description(misc::string::check_string_utf8(es->description())); + srv.set_description(common::check_string_utf8(es->description())); if (!es->get_hostname().empty()) { - std::string name{misc::string::check_string_utf8(es->get_hostname())}; + std::string name{common::check_string_utf8(es->get_hostname())}; switch (es->get_service_type()) { case com::centreon::engine::service_type::METASERVICE: { srv.set_type(METASERVICE); @@ -3385,10 +3044,10 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { } if (!es->get_icon_image().empty()) *srv.mutable_icon_image() = - misc::string::check_string_utf8(es->get_icon_image()); + common::check_string_utf8(es->get_icon_image()); if (!es->get_icon_image_alt().empty()) *srv.mutable_icon_image_alt() = - misc::string::check_string_utf8(es->get_icon_image_alt()); + common::check_string_utf8(es->get_icon_image_alt()); srv.set_flapping(es->get_is_flapping()); srv.set_is_volatile(es->get_is_volatile()); srv.set_last_check(es->get_last_check()); @@ -3409,10 +3068,9 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_next_notification(es->get_next_notification()); srv.set_no_more_notifications(es->get_no_more_notifications()); if (!es->get_notes().empty()) - srv.set_notes(misc::string::check_string_utf8(es->get_notes())); + srv.set_notes(common::check_string_utf8(es->get_notes())); if (!es->get_notes_url().empty()) - *srv.mutable_notes_url() = - misc::string::check_string_utf8(es->get_notes_url()); + *srv.mutable_notes_url() = common::check_string_utf8(es->get_notes_url()); srv.set_notify(es->get_notifications_enabled()); 
srv.set_notification_interval(es->get_notification_interval()); if (!es->notification_period().empty()) @@ -3427,15 +3085,14 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_obsess_over_service(es->obsess_over()); if (!es->get_plugin_output().empty()) *srv.mutable_output() = - misc::string::check_string_utf8(es->get_plugin_output()); + common::check_string_utf8(es->get_plugin_output()); if (!es->get_long_plugin_output().empty()) *srv.mutable_long_output() = - misc::string::check_string_utf8(es->get_long_plugin_output()); + common::check_string_utf8(es->get_long_plugin_output()); srv.set_passive_checks(es->passive_checks_enabled()); srv.set_percent_state_change(es->get_percent_state_change()); if (!es->get_perf_data().empty()) - *srv.mutable_perfdata() = - misc::string::check_string_utf8(es->get_perf_data()); + *srv.mutable_perfdata() = common::check_string_utf8(es->get_perf_data()); srv.set_retain_nonstatus_information( es->get_retain_nonstatus_information()); srv.set_retain_status_information(es->get_retain_status_information()); @@ -3468,9 +3125,9 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.host_id(), srv.service_id(), srv.severity_id()); if (srv.host_id() && srv.service_id()) { // Send service event. - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: new service {} ('{}') on host {}", - srv.service_id(), srv.description(), srv.host_id()); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: new service {} ('{}') on host {}", + srv.service_id(), srv.description(), srv.host_id()); neb::gl_publisher.write(s); /* No need to send this service custom variables changes, custom @@ -3510,7 +3167,7 @@ int neb::callback_service_check(int callback_type, void* data) { return 0; // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating service check event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating service check event"); (void)callback_type; try { @@ -3522,7 +3179,7 @@ int neb::callback_service_check(int callback_type, void* data) { service_check->active_checks_enabled = s->active_checks_enabled(); service_check->check_type = scdata->check_type; service_check->command_line = - misc::string::check_string_utf8(scdata->command_line); + common::check_string_utf8(scdata->command_line); if (!scdata->host_id) throw msg_fmt("host without id"); if (!scdata->service_id) @@ -3577,7 +3234,8 @@ int neb::callback_pb_service_check(int, void* data) { scdata->host_id, scdata->service_id, scdata->command_line); } else { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating service check event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating service check event"); } // In/Out variables. @@ -3594,7 +3252,7 @@ int neb::callback_pb_service_check(int, void* data) { ? 
com::centreon::broker::CheckActive : com::centreon::broker::CheckPassive); service_check->mut_obj().set_command_line( - misc::string::check_string_utf8(scdata->command_line)); + common::check_string_utf8(scdata->command_line)); service_check->mut_obj().set_host_id(scdata->host_id); service_check->mut_obj().set_service_id(scdata->service_id); service_check->mut_obj().set_next_check(s->get_next_check()); @@ -3615,8 +3273,8 @@ int neb::callback_pb_service_check(int, void* data) { */ int32_t neb::callback_severity(int callback_type [[maybe_unused]], void* data) noexcept { - SPDLOG_LOGGER_INFO(neb_logger, - "callbacks: generating protobuf severity event"); + SPDLOG_LOGGER_DEBUG(neb_logger, + "callbacks: generating protobuf severity event"); nebstruct_adaptive_severity_data* ds = static_cast(data); @@ -3626,15 +3284,15 @@ int32_t neb::callback_severity(int callback_type [[maybe_unused]], Severity& sv = s.get()->mut_obj(); switch (ds->type) { case NEBTYPE_SEVERITY_ADD: - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: new severity"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: new severity"); sv.set_action(Severity_Action_ADD); break; case NEBTYPE_SEVERITY_DELETE: - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: removed severity"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: removed severity"); sv.set_action(Severity_Action_DELETE); break; case NEBTYPE_SEVERITY_UPDATE: - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: modified severity"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: modified severity"); sv.set_action(Severity_Action_MODIFY); break; default: @@ -3666,7 +3324,7 @@ int32_t neb::callback_severity(int callback_type [[maybe_unused]], */ int32_t neb::callback_tag(int callback_type [[maybe_unused]], void* data) noexcept { - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating protobuf tag event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating protobuf tag event"); nebstruct_adaptive_tag_data* ds = static_cast(data); @@ -3676,15 +3334,15 @@ int32_t 
neb::callback_tag(int callback_type [[maybe_unused]], Tag& tg = t.get()->mut_obj(); switch (ds->type) { case NEBTYPE_TAG_ADD: - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: new tag"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: new tag"); tg.set_action(Tag_Action_ADD); break; case NEBTYPE_TAG_DELETE: - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: removed tag"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: removed tag"); tg.set_action(Tag_Action_DELETE); break; case NEBTYPE_TAG_UPDATE: - SPDLOG_LOGGER_INFO(neb_logger, "callbacks: modified tag"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: modified tag"); tg.set_action(Tag_Action_MODIFY); break; default: @@ -3725,15 +3383,15 @@ int32_t neb::callback_tag(int callback_type [[maybe_unused]], int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], void* data) noexcept { - SPDLOG_LOGGER_INFO( + SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating pb service status check result event"); const engine::service* es{static_cast( static_cast(data)->object_ptr)}; - neb_logger->info("callbacks: pb_service_status ({},{}) status {}, type {}", - es->host_id(), es->service_id(), - static_cast(es->get_current_state()), - static_cast(es->get_check_type())); + neb_logger->debug("callbacks: pb_service_status ({},{}) status {}, type {}", + es->host_id(), es->service_id(), + static_cast(es->get_current_state()), + static_cast(es->get_check_type())); auto s{std::make_shared()}; ServiceStatus& sscr = s.get()->mut_obj(); @@ -3773,13 +3431,13 @@ int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], sscr.set_next_notification(es->get_next_notification()); sscr.set_no_more_notifications(es->get_no_more_notifications()); if (!es->get_plugin_output().empty()) - sscr.set_output(misc::string::check_string_utf8(es->get_plugin_output())); + sscr.set_output(common::check_string_utf8(es->get_plugin_output())); if (!es->get_long_plugin_output().empty()) sscr.set_long_output( - 
misc::string::check_string_utf8(es->get_long_plugin_output())); + common::check_string_utf8(es->get_long_plugin_output())); sscr.set_percent_state_change(es->get_percent_state_change()); if (!es->get_perf_data().empty()) { - sscr.set_perfdata(misc::string::check_string_utf8(es->get_perf_data())); + sscr.set_perfdata(common::check_string_utf8(es->get_perf_data())); SPDLOG_LOGGER_TRACE(neb_logger, "callbacks: service ({}, {}) has perfdata <<{}>>", es->host_id(), es->service_id(), es->get_perf_data()); @@ -3841,8 +3499,8 @@ int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], std::make_pair(sscr.host_id(), sscr.service_id())); if (it != gl_acknowledgements.end() && sscr.acknowledgement_type() == AckType::NONE) { - neb_logger->info("acknowledgement found on service ({}:{})", sscr.host_id(), - sscr.service_id()); + neb_logger->debug("acknowledgement found on service ({}:{})", + sscr.host_id(), sscr.service_id()); if (it->second->type() == make_type(io::neb, de_pb_acknowledgement)) { neb::pb_acknowledgement* a = static_cast(it->second.get()); @@ -3883,7 +3541,7 @@ int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], */ int neb::callback_service_status(int callback_type, void* data) { // Log message. 
- SPDLOG_LOGGER_INFO(neb_logger, "callbacks: generating service status event"); + SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating service status event"); (void)callback_type; try { @@ -3898,7 +3556,7 @@ int neb::callback_service_status(int callback_type, void* data) { service_status->active_checks_enabled = s->active_checks_enabled(); if (!s->check_command().empty()) service_status->check_command = - misc::string::check_string_utf8(s->check_command()); + common::check_string_utf8(s->check_command()); service_status->check_interval = s->check_interval(); if (!s->check_period().empty()) service_status->check_period = s->check_period(); @@ -3909,7 +3567,7 @@ int neb::callback_service_status(int callback_type, void* data) { service_status->downtime_depth = s->get_scheduled_downtime_depth(); if (!s->event_handler().empty()) service_status->event_handler = - misc::string::check_string_utf8(s->event_handler()); + common::check_string_utf8(s->event_handler()); service_status->event_handler_enabled = s->event_handler_enabled(); service_status->execution_time = s->get_execution_time(); service_status->flap_detection_enabled = s->flap_detection_enabled(); @@ -3935,27 +3593,25 @@ int neb::callback_service_status(int callback_type, void* data) { service_status->obsess_over = s->obsess_over(); if (!s->get_plugin_output().empty()) { service_status->output = - misc::string::check_string_utf8(s->get_plugin_output()); + common::check_string_utf8(s->get_plugin_output()); service_status->output.append("\n"); } if (!s->get_long_plugin_output().empty()) service_status->output.append( - misc::string::check_string_utf8(s->get_long_plugin_output())); + common::check_string_utf8(s->get_long_plugin_output())); service_status->passive_checks_enabled = s->passive_checks_enabled(); service_status->percent_state_change = s->get_percent_state_change(); if (!s->get_perf_data().empty()) - service_status->perf_data = - misc::string::check_string_utf8(s->get_perf_data()); + 
service_status->perf_data = common::check_string_utf8(s->get_perf_data()); service_status->retry_interval = s->retry_interval(); if (s->get_hostname().empty()) throw msg_fmt("unnamed host"); if (s->description().empty()) throw msg_fmt("unnamed service"); - service_status->host_name = - misc::string::check_string_utf8(s->get_hostname()); + service_status->host_name = common::check_string_utf8(s->get_hostname()); service_status->service_description = - misc::string::check_string_utf8(s->description()); + common::check_string_utf8(s->description()); { std::pair p{ engine::get_host_and_service_id(s->get_hostname(), s->description())}; diff --git a/broker/neb/src/dependency.cc b/broker/neb/src/dependency.cc deleted file mode 100644 index 9af51cdfc1c..00000000000 --- a/broker/neb/src/dependency.cc +++ /dev/null @@ -1,88 +0,0 @@ -/** -* Copyright 2009-2012,2015 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ - -#include "com/centreon/broker/neb/dependency.hh" - -using namespace com::centreon::broker::neb; - -/************************************** - * * - * Public Methods * - * * - **************************************/ - -/** - * Default constructor. - */ -dependency::dependency(uint32_t type) - : io::data(type), - dependent_host_id(0), - enabled(true), - host_id(0), - inherits_parent(false) {} - -/** - * Copy constructor. - * - * @param[in] dep Object to copy. 
- */ -dependency::dependency(dependency const& dep) : io::data(dep) { - _internal_copy(dep); -} - -/** - * Destructor. - */ -dependency::~dependency() {} - -/** - * Assignment operator. - * - * @param[in] dep Object to copy from. - * - * @return This object. - */ -dependency& dependency::operator=(dependency const& dep) { - io::data::operator=(dep); - _internal_copy(dep); - return (*this); -} - -/************************************** - * * - * Private Methods * - * * - **************************************/ - -/** - * @brief Copy internal data members. - * - * This method is used by the copy constructor and the assignment operator. - * - * @param[in] dep Object to copy. - */ -void dependency::_internal_copy(dependency const& dep) { - dependency_period = dep.dependency_period; - dependent_host_id = dep.dependent_host_id; - enabled = dep.enabled; - execution_failure_options = dep.execution_failure_options; - host_id = dep.host_id; - inherits_parent = dep.inherits_parent; - notification_failure_options = dep.notification_failure_options; - return; -} diff --git a/broker/neb/src/host_dependency.cc b/broker/neb/src/host_dependency.cc deleted file mode 100644 index 352c4e345ca..00000000000 --- a/broker/neb/src/host_dependency.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2009-2013,2015 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * For more information : contact@centreon.com - */ - -#include "com/centreon/broker/neb/host_dependency.hh" - -#include "com/centreon/broker/sql/table_max_size.hh" - -using namespace com::centreon::broker; -using namespace com::centreon::broker::neb; - -/************************************** - * * - * Public Methods * - * * - **************************************/ - -/** - * Default constructor. - */ -host_dependency::host_dependency() - : dependency(host_dependency::static_type()) {} - -/** - * Copy constructor. - * - * @param[in] other Object to copy. - */ -host_dependency::host_dependency(host_dependency const& other) - : dependency(other) {} - -/** - * Destructor. - */ -host_dependency::~host_dependency() {} - -/** - * Assignment operator. - * - * @param[in] other Object to copy. - * - * @return This object. - */ -host_dependency& host_dependency::operator=(host_dependency const& other) { - dependency::operator=(other); - return *this; -} - -/************************************** - * * - * Static Objects * - * * - **************************************/ - -// Mapping. 
-mapping::entry const host_dependency::entries[] = { - mapping::entry( - &host_dependency::dependency_period, - "dependency_period", - get_centreon_storage_hosts_hosts_dependencies_col_size( - centreon_storage_hosts_hosts_dependencies_dependency_period)), - mapping::entry(&host_dependency::dependent_host_id, - "dependent_host_id", - mapping::entry::invalid_on_zero), - mapping::entry(&host_dependency::enabled, ""), - mapping::entry( - &host_dependency::execution_failure_options, - "execution_failure_options", - get_centreon_storage_hosts_hosts_dependencies_col_size( - centreon_storage_hosts_hosts_dependencies_execution_failure_options)), - mapping::entry(&host_dependency::inherits_parent, "inherits_parent"), - mapping::entry(&host_dependency::host_id, - "host_id", - mapping::entry::invalid_on_zero), - mapping::entry( - &host_dependency::notification_failure_options, - "notification_failure_options", - get_centreon_storage_hosts_hosts_dependencies_col_size( - centreon_storage_hosts_hosts_dependencies_notification_failure_options)), - mapping::entry()}; - -// Operations. -static io::data* new_host_dep() { - return (new host_dependency); -} -io::event_info::event_operations const host_dependency::operations = { - &new_host_dep, nullptr, nullptr}; diff --git a/broker/neb/src/initial.cc b/broker/neb/src/initial.cc index a8fad65920e..4fc23eb0626 100644 --- a/broker/neb/src/initial.cc +++ b/broker/neb/src/initial.cc @@ -27,7 +27,6 @@ #include "com/centreon/engine/downtimes/service_downtime.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/host.hh" -#include "com/centreon/engine/hostdependency.hh" #include "com/centreon/engine/nebcallbacks.hh" #include "com/centreon/engine/nebstructs.hh" #include "com/centreon/engine/objects.hh" @@ -72,7 +71,6 @@ static void send_custom_variables_list( if (cit->second.is_sent()) { // Fill callback struct. 
nebstruct_custom_variable_data nscvd; - memset(&nscvd, 0, sizeof(nscvd)); nscvd.type = NEBTYPE_HOSTCUSTOMVARIABLE_ADD; nscvd.timestamp.tv_sec = time(nullptr); nscvd.var_name = const_cast(name.c_str()); @@ -98,13 +96,13 @@ static void send_custom_variables_list( std::string name{cit->first}; if (cit->second.is_sent()) { // Fill callback struct. - nebstruct_custom_variable_data nscvd; - memset(&nscvd, 0, sizeof(nscvd)); - nscvd.type = NEBTYPE_SERVICECUSTOMVARIABLE_ADD; - nscvd.timestamp.tv_sec = time(nullptr); - nscvd.var_name = const_cast(name.c_str()); - nscvd.var_value = const_cast(cit->second.value().c_str()); - nscvd.object_ptr = it->second.get(); + nebstruct_custom_variable_data nscvd{ + .type = NEBTYPE_SERVICECUSTOMVARIABLE_ADD, + .timestamp = {time(nullptr), 0}, + .var_name = std::string_view(name), + .var_value = std::string_view(cit->second.value()), + .object_ptr = it->second.get(), + }; // Callback. sender(NEBCALLBACK_CUSTOM_VARIABLE_DATA, &nscvd); @@ -173,48 +171,6 @@ static void send_pb_downtimes_list() { send_downtimes_list(neb::callback_pb_downtime); } -/** - * Send to the global publisher the list of host dependencies within Nagios. - */ -static void send_host_dependencies_list( - neb_sender callbackfct = neb::callback_dependency) { - // Start log message. - neb_logger->info("init: beginning host dependencies dump"); - - try { - // Loop through all dependencies. - for (hostdependency_mmap::const_iterator - it{com::centreon::engine::hostdependency::hostdependencies - .begin()}, - end{com::centreon::engine::hostdependency::hostdependencies.end()}; - it != end; ++it) { - // Fill callback struct. - nebstruct_adaptive_dependency_data nsadd; - memset(&nsadd, 0, sizeof(nsadd)); - nsadd.type = NEBTYPE_HOSTDEPENDENCY_ADD; - nsadd.object_ptr = it->second.get(); - - // Callback. 
- callbackfct(NEBCALLBACK_ADAPTIVE_DEPENDENCY_DATA, &nsadd); - } - } catch (std::exception const& e) { - neb_logger->info("init: error occurred while dumping host dependencies: {}", - e.what()); - } catch (...) { - neb_logger->error( - "init: unknown error occurred while dumping host dependencies"); - } - - // End log message. - neb_logger->info("init: end of host dependencies dump"); - - return; -} - -static void send_pb_host_dependencies_list() { - send_host_dependencies_list(neb::callback_pb_dependency); -} - /** * Send to the global publisher the list of host groups within Engine. */ @@ -369,49 +325,6 @@ static void send_pb_host_parents_list() { send_host_parents_list(neb::callback_pb_relation); } -/** - * Send to the global publisher the list of service dependencies within - * Nagios. - */ -static void send_service_dependencies_list( - neb_sender sender_fct = neb::callback_dependency) { - // Start log message. - neb_logger->info("init: beginning service dependencies dump"); - - try { - // Loop through all dependencies. - for (servicedependency_mmap::const_iterator - it{com::centreon::engine::servicedependency::servicedependencies - .begin()}, - end{com::centreon::engine::servicedependency::servicedependencies - .end()}; - it != end; ++it) { - // Fill callback struct. - nebstruct_adaptive_dependency_data nsadd; - memset(&nsadd, 0, sizeof(nsadd)); - nsadd.type = NEBTYPE_SERVICEDEPENDENCY_ADD; - nsadd.object_ptr = it->second.get(); - - // Callback. - sender_fct(NEBCALLBACK_ADAPTIVE_DEPENDENCY_DATA, &nsadd); - } - } catch (std::exception const& e) { - neb_logger->error( - "init: error occurred while dumping service dependencies: {}", - e.what()); - } catch (...) { - neb_logger->error( - "init: unknown error occurred while dumping service dependencies"); - } - - // End log message. 
- neb_logger->info("init: end of service dependencies dump"); -} - -static void send_pb_service_dependencies_list() { - send_service_dependencies_list(neb::callback_pb_dependency); -} - /** * Send to the global publisher the list of service groups within Engine. */ @@ -541,8 +454,6 @@ void neb::send_initial_configuration() { send_host_parents_list(); send_host_group_list(); send_service_group_list(); - send_host_dependencies_list(); - send_service_dependencies_list(); send_instance_configuration(); } @@ -566,7 +477,5 @@ void neb::send_initial_pb_configuration() { send_pb_host_parents_list(); send_pb_host_group_list(); send_pb_service_group_list(); - send_pb_host_dependencies_list(); - send_pb_service_dependencies_list(); send_pb_instance_configuration(); } diff --git a/broker/neb/src/service_dependency.cc b/broker/neb/src/service_dependency.cc deleted file mode 100644 index 8357aa1df2a..00000000000 --- a/broker/neb/src/service_dependency.cc +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Copyright 2009-2013,2015 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "com/centreon/broker/neb/service_dependency.hh" - -#include "com/centreon/broker/sql/table_max_size.hh" - -using namespace com::centreon::broker; -using namespace com::centreon::broker::neb; - -/** - * Default constructor. 
- */ -service_dependency::service_dependency() - : dependency(service_dependency::static_type()), - dependent_service_id(0), - service_id(0) {} - -/** - * Copy constructor. - * - * @param[in] sd Object to copy. - */ -service_dependency::service_dependency(service_dependency const& sd) - : dependency(sd) { - _internal_copy(sd); -} - -/** - * Destructor. - */ -service_dependency::~service_dependency() {} - -/** - * Assignment operator. - * - * @param[in] sd Object to copy. - * - * @return This object. - */ -service_dependency& service_dependency::operator=( - service_dependency const& sd) { - if (this != &sd) { - dependency::operator=(sd); - _internal_copy(sd); - } - return *this; -} - -/************************************** - * * - * Private Methods * - * * - **************************************/ - -/** - * Copy internal members from the given object. - * - * @param[in] sd Object to copy. - */ -void service_dependency::_internal_copy(service_dependency const& sd) { - dependent_service_id = sd.dependent_service_id; - service_id = sd.service_id; -} - -/************************************** - * * - * Static Objects * - * * - **************************************/ - -// Mapping. 
-mapping::entry const service_dependency::entries[] = { - mapping::entry( - &service_dependency::dependency_period, - "dependency_period", - get_centreon_storage_services_services_dependencies_col_size( - centreon_storage_services_services_dependencies_dependency_period)), - mapping::entry(&service_dependency::dependent_host_id, - "dependent_host_id", - mapping::entry::invalid_on_zero), - mapping::entry(&service_dependency::dependent_service_id, - "dependent_service_id", - mapping::entry::invalid_on_zero), - mapping::entry(&service_dependency::enabled, ""), - mapping::entry( - &service_dependency::execution_failure_options, - "execution_failure_options", - get_centreon_storage_services_services_dependencies_col_size( - centreon_storage_services_services_dependencies_execution_failure_options)), - mapping::entry(&service_dependency::host_id, - "host_id", - mapping::entry::invalid_on_zero), - mapping::entry(&service_dependency::inherits_parent, "inherits_parent"), - mapping::entry( - &service_dependency::notification_failure_options, - "notification_failure_options", - get_centreon_storage_services_services_dependencies_col_size( - centreon_storage_services_services_dependencies_notification_failure_options)), - mapping::entry(&service_dependency::service_id, - "service_id", - mapping::entry::invalid_on_zero), - mapping::entry()}; - -// Operations. -static io::data* new_service_dependency() { - return new service_dependency; -} -io::event_info::event_operations const service_dependency::operations = { - &new_service_dependency, nullptr, nullptr}; diff --git a/broker/neb/test/host_dependency.cc b/broker/neb/test/host_dependency.cc deleted file mode 100644 index 21a3d76e709..00000000000 --- a/broker/neb/test/host_dependency.cc +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright 2011 - 2019 Centreon (https://www.centreon.com/) - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - * - */ - -#include -#include -#include "com/centreon/broker/neb/host.hh" -#include "randomize.hh" - -using namespace com::centreon::broker; - -class HostDependencyTest : public ::testing::Test { - public: - void SetUp() override { - // Initialization. - randomize_init(); - } - - void TearDown() override { - // Cleanup. - randomize_cleanup(); - } -}; - -TEST_F(HostDependencyTest, Assignment) { - // Object #1. - neb::host_dependency hdep1; - std::vector randvals1; - randomize(hdep1, &randvals1); - - // Object #2. - neb::host_dependency hdep2; - randomize(hdep2); - - // Assignment. - hdep2 = hdep1; - - // Reset object #1. - std::vector randvals2; - randomize(hdep1, &randvals2); - - // Compare objects with expected results. - ASSERT_TRUE(hdep1 == randvals2); - ASSERT_TRUE(hdep2 == randvals1); -} - -TEST_F(HostDependencyTest, CopyConstructor) { - // Object #1. - neb::host_dependency hdep1; - std::vector randvals1; - randomize(hdep1, &randvals1); - - // Object #2. - neb::host_dependency hdep2(hdep1); - - // Reset object #1. - std::vector randvals2; - randomize(hdep1, &randvals2); - - // Compare objects with expected results. - ASSERT_TRUE(hdep1 == randvals2); - ASSERT_TRUE(hdep2 == randvals1); -} - -TEST_F(HostDependencyTest, DefaultConstructor) { - // Object. - neb::host_dependency hdep; - - // Check. 
- ASSERT_EQ(hdep.source_id, 0u); - ASSERT_EQ(hdep.destination_id, 0u); - ASSERT_TRUE(hdep.dependency_period == ""); - ASSERT_EQ(hdep.dependent_host_id, 0u); - ASSERT_TRUE(hdep.enabled); - ASSERT_TRUE(hdep.execution_failure_options.empty()); - ASSERT_EQ(hdep.host_id, 0u); - ASSERT_FALSE(hdep.inherits_parent); - ASSERT_TRUE(hdep.notification_failure_options.empty()); - ASSERT_FALSE( - hdep.type() != - (io::events::data_type::value)); -} diff --git a/broker/neb/test/randomize.cc b/broker/neb/test/randomize.cc index a87050945ac..c7787c46b9a 100644 --- a/broker/neb/test/randomize.cc +++ b/broker/neb/test/randomize.cc @@ -108,9 +108,6 @@ void randomize_init() { &neb::downtime::operations, neb::downtime::entries); e.register_event(make_type(io::neb, neb::de_host_check), "host_check", &neb::host_check::operations, neb::host_check::entries); - e.register_event(make_type(io::neb, neb::de_host_dependency), - "host_dependency", &neb::host_dependency::operations, - neb::host_dependency::entries); e.register_event(make_type(io::neb, neb::de_host), "host", &neb::host::operations, neb::host::entries); e.register_event(make_type(io::neb, neb::de_host_parent), "host_parent", @@ -127,9 +124,6 @@ void randomize_init() { e.register_event(make_type(io::neb, neb::de_service_check), "service_check", &neb::service_check::operations, neb::service_check::entries); - e.register_event(make_type(io::neb, neb::de_service_dependency), - "service_dependency", &neb::service_dependency::operations, - neb::service_dependency::entries); e.register_event(make_type(io::neb, neb::de_service), "service", &neb::service::operations, neb::service::entries); e.register_event(make_type(io::neb, neb::de_service_status), "service_status", @@ -143,7 +137,7 @@ void randomize_init() { void randomize_cleanup() { for (std::list::iterator it(generated.begin()), end(generated.end()); it != end; ++it) - delete[] * it; + delete[] *it; generated.clear(); io::events::unload(); io::protocols::unload(); diff --git 
a/broker/neb/test/service_dependency.cc b/broker/neb/test/service_dependency.cc deleted file mode 100644 index 66ad6dc4418..00000000000 --- a/broker/neb/test/service_dependency.cc +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Copyright 2011 - 2019 Centreon (https://www.centreon.com/) - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - * - */ - -#include "com/centreon/broker/neb/service_dependency.hh" -#include -#include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/neb/internal.hh" -#include "randomize.hh" - -using namespace com::centreon::broker; - -class ServiceDependencyTest : public ::testing::Test { - void SetUp() override { randomize_init(); }; - - void TearDown() override { randomize_cleanup(); }; -}; - -/** - * Check service_dependency's assignment operator. - */ -TEST_F(ServiceDependencyTest, Assign) { - // Object #1. - neb::service_dependency sdep1; - std::vector randvals1; - randomize(sdep1, &randvals1); - - // Object #2. - neb::service_dependency sdep2; - randomize(sdep2); - - // Assignment. - sdep2 = sdep1; - - // Reset object #1. - std::vector randvals2; - randomize(sdep1, &randvals2); - - // Compare objects with expected results. - ASSERT_FALSE(sdep1 != randvals2); - ASSERT_FALSE(sdep2 != randvals1); -} - -/** - * Check service_dependency's copy constructor. - */ -TEST_F(ServiceDependencyTest, CopyCtor) { - // Object #1. 
- neb::service_dependency sdep1; - std::vector randvals1; - randomize(sdep1, &randvals1); - - // Object #2. - neb::service_dependency sdep2(sdep1); - - // Reset object #1. - std::vector randvals2; - randomize(sdep1, &randvals2); - - // Compare objects with expected results. - ASSERT_FALSE(sdep1 != randvals2); - ASSERT_FALSE(sdep2 != randvals1); -} - -/** - * Check service_dependency's default constructor. - */ -TEST_F(ServiceDependencyTest, DefaultCtor) { - // Object. - neb::service_dependency sdep; - - // Check. - ASSERT_FALSE(sdep.source_id != 0); - ASSERT_FALSE(sdep.destination_id != 0); - ASSERT_FALSE(sdep.dependency_period != ""); - ASSERT_FALSE(sdep.dependent_host_id != 0); - ASSERT_FALSE(sdep.enabled != true); - ASSERT_FALSE(!sdep.execution_failure_options.empty()); - ASSERT_FALSE(sdep.host_id != 0); - ASSERT_FALSE(sdep.inherits_parent != false); - ASSERT_FALSE(!sdep.notification_failure_options.empty()); - ASSERT_FALSE(sdep.service_id != 0); - auto val(io::events::data_type::value); - ASSERT_FALSE(sdep.type() != val); -} diff --git a/broker/rrd/src/output.cc b/broker/rrd/src/output.cc index 8bd15e38ac2..cbca109d7eb 100644 --- a/broker/rrd/src/output.cc +++ b/broker/rrd/src/output.cc @@ -348,15 +348,21 @@ int output::write(std::shared_ptr const& d) { _backend.open(status_path, s.rrd_len(), s.time() - 1, interval); } std::string value; - if (s.state() == 0) - value = "100"; - else if (s.state() == 1) - value = "75"; - else if (s.state() == 2) - value = "0"; - else - value = ""; - _backend.update(s.time(), value); + switch (s.state()) { + case 0: + value = "100"; + break; + case 1: + value = "75"; + break; + case 2: + value = "0"; + break; + default: + value = "U"; + break; + } + _backend.update(s.time(), value); } else // Cache value. 
it->second.push_back(d); @@ -387,15 +393,21 @@ int output::write(std::shared_ptr const& d) { _backend.open(status_path, e->rrd_len, e->time - 1, interval); } std::string value; - if (e->state == 0) - value = "100"; - else if (e->state == 1) - value = "75"; - else if (e->state == 2) - value = "0"; - else - value = ""; - _backend.update(e->time, value); + switch (e->state) { + case 0: + value = "100"; + break; + case 1: + value = "75"; + break; + case 2: + value = "0"; + break; + default: + value = "U"; + break; + } + _backend.update(e->time, value); } else // Cache value. it->second.push_back(d); diff --git a/broker/simu/src/luabinding.cc b/broker/simu/src/luabinding.cc index 0b29503dedc..6aa5feabf61 100644 --- a/broker/simu/src/luabinding.cc +++ b/broker/simu/src/luabinding.cc @@ -28,7 +28,6 @@ using namespace com::centreon::exceptions; using namespace com::centreon::broker; using namespace com::centreon::broker::simu; -using com::centreon::common::log_v2::log_v2; /** * Constructor. @@ -39,7 +38,7 @@ using com::centreon::common::log_v2::log_v2; luabinding::luabinding(std::string const& lua_script, std::map const& conf_params, const std::shared_ptr& logger) - : _lua_script(lua_script), _total(0), _logger(logger) { + : _logger(logger), _lua_script(lua_script), _total(0) { size_t pos(lua_script.find_last_of('/')); std::string path(lua_script.substr(0, pos)); _L = _load_interpreter(); diff --git a/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh b/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh index b7815b6e5be..19abaa710ad 100644 --- a/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh +++ b/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh @@ -78,11 +78,9 @@ class conflict_manager { enum special_conn { custom_variable, downtime, - host_dependency, host_group, host_parent, log, - service_dependency, service_group, severity, tag, @@ -94,21 +92,19 @@ class conflict_manager { comments = 1 << 1, 
custom_variables = 1 << 2, downtimes = 1 << 3, - host_dependencies = 1 << 4, - host_hostgroups = 1 << 5, - host_parents = 1 << 6, - hostgroups = 1 << 7, - hosts = 1 << 8, - instances = 1 << 9, - modules = 1 << 10, - service_dependencies = 1 << 11, - service_servicegroups = 1 << 12, - servicegroups = 1 << 13, - services = 1 << 14, - index_data = 1 << 15, - metrics = 1 << 16, - severities = 1 << 17, - tags = 1 << 18, + host_hostgroups = 1 << 4, + host_parents = 1 << 5, + hostgroups = 1 << 6, + hosts = 1 << 7, + instances = 1 << 8, + modules = 1 << 9, + service_servicegroups = 1 << 10, + servicegroups = 1 << 11, + services = 1 << 12, + index_data = 1 << 13, + metrics = 1 << 14, + severities = 1 << 15, + tags = 1 << 16, }; struct index_info { @@ -247,7 +243,6 @@ class conflict_manager { database::mysql_stmt _event_handler_insupdate; database::mysql_stmt _flapping_status_insupdate; database::mysql_stmt _host_check_update; - database::mysql_stmt _host_dependency_insupdate; database::mysql_stmt _host_group_insupdate; database::mysql_stmt _host_group_member_delete; database::mysql_stmt _host_group_member_insert; @@ -259,7 +254,6 @@ class conflict_manager { database::mysql_stmt _instance_status_insupdate; database::mysql_stmt _module_insert; database::mysql_stmt _service_check_update; - database::mysql_stmt _service_dependency_insupdate; database::mysql_stmt _service_group_insupdate; database::mysql_stmt _service_group_member_delete; database::mysql_stmt _service_group_member_insert; @@ -304,8 +298,6 @@ class conflict_manager { std::tuple, uint32_t, bool*>& t); void _process_host_check( std::tuple, uint32_t, bool*>& t); - void _process_host_dependency( - std::tuple, uint32_t, bool*>& t); void _process_host_group( std::tuple, uint32_t, bool*>& t); void _process_host_group_member( @@ -322,8 +314,6 @@ class conflict_manager { void _process_log(std::tuple, uint32_t, bool*>& t); void _process_service_check( std::tuple, uint32_t, bool*>& t); - void _process_service_dependency( - 
std::tuple, uint32_t, bool*>& t); void _process_service_group( std::tuple, uint32_t, bool*>& t); void _process_service_group_member( diff --git a/broker/storage/src/conflict_manager.cc b/broker/storage/src/conflict_manager.cc index 2978cbc1cb7..c19669f9db6 100644 --- a/broker/storage/src/conflict_manager.cc +++ b/broker/storage/src/conflict_manager.cc @@ -56,7 +56,7 @@ void (conflict_manager::*const conflict_manager::_neb_processing_table[])( nullptr, nullptr, &conflict_manager::_process_host_check, - &conflict_manager::_process_host_dependency, + nullptr, &conflict_manager::_process_host_group, &conflict_manager::_process_host_group_member, &conflict_manager::_process_host, @@ -67,7 +67,7 @@ void (conflict_manager::*const conflict_manager::_neb_processing_table[])( &conflict_manager::_process_log, nullptr, &conflict_manager::_process_service_check, - &conflict_manager::_process_service_dependency, + nullptr, &conflict_manager::_process_service_group, &conflict_manager::_process_service_group_member, &conflict_manager::_process_service, @@ -955,8 +955,7 @@ void conflict_manager::process_stop(const std::shared_ptr& d) { _finish_action(-1, actions::hosts | actions::acknowledgements | actions::modules | actions::downtimes | actions::comments | actions::servicegroups | - actions::hostgroups | actions::service_dependencies | - actions::host_dependencies); + actions::hostgroups); // Log message. _logger_sql->info("SQL: Disabling poller (id: {}, running: no)", diff --git a/broker/storage/src/conflict_manager_sql.cc b/broker/storage/src/conflict_manager_sql.cc index 3192190688f..0f02ec0a519 100644 --- a/broker/storage/src/conflict_manager_sql.cc +++ b/broker/storage/src/conflict_manager_sql.cc @@ -84,18 +84,6 @@ void conflict_manager::_clean_tables(uint32_t instance_id) { conn); _add_action(conn, actions::servicegroups); - /* Remove host dependencies. 
*/ - _logger_sql->debug( - "conflict_manager: remove host dependencies (instance_id: {})", - instance_id); - query = fmt::format( - "DELETE hhd FROM hosts_hosts_dependencies AS hhd INNER JOIN hosts as " - "h ON hhd.host_id=h.host_id OR hhd.dependent_host_id=h.host_id WHERE " - "h.instance_id={}", - instance_id); - _mysql.run_query(query, database::mysql_error::clean_host_dependencies, conn); - _add_action(conn, actions::host_dependencies); - /* Remove host parents. */ _logger_sql->debug("conflict_manager: remove host parents (instance_id: {})", instance_id); @@ -107,23 +95,6 @@ void conflict_manager::_clean_tables(uint32_t instance_id) { _mysql.run_query(query, database::mysql_error::clean_host_parents, conn); _add_action(conn, actions::host_parents); - /* Remove service dependencies. */ - _logger_sql->debug( - "conflict_manager: remove service dependencies (instance_id: {})", - instance_id); - query = fmt::format( - "DELETE ssd FROM services_services_dependencies AS ssd" - " INNER JOIN services as s" - " ON ssd.service_id=s.service_id OR " - "ssd.dependent_service_id=s.service_id" - " INNER JOIN hosts as h" - " ON s.host_id=h.host_id" - " WHERE h.instance_id={}", - instance_id); - _mysql.run_query(query, database::mysql_error::clean_service_dependencies, - conn); - _add_action(conn, actions::service_dependencies); - /* Remove list of modules. */ _logger_sql->debug("SQL: remove list of modules (instance_id: {})", instance_id); @@ -400,8 +371,7 @@ void conflict_manager::_process_comment( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); _finish_action(-1, actions::hosts | actions::instances | - actions::host_parents | actions::host_dependencies | - actions::service_dependencies | actions::comments); + actions::host_parents | actions::comments); // Cast object. 
neb::comment const& cmmnt{*static_cast(d.get())}; @@ -592,8 +562,7 @@ void conflict_manager::_process_host_check( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); _finish_action(-1, actions::instances | actions::downtimes | - actions::comments | actions::host_dependencies | - actions::host_parents | actions::service_dependencies); + actions::comments | actions::host_parents); // Cast object. neb::host_check const& hc = *static_cast(d.get()); @@ -645,60 +614,6 @@ void conflict_manager::_process_host_check( *std::get<2>(t) = true; } -/** - * Process a host dependency event. - * - * @param[in] e Uncasted host dependency. - * - * @return The number of events that can be acknowledged. - */ -void conflict_manager::_process_host_dependency( - std::tuple, uint32_t, bool*>& t) { - auto& d = std::get<0>(t); - int32_t conn = special_conn::host_dependency % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_parents | - actions::comments | actions::downtimes | - actions::host_dependencies | - actions::service_dependencies); - - // Cast object. - neb::host_dependency const& hd = - *static_cast(d.get()); - - // Insert/Update. - if (hd.enabled) { - _logger_sql->info("SQL: enabling host dependency of {} on {}", - hd.dependent_host_id, hd.host_id); - - // Prepare queries. - if (!_host_dependency_insupdate.prepared()) { - query_preparator::event_unique unique; - unique.insert("host_id"); - unique.insert("dependent_host_id"); - query_preparator qp(neb::host_dependency::static_type(), unique); - _host_dependency_insupdate = qp.prepare_insert_or_update(_mysql); - } - - // Process object. - _host_dependency_insupdate << hd; - _mysql.run_statement(_host_dependency_insupdate, - database::mysql_error::store_host_dependency, conn); - _add_action(conn, actions::host_dependencies); - } - // Delete. 
- else { - _logger_sql->info("SQL: removing host dependency of {} on {}", - hd.dependent_host_id, hd.host_id); - std::string query(fmt::format( - "DELETE FROM hosts_hosts_dependencies WHERE dependent_host_id={}" - " AND host_id={}", - hd.dependent_host_id, hd.host_id)); - _mysql.run_query(query, database::mysql_error::empty, conn); - _add_action(conn, actions::host_dependencies); - } - *std::get<2>(t) = true; -} - /** * Process a host group event. * @@ -843,9 +758,8 @@ void conflict_manager::_process_host( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); _finish_action(-1, actions::instances | actions::hostgroups | - actions::host_dependencies | actions::host_parents | - actions::custom_variables | actions::downtimes | - actions::comments | actions::service_dependencies); + actions::host_parents | actions::custom_variables | + actions::downtimes | actions::comments); neb::host& h = *static_cast(d.get()); // Log message. @@ -900,8 +814,7 @@ void conflict_manager::_process_host_parent( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); int32_t conn = special_conn::host_parent % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_dependencies | - actions::comments | actions::downtimes); + _finish_action(-1, actions::hosts | actions::comments | actions::downtimes); neb::host_parent const& hp(*static_cast(d.get())); @@ -958,8 +871,7 @@ void conflict_manager::_process_host_status( auto& d = std::get<0>(t); _finish_action(-1, actions::instances | actions::downtimes | actions::comments | actions::custom_variables | - actions::hostgroups | actions::host_dependencies | - actions::host_parents); + actions::hostgroups | actions::host_parents); // Processed object. 
neb::host_status const& hs(*static_cast(d.get())); @@ -1017,8 +929,7 @@ void conflict_manager::_process_instance( _finish_action(-1, actions::hosts | actions::acknowledgements | actions::modules | actions::downtimes | actions::comments | actions::servicegroups | - actions::hostgroups | actions::service_dependencies | - actions::host_dependencies); + actions::hostgroups); // Log message. _logger_sql->info( @@ -1147,9 +1058,8 @@ void conflict_manager::_process_log( void conflict_manager::_process_service_check( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); - _finish_action(-1, actions::downtimes | actions::comments | - actions::host_dependencies | actions::host_parents | - actions::service_dependencies); + _finish_action( + -1, actions::downtimes | actions::comments | actions::host_parents); // Cast object. neb::service_check const& sc( @@ -1211,67 +1121,6 @@ void conflict_manager::_process_service_check( *std::get<2>(t) = true; } -/** - * Process a service dependency event. - * - * @param[in] e Uncasted service dependency. - * - * @return The number of events that can be acknowledged. - */ -void conflict_manager::_process_service_dependency( - std::tuple, uint32_t, bool*>& t) { - auto& d = std::get<0>(t); - int32_t conn = special_conn::service_dependency % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_parents | - actions::downtimes | actions::comments | - actions::host_dependencies | - actions::service_dependencies); - - // Cast object. - neb::service_dependency const& sd( - *static_cast(d.get())); - - // Insert/Update. - if (sd.enabled) { - _logger_sql->info( - "SQL: enabling service dependency of ({}, {}) on ({}, {})", - sd.dependent_host_id, sd.dependent_service_id, sd.host_id, - sd.service_id); - - // Prepare queries. 
- if (!_service_dependency_insupdate.prepared()) { - query_preparator::event_unique unique; - unique.insert("dependent_host_id"); - unique.insert("dependent_service_id"); - unique.insert("host_id"); - unique.insert("service_id"); - query_preparator qp(neb::service_dependency::static_type(), unique); - _service_dependency_insupdate = qp.prepare_insert_or_update(_mysql); - } - - // Process object. - _service_dependency_insupdate << sd; - _mysql.run_statement(_service_dependency_insupdate, - database::mysql_error::store_service_dependency, conn); - _add_action(conn, actions::service_dependencies); - } - // Delete. - else { - _logger_sql->info( - "SQL: removing service dependency of ({}, {}) on ({}, {})", - sd.dependent_host_id, sd.dependent_service_id, sd.host_id, - sd.service_id); - std::string query(fmt::format( - "DELETE FROM serivces_services_dependencies WHERE dependent_host_id={} " - "AND dependent_service_id={} AND host_id={} AND service_id={}", - sd.dependent_host_id, sd.dependent_service_id, sd.host_id, - sd.service_id)); - _mysql.run_query(query, database::mysql_error::empty, conn); - _add_action(conn, actions::service_dependencies); - } - *std::get<2>(t) = true; -} - /** * Process a service group event. * @@ -1419,9 +1268,8 @@ void conflict_manager::_process_service_group_member( void conflict_manager::_process_service( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. 
const neb::service& s(*static_cast(d.get())); @@ -1472,9 +1320,8 @@ void conflict_manager::_process_service( void conflict_manager::_process_service_status( std::tuple, uint32_t, bool*>& t) { auto& d = std::get<0>(t); - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. neb::service_status const& ss{ *static_cast(d.get())}; @@ -1771,8 +1618,7 @@ void conflict_manager::_update_downtimes() { _logger_sql->debug("sql: update downtimes"); int32_t conn = special_conn::downtime % _mysql.connections_count(); _finish_action(-1, actions::hosts | actions::instances | actions::downtimes | - actions::host_parents | actions::host_dependencies | - actions::service_dependencies); + actions::host_parents); if (!_downtimes_queue.empty()) { auto it = _downtimes_queue.begin(); std::ostringstream oss; diff --git a/broker/storage/src/conflict_manager_storage.cc b/broker/storage/src/conflict_manager_storage.cc index d2fca02796e..e3677b463c3 100644 --- a/broker/storage/src/conflict_manager_storage.cc +++ b/broker/storage/src/conflict_manager_storage.cc @@ -31,6 +31,7 @@ #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/storage/conflict_manager.hh" #include "com/centreon/common/perfdata.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -135,10 +136,10 @@ void conflict_manager::_storage_process_service_status( "(host_id,host_name,service_id,service_description,must_be_rebuild," "special) VALUES (?,?,?,?,?,?)"); - fmt::string_view hv(misc::string::truncate( + fmt::string_view hv(common::truncate_utf8( ss.host_name, get_centreon_storage_index_data_col_size( centreon_storage_index_data_host_name))); - fmt::string_view sv(misc::string::truncate( + fmt::string_view 
sv(common::truncate_utf8( ss.service_description, get_centreon_storage_index_data_col_size( centreon_storage_index_data_service_description))); @@ -264,10 +265,10 @@ void conflict_manager::_storage_process_service_status( std::deque> to_publish; for (auto& pd : pds) { - pd.resize_name(misc::string::adjust_size_utf8( + pd.resize_name(common::adjust_size_utf8( pd.name(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_metric_name))); - pd.resize_unit(misc::string::adjust_size_utf8( + pd.resize_unit(common::adjust_size_utf8( pd.unit(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_unit_name))); auto it_index_cache = _metric_cache.find({index_id, pd.name()}); diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh index b8ccb624625..4705374db1d 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh @@ -71,7 +71,8 @@ class rebuilder { ~rebuilder() noexcept; rebuilder(const rebuilder&) = delete; rebuilder& operator=(const rebuilder&) = delete; - void rebuild_graphs(const std::shared_ptr& d); + void rebuild_graphs(const std::shared_ptr& d, + const std::shared_ptr& logger); }; } // namespace unified_sql diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index f0944953f6f..1a86768f5c9 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -159,11 +159,9 @@ class stream : public io::stream { enum special_conn { custom_variable, downtime, - host_dependency, host_group, host_parent, log, - service_dependency, service_group, severity, tag, @@ -176,23 +174,21 @@ class stream : public io::stream { comments = 1 << 1, custom_variables = 1 << 2, downtimes = 1 << 3, - 
host_dependencies = 1 << 4, - host_hostgroups = 1 << 5, - host_parents = 1 << 6, - hostgroups = 1 << 7, - hosts = 1 << 8, - instances = 1 << 9, - modules = 1 << 10, - service_dependencies = 1 << 11, - service_servicegroups = 1 << 12, - servicegroups = 1 << 13, - services = 1 << 14, - index_data = 1 << 15, - metrics = 1 << 16, - severities = 1 << 17, - tags = 1 << 18, - resources = 1 << 19, - resources_tags = 1 << 20, + host_hostgroups = 1 << 4, + host_parents = 1 << 5, + hostgroups = 1 << 6, + hosts = 1 << 7, + instances = 1 << 8, + modules = 1 << 9, + service_servicegroups = 1 << 10, + servicegroups = 1 << 11, + services = 1 << 12, + index_data = 1 << 13, + metrics = 1 << 14, + severities = 1 << 15, + tags = 1 << 16, + resources = 1 << 17, + resources_tags = 1 << 18, }; struct index_info { @@ -335,8 +331,6 @@ class stream : public io::stream { database::mysql_stmt _flapping_status_insupdate; database::mysql_stmt _host_check_update; database::mysql_stmt _pb_host_check_update; - database::mysql_stmt _host_exe_dependency_insupdate; - database::mysql_stmt _host_notif_dependency_insupdate; database::mysql_stmt _host_group_insupdate; database::mysql_stmt _pb_host_group_insupdate; database::mysql_stmt _host_group_member_delete; @@ -355,8 +349,6 @@ class stream : public io::stream { database::mysql_stmt _pb_instance_status_insupdate; database::mysql_stmt _service_check_update; database::mysql_stmt _pb_service_check_update; - database::mysql_stmt _service_dependency_insupdate; - database::mysql_stmt _pb_service_dependency_insupdate; database::mysql_stmt _service_group_insupdate; database::mysql_stmt _pb_service_group_insupdate; database::mysql_stmt _service_group_member_delete; @@ -419,8 +411,6 @@ class stream : public io::stream { void _process_pb_downtime(const std::shared_ptr& d); void _process_host_check(const std::shared_ptr& d); void _process_pb_host_check(const std::shared_ptr& d); - void _process_host_dependency(const std::shared_ptr& d); - void 
_process_pb_host_dependency(const std::shared_ptr& d); void _process_host_group(const std::shared_ptr& d); void _process_pb_host_group(const std::shared_ptr& d); void _process_host_group_member(const std::shared_ptr& d); @@ -436,8 +426,6 @@ class stream : public io::stream { void _process_log(const std::shared_ptr& d); void _process_service_check(const std::shared_ptr& d); void _process_pb_service_check(const std::shared_ptr& d); - void _process_service_dependency(const std::shared_ptr& d); - void _process_pb_service_dependency(const std::shared_ptr& d); void _process_service_group(const std::shared_ptr& d); void _process_pb_service_group(const std::shared_ptr& d); void _process_service_group_member(const std::shared_ptr& d); diff --git a/broker/unified_sql/src/rebuilder.cc b/broker/unified_sql/src/rebuilder.cc index 0f75add35f6..d3e085a37f4 100644 --- a/broker/unified_sql/src/rebuilder.cc +++ b/broker/unified_sql/src/rebuilder.cc @@ -67,10 +67,10 @@ rebuilder::~rebuilder() noexcept { * * @param d The BBDO message with all the metric ids to rebuild. 
*/ -void rebuilder::rebuild_graphs(const std::shared_ptr& d) { +void rebuilder::rebuild_graphs(const std::shared_ptr& d, + const std::shared_ptr& logger) { asio::post(com::centreon::common::pool::io_context(), [this, data = d, - logger = - log_v2::instance().get(log_v2::SQL)] { + logger] { { std::lock_guard lck(_rebuilding_m); _rebuilding++; @@ -190,16 +190,21 @@ void rebuilder::rebuild_graphs(const std::shared_ptr& d) { while (ms.fetch_row(res)) { uint64_t id_metric = res.value_as_u64(0); time_t ctime = res.value_as_u64(1); - double value = res.value_as_f64(2); + float value = res.value_as_f32(2); uint32_t status = res.value_as_u32(3); // duplicate values not allowed by rrd library auto yet_inserted = last_inserted.find(id_metric); if (yet_inserted != last_inserted.end()) { - if (yet_inserted->second >= ctime) + if (yet_inserted->second >= ctime) { + logger->trace("Metric {} too old to be inserted: {} >= {}", + id_metric, yet_inserted->second, ctime); continue; - else + } else { + logger->trace("Metric {} updated at {}", id_metric, ctime); yet_inserted->second = ctime; + } } else { + logger->trace("Metric {} inserted at {}", id_metric, ctime); last_inserted[id_metric] = ctime; } Point* pt = diff --git a/broker/unified_sql/src/stream.cc b/broker/unified_sql/src/stream.cc index eaaa899f879..8ee2af74538 100644 --- a/broker/unified_sql/src/stream.cc +++ b/broker/unified_sql/src/stream.cc @@ -73,7 +73,7 @@ constexpr void (stream::*const stream::neb_processing_table[])( nullptr, nullptr, &stream::_process_host_check, - &stream::_process_host_dependency, + nullptr, &stream::_process_host_group, &stream::_process_host_group_member, &stream::_process_host, @@ -84,7 +84,7 @@ constexpr void (stream::*const stream::neb_processing_table[])( &stream::_process_log, nullptr, &stream::_process_service_check, - &stream::_process_service_dependency, + nullptr, &stream::_process_service_group, &stream::_process_service_group_member, &stream::_process_service, @@ -111,8 +111,8 @@ 
constexpr void (stream::*const stream::neb_processing_table[])( &stream::_process_pb_instance, &stream::_process_pb_acknowledgement, &stream::_process_pb_responsive_instance, - &stream::_process_pb_host_dependency, - &stream::_process_pb_service_dependency, + nullptr, + nullptr, &stream::_process_pb_host_group, &stream::_process_pb_host_group_member, &stream::_process_pb_service_group, @@ -733,7 +733,7 @@ int32_t stream::write(const std::shared_ptr& data) { } else if (cat == io::bbdo) { switch (elem) { case bbdo::de_rebuild_graphs: - _rebuilder.rebuild_graphs(data); + _rebuilder.rebuild_graphs(data, _logger_sql); break; case bbdo::de_remove_graphs: remove_graphs(data); @@ -878,8 +878,7 @@ void stream::process_stop(const std::shared_ptr& d) { _finish_action(-1, actions::hosts | actions::acknowledgements | actions::modules | actions::downtimes | actions::comments | actions::servicegroups | - actions::hostgroups | actions::service_dependencies | - actions::host_dependencies); + actions::hostgroups); // Log message. _logger_sql->info("unified_sql: Disabling poller (id: {}, running: no)", @@ -1397,22 +1396,6 @@ void stream::_init_statements() { "last_check=?," // 9: last_check "output=? " // 10: output "WHERE id=? AND parent_id=?"); // 11, 12: service_id and host_id - const std::string host_exe_dep_query( - "INSERT INTO hosts_hosts_dependencies (dependent_host_id, host_id, " - "dependency_period, execution_failure_options, inherits_parent) " - "VALUES(?,?,?,?,?) ON DUPLICATE KEY UPDATE " - "dependency_period=VALUES(dependency_period), " - "execution_failure_options=VALUES(execution_failure_options), " - "inherits_parent=VALUES(inherits_parent)"); - _host_exe_dependency_insupdate = _mysql.prepare_query(host_exe_dep_query); - const std::string host_notif_dep_query( - "INSERT INTO hosts_hosts_dependencies (dependent_host_id, host_id, " - "dependency_period, notification_failure_options, inherits_parent) " - "VALUES(?,?,?,?,?) 
ON DUPLICATE KEY UPDATE " - "dependency_period=VALUES(dependency_period), " - "notification_failure_options=VALUES(notification_failure_options), " - "inherits_parent=VALUES(inherits_parent)"); - _host_notif_dependency_insupdate = _mysql.prepare_query(host_notif_dep_query); if (_store_in_hosts_services) { if (_bulk_prepared_statement) { auto hu = std::make_unique(hscr_query); diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index af662996366..5ad2836dbeb 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -29,6 +29,7 @@ #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/engine/host.hh" #include "com/centreon/engine/service.hh" @@ -116,18 +117,6 @@ void stream::_clean_tables(uint32_t instance_id) { conn); _add_action(conn, actions::servicegroups); - /* Remove host dependencies. */ - SPDLOG_LOGGER_DEBUG(_logger_sql, - "unified sql: remove host dependencies (instance_id: {})", - instance_id); - query = fmt::format( - "DELETE hhd FROM hosts_hosts_dependencies AS hhd INNER JOIN hosts as " - "h ON hhd.host_id=h.host_id OR hhd.dependent_host_id=h.host_id WHERE " - "h.instance_id={}", - instance_id); - _mysql.run_query(query, database::mysql_error::clean_host_dependencies, conn); - _add_action(conn, actions::host_dependencies); - /* Remove host parents. */ SPDLOG_LOGGER_DEBUG(_logger_sql, "unified sql: remove host parents (instance_id: {})", @@ -140,23 +129,6 @@ void stream::_clean_tables(uint32_t instance_id) { _mysql.run_query(query, database::mysql_error::clean_host_parents, conn); _add_action(conn, actions::host_parents); - /* Remove service dependencies. 
*/ - SPDLOG_LOGGER_DEBUG( - _logger_sql, "unified sql: remove service dependencies (instance_id: {})", - instance_id); - query = fmt::format( - "DELETE ssd FROM services_services_dependencies AS ssd" - " INNER JOIN services as s" - " ON ssd.service_id=s.service_id OR " - "ssd.dependent_service_id=s.service_id" - " INNER JOIN hosts as h" - " ON s.host_id=h.host_id" - " WHERE h.instance_id={}", - instance_id); - _mysql.run_query(query, database::mysql_error::clean_service_dependencies, - conn); - _add_action(conn, actions::service_dependencies); - /* Remove list of modules. */ SPDLOG_LOGGER_DEBUG(_logger_sql, "unified_sql: remove list of modules (instance_id: {})", @@ -563,8 +535,7 @@ void stream::_process_pb_acknowledgement(const std::shared_ptr& d) { */ void stream::_process_comment(const std::shared_ptr& d) { _finish_action(-1, actions::hosts | actions::instances | - actions::host_parents | actions::host_dependencies | - actions::service_dependencies | actions::comments); + actions::host_parents | actions::comments); // Cast object. neb::comment const& cmmnt{*static_cast(d.get())}; @@ -1073,8 +1044,7 @@ bool stream::_host_instance_known(uint64_t host_id) const { */ void stream::_process_host_check(const std::shared_ptr& d) { _finish_action(-1, actions::instances | actions::downtimes | - actions::comments | actions::host_dependencies | - actions::host_parents | actions::service_dependencies); + actions::comments | actions::host_parents); // Cast object. neb::host_check const& hc = *static_cast(d.get()); @@ -1144,8 +1114,7 @@ void stream::_process_host_check(const std::shared_ptr& d) { */ void stream::_process_pb_host_check(const std::shared_ptr& d) { _finish_action(-1, actions::instances | actions::downtimes | - actions::comments | actions::host_dependencies | - actions::host_parents | actions::service_dependencies); + actions::comments | actions::host_parents); // Cast object. 
const neb::pb_host_check& hc_obj = @@ -1216,140 +1185,6 @@ void stream::_process_pb_host_check(const std::shared_ptr& d) { hc.next_check(), now); } -/** - * Process a host dependency event. - * - * @param[in] e Uncasted host dependency. - * - * @return The number of events that can be acknowledged. - */ -void stream::_process_host_dependency(const std::shared_ptr& d) { - int32_t conn = special_conn::host_dependency % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_parents | - actions::comments | actions::downtimes | - actions::host_dependencies | - actions::service_dependencies); - - // Cast object. - neb::host_dependency const& hd = - *static_cast(d.get()); - - // Insert/Update. - if (hd.enabled) { - SPDLOG_LOGGER_INFO(_logger_sql, - "SQL: enabling host dependency of {} on {}: execution " - "failure options: {} - notification failure options: {}", - hd.dependent_host_id, hd.host_id, - hd.execution_failure_options, - hd.notification_failure_options); - - // Process object. 
- if (!hd.execution_failure_options.empty()) { - _host_exe_dependency_insupdate.bind_value_as_i32(0, hd.dependent_host_id); - _host_exe_dependency_insupdate.bind_value_as_i32(1, hd.host_id); - _host_exe_dependency_insupdate.bind_value_as_str(2, hd.dependency_period); - _host_exe_dependency_insupdate.bind_value_as_str( - 3, hd.execution_failure_options); - _host_exe_dependency_insupdate.bind_value_as_tiny(4, hd.inherits_parent); - _mysql.run_statement(_host_exe_dependency_insupdate, - database::mysql_error::store_host_dependency, conn); - } else if (!hd.notification_failure_options.empty()) { - _host_notif_dependency_insupdate.bind_value_as_i32(0, - hd.dependent_host_id); - _host_notif_dependency_insupdate.bind_value_as_i32(1, hd.host_id); - _host_notif_dependency_insupdate.bind_value_as_str(2, - hd.dependency_period); - _host_notif_dependency_insupdate.bind_value_as_str( - 3, hd.notification_failure_options); - _host_notif_dependency_insupdate.bind_value_as_tiny(4, - hd.inherits_parent); - _mysql.run_statement(_host_notif_dependency_insupdate, - database::mysql_error::store_host_dependency, conn); - } - _add_action(conn, actions::host_dependencies); - } - // Delete. - else { - SPDLOG_LOGGER_INFO(_logger_sql, "SQL: removing host dependency of {} on {}", - hd.dependent_host_id, hd.host_id); - std::string query(fmt::format( - "DELETE FROM hosts_hosts_dependencies WHERE dependent_host_id={}" - " AND host_id={}", - hd.dependent_host_id, hd.host_id)); - _mysql.run_query(query, database::mysql_error::empty, conn); - _add_action(conn, actions::host_dependencies); - } -} - -/** - * Process a host dependency event. - * - * @param[in] e Uncasted host dependency. - * - * @return The number of events that can be acknowledged. 
- */ -void stream::_process_pb_host_dependency(const std::shared_ptr& d) { - int32_t conn = special_conn::host_dependency % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_parents | - actions::comments | actions::downtimes | - actions::host_dependencies | - actions::service_dependencies); - - // Cast object. - const neb::pb_host_dependency& hd_protobuf = - *static_cast(d.get()); - const HostDependency& hd = hd_protobuf.obj(); - - // Insert/Update. - if (hd.enabled()) { - SPDLOG_LOGGER_INFO( - _logger_sql, - "SQL: enabling pb host dependency of {} on {}: execution failure " - "options: {} - notification failure options: {}", - hd.dependent_host_id(), hd.host_id(), hd.execution_failure_options(), - hd.notification_failure_options()); - - // Process object. - if (!hd.execution_failure_options().empty()) { - _host_exe_dependency_insupdate.bind_value_as_i32(0, - hd.dependent_host_id()); - _host_exe_dependency_insupdate.bind_value_as_i32(1, hd.host_id()); - _host_exe_dependency_insupdate.bind_value_as_str(2, - hd.dependency_period()); - _host_exe_dependency_insupdate.bind_value_as_str( - 3, hd.execution_failure_options()); - _host_exe_dependency_insupdate.bind_value_as_tiny(4, - hd.inherits_parent()); - _mysql.run_statement(_host_exe_dependency_insupdate, - database::mysql_error::store_host_dependency, conn); - } else if (!hd.notification_failure_options().empty()) { - _host_notif_dependency_insupdate.bind_value_as_i32( - 0, hd.dependent_host_id()); - _host_notif_dependency_insupdate.bind_value_as_i32(1, hd.host_id()); - _host_notif_dependency_insupdate.bind_value_as_str( - 2, hd.dependency_period()); - _host_notif_dependency_insupdate.bind_value_as_str( - 3, hd.notification_failure_options()); - _host_notif_dependency_insupdate.bind_value_as_tiny(4, - hd.inherits_parent()); - _mysql.run_statement(_host_notif_dependency_insupdate, - database::mysql_error::store_host_dependency, conn); - } - _add_action(conn, actions::host_dependencies); - 
} - // Delete. - else { - SPDLOG_LOGGER_INFO(_logger_sql, "SQL: removing host dependency of {} on {}", - hd.dependent_host_id(), hd.host_id()); - std::string query(fmt::format( - "DELETE FROM hosts_hosts_dependencies WHERE dependent_host_id={}" - " AND host_id={}", - hd.dependent_host_id(), hd.host_id())); - _mysql.run_query(query, database::mysql_error::empty, conn); - _add_action(conn, actions::host_dependencies); - } -} - /** * Process a host group event. * @@ -1418,6 +1253,8 @@ void stream::_process_pb_host_group(const std::shared_ptr& d) { std::shared_ptr hgd = std::static_pointer_cast(d); const HostGroup& hg = hgd->obj(); + _logger_sql->debug("process pb hostgroup {}, enabled {}", hg.hostgroup_id(), + hg.enabled()); if (hg.enabled()) { SPDLOG_LOGGER_INFO(_logger_sql, @@ -1579,6 +1416,9 @@ void stream::_process_pb_host_group_member(const std::shared_ptr& d) { const neb::pb_host_group_member& hgmp{ *static_cast(d.get())}; const HostGroupMember& hgm = hgmp.obj(); + _logger_sql->debug( + "process pb hostgroup members hostgroup {}, member {}, enabled {}", + hgm.hostgroup_id(), hgm.host_id(), hgm.enabled()); if (!_host_instance_known(hgm.host_id())) { SPDLOG_LOGGER_WARN( @@ -1611,7 +1451,7 @@ void stream::_process_pb_host_group_member(const std::shared_ptr& d) { query_preparator qp(neb::pb_host_group_member::static_type(), unique); _pb_host_group_member_insert = qp.prepare_insert_into( _mysql, "hosts_hostgroups ", /*space is mandatory to avoid - conflict with _process_host_dependency*/ + conflict with _process_host_group_member*/ {{3, "hostgroup_id", io::protobuf_base::invalid_on_zero, 0}, {5, "host_id", io::protobuf_base::invalid_on_zero, 0}}); } @@ -1621,16 +1461,16 @@ void stream::_process_pb_host_group_member(const std::shared_ptr& d) { if (_hostgroup_cache.find(hgm.hostgroup_id()) == _hostgroup_cache.end()) { SPDLOG_LOGGER_ERROR(_logger_sql, "SQL: host group {} {} does not exist - insertion " - "before insertion of " - "members", + "before insertion of 
members", hgm.hostgroup_id(), hgm.name()); _prepare_pb_hg_insupdate_statement(); neb::pb_host_group hg; - hg.mut_obj().set_hostgroup_id(hgm.hostgroup_id()); - hg.mut_obj().set_name(hgm.name()); - hg.mut_obj().set_enabled(true); - hg.mut_obj().set_poller_id(_cache_host_instance[hgm.host_id()]); + auto& obj = hg.mut_obj(); + obj.set_hostgroup_id(hgm.hostgroup_id()); + obj.set_name(hgm.name()); + obj.set_enabled(true); + obj.set_poller_id(_cache_host_instance[hgm.host_id()]); _pb_host_group_insupdate << hg; _mysql.run_statement(_pb_host_group_insupdate, @@ -1681,9 +1521,8 @@ void stream::_process_pb_host_group_member(const std::shared_ptr& d) { */ void stream::_process_host(const std::shared_ptr& d) { _finish_action(-1, actions::instances | actions::hostgroups | - actions::host_dependencies | actions::host_parents | - actions::custom_variables | actions::downtimes | - actions::comments | actions::service_dependencies); + actions::host_parents | actions::custom_variables | + actions::downtimes | actions::comments); neb::host& h = *static_cast(d.get()); // Log message. 
@@ -1742,8 +1581,7 @@ void stream::_process_host(const std::shared_ptr& d) { */ void stream::_process_host_parent(const std::shared_ptr& d) { int32_t conn = special_conn::host_parent % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_dependencies | - actions::comments | actions::downtimes); + _finish_action(-1, actions::hosts | actions::comments | actions::downtimes); neb::host_parent const& hp(*static_cast(d.get())); @@ -1797,8 +1635,7 @@ void stream::_process_host_parent(const std::shared_ptr& d) { */ void stream::_process_pb_host_parent(const std::shared_ptr& d) { int32_t conn = special_conn::host_parent % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_dependencies | - actions::comments | actions::downtimes); + _finish_action(-1, actions::hosts | actions::comments | actions::downtimes); std::shared_ptr hpp = std::static_pointer_cast(d); @@ -1866,8 +1703,7 @@ void stream::_process_host_status(const std::shared_ptr& d) { _finish_action(-1, actions::instances | actions::downtimes | actions::comments | actions::custom_variables | - actions::hostgroups | actions::host_dependencies | - actions::host_parents); + actions::hostgroups | actions::host_parents); // Processed object. 
neb::host_status const& hs(*static_cast(d.get())); @@ -1927,9 +1763,8 @@ void stream::_process_host_status(const std::shared_ptr& d) { */ void stream::_process_pb_host(const std::shared_ptr& d) { _finish_action(-1, actions::instances | actions::hostgroups | - actions::host_dependencies | actions::host_parents | - actions::custom_variables | actions::downtimes | - actions::comments | actions::service_dependencies | + actions::host_parents | actions::custom_variables | + actions::downtimes | actions::comments | actions::severities | actions::resources_tags | actions::tags); auto hst{static_cast(d.get())}; @@ -2154,25 +1989,25 @@ uint64_t stream::_process_pb_host_in_resources(const Host& h, int32_t conn) { uint64_t res_id = 0; if (h.enabled()) { uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( - h.name(), get_centreon_storage_resources_col_size( - centreon_storage_resources_name))}; - fmt::string_view address{misc::string::truncate( + fmt::string_view name{ + common::truncate_utf8(h.name(), get_centreon_storage_resources_col_size( + centreon_storage_resources_name))}; + fmt::string_view address{common::truncate_utf8( h.address(), get_centreon_storage_resources_col_size( centreon_storage_resources_address))}; - fmt::string_view alias{misc::string::truncate( + fmt::string_view alias{common::truncate_utf8( h.alias(), get_centreon_storage_resources_col_size( centreon_storage_resources_alias))}; - fmt::string_view parent_name{misc::string::truncate( + fmt::string_view parent_name{common::truncate_utf8( h.name(), get_centreon_storage_resources_col_size( centreon_storage_resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( + fmt::string_view notes_url{common::truncate_utf8( h.notes_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( + fmt::string_view notes{common::truncate_utf8( h.notes(), get_centreon_storage_resources_col_size( 
centreon_storage_resources_notes))}; - fmt::string_view action_url{misc::string::truncate( + fmt::string_view action_url{common::truncate_utf8( h.action_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_action_url))}; @@ -2408,9 +2243,8 @@ uint64_t stream::_process_pb_host_in_resources(const Host& h, int32_t conn) { */ void stream::_process_pb_adaptive_host(const std::shared_ptr& d) { SPDLOG_LOGGER_INFO(_logger_sql, "unified_sql: processing pb adaptive host"); - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. auto h{static_cast(d.get())}; auto& ah = h->obj(); @@ -2522,15 +2356,16 @@ void stream::_process_pb_adaptive_host(const std::shared_ptr& d) { * */ void stream::_process_pb_host_status(const std::shared_ptr& d) { - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. 
auto h{static_cast(d.get())}; auto& hscr = h->obj(); SPDLOG_LOGGER_DEBUG(_logger_sql, - "unified_sql: pb host status check result output: <<{}>>", - hscr.output()); + "unified_sql: pb host {} status check result output: " + "<<{}>> - last_check: {}", + hscr.host_id(), hscr.output(), hscr.last_check()); SPDLOG_LOGGER_DEBUG( _logger_sql, "unified_sql: pb host status check result perfdata: <<{}>>", hscr.perfdata()); @@ -2581,13 +2416,13 @@ void stream::_process_pb_host_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", hscr.output(), hscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_hosts_col_size(centreon_storage_hosts_output)); b->set_value_as_str(10, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( - hscr.perfdata(), get_centreon_storage_hosts_col_size( - centreon_storage_hosts_perfdata)); + size = common::adjust_size_utf8(hscr.perfdata(), + get_centreon_storage_hosts_col_size( + centreon_storage_hosts_perfdata)); b->set_value_as_str(11, fmt::string_view(hscr.perfdata().data(), size)); b->set_value_as_bool(12, hscr.flapping()); b->set_value_as_f64(13, hscr.percent_state_change()); @@ -2632,14 +2467,14 @@ void stream::_process_pb_host_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", hscr.output(), hscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_hosts_col_size(centreon_storage_hosts_output)); _hscr_update->bind_value_as_str( 10, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( - hscr.perfdata(), get_centreon_storage_hosts_col_size( - centreon_storage_hosts_perfdata)); + size = common::adjust_size_utf8(hscr.perfdata(), + get_centreon_storage_hosts_col_size( + 
centreon_storage_hosts_perfdata)); _hscr_update->bind_value_as_str( 11, fmt::string_view(hscr.perfdata().data(), size)); _hscr_update->bind_value_as_bool(12, hscr.flapping()); @@ -2748,8 +2583,7 @@ void stream::_process_instance(const std::shared_ptr& d) { _finish_action(-1, actions::hosts | actions::acknowledgements | actions::modules | actions::downtimes | actions::comments | actions::servicegroups | - actions::hostgroups | actions::service_dependencies | - actions::host_dependencies); + actions::hostgroups); // Log message. SPDLOG_LOGGER_INFO( @@ -2800,8 +2634,7 @@ void stream::_process_pb_instance(const std::shared_ptr& d) { _finish_action(-1, actions::hosts | actions::acknowledgements | actions::modules | actions::downtimes | actions::comments | actions::servicegroups | - actions::hostgroups | actions::service_dependencies | - actions::host_dependencies); + actions::hostgroups); /* Now, the local::pb_stop is handled by unified_sql. So the pb_instance with * running = false, seems no more useful. */ @@ -3131,9 +2964,8 @@ void stream::_process_pb_log(const std::shared_ptr& d) { * @return The number of events that can be acknowledged. */ void stream::_process_service_check(const std::shared_ptr& d) { - _finish_action(-1, actions::downtimes | actions::comments | - actions::host_dependencies | actions::host_parents | - actions::service_dependencies); + _finish_action( + -1, actions::downtimes | actions::comments | actions::host_parents); // Cast object. neb::service_check const& sc( @@ -3205,9 +3037,8 @@ void stream::_process_service_check(const std::shared_ptr& d) { * @return The number of events that can be acknowledged. */ void stream::_process_pb_service_check(const std::shared_ptr& d) { - _finish_action(-1, actions::downtimes | actions::comments | - actions::host_dependencies | actions::host_parents | - actions::service_dependencies); + _finish_action( + -1, actions::downtimes | actions::comments | actions::host_parents); // Cast object. 
const neb::pb_service_check& pb_sc( @@ -3280,140 +3111,6 @@ void stream::_process_pb_service_check(const std::shared_ptr& d) { sc.check_type(), sc.next_check(), now); } -/** - * Process a service dependency event. - * - * @param[in] e Uncasted service dependency. - * - * @return The number of events that can be acknowledged. - */ -void stream::_process_service_dependency(const std::shared_ptr& d) { - int32_t conn = special_conn::service_dependency % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_parents | - actions::downtimes | actions::comments | - actions::host_dependencies | - actions::service_dependencies); - - // Cast object. - neb::service_dependency const& sd( - *static_cast(d.get())); - - // Insert/Update. - if (sd.enabled) { - SPDLOG_LOGGER_INFO( - _logger_sql, "SQL: enabling service dependency of ({}, {}) on ({}, {})", - sd.dependent_host_id, sd.dependent_service_id, sd.host_id, - sd.service_id); - - // Prepare queries. - if (!_service_dependency_insupdate.prepared()) { - query_preparator::event_unique unique; - unique.insert("dependent_host_id"); - unique.insert("dependent_service_id"); - unique.insert("host_id"); - unique.insert("service_id"); - query_preparator qp(neb::service_dependency::static_type(), unique); - _service_dependency_insupdate = qp.prepare_insert_or_update(_mysql); - } - - // Process object. - _service_dependency_insupdate << sd; - _mysql.run_statement(_service_dependency_insupdate, - database::mysql_error::store_service_dependency, conn); - _add_action(conn, actions::service_dependencies); - } - // Delete. 
- else { - SPDLOG_LOGGER_INFO( - _logger_sql, "SQL: removing service dependency of ({}, {}) on ({}, {})", - sd.dependent_host_id, sd.dependent_service_id, sd.host_id, - sd.service_id); - std::string query(fmt::format( - "DELETE FROM services_services_dependencies WHERE dependent_host_id={} " - "AND dependent_service_id={} AND host_id={} AND service_id={}", - sd.dependent_host_id, sd.dependent_service_id, sd.host_id, - sd.service_id)); - _mysql.run_query(query, database::mysql_error::empty, conn); - _add_action(conn, actions::service_dependencies); - } -} - -/** - * Process a service dependency event. - * - * @param[in] e Uncasted service dependency. - * - * @return The number of events that can be acknowledged. - */ -void stream::_process_pb_service_dependency( - const std::shared_ptr& d) { - int32_t conn = special_conn::service_dependency % _mysql.connections_count(); - _finish_action(-1, actions::hosts | actions::host_parents | - actions::downtimes | actions::comments | - actions::host_dependencies | - actions::service_dependencies); - - // Cast object. - const neb::pb_service_dependency& proto_obj = - *static_cast(d.get()); - const ServiceDependency& sd = proto_obj.obj(); - - // Insert/Update. - if (sd.enabled()) { - SPDLOG_LOGGER_INFO( - _logger_sql, "SQL: enabling service dependency of ({}, {}) on ({}, {})", - sd.dependent_host_id(), sd.dependent_service_id(), sd.host_id(), - sd.service_id()); - - // Prepare queries. 
- if (!_pb_service_dependency_insupdate.prepared()) { - query_preparator::event_pb_unique unique{ - {6, "host_id", io::protobuf_base::invalid_on_zero, 0}, - {10, "service_id", io::protobuf_base::invalid_on_zero, 0}, - {3, "dependent_host_id", io::protobuf_base::invalid_on_zero, 0}, - {9, "dependent_service_id", io::protobuf_base::invalid_on_zero, 0}}; - query_preparator qp(neb::pb_service_dependency::static_type(), unique); - _pb_service_dependency_insupdate = qp.prepare_insert_or_update_table( - _mysql, "services_services_dependencies ", /*space is mandatory to - avoid conflict with _process_service_dependency*/ - {{6, "host_id", io::protobuf_base::invalid_on_zero, 0}, - {10, "service_id", io::protobuf_base::invalid_on_zero, 0}, - {3, "dependent_host_id", io::protobuf_base::invalid_on_zero, 0}, - {9, "dependent_service_id", io::protobuf_base::invalid_on_zero, 0}, - {2, "dependency_period", 0, - get_centreon_storage_services_services_dependencies_col_size( - centreon_storage_services_services_dependencies_dependency_period)}, - {5, "execution_failure_options", 0, - get_centreon_storage_services_services_dependencies_col_size( - centreon_storage_services_services_dependencies_execution_failure_options)}, - {7, "inherits_parent", 0, 0}, - {8, "notification_failure_options", 0, - get_centreon_storage_services_services_dependencies_col_size( - centreon_storage_services_services_dependencies_notification_failure_options)}}); - } - - // Process object. - _pb_service_dependency_insupdate << proto_obj; - _mysql.run_statement(_pb_service_dependency_insupdate, - database::mysql_error::store_service_dependency, conn); - _add_action(conn, actions::service_dependencies); - } - // Delete. 
- else { - SPDLOG_LOGGER_INFO( - _logger_sql, "SQL: removing service dependency of ({}, {}) on ({}, {})", - sd.dependent_host_id(), sd.dependent_service_id(), sd.host_id(), - sd.service_id()); - std::string query(fmt::format( - "DELETE FROM services_services_dependencies WHERE dependent_host_id={} " - "AND dependent_service_id={} AND host_id={} AND service_id={}", - sd.dependent_host_id(), sd.dependent_service_id(), sd.host_id(), - sd.service_id())); - _mysql.run_query(query, database::mysql_error::empty, conn); - _add_action(conn, actions::service_dependencies); - } -} - /** * Process a service group event. * @@ -3686,10 +3383,11 @@ void stream::_process_pb_service_group_member( _prepare_sg_insupdate_statement(); neb::pb_service_group sg; - sg.mut_obj().set_servicegroup_id(sgm.servicegroup_id()); - sg.mut_obj().set_name(sgm.name()); - sg.mut_obj().set_enabled(true); - sg.mut_obj().set_poller_id(sgm.poller_id()); + auto& obj = sg.mut_obj(); + obj.set_servicegroup_id(sgm.servicegroup_id()); + obj.set_name(sgm.name()); + obj.set_enabled(true); + obj.set_poller_id(sgm.poller_id()); _pb_service_group_insupdate << sg; _mysql.run_statement(_pb_service_group_insupdate, @@ -3744,9 +3442,8 @@ void stream::_process_pb_service_group_member( * @return The number of events that can be acknowledged. */ void stream::_process_service(const std::shared_ptr& d) { - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. 
const neb::service& s(*static_cast(d.get())); @@ -3803,8 +3500,7 @@ void stream::_process_service(const std::shared_ptr& d) { */ void stream::_process_pb_service(const std::shared_ptr& d) { _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies | actions::severities | + actions::downtimes | actions::severities | actions::resources_tags | actions::tags); // Processed object. auto svc{static_cast(d.get())}; @@ -4031,19 +3727,19 @@ uint64_t stream::_process_pb_service_in_resources(const Service& s, if (s.enabled()) { uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( + fmt::string_view name{common::truncate_utf8( s.display_name(), get_centreon_storage_resources_col_size( centreon_storage_resources_name))}; - fmt::string_view parent_name{misc::string::truncate( + fmt::string_view parent_name{common::truncate_utf8( s.host_name(), get_centreon_storage_resources_col_size( centreon_storage_resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( + fmt::string_view notes_url{common::truncate_utf8( s.notes_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( + fmt::string_view notes{common::truncate_utf8( s.notes(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes))}; - fmt::string_view action_url{misc::string::truncate( + fmt::string_view action_url{common::truncate_utf8( s.action_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_action_url))}; @@ -4278,9 +3974,8 @@ uint64_t stream::_process_pb_service_in_resources(const Service& s, void stream::_process_pb_adaptive_service(const std::shared_ptr& d) { SPDLOG_LOGGER_DEBUG(_logger_sql, "unified_sql: processing pb adaptive service"); - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - 
actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. auto s{static_cast(d.get())}; auto& as = s->obj(); @@ -4401,10 +4096,10 @@ void stream::_check_and_update_index_cache(const Service& ss) { auto it_index_cache = _index_cache.find({ss.host_id(), ss.service_id()}); - fmt::string_view hv(misc::string::truncate( + fmt::string_view hv(common::truncate_utf8( ss.host_name(), get_centreon_storage_index_data_col_size( centreon_storage_index_data_host_name))); - fmt::string_view sv(misc::string::truncate( + fmt::string_view sv(common::truncate_utf8( ss.description(), get_centreon_storage_index_data_col_size( centreon_storage_index_data_service_description))); bool special = ss.type() == BA; @@ -4517,9 +4212,8 @@ void stream::_process_service_status(const std::shared_ptr& d) { if (!_store_in_hosts_services) return; - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. neb::service_status const& ss{ *static_cast(d.get())}; @@ -4587,9 +4281,8 @@ void stream::_process_service_status(const std::shared_ptr& d) { * */ void stream::_process_pb_service_status(const std::shared_ptr& d) { - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); // Processed object. 
auto s{static_cast(d.get())}; auto& sscr = s->obj(); @@ -4649,11 +4342,11 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", sscr.output(), sscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_services_col_size( centreon_storage_services_output)); b->set_value_as_str(11, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( + size = common::adjust_size_utf8( sscr.perfdata(), get_centreon_storage_services_col_size( centreon_storage_services_perfdata)); b->set_value_as_str(12, fmt::string_view(sscr.perfdata().data(), size)); @@ -4703,12 +4396,12 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", sscr.output(), sscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_services_col_size( centreon_storage_services_output)); _sscr_update->bind_value_as_str( 11, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( + size = common::adjust_size_utf8( sscr.perfdata(), get_centreon_storage_services_col_size( centreon_storage_services_perfdata)); _sscr_update->bind_value_as_str( @@ -4745,7 +4438,7 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { if (_store_in_resources) { int32_t conn = _mysql.choose_connection_by_instance( _cache_host_instance[static_cast(sscr.host_id())]); - size_t output_size = misc::string::adjust_size_utf8( + size_t output_size = common::adjust_size_utf8( sscr.output(), get_centreon_storage_resources_col_size( centreon_storage_resources_output)); _logger_sql->debug( diff --git a/broker/unified_sql/src/stream_storage.cc b/broker/unified_sql/src/stream_storage.cc index 01e6f92fd80..c502953f953 
100644 --- a/broker/unified_sql/src/stream_storage.cc +++ b/broker/unified_sql/src/stream_storage.cc @@ -38,6 +38,7 @@ #include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" #include "com/centreon/common/perfdata.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -156,10 +157,10 @@ void stream::_unified_sql_process_pb_service_status( std::deque> to_publish; for (auto& pd : pds) { misc::read_lock rlck(_metric_cache_m); - pd.resize_name(misc::string::adjust_size_utf8( + pd.resize_name(common::adjust_size_utf8( pd.name(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_metric_name))); - pd.resize_unit(misc::string::adjust_size_utf8( + pd.resize_unit(common::adjust_size_utf8( pd.unit(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_unit_name))); @@ -460,10 +461,10 @@ void stream::_unified_sql_process_service_status( if (!_index_data_insert.prepared()) _index_data_insert = _mysql.prepare_query(_index_data_insert_request); - fmt::string_view hv(misc::string::truncate( + fmt::string_view hv(common::truncate_utf8( ss.host_name, get_centreon_storage_index_data_col_size( centreon_storage_index_data_host_name))); - fmt::string_view sv(misc::string::truncate( + fmt::string_view sv(common::truncate_utf8( ss.service_description, get_centreon_storage_index_data_col_size( centreon_storage_index_data_service_description))); @@ -533,10 +534,10 @@ void stream::_unified_sql_process_service_status( std::deque> to_publish; for (auto& pd : pds) { misc::read_lock rlck(_metric_cache_m); - pd.resize_name(misc::string::adjust_size_utf8( + pd.resize_name(common::adjust_size_utf8( pd.name(), get_centreon_storage_metrics_col_size( centreon_storage_metrics_metric_name))); - pd.resize_unit(misc::string::adjust_size_utf8( + pd.resize_unit(common::adjust_size_utf8( pd.unit(), get_centreon_storage_metrics_col_size( 
centreon_storage_metrics_unit_name))); @@ -816,9 +817,8 @@ void stream::_check_queues(boost::system::error_code ec) { try { if (_bulk_prepared_statement) { - _finish_action(-1, actions::host_parents | actions::comments | - actions::downtimes | actions::host_dependencies | - actions::service_dependencies); + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); if (_store_in_hosts_services) { if (_hscr_bind) { SPDLOG_LOGGER_TRACE( @@ -961,9 +961,7 @@ void stream::_check_queues(boost::system::error_code ec) { SPDLOG_LOGGER_DEBUG(_logger_sql, "{} new downtimes inserted", _downtimes->row_count()); _finish_action(-1, actions::hosts | actions::instances | - actions::downtimes | actions::host_parents | - actions::host_dependencies | - actions::service_dependencies); + actions::downtimes | actions::host_parents); int32_t conn = special_conn::downtime % _mysql.connections_count(); _downtimes->execute(_mysql, database::mysql_error::store_downtime, conn); @@ -1116,6 +1114,6 @@ void stream::_check_rebuild_index() { auto& obj = rg->mut_obj(); for (auto& i : index_to_rebuild) obj.add_index_ids(i); - _rebuilder.rebuild_graphs(rg); + _rebuilder.rebuild_graphs(rg, _logger_sql); } } diff --git a/broker/victoria_metrics/src/request.cc b/broker/victoria_metrics/src/request.cc index 0994528a0b3..431ad991dd7 100644 --- a/broker/victoria_metrics/src/request.cc +++ b/broker/victoria_metrics/src/request.cc @@ -69,7 +69,6 @@ request::request(boost::beast::http::verb method, static constexpr std::string_view _sz_metric = "metric,id="; static constexpr std::string_view _sz_status = "status,id="; -static constexpr std::string_view _sz_space = " "; static constexpr std::string_view _sz_name = ",name="; static constexpr std::string_view _sz_unit = ",unit="; static constexpr std::string_view _sz_host_id = ",host_id="; diff --git a/centreon_cmake.bat b/centreon_cmake.bat new file mode 100644 index 00000000000..07bb3e28e38 --- /dev/null +++ b/centreon_cmake.bat @@ 
-0,0 +1,61 @@ +echo off + +set "build_type=debug" + +if "%~1" == "--help" ( + call :show_help + goto :eof +) else if "%~1" == "--release" ( + set "build_type=release" +) + +where /q cl.exe +IF ERRORLEVEL 1 ( + echo unable to find cl.exe, please run vcvarsall.bat or compile from x64 Native Tools Command Prompt for VS20xx + exit /B +) + +where /q cmake.exe +IF ERRORLEVEL 1 ( + echo unable to find cmake.exe, please install cmake.exe + exit /B +) + +where /q ninja.exe +IF ERRORLEVEL 1 ( + echo unable to find ninja.exe, please install ninja.exe + exit /B +) + +if not defined VCPKG_ROOT ( + echo "install vcpkg" + set "current_dir=%cd%" + cd /D %USERPROFILE% + git clone https://github.com/microsoft/vcpkg.git + cd vcpkg && bootstrap-vcpkg.bat + cd /D %current_dir% + set "VCPKG_ROOT=%USERPROFILE%\vcpkg" + set "PATH=%VCPKG_ROOT%;%PATH%" + echo "Please add this variables to environment for future compile:" + echo "VCPKG_ROOT=%USERPROFILE%\vcpkg" + echo "PATH=%VCPKG_ROOT%;%PATH%" +) + + +cmake.exe --preset=%build_type% + +cmake.exe --build build_windows + +goto :eof + + +:show_help +echo This program build Centreon-Monitoring-Agent +echo --release : Build on release mode +echo --help : help +goto :eof + + + + + diff --git a/ci/debian/centreon-broker-victoria_metrics.install b/ci/debian/centreon-broker-victoria_metrics.install deleted file mode 100644 index 72c4113b02f..00000000000 --- a/ci/debian/centreon-broker-victoria_metrics.install +++ /dev/null @@ -1 +0,0 @@ -debian/tmp-centreon-collect/usr/share/centreon/lib/centreon-broker/70-victoria_metrics.so usr/share/centreon/lib/centreon-broker diff --git a/clib/inc/com/centreon/clib/version.hh.in b/clib/inc/com/centreon/clib/version.hh.in index ad0cf3b714c..21a8b9ff4e8 100644 --- a/clib/inc/com/centreon/clib/version.hh.in +++ b/clib/inc/com/centreon/clib/version.hh.in @@ -28,15 +28,9 @@ namespace com::centreon::clib { namespace version { // Compile-time values. 
-unsigned int const major = @COLLECT_MAJOR@; -unsigned int const minor = @COLLECT_MINOR@; -unsigned int const patch = @COLLECT_PATCH@; char const* const string = "@CLIB_VERSION@"; // Run-time values. -unsigned int get_major() noexcept; -unsigned int get_minor() noexcept; -unsigned int get_patch() noexcept; char const* get_string() noexcept; } // namespace version } // namespace com::centreon::clib diff --git a/clib/src/clib/version.cc b/clib/src/clib/version.cc index 5984cea7a44..14d1dfee4cf 100644 --- a/clib/src/clib/version.cc +++ b/clib/src/clib/version.cc @@ -1,51 +1,24 @@ /** -* Copyright 2011-2013 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ - -#include "com/centreon/clib/version.hh" - -using namespace com::centreon::clib; - -/** - * Get version major. + * Copyright 2011-2013 Centreon * - * @return Centreon Clib version major. - */ -unsigned int version::get_major() throw() { - return (major); -} - -/** - * Get version minor. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * @return Centreon Clib version minor. - */ -unsigned int version::get_minor() throw() { - return (minor); -} - -/** - * Get version patch. 
+ * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * - * @return Centreon Clib version patch. + * For more information : contact@centreon.com */ -unsigned int version::get_patch() throw() { - return (patch); -} + +#include "com/centreon/clib/version.hh" + +using namespace com::centreon::clib; /** * Get version string. diff --git a/clib/test/version.cc b/clib/test/version.cc index 8f2b10e7a88..e24c7e60f34 100644 --- a/clib/test/version.cc +++ b/clib/test/version.cc @@ -1,41 +1,26 @@ /** -* Copyright 2011-2020 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2011-2020 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/clib/version.hh" #include using namespace com::centreon::clib; -TEST(ClibVersion, Major) { - ASSERT_EQ(version::get_major(), version::major); - ASSERT_EQ(CENTREON_CLIB_VERSION_MAJOR, version::major); -} - -TEST(ClibVersion, Minor) { - ASSERT_EQ(version::get_minor(), version::minor); - ASSERT_EQ(CENTREON_CLIB_VERSION_MINOR, version::minor); -} - -TEST(ClibVersion, Patch) { - ASSERT_EQ(version::get_patch(), version::patch); - ASSERT_EQ(CENTREON_CLIB_VERSION_PATCH, version::patch); -} - TEST(ClibVersion, String) { ASSERT_STREQ(version::get_string(), version::string); ASSERT_STREQ(CENTREON_CLIB_VERSION_STRING, version::string); diff --git a/cmake-vcpkg.sh b/cmake-vcpkg.sh index f160be00773..96f76b4de32 100755 --- a/cmake-vcpkg.sh +++ b/cmake-vcpkg.sh @@ -204,6 +204,7 @@ elif [ -r /etc/issue ] ; then libgcrypt20-dev libgnutls28-dev liblua5.3-dev + libmariadb-dev libperl-dev librrd-dev libssh2-1-dev @@ -258,7 +259,7 @@ fi if [ ! -d vcpkg ] ; then echo "No vcpkg directory. Cloning the repo" - git clone -b 2024.01.12 https://github.com/Microsoft/vcpkg.git + git clone --depth 1 -b 2024.01.12 https://github.com/Microsoft/vcpkg.git ./vcpkg/bootstrap-vcpkg.sh fi diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 03b0488d216..6a8cb3b5bc7 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -19,12 +19,18 @@ # Global options. project("Centreon common" C CXX) -add_subdirectory(log_v2) -add_subdirectory(engine_legacy_conf) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_subdirectory(log_v2) + if(LEGACY_ENGINE) + add_subdirectory(engine_legacy_conf) + else() + add_subdirectory(engine_conf) + endif() +endif() # Set directories. 
set(INCLUDE_DIR "${PROJECT_SOURCE_DIR}/inc/com/centreon/common") -set(PROCESS_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/process") +set(PROCESS_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/process/inc") set(HTTP_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/http/inc/com/centreon/common/http") set(SRC_DIR "${PROJECT_SOURCE_DIR}/src") set(TEST_DIR "${PROJECT_SOURCE_DIR}/tests") @@ -45,31 +51,36 @@ add_custom_command( WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Set sources. -set(SOURCES - ${SRC_DIR}/hex_dump.cc - ${SRC_DIR}/perfdata.cc - ${SRC_DIR}/pool.cc - ${SRC_DIR}/process_stat.cc - ${SRC_DIR}/process_stat.pb.cc - ${SRC_DIR}/process_stat.grpc.pb.cc - ${SRC_DIR}/rapidjson_helper.cc -) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + set(SOURCES + ${SRC_DIR}/hex_dump.cc + ${SRC_DIR}/perfdata.cc + ${SRC_DIR}/pool.cc + ${SRC_DIR}/process_stat.cc + ${SRC_DIR}/process_stat.pb.cc + ${SRC_DIR}/process_stat.grpc.pb.cc + ${SRC_DIR}/rapidjson_helper.cc + ${SRC_DIR}/utf8.cc) +else() + # we need not many things to just compile centreon-monitoring-agent + # (centagent) + set(SOURCES ${SRC_DIR}/perfdata.cc ${SRC_DIR}/utf8.cc) +endif() # Include directories. 
-include_directories("${INCLUDE_DIR}" - ${HTTP_INCLUDE_DIR} - ${VCPKG_INCLUDE_DIR} - ${PROCESS_INCLUDE_DIR} - ) +include_directories("${INCLUDE_DIR}" ${HTTP_INCLUDE_DIR} ${VCPKG_INCLUDE_DIR} + ${PROCESS_INCLUDE_DIR}) add_definitions(-DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) add_library(centreon_common STATIC ${SOURCES}) -target_include_directories(centreon_common PRIVATE ${INCLUDE_DIR}) set_property(TARGET centreon_common PROPERTY POSITION_INDEPENDENT_CODE ON) target_precompile_headers(centreon_common PRIVATE precomp_inc/precomp.hh) -add_subdirectory(http) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_subdirectory(http) +endif() + add_subdirectory(grpc) add_subdirectory(process) diff --git a/common/doc/common-doc.md index 38959b0b41f..9edf0d08e52 100644 --- a/common/doc/common-doc.md +++ b/common/doc/common-doc.md @@ -5,6 +5,7 @@ * [Pool](#Pool) * [Grpc](#Grpc) * [Process](#Process) +* [Engine configuration](#Engineconfiguration) ## Pool @@ -116,3 +117,32 @@ class process_wait : public process { ``` +### Asio bug work around +There is an issue in io_context::notify_fork. Internally, ctx.notify_fork calls epoll_reactor::notify_fork which locks registered_descriptors_mutex_. An issue occurs when registered_descriptors_mutex_ is locked by another thread at the fork timepoint. +In such a case, the child process starts with registered_descriptors_mutex_ already locked and both the child and parent processes will hang. + +## Engine configuration + +Here is the new Engine configuration library. It is a full rewrite of the legacy Engine configuration library that you can find now in the `engine_legacy_conf` directory. This new library uses protobuf messages to store the configuration. We have tried to keep the object structures as close to the originals as Protobuf allows. + +A new parser has been implemented. It works differently from the previous one as the goal is to translate the legacy cfg format to protobuf.
The parser is in the `engine_conf/parser.cc` file, functions are almost the same as in the legacy parser but they make heavy use of the Protobuf reflection. + +A cfg file defines objects, each one has fields that are given by key/value. + +Configuration objects have default values whereas Protobuf messages have fixed default values, 0 for numbers, empty strings for strings, empty array for arrays, etc. + +To allow custom default values in our messages, each one has a helper class associated with it. For example, the Contact message has an associated contact_helper class. Defining them is simple: +``` +// To define a contact with the structure of Protobuf Contact +configuration::Contact ctc; +// Now we initialize the helper +configuration::contact_helper ctc_hlp(&ctc); +``` +Once the helper is defined, all the default values in the message are initialized. +The helper also provides a hook method; this is needed because in the cfg files some fields can have several key names, and also some enum types can be defined from strings, or pairs of strings or anything else. So to support the quirks that cfg files allow on objects, we have the hook function. +The `hook()` function takes the key and the value as arguments and returns a boolean that is true if the hook has been correctly applied. + +Three steps are done while a cfg file is parsed. For each key, +* we first try to read it from the `hook`. +* on failure, we try the `set` method that only uses the Protobuf reflection. +* on failure, we try to read the key as a custom variable, and here there is another way to parse it (the idea is very similar to the legacy parser). 
diff --git a/common/engine_conf/CMakeLists.txt b/common/engine_conf/CMakeLists.txt new file mode 100644 index 00000000000..d8c2b3b3bdc --- /dev/null +++ b/common/engine_conf/CMakeLists.txt @@ -0,0 +1,93 @@ +# +# Copyright 2022-2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# +add_custom_command( + OUTPUT state.pb.cc state.pb.h + DEPENDS state.proto + COMMENT "Generating interface files of the engine configuration file" + COMMAND ${Protobuf_PROTOC_EXECUTABLE} ARGS --cpp_out=. 
+ --proto_path=${CMAKE_CURRENT_SOURCE_DIR} state.proto + VERBATIM) + +add_custom_target(target_state_proto DEPENDS state.pb.cc state.pb.h) + +include_directories("${CMAKE_SOURCE_DIR}/engine/inc") + +add_definitions(-DDEFAULT_STATUS_FILE="${ENGINE_VAR_LOG_DIR}/status.dat") +add_definitions(-DDEFAULT_RETENTION_FILE="${ENGINE_VAR_LOG_DIR}/retention.dat") +add_definitions(-DDEFAULT_DEBUG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.debug") +add_definitions( + -DDEFAULT_COMMAND_FILE="${ENGINE_VAR_LIB_DIR}/rw/centengine.cmd") +add_definitions(-DDEFAULT_LOG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.log") + +add_library( + engine_conf STATIC + anomalydetection_helper.cc + anomalydetection_helper.hh + command_helper.cc + command_helper.hh + connector_helper.cc + connector_helper.hh + contact_helper.cc + contact_helper.hh + contactgroup_helper.cc + contactgroup_helper.hh + host_helper.cc + host_helper.hh + hostdependency_helper.cc + hostdependency_helper.hh + hostescalation_helper.cc + hostescalation_helper.hh + hostgroup_helper.cc + hostgroup_helper.hh + message_helper.cc + message_helper.hh + parser.cc + parser.hh + service_helper.cc + service_helper.hh + servicedependency_helper.cc + servicedependency_helper.hh + serviceescalation_helper.cc + serviceescalation_helper.hh + servicegroup_helper.cc + servicegroup_helper.hh + severity_helper.cc + severity_helper.hh + ${CMAKE_CURRENT_BINARY_DIR}/state.pb.cc + ${CMAKE_CURRENT_BINARY_DIR}/state.pb.h + state_helper.cc + state_helper.hh + tag_helper.cc + tag_helper.hh + timeperiod_helper.cc + timeperiod_helper.hh) +add_dependencies(engine_conf target_state_proto) +include_directories(${CMAKE_SOURCE_DIR}/common/inc) + +target_precompile_headers(engine_conf PRIVATE + ${CMAKE_SOURCE_DIR}/common/precomp_inc/precomp.hh) +set_target_properties(engine_conf PROPERTIES POSITION_INDEPENDENT_CODE ON) +target_link_libraries( + engine_conf + log_v2 + absl::any + absl::log + absl::base + absl::bits + -L${PROTOBUF_LIB_DIR} + protobuf) diff --git 
a/common/engine_conf/anomalydetection_helper.cc b/common/engine_conf/anomalydetection_helper.cc new file mode 100644 index 00000000000..1917ecbae77 --- /dev/null +++ b/common/engine_conf/anomalydetection_helper.cc @@ -0,0 +1,300 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/anomalydetection_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from an Anomalydetection object. + * + * @param obj The Anomalydetection object on which this helper works. The helper + * is not the owner of this object, it just helps to initialize it. 
+ */ +anomalydetection_helper::anomalydetection_helper(Anomalydetection* obj) + : message_helper(object_type::anomalydetection, + obj, + { + {"_HOST_ID", "host_id"}, + {"_SERVICE_ID", "service_id"}, + {"description", "service_description"}, + {"service_groups", "servicegroups"}, + {"contact_groups", "contactgroups"}, + {"normal_check_interval", "check_interval"}, + {"retry_check_interval", "retry_interval"}, + {"active_checks_enabled", "checks_active"}, + {"passive_checks_enabled", "checks_passive"}, + {"severity", "severity_id"}, + }, + Anomalydetection::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Anomalydetection objects has a + * particular behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool anomalydetection_helper::hook(std::string_view key, + const std::string_view& value) { + Anomalydetection* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + if (key == "contactgroups") { + fill_string_group(obj->mutable_contactgroups(), value); + return true; + } else if (key == "contacts") { + fill_string_group(obj->mutable_contacts(), value); + return true; + } else if (key == "flap_detection_options") { + uint16_t options(action_svc_none); + auto values = absl::StrSplit(value, ','); + for (auto it = values.begin(); it != values.end(); ++it) { + std::string_view v = absl::StripAsciiWhitespace(*it); + if (v == "o" || v == "ok") + options |= action_svc_ok; + else if (v == "w" || v == "warning") + options |= action_svc_warning; + else if (v == "u" || v == "unknown") + options |= action_svc_unknown; + else if (v == "c" || v == "critical") + options |= action_svc_critical; + else if (v == "n" || v == "none") + options |= action_svc_none; + else if (v == "a" || v == "all") + options = action_svc_ok | action_svc_warning | action_svc_unknown | + action_svc_critical; + else + return false; + } + obj->set_flap_detection_options(options); + return true; + } else if (key == "initial_state") { + ServiceStatus initial_state; + if (value == "o" || value == "ok") + initial_state = ServiceStatus::state_ok; + else if (value == "w" || value == "warning") + initial_state = ServiceStatus::state_warning; + else if (value == "u" || value == "unknown") + initial_state = ServiceStatus::state_unknown; + else if (value == "c" || value == "critical") + initial_state = ServiceStatus::state_critical; + else + return false; + obj->set_initial_state(initial_state); + return true; + } else if (key == "notification_options") { + uint16_t options(action_svc_none); + if (fill_service_notification_options(&options, value)) { + obj->set_notification_options(options); + return true; + } else + return false; + obj->set_notification_options(options); + return true; + } else if (key == "servicegroups") { + fill_string_group(obj->mutable_servicegroups(), value); + return true; + } else if (key == "stalking_options") { + 
uint16_t options(action_svc_none); + auto values = absl::StrSplit(value, ','); + for (auto it = values.begin(); it != values.end(); ++it) { + std::string_view v = absl::StripAsciiWhitespace(*it); + if (v == "u" || v == "unknown") + options |= action_svc_unknown; + else if (v == "o" || v == "ok") + options |= action_svc_ok; + else if (v == "w" || v == "warning") + options |= action_svc_warning; + else if (v == "c" || v == "critical") + options |= action_svc_critical; + else if (v == "n" || v == "none") + options = action_svc_none; + else if (v == "a" || v == "all") + options = action_svc_ok | action_svc_unknown | action_svc_warning | + action_svc_critical; + else + return false; + } + obj->set_stalking_options(options); + return true; + } else if (key == "category_tags") { + auto tags{absl::StrSplit(value, ',')}; + bool ret = true; + + for (auto it = obj->tags().begin(); it != obj->tags().end();) { + if (it->second() == TagType::tag_servicecategory) + it = obj->mutable_tags()->erase(it); + else + ++it; + } + + for (auto& tag : tags) { + uint64_t id; + bool parse_ok; + parse_ok = absl::SimpleAtoi(tag, &id); + if (parse_ok) { + auto t = obj->add_tags(); + t->set_first(id); + t->set_second(TagType::tag_servicecategory); + } else { + ret = false; + } + } + return ret; + } else if (key == "group_tags") { + auto tags{absl::StrSplit(value, ',')}; + bool ret = true; + + for (auto it = obj->tags().begin(); it != obj->tags().end();) { + if (it->second() == TagType::tag_servicegroup) + it = obj->mutable_tags()->erase(it); + else + ++it; + } + + for (auto& tag : tags) { + uint64_t id; + bool parse_ok; + parse_ok = absl::SimpleAtoi(tag, &id); + if (parse_ok) { + auto t = obj->add_tags(); + t->set_first(id); + t->set_second(TagType::tag_servicegroup); + } else { + ret = false; + } + } + return ret; + } + return false; +} + +/** + * @brief Check the validity of the Anomalydetection object. + * + * @param err An error counter. 
+ */ +void anomalydetection_helper::check_validity(error_cnt& err) const { + const Anomalydetection* o = static_cast(obj()); + + if (o->obj().register_()) { + if (o->service_description().empty()) { + err.config_errors++; + throw msg_fmt( + "Anomaly detection has no name (property 'service_description')"); + } + if (o->host_name().empty()) { + err.config_errors++; + throw msg_fmt( + "Anomaly detection '{}' has no host name (property 'host_name')", + o->service_description()); + } + if (o->metric_name().empty()) { + err.config_errors++; + throw msg_fmt( + "Anomaly detection '{}' has no metric name (property 'metric_name')", + o->service_description()); + } + if (o->thresholds_file().empty()) { + err.config_errors++; + throw msg_fmt( + "Anomaly detection '{}' has no thresholds file (property " + "'thresholds_file')", + o->service_description()); + } + } +} + +/** + * @brief Initializer of the Anomalydetection object, in other words set its + * default values. Protobuf does not allow specific default values, so we fix + * this with this method. 
+ */ +void anomalydetection_helper::_init() { + Anomalydetection* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_acknowledgement_timeout(0); + obj->set_status_change(false); + obj->set_checks_active(true); + obj->set_checks_passive(true); + obj->set_check_freshness(0); + obj->set_check_interval(5); + obj->set_event_handler_enabled(true); + obj->set_first_notification_delay(0); + obj->set_flap_detection_enabled(true); + obj->set_flap_detection_options(action_svc_ok | action_svc_warning | + action_svc_unknown | action_svc_critical); + obj->set_freshness_threshold(0); + obj->set_high_flap_threshold(0); + obj->set_initial_state(ServiceStatus::state_ok); + obj->set_is_volatile(false); + obj->set_low_flap_threshold(0); + obj->set_max_check_attempts(3); + obj->set_notifications_enabled(true); + obj->set_notification_interval(0); + obj->set_notification_options(action_svc_ok | action_svc_warning | + action_svc_critical | action_svc_unknown | + action_svc_flapping | action_svc_downtime); + obj->set_obsess_over_service(true); + obj->set_process_perf_data(true); + obj->set_retain_nonstatus_information(true); + obj->set_retain_status_information(true); + obj->set_retry_interval(1); + obj->set_stalking_options(action_svc_none); +} + +/** + * @brief If the provided key/value have their parsing that failed previously, + * it is possible they are a customvariable. A customvariable name has its + * name starting with an underscore. This method checks the possibility to + * store a customvariable in the given object and stores it if possible. + * + * @param key The name of the customvariable. + * @param value Its value as a string. + * + * @return True if the customvariable has been well stored. 
+ */ +bool anomalydetection_helper::insert_customvariable(std::string_view key, + std::string_view value) { + if (key[0] != '_') + return false; + + key.remove_prefix(1); + + Anomalydetection* obj = static_cast(mut_obj()); + auto* cvs = obj->mutable_customvariables(); + for (auto& c : *cvs) { + if (c.name() == key) { + c.set_value(value.data(), value.size()); + return true; + } + } + auto new_cv = cvs->Add(); + new_cv->set_name(key.data(), key.size()); + new_cv->set_value(value.data(), value.size()); + return true; +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/anomalydetection_helper.hh b/common/engine_conf/anomalydetection_helper.hh new file mode 100644 index 00000000000..9b8698ba728 --- /dev/null +++ b/common/engine_conf/anomalydetection_helper.hh @@ -0,0 +1,48 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_ANOMALYDETECTION +#define CCE_CONFIGURATION_ANOMALYDETECTION + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Anomalydetection message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to helper the developer to fill the message fields. 
+ */ +class anomalydetection_helper : public message_helper { + void _init(); + + public: + anomalydetection_helper(Anomalydetection* obj); + ~anomalydetection_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; + + bool insert_customvariable(std::string_view key, + std::string_view value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_ANOMALYDETECTION */ diff --git a/common/engine_conf/command_helper.cc b/common/engine_conf/command_helper.cc new file mode 100644 index 00000000000..fd222f1574e --- /dev/null +++ b/common/engine_conf/command_helper.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/command_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Command object. + * + * @param obj The Command object on which this helper works. The helper is not + * the owner of this object. It is just used to set the message default values. 
+ */ +command_helper::command_helper(Command* obj) + : message_helper(object_type::command, + obj, + {}, + Command::descriptor()->field_count()) { + _init(); +} + +/** + * @brief Check the validity of the Command object. + * + * @param err An error counter. + */ +void command_helper::check_validity(error_cnt& err) const { + const Command* o = static_cast(obj()); + + if (o->command_name().empty()) { + err.config_errors++; + throw msg_fmt("Command has no name (property 'command_name')"); + } + if (o->command_line().empty()) { + err.config_errors++; + throw msg_fmt("Command '{}' has no command line (property 'command_line')", + o->command_name()); + } +} + +/** + * @brief The initializer of the Command message. + */ +void command_helper::_init() { + Command* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/command_helper.hh b/common/engine_conf/command_helper.hh new file mode 100644 index 00000000000..777ed6eac26 --- /dev/null +++ b/common/engine_conf/command_helper.hh @@ -0,0 +1,42 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_COMMAND +#define CCE_CONFIGURATION_COMMAND + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Command message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. + */ +class command_helper : public message_helper { + void _init(); + + public: + command_helper(Command* obj); + ~command_helper() noexcept = default; + void check_validity(error_cnt& err) const override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_COMMAND */ diff --git a/common/engine_conf/connector_helper.cc b/common/engine_conf/connector_helper.cc new file mode 100644 index 00000000000..27273c600ec --- /dev/null +++ b/common/engine_conf/connector_helper.cc @@ -0,0 +1,68 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/connector_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Connector object. 
+ * + * @param obj The Connector object on which this helper works. The helper is not + * the owner of this object. + */ +connector_helper::connector_helper(Connector* obj) + : message_helper(object_type::connector, + obj, + {}, + Connector::descriptor()->field_count()) { + _init(); +} + +/** + * @brief Check the validity of the Connector object. + * + * @param err An error counter. + */ +void connector_helper::check_validity(error_cnt& err) const { + const Connector* o = static_cast(obj()); + + if (o->connector_name().empty()) { + err.config_errors++; + throw msg_fmt("Connector has no name (property 'connector_name')"); + } + if (o->connector_line().empty()) { + err.config_errors++; + throw msg_fmt( + "Connector '{}' has no command line (property 'connector_line')", + o->connector_name()); + } +} + +/** + * @brief The initializer of the Connector message. + */ +void connector_helper::_init() { + Connector* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/connector_helper.hh b/common/engine_conf/connector_helper.hh new file mode 100644 index 00000000000..cdc4476323e --- /dev/null +++ b/common/engine_conf/connector_helper.hh @@ -0,0 +1,42 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_CONNECTOR +#define CCE_CONFIGURATION_CONNECTOR + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Connector message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. + */ +class connector_helper : public message_helper { + void _init(); + + public: + connector_helper(Connector* obj); + ~connector_helper() noexcept = default; + void check_validity(error_cnt& err) const override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_CONNECTOR */ diff --git a/common/engine_conf/contact_helper.cc b/common/engine_conf/contact_helper.cc new file mode 100644 index 00000000000..dd48ee548b4 --- /dev/null +++ b/common/engine_conf/contact_helper.cc @@ -0,0 +1,143 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/contact_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Contact object. 
+ * + * @param obj The Contact object on which this helper works. The helper is not + * the owner of this object. + */ +contact_helper::contact_helper(Contact* obj) + : message_helper(object_type::contact, + obj, + { + {"contact_groups", "contactgroups"}, + }, + Contact::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Contact objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool contact_helper::hook(std::string_view key, const std::string_view& value) { + Contact* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... */ + key = validate_key(key); + + if (key == "host_notification_options") { + uint16_t options = action_hst_none; + if (fill_host_notification_options(&options, value)) { + obj->set_host_notification_options(options); + return true; + } else + return false; + } else if (key == "service_notification_options") { + uint16_t options = action_svc_none; + if (fill_service_notification_options(&options, value)) { + obj->set_service_notification_options(options); + return true; + } else + return false; + } else if (key == "contactgroups") { + fill_string_group(obj->mutable_contactgroups(), value); + return true; + } else if (key == "host_notification_commands") { + fill_string_group(obj->mutable_host_notification_commands(), value); + return true; + } else if (key == "service_notification_commands") { + fill_string_group(obj->mutable_service_notification_commands(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Contact object. + * + * @param err An error counter. 
+ */ +void contact_helper::check_validity(error_cnt& err) const { + const Contact* o = static_cast(obj()); + + if (o->contact_name().empty()) { + err.config_errors++; + throw msg_fmt("Contact has no name (property 'contact_name')"); + } +} + +/** + * @brief Initializer of the Contact object, in other words set its default + * values. + */ +void contact_helper::_init() { + Contact* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_can_submit_commands(true); + obj->set_host_notifications_enabled(true); + obj->set_host_notification_options(action_hst_none); + obj->set_retain_nonstatus_information(true); + obj->set_retain_status_information(true); + obj->set_service_notification_options(action_svc_none); + obj->set_service_notifications_enabled(true); +} + +/** + * @brief If parsing of the provided key/value failed previously, + * it is possible they are a customvariable. A customvariable has a + * name starting with an underscore. This method checks the possibility to + * store a customvariable in the given object and stores it if possible. + * + * @param key The name of the customvariable. + * @param value Its value as a string. + * + * @return True if the customvariable was stored successfully. 
+ */ +bool contact_helper::insert_customvariable(std::string_view key, + std::string_view value) { + if (key[0] != '_') + return false; + + key.remove_prefix(1); + + Contact* obj = static_cast(mut_obj()); + auto* cvs = obj->mutable_customvariables(); + for (auto& c : *cvs) { + if (c.name() == key) { + c.set_value(value.data(), value.size()); + return true; + } + } + auto new_cv = cvs->Add(); + new_cv->set_name(key.data(), key.size()); + new_cv->set_value(value.data(), value.size()); + return true; +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/contact_helper.hh b/common/engine_conf/contact_helper.hh new file mode 100644 index 00000000000..4c887d75381 --- /dev/null +++ b/common/engine_conf/contact_helper.hh @@ -0,0 +1,47 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_CONTACT +#define CCE_CONFIGURATION_CONTACT + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Contact message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. 
+ */ +class contact_helper : public message_helper { + void _init(); + + public: + contact_helper(Contact* obj); + ~contact_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; + + bool insert_customvariable(std::string_view key, + std::string_view value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_CONTACT */ diff --git a/common/engine_conf/contactgroup_helper.cc b/common/engine_conf/contactgroup_helper.cc new file mode 100644 index 00000000000..d5f2bc7848b --- /dev/null +++ b/common/engine_conf/contactgroup_helper.cc @@ -0,0 +1,84 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/contactgroup_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Contactgroup object. + * + * @param obj The Contactgroup object on which this helper works. The helper is + * not the owner of this object. 
+ */ +contactgroup_helper::contactgroup_helper(Contactgroup* obj) + : message_helper(object_type::contactgroup, + obj, + {}, + Contactgroup::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Contactgroup objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool contactgroup_helper::hook(std::string_view key, + const std::string_view& value) { + Contactgroup* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... */ + key = validate_key(key); + if (key == "contactgroup_members") { + fill_string_group(obj->mutable_contactgroup_members(), value); + return true; + } else if (key == "members") { + fill_string_group(obj->mutable_members(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Contactgroup object. + * + * @param err An error counter. + */ +void contactgroup_helper::check_validity(error_cnt& err) const { + const Contactgroup* o = static_cast(obj()); + + if (o->contactgroup_name().empty()) { + err.config_errors++; + throw msg_fmt("Contactgroup has no name (property 'contactgroup_name')"); + } +} + +/** + * @brief The initializer of the Contactgroup message. 
+ */ +void contactgroup_helper::_init() { + Contactgroup* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/contactgroup_helper.hh b/common/engine_conf/contactgroup_helper.hh new file mode 100644 index 00000000000..a97a055d950 --- /dev/null +++ b/common/engine_conf/contactgroup_helper.hh @@ -0,0 +1,44 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_CONTACTGROUP +#define CCE_CONFIGURATION_CONTACTGROUP + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Contactgroup message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. 
+ */ +class contactgroup_helper : public message_helper { + void _init(); + + public: + contactgroup_helper(Contactgroup* obj); + ~contactgroup_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_CONTACTGROUP */ diff --git a/common/engine_conf/file_info.hh b/common/engine_conf/file_info.hh new file mode 100644 index 00000000000..7737dd1144c --- /dev/null +++ b/common/engine_conf/file_info.hh @@ -0,0 +1,68 @@ +/** + * Copyright 2011-2013 Merethis + * Copyright 2014-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_FILE_INFO_HH +#define CCE_CONFIGURATION_FILE_INFO_HH + +namespace com::centreon::engine { + +namespace configuration { +class file_info { + uint32_t _line; + std::string _path; + + public: + file_info(std::string const& path = "", unsigned int line = 0) + : _line(line), _path(path) {} + file_info(file_info const& right) { operator=(right); } + ~file_info() noexcept {} + file_info& operator=(file_info const& right) { + if (this != &right) { + _line = right._line; + _path = right._path; + } + return *this; + } + bool operator==(file_info const& right) const noexcept { + return _line == right._line && _path == right._path; + } + bool operator!=(file_info const& right) const noexcept { + return !operator==(right); + } + friend std::ostream& operator<<(std::ostream& s, file_info const& info) { + s << "in file '" << info.path() << "' on line " << info.line(); + return s; + } + unsigned int line() const noexcept { return _line; } + void line(unsigned int line) noexcept { _line = line; } + std::string const& path() const noexcept { return _path; } + void path(std::string const& path) { _path = path; } +}; +} // namespace configuration + +} // namespace com::centreon::engine + +namespace fmt { +// formatter specializations for fmt +template <> +struct formatter + : ostream_formatter {}; +} // namespace fmt + +#endif // !CCE_CONFIGURATION_FILE_INFO_HH diff --git a/common/engine_conf/host_helper.cc b/common/engine_conf/host_helper.cc new file mode 100644 index 00000000000..2841618b9e9 --- /dev/null +++ b/common/engine_conf/host_helper.cc @@ -0,0 +1,225 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/host_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +#ifdef LEGACY_CONF +#error host_helper should not be compiled in this context. +#endif + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Host object. + * + * @param obj The Host object on which this helper works. The helper is not the + * owner of this object. + */ +host_helper::host_helper(Host* obj) + : message_helper(object_type::host, + obj, + { + {"_HOST_ID", "host_id"}, + {"host_groups", "hostgroups"}, + {"contact_groups", "contactgroups"}, + {"gd2_image", "statusmap_image"}, + {"normal_check_interval", "check_interval"}, + {"retry_check_interval", "retry_interval"}, + {"checks_enabled", "checks_active"}, + {"active_checks_enabled", "checks_active"}, + {"passive_checks_enabled", "checks_passive"}, + {"2d_coords", "coords_2d"}, + {"3d_coords", "coords_3d"}, + {"severity", "severity_id"}, + }, + Host::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Host objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool host_helper::hook(std::string_view key, const std::string_view& value) { + Host* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. 
We avoid one key allocation... */ + key = validate_key(key); + if (key == "contactgroups") { + fill_string_group(obj->mutable_contactgroups(), value); + return true; + } else if (key == "contacts") { + fill_string_group(obj->mutable_contacts(), value); + return true; + } else if (key == "hostgroups") { + fill_string_group(obj->mutable_hostgroups(), value); + return true; + } else if (key == "notification_options") { + uint16_t options = action_hst_none; + if (fill_host_notification_options(&options, value)) { + obj->set_notification_options(options); + return true; + } else + return false; + } else if (key == "parents") { + fill_string_group(obj->mutable_parents(), value); + return true; + } else if (key == "category_tags") { + auto tags{absl::StrSplit(value, ',')}; + bool ret = true; + + for (auto it = obj->tags().begin(); it != obj->tags().end();) { + if (it->second() == TagType::tag_hostcategory) + it = obj->mutable_tags()->erase(it); + else + ++it; + } + + for (auto& tag : tags) { + uint64_t id; + bool parse_ok; + parse_ok = absl::SimpleAtoi(tag, &id); + if (parse_ok) { + auto t = obj->add_tags(); + t->set_first(id); + t->set_second(TagType::tag_hostcategory); + } else { + ret = false; + } + } + return ret; + } else if (key == "group_tags") { + auto tags{absl::StrSplit(value, ',')}; + bool ret = true; + + for (auto it = obj->tags().begin(); it != obj->tags().end();) { + if (it->second() == TagType::tag_hostgroup) + it = obj->mutable_tags()->erase(it); + else + ++it; + } + + for (auto& tag : tags) { + uint64_t id; + bool parse_ok; + parse_ok = absl::SimpleAtoi(tag, &id); + if (parse_ok) { + auto t = obj->add_tags(); + t->set_first(id); + t->set_second(TagType::tag_hostgroup); + } else { + ret = false; + } + } + return ret; + } + return false; +} + +/** + * @brief Check the validity of the Host object. + * + * @param err An error counter. 
+ */ +void host_helper::check_validity(error_cnt& err) const { + const Host* o = static_cast(obj()); + + if (o->obj().register_()) { + if (o->host_name().empty()) { + err.config_errors++; + throw msg_fmt("Host has no name (property 'host_name')"); + } + if (o->address().empty()) { + err.config_errors++; + throw msg_fmt("Host '{}' has no address (property 'address')", + o->host_name()); + } + } +} + +/** + * @brief Initializer of the Host object, in other words set its default values. + */ +void host_helper::_init() { + Host* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_checks_active(true); + obj->set_checks_passive(true); + obj->set_check_freshness(false); + obj->set_check_interval(5); + obj->set_event_handler_enabled(true); + obj->set_first_notification_delay(0); + obj->set_flap_detection_enabled(true); + obj->set_flap_detection_options(action_hst_up | action_hst_down | + action_hst_unreachable); + obj->set_freshness_threshold(0); + obj->set_high_flap_threshold(0); + obj->set_initial_state(HostStatus::state_up); + obj->set_low_flap_threshold(0); + obj->set_max_check_attempts(3); + obj->set_notifications_enabled(true); + obj->set_notification_interval(0); + obj->set_notification_options(action_hst_up | action_hst_down | + action_hst_unreachable | action_hst_flapping | + action_hst_downtime); + obj->set_obsess_over_host(true); + obj->set_process_perf_data(true); + obj->set_retain_nonstatus_information(true); + obj->set_retain_status_information(true); + obj->set_retry_interval(1); + obj->set_stalking_options(action_hst_none); +} + +/** + * @brief If the provided key/value have their parsing to fail previously, + * it is possible they are a customvariable. A customvariable name has its + * name starting with an underscore. This method checks the possibility to + * store a customvariable in the given object and stores it if possible. + * + * @param key The name of the customvariable. + * @param value Its value as a string. 
+ * + * @return True if the customvariable has been well stored. + */ +bool host_helper::insert_customvariable(std::string_view key, + std::string_view value) { + if (key[0] != '_') + return false; + + key.remove_prefix(1); + + Host* obj = static_cast(mut_obj()); + auto* cvs = obj->mutable_customvariables(); + for (auto& c : *cvs) { + if (c.name() == key) { + c.set_value(value.data(), value.size()); + return true; + } + } + auto new_cv = cvs->Add(); + new_cv->set_name(key.data(), key.size()); + new_cv->set_value(value.data(), value.size()); + return true; +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/host_helper.hh b/common/engine_conf/host_helper.hh new file mode 100644 index 00000000000..63e8a010b98 --- /dev/null +++ b/common/engine_conf/host_helper.hh @@ -0,0 +1,47 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_HOST +#define CCE_CONFIGURATION_HOST + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Host message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. 
+ */ +class host_helper : public message_helper { + void _init(); + + public: + host_helper(Host* obj); + ~host_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; + + bool insert_customvariable(std::string_view key, + std::string_view value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_HOST */ diff --git a/common/engine_conf/hostdependency_helper.cc b/common/engine_conf/hostdependency_helper.cc new file mode 100644 index 00000000000..6dbd419f4d6 --- /dev/null +++ b/common/engine_conf/hostdependency_helper.cc @@ -0,0 +1,166 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/hostdependency_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Builds a key from a Hostdependency message. This is useful to check + * modifications in hostdependencies. + * + * @param hd The Hostdependency object to use to build the key. + * + * @return A number of type size_t. 
+ */ +size_t hostdependency_key(const Hostdependency& hd) { + assert(hd.hosts().data().size() == 1 && hd.hostgroups().data().empty() && + hd.dependent_hosts().data().size() == 1 && + hd.dependent_hostgroups().data().empty()); + return absl::HashOf(hd.dependency_period(), hd.dependency_type(), + hd.dependent_hosts().data(0), hd.hosts().data(0), + hd.inherits_parent(), hd.notification_failure_options()); +} + +/** + * @brief Constructor from a Hostdependency object. + * + * @param obj The Hostdependency object on which this helper works. The helper + * is not the owner of this object. + */ +hostdependency_helper::hostdependency_helper(Hostdependency* obj) + : message_helper( + object_type::hostdependency, + obj, + { + {"hostgroup", "hostgroups"}, + {"hostgroup_name", "hostgroups"}, + {"host", "hosts"}, + {"host_name", "hosts"}, + {"master_host", "hosts"}, + {"master_host_name", "hosts"}, + {"dependent_hostgroup", "dependent_hostgroups"}, + {"dependent_hostgroup_name", "dependent_hostgroups"}, + {"dependent_host", "dependent_hosts"}, + {"dependent_host_name", "dependent_hosts"}, + {"notification_failure_criteria", "notification_failure_options"}, + {"execution_failure_criteria", "execution_failure_options"}, + }, + Hostdependency::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Hostdependency objects has a + * particular behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool hostdependency_helper::hook(std::string_view key, + const std::string_view& value) { + Hostdependency* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + + if (key == "notification_failure_options" || + key == "execution_failure_options") { + auto opts = absl::StrSplit(value, ','); + uint16_t options = action_hd_none; + + for (auto& o : opts) { + std::string_view ov = absl::StripAsciiWhitespace(o); + if (ov == "o" || ov == "up") + options |= action_hd_up; + else if (ov == "d" || ov == "down") + options |= action_hd_down; + else if (ov == "u" || ov == "unreachable") + options |= action_hd_unreachable; + else if (ov == "p" || ov == "pending") + options |= action_hd_pending; + else if (ov == "n" || ov == "none") + options |= action_hd_none; + else if (ov == "a" || ov == "all") + options = action_hd_up | action_hd_down | action_hd_unreachable | + action_hd_pending; + else + return false; + } + if (key[0] == 'n') + obj->set_notification_failure_options(options); + else + obj->set_execution_failure_options(options); + return true; + } else if (key == "dependent_hostgroups") { + fill_string_group(obj->mutable_dependent_hostgroups(), value); + return true; + } else if (key == "dependent_hosts") { + fill_string_group(obj->mutable_dependent_hosts(), value); + return true; + } else if (key == "hostgroups") { + fill_string_group(obj->mutable_hostgroups(), value); + return true; + } else if (key == "hosts") { + fill_string_group(obj->mutable_hosts(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Hostdependency object. + * + * @param err An error counter. 
+ */ +void hostdependency_helper::check_validity(error_cnt& err) const { + const Hostdependency* o = static_cast(obj()); + + if (o->hosts().data().empty() && o->hostgroups().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Host dependency is not attached to any host or host group (properties " + "'hosts' or 'hostgroups', respectively)"); + } + if (o->dependent_hosts().data().empty() && + o->dependent_hostgroups().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Host dependency is not attached to any " + "dependent host or dependent host group (properties " + "'dependent_hosts' or 'dependent_hostgroups', " + "respectively)"); + } +} + +/** + * @brief Initializer of the Hostdependency object, in other words set its + * default values. + */ +void hostdependency_helper::_init() { + Hostdependency* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_execution_failure_options(action_hd_none); + obj->set_inherits_parent(false); + obj->set_notification_failure_options(action_hd_none); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostdependency_helper.hh b/common/engine_conf/hostdependency_helper.hh new file mode 100644 index 00000000000..36a45dab338 --- /dev/null +++ b/common/engine_conf/hostdependency_helper.hh @@ -0,0 +1,46 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_HOSTDEPENDENCY +#define CCE_CONFIGURATION_HOSTDEPENDENCY + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +size_t hostdependency_key(const Hostdependency& hd); + +/** + * @brief Helper for the Hostdependency message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. + */ +class hostdependency_helper : public message_helper { + void _init(); + + public: + hostdependency_helper(Hostdependency* obj); + ~hostdependency_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_HOSTDEPENDENCY */ diff --git a/common/engine_conf/hostescalation_helper.cc b/common/engine_conf/hostescalation_helper.cc new file mode 100644 index 00000000000..5d6e9c69df6 --- /dev/null +++ b/common/engine_conf/hostescalation_helper.cc @@ -0,0 +1,138 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/hostescalation_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Builds a key from a Hostescalation message. This is useful to check + * modifications in hostescalations. + * + * @param hd The Hostescalation object to use to build the key. + * + * @return A number of type size_t. + */ +size_t hostescalation_key(const Hostescalation& he) { + return absl::HashOf(he.hosts().data(0), + // he.contactgroups().data(), + he.escalation_options(), he.escalation_period(), + he.first_notification(), he.last_notification(), + he.notification_interval()); +} + +/** + * @brief Constructor from a Hostescalation object. + * + * @param obj The Hostescalation object on which this helper works. The helper + * is not the owner of this object. + */ +hostescalation_helper::hostescalation_helper(Hostescalation* obj) + : message_helper(object_type::hostescalation, + obj, + { + {"hostgroup", "hostgroups"}, + {"hostgroup_name", "hostgroups"}, + {"host", "hosts"}, + {"host_name", "hosts"}, + {"contact_groups", "contactgroups"}, + }, + Hostescalation::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Hostescalation objects has a + * particular behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool hostescalation_helper::hook(std::string_view key, + const std::string_view& value) { + Hostescalation* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + + if (key == "escalation_options") { + uint32_t options = action_he_none; + auto arr = absl::StrSplit(value, ','); + for (auto& v : arr) { + std::string_view vv = absl::StripAsciiWhitespace(v); + if (vv == "d" || vv == "down") + options |= action_he_down; + else if (vv == "u" || vv == "unreachable") + options |= action_he_unreachable; + else if (vv == "r" || vv == "recovery") + options |= action_he_recovery; + else if (vv == "n" || vv == "none") + options = action_he_none; + else if (vv == "a" || vv == "all") + options = action_he_down | action_he_unreachable | action_he_recovery; + else + return false; + } + obj->set_escalation_options(options); + return true; + } else if (key == "contactgroups") { + fill_string_group(obj->mutable_contactgroups(), value); + return true; + } else if (key == "hostgroups") { + fill_string_group(obj->mutable_hostgroups(), value); + return true; + } else if (key == "hosts") { + fill_string_group(obj->mutable_hosts(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Hostescalation object. + * + * @param err An error counter. + */ +void hostescalation_helper::check_validity(error_cnt& err) const { + const Hostescalation* o = static_cast(obj()); + + if (o->hosts().data().empty() && o->hostgroups().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Host escalation is not attached to any host or host group (properties " + "'hosts' or 'hostgroups', respectively)"); + } +} + +/** + * @brief Initializer of the Hostescalation object, in other words set its + * default values. 
+ */ +void hostescalation_helper::_init() { + Hostescalation* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_escalation_options(action_he_none); + obj->set_first_notification(-2); + obj->set_last_notification(-2); + obj->set_notification_interval(0); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostescalation_helper.hh b/common/engine_conf/hostescalation_helper.hh new file mode 100644 index 00000000000..18f7751b66f --- /dev/null +++ b/common/engine_conf/hostescalation_helper.hh @@ -0,0 +1,46 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_HOSTESCALATION +#define CCE_CONFIGURATION_HOSTESCALATION + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +size_t hostescalation_key(const Hostescalation& he); + +/** + * @brief Helper for the Hostescalation message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. 
+ */ +class hostescalation_helper : public message_helper { + void _init(); + + public: + hostescalation_helper(Hostescalation* obj); + ~hostescalation_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_HOSTESCALATION */ diff --git a/common/engine_conf/hostgroup_helper.cc b/common/engine_conf/hostgroup_helper.cc new file mode 100644 index 00000000000..8d994ab6432 --- /dev/null +++ b/common/engine_conf/hostgroup_helper.cc @@ -0,0 +1,84 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/hostgroup_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Hostgroup object. + * + * @param obj The Hostgroup object on which this helper works. The helper is not + * the owner of this object. + */ +hostgroup_helper::hostgroup_helper(Hostgroup* obj) + : message_helper(object_type::hostgroup, + obj, + {}, + Hostgroup::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Hostgroup objects has a particular + * behavior. 
These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool hostgroup_helper::hook(std::string_view key, + const std::string_view& value) { + Hostgroup* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... */ + key = validate_key(key); + if (key == "members") { + fill_string_group(obj->mutable_members(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Hostgroup object. + * + * @param err An error counter. + */ +void hostgroup_helper::check_validity(error_cnt& err) const { + const Hostgroup* o = static_cast(obj()); + + if (o->obj().register_()) { + if (o->hostgroup_name().empty()) { + err.config_errors++; + throw msg_fmt("Host group has no name (property 'hostgroup_name')"); + } + } +} + +/** + * @brief Initializer of the Hostgroup object, in other words set its default + * values. + */ +void hostgroup_helper::_init() { + Hostgroup* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/hostgroup_helper.hh b/common/engine_conf/hostgroup_helper.hh new file mode 100644 index 00000000000..93fb0d243ec --- /dev/null +++ b/common/engine_conf/hostgroup_helper.hh @@ -0,0 +1,44 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_HOSTGROUP +#define CCE_CONFIGURATION_HOSTGROUP + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +/** + * @brief Helper for the Hostgroup message. The helper is instanciated + * just after a message is created. It provides default values for it and also + * several methods to help the developer to fill the message fields. + */ +class hostgroup_helper : public message_helper { + void _init(); + + public: + hostgroup_helper(Hostgroup* obj); + ~hostgroup_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_HOSTGROUP */ diff --git a/common/engine_conf/message_helper.cc b/common/engine_conf/message_helper.cc new file mode 100644 index 00000000000..5c575221ca2 --- /dev/null +++ b/common/engine_conf/message_helper.cc @@ -0,0 +1,449 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/message_helper.hh" + +#include + +using ::google::protobuf::Descriptor; +using ::google::protobuf::FieldDescriptor; +using ::google::protobuf::Reflection; + +namespace com::centreon::engine::configuration { + +/** + * @brief Copy constructor of the base helper class. It contains basic methods + * to access/modify the message fields. This constructor is needed by the + * parser. + * + * @param other A reference to the helper to copy. + */ +message_helper::message_helper(const message_helper& other) + : _otype(other._otype), + _obj(other._obj), + _correspondence(other._correspondence), + _modified_field(other._modified_field), + _resolved(other._resolved) {} + +/** + * @brief Sugar function to fill a PairStringSet field in a Protobuf message + * from a string. This function is used when cfg files are read. + * + * @param grp The field to fill. + * @param value The value as string is a pair of strings seperated by a comma. + * + * @return A boolean that is True on success. 
+ */ +bool fill_pair_string_group(PairStringSet* grp, const std::string_view& value) { + auto arr = absl::StrSplit(value, ','); + + bool first = true; + auto itfirst = arr.begin(); + if (itfirst == arr.end()) + return true; + + do { + auto itsecond = itfirst; + ++itsecond; + if (itsecond == arr.end()) + return false; + std::string_view v1 = absl::StripAsciiWhitespace(*itfirst); + std::string_view v2 = absl::StripAsciiWhitespace(*itsecond); + if (first) { + if (v1[0] == '+') { + grp->set_additive(true); + v1 = v1.substr(1); + } + first = false; + } + bool found = false; + for (auto& m : grp->data()) { + if (*itfirst == m.first() && *itsecond == m.second()) { + found = true; + break; + } + } + if (!found) { + auto* p = grp->mutable_data()->Add(); + p->set_first(v1.data(), v1.size()); + p->set_second(v2.data(), v2.size()); + } + itfirst = itsecond; + ++itfirst; + } while (itfirst != arr.end()); + return true; +} + +/** + * @brief Sugar function to fill a PairStringSet field in a Protobuf message + * from two strings. This function is used when cfg files are read. + * + * @param grp The field to fill. + * @param key The first value in the pair to fill. + * @param value The second value in the pair to fill. + * + * @return A boolean that is True on success. + */ +bool fill_pair_string_group(PairStringSet* grp, + const std::string_view& key, + const std::string_view& value) { + std::string_view v1 = absl::StripAsciiWhitespace(key); + std::string_view v2 = absl::StripAsciiWhitespace(value); + bool found = false; + for (auto& m : grp->data()) { + if (v1 == m.first() && v2 == m.second()) { + found = true; + break; + } + } + if (!found) { + auto* p = grp->mutable_data()->Add(); + p->set_first(v1.data(), v1.size()); + p->set_second(v2.data(), v2.size()); + } + return true; +} + +/** + * @brief Sugar function to fill a StringSet field in a Protobuf message + * from a string. This function is used when cfg files are read. + * + * @param grp The field to fill. 
+ * @param value The value as string. + * + * @return A boolean that is True on success. + */ +void fill_string_group(StringSet* grp, const std::string_view& value) { + auto arr = absl::StrSplit(value, ','); + bool first = true; + for (std::string_view d : arr) { + d = absl::StripAsciiWhitespace(d); + if (first) { + if (d[0] == '+') { + grp->set_additive(true); + d = d.substr(1); + } + first = false; + } + bool found = false; + for (auto& v : grp->data()) { + if (v == d) { + found = true; + break; + } + } + if (!found) + grp->add_data(d.data(), d.size()); + } +} + +/** + * @brief Sugar function to fill a StringList field in a Protobuf message + * from a string. This function is used when cfg files are read. + * + * @param grp The field to fill. + * @param value The value as string. + * + * @return A boolean that is True on success. + */ +void fill_string_group(StringList* grp, const std::string_view& value) { + auto arr = absl::StrSplit(value, ','); + bool first = true; + for (std::string_view d : arr) { + d = absl::StripAsciiWhitespace(d); + if (first) { + if (d[0] == '+') { + grp->set_additive(true); + d = d.substr(1); + } + first = false; + } + grp->add_data(d.data(), d.size()); + } +} + +/** + * @brief Parse host notification options as string and set an uint32_t to + * the corresponding values. + * + * @param options A pointer to the uint32_t to set/ + * @param value A string of options seperated by a comma. + * + * @return True on success. 
+ */ +bool fill_host_notification_options(uint16_t* options, + const std::string_view& value) { + uint16_t tmp_options = action_hst_none; + auto arr = absl::StrSplit(value, ','); + for (auto& v : arr) { + std::string_view value = absl::StripAsciiWhitespace(v); + if (value == "d" || value == "down") + tmp_options |= action_hst_down; + else if (value == "u" || value == "unreachable") + tmp_options |= action_hst_unreachable; + else if (value == "r" || value == "recovery") + tmp_options |= action_hst_up; + else if (value == "f" || value == "flapping") + tmp_options |= action_hst_flapping; + else if (value == "s" || value == "downtime") + tmp_options |= action_hst_downtime; + else if (value == "n" || value == "none") + tmp_options = action_hst_none; + else if (value == "a" || value == "all") + tmp_options = action_hst_down | action_hst_unreachable | action_hst_up | + action_hst_flapping | action_hst_downtime; + else + return false; + } + *options = tmp_options; + return true; +} + +/** + * @brief Parse host notification options as string and set an uint32_t to + * the corresponding values. + * + * @param options A pointer to the uint32_t to set/ + * @param value A string of options seperated by a comma. + * + * @return True on success. 
+ */ +bool fill_service_notification_options(uint16_t* options, + const std::string_view& value) { + uint16_t tmp_options = action_svc_none; + auto arr = absl::StrSplit(value, ','); + for (auto& v : arr) { + std::string_view value = absl::StripAsciiWhitespace(v); + if (value == "u" || value == "unknown") + tmp_options |= action_svc_unknown; + else if (value == "w" || value == "warning") + tmp_options |= action_svc_warning; + else if (value == "c" || value == "critical") + tmp_options |= action_svc_critical; + else if (value == "r" || value == "recovery") + tmp_options |= action_svc_ok; + else if (value == "f" || value == "flapping") + tmp_options |= action_svc_flapping; + else if (value == "s" || value == "downtime") + tmp_options |= action_svc_downtime; + else if (value == "n" || value == "none") + tmp_options = action_svc_none; + else if (value == "a" || value == "all") + tmp_options = action_svc_unknown | action_svc_warning | + action_svc_critical | action_svc_ok | action_svc_flapping | + action_svc_downtime; + else + return false; + } + *options = tmp_options; + return true; +} + +/** + * @brief In some Engine configuration objects, several keys are possible for a + * same field. This function returns the good key to access the protobuf message + * field from another one. For example, "hosts", "hostname" may design the same + * field named hostname in the protobuf message. This function returns + * "hostname" for "hosts" or "hostname". + * + * @param key The key to check. + * + * @return The key used in the message. + */ +std::string_view message_helper::validate_key( + const std::string_view& key) const { + std::string_view retval; + auto it = _correspondence.find(key); + if (it != _correspondence.end()) + retval = it->second; + else + retval = key; + return retval; +} + +/** + * @brief This function does nothing but it is derived in several message + * helpers to insert custom variables. + * + * @param [[maybe_unused]] The name of the customvariable. 
+ * @param [[maybe_unused]] Its value. + * + * @return True on success. + */ +bool message_helper::insert_customvariable(std::string_view key + [[maybe_unused]], + std::string_view value + [[maybe_unused]]) { + return false; +} + +/** + * @brief Set the value given as a string to the object referenced by the key. + * If the key does not exist, the correspondence table may be used to find a + * replacement of the key. The function converts the value to the appropriate + * type. + * + * Another important point is that many configuration objects contain the Object + * obj message (something like an inheritance). This message contains three + * field names, name, use and register that are important for templating. If + * keys are one of these names, the function tries to work directly with the obj + * message. + * + * @param key The key to localize the object to set. + * @param value The value as string that will be converted to the good type. + * + * @return true on success. + */ +bool message_helper::set(const std::string_view& key, + const std::string_view& value) { + Message* msg = mut_obj(); + const Descriptor* desc = msg->GetDescriptor(); + const FieldDescriptor* f; + const Reflection* refl; + + /* Cases where we have to work on the obj Object (the parent object) */ + if (key == "name" || key == "register" || key == "use") { + f = desc->FindFieldByName("obj"); + if (f) { + refl = msg->GetReflection(); + Object* obj = static_cast(refl->MutableMessage(msg, f)); + + /* Optimization to avoid a new string comparaison */ + switch (key[0]) { + case 'n': // name + obj->set_name(std::string(value.data(), value.size())); + break; + case 'r': { // register + bool value_b; + if (!absl::SimpleAtob(value, &value_b)) + return false; + else + obj->set_register_(value_b); + } break; + case 'u': { // use + obj->mutable_use()->Clear(); + auto arr = absl::StrSplit(value, ','); + for (auto& t : arr) { + std::string v{absl::StripAsciiWhitespace(t)}; + 
obj->mutable_use()->Add(std::move(v)); + } + } break; + } + return true; + } + } + + f = desc->FindFieldByName(std::string(key.data(), key.size())); + if (f == nullptr) { + auto it = correspondence().find(key); + if (it != correspondence().end()) + f = desc->FindFieldByName(it->second); + if (f == nullptr) + return false; + } + refl = msg->GetReflection(); + switch (f->type()) { + case FieldDescriptor::TYPE_BOOL: { + bool val; + if (absl::SimpleAtob(value, &val)) { + refl->SetBool(static_cast(msg), f, val); + set_changed(f->index()); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_INT32: { + int32_t val; + if (absl::SimpleAtoi(value, &val)) { + refl->SetInt32(static_cast(msg), f, val); + set_changed(f->index()); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_UINT32: { + uint32_t val; + if (absl::SimpleAtoi(value, &val)) { + refl->SetUInt32(static_cast(msg), f, val); + set_changed(f->index()); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_UINT64: { + uint64_t val; + if (absl::SimpleAtoi(value, &val)) { + refl->SetUInt64(static_cast(msg), f, val); + set_changed(f->index()); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_DOUBLE: { + double val; + if (absl::SimpleAtod(value, &val)) { + refl->SetDouble(static_cast(msg), f, val); + set_changed(f->index()); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_STRING: + if (f->is_repeated()) { + refl->AddString(static_cast(msg), f, + std::string(value.data(), value.size())); + } else { + refl->SetString(static_cast(msg), f, + std::string(value.data(), value.size())); + } + set_changed(f->index()); + return true; + case FieldDescriptor::TYPE_MESSAGE: + if (!f->is_repeated()) { + Message* m = refl->MutableMessage(msg, f); + const Descriptor* d = m->GetDescriptor(); + + if (d && d->name() == "StringSet") { + StringSet* set = + static_cast(refl->MutableMessage(msg, f)); 
+ fill_string_group(set, value); + set_changed(f->index()); + return true; + } else if (d && d->name() == "StringList") { + StringList* lst = + static_cast(refl->MutableMessage(msg, f)); + fill_string_group(lst, value); + set_changed(f->index()); + return true; + } + } + case FieldDescriptor::TYPE_ENUM: { + auto* v = f->enum_type()->FindValueByName( + std::string(value.data(), value.size())); + if (v) + refl->SetEnumValue(msg, f, v->number()); + else + return false; + } break; + default: + return false; + } + return true; +} + +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/message_helper.hh b/common/engine_conf/message_helper.hh new file mode 100644 index 00000000000..2ac92d48b89 --- /dev/null +++ b/common/engine_conf/message_helper.hh @@ -0,0 +1,262 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_MESSAGE_HELPER_HH +#define CCE_CONFIGURATION_MESSAGE_HELPER_HH +#include +#include +#include "common/engine_conf/state.pb.h" + +#ifdef LEGACY_CONF +#error This library should not be compiled. +#endif + +namespace com::centreon::engine::configuration { + +/** + * @brief Error counter, it contains two attributes, one for warnings and + * another for errors. 
+ */ +struct error_cnt { + uint32_t config_warnings = 0; + uint32_t config_errors = 0; +}; + +/* Forward declarations */ +class command_helper; +class connector_helper; +class contact_helper; +class contactgroup_helper; +class host_helper; +class hostdependency_helper; +class hostescalation_helper; +class hostgroup_helper; +class service_helper; +class servicedependency_helper; +class serviceescalation_helper; +class servicegroup_helper; +class timeperiod_helper; +class anomalydetection_helper; +class severity_helper; +class tag_helper; +class state_helper; + +using ::google::protobuf::Message; + +bool fill_pair_string_group(PairStringSet* grp, const std::string_view& value); +bool fill_pair_string_group(PairStringSet* grp, + const std::string_view& key, + const std::string_view& value); +void fill_string_group(StringList* grp, const std::string_view& value); +void fill_string_group(StringSet* grp, const std::string_view& value); +bool fill_host_notification_options(uint16_t* options, + const std::string_view& value); +bool fill_service_notification_options(uint16_t* options, + const std::string_view& value); + +/** + * @brief The base message helper used by every helpers. It defines the common + * methods. + * + */ +class message_helper { + public: + enum object_type { + command = 0, + connector = 1, + contact = 2, + contactgroup = 3, + host = 4, + hostdependency = 5, + hostescalation = 6, + hostgroup = 8, + service = 9, + servicedependency = 10, + serviceescalation = 11, + servicegroup = 13, + timeperiod = 14, + anomalydetection = 15, + severity = 16, + tag = 17, + state = 18, + nb_types = 19, + }; + + private: + const object_type _otype; + Message* _obj; + /* + * The centengine cfg file allows several words for a same field. For example, + * we can have hosts, host, hostnames, hostname for the 'hostname' field. + * This map gives as value the field name corresponding to the name specified + * in the cfg file (the key). 
*/ + const absl::flat_hash_map _correspondence; + /* + * _modified_field is a vector used for inheritance. An object can inherit + * from another one. To apply the parent values, we must be sure this object + * does not already change the field before. And we cannot use the protobuf + * default values since configuration objects have their own default values. + * So, the idea is: + * 1. The protobuf object is created. + * 2. Thankgs to the helper, its default values are set. + * 3. _modified_field cases are all set to false. + * 4. Fields are modified while the cfg file is read and _modified_field is + * updated in consequence. + * 5. We can replace unchanged fields with the parent values if needed. + */ + std::vector _modified_field; + + /* When a configuration object is resolved, this flag is set to true. */ + bool _resolved = false; + + public: + /** + * @brief Constructor of message_helper. + * + * @param otype An object_type specifying the type of the configuration + * object. + * @param obj The Protobuf message associated to the helper. + * @param correspondence The correspondence table (see the _correspondence + * map description for more details). + * @param field_size The number of fields in the protobuf message (needed to + * initialize the _modified_field). 
+ */ + message_helper(object_type otype, + Message* obj, + absl::flat_hash_map&& correspondence, + size_t field_size) + : _otype(otype), + _obj(obj), + _correspondence{ + std::forward>( + correspondence)}, + _modified_field(field_size, false) {} + message_helper(const message_helper& other); + message_helper() = delete; + message_helper& operator=(const message_helper&) = delete; + virtual ~message_helper() noexcept = default; + const absl::flat_hash_map& correspondence() const { + return _correspondence; + } + object_type otype() const { return _otype; } + Message* mut_obj() { return _obj; } + const Message* obj() const { return _obj; } + void set_obj(Message* obj) { _obj = obj; } + void set_changed(int num) { _modified_field[num] = true; } + bool changed(int num) const { return _modified_field[num]; } + bool resolved() const { return _resolved; } + void resolve() { _resolved = true; } + + /** + * @brief For several keys, the parser of objects has some particular + * behaviors. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + * + * @return True on success. 
+ */ + virtual bool hook(std::string_view key [[maybe_unused]], + const std::string_view& value [[maybe_unused]]) { + return false; + } + virtual void check_validity(error_cnt& err [[maybe_unused]]) const {} + std::string_view validate_key(const std::string_view& key) const; + virtual bool insert_customvariable(std::string_view key, + std::string_view value); + template + static std::unique_ptr clone(const T& other, Message* obj) { + std::unique_ptr retval; + switch (other._otype) { + case command: + retval = std::make_unique( + static_cast(other)); + break; + case connector: + retval = std::make_unique( + static_cast(other)); + break; + case contact: + retval = std::make_unique( + static_cast(other)); + break; + case contactgroup: + retval = std::make_unique( + static_cast(other)); + break; + case host: + retval = std::make_unique( + static_cast(other)); + break; + case hostdependency: + retval = std::make_unique( + static_cast(other)); + break; + case hostescalation: + retval = std::make_unique( + static_cast(other)); + break; + case hostgroup: + retval = std::make_unique( + static_cast(other)); + break; + case service: + retval = std::make_unique( + static_cast(other)); + break; + case servicedependency: + retval = std::make_unique( + static_cast(other)); + break; + case serviceescalation: + retval = std::make_unique( + static_cast(other)); + break; + case servicegroup: + retval = std::make_unique( + static_cast(other)); + break; + case timeperiod: + retval = std::make_unique( + static_cast(other)); + break; + case anomalydetection: + retval = std::make_unique( + static_cast(other)); + break; + case severity: + retval = std::make_unique( + static_cast(other)); + break; + case tag: + retval = + std::make_unique(static_cast(other)); + break; + case state: + retval = std::make_unique( + static_cast(other)); + break; + } + retval->_obj = obj; + return retval; + } + bool set(const std::string_view& key, const std::string_view& value); +}; +} // namespace 
com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_MESSAGE_HELPER_HH */ diff --git a/common/engine_conf/parser.cc b/common/engine_conf/parser.cc new file mode 100644 index 00000000000..4948d0c7bee --- /dev/null +++ b/common/engine_conf/parser.cc @@ -0,0 +1,794 @@ +/** + * Copyright 2011-2014,2017-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "parser.hh" +#include +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" +#include "common/log_v2/log_v2.hh" + +#include "anomalydetection_helper.hh" +#include "command_helper.hh" +#include "connector_helper.hh" +#include "contact_helper.hh" +#include "contactgroup_helper.hh" +#include "host_helper.hh" +#include "hostdependency_helper.hh" +#include "hostescalation_helper.hh" +#include "hostgroup_helper.hh" +#include "message_helper.hh" +#include "service_helper.hh" +#include "servicedependency_helper.hh" +#include "serviceescalation_helper.hh" +#include "servicegroup_helper.hh" +#include "severity_helper.hh" +#include "state_helper.hh" +#include "tag_helper.hh" +#include "timeperiod_helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine::configuration; + +using com::centreon::common::log_v2::log_v2; +using com::centreon::exceptions::msg_fmt; +using ::google::protobuf::Descriptor; +using ::google::protobuf::FieldDescriptor; +using 
::google::protobuf::Message; +using ::google::protobuf::Reflection; + +/** + * @brief Reads the content of a text file and returns it in an std::string. + * + * @param file_path The file to read. + * + * @return The content as an std::string. + */ +static std::string read_file_content(const std::filesystem::path& file_path) { + std::ifstream in(file_path, std::ios::in); + std::string retval; + if (in) { + in.seekg(0, std::ios::end); + retval.resize(in.tellg()); + in.seekg(0, std::ios::beg); + in.read(&retval[0], retval.size()); + in.close(); + } else + throw msg_fmt("Parsing of resource file failed: can't open file '{}': {}", + file_path.string(), strerror(errno)); + return retval; +} + +/** + * Default constructor. + * + */ +parser::parser() : _logger{log_v2::instance().get(log_v2::CONFIG)} {} + +/** + * Parse configuration file. + * + * @param[in] path The configuration file path. + * @param[in] pb_config The state configuration to fill. + * @param[out] err The config warnings/errors counter. + */ +void parser::parse(std::string const& path, State* pb_config, error_cnt& err) { + /* Parse the global configuration file. */ + auto helper = std::make_unique(pb_config); + _pb_helper[pb_config] = std::move(helper); + _parse_global_configuration(path, pb_config); + + // parse configuration files. + _apply(pb_config->cfg_file(), pb_config, &parser::_parse_object_definitions); + // parse resource files. + _apply(pb_config->resource_file(), pb_config, &parser::_parse_resource_file); + + // parse configuration directories. + _apply(pb_config->cfg_dir(), pb_config, + &parser::_parse_directory_configuration); + + // Apply template. + _resolve_template(pb_config, err); + + _cleanup(pb_config); +} + +/** + * @brief Clean the configuration: + * * remove template objects. 
+ * + * @param pb_config + */ +void parser::_cleanup(State* pb_config) { + int i = 0; + for (auto it = pb_config->mutable_services()->begin(); + it != pb_config->mutable_services()->end();) { + if (!it->obj().register_()) { + pb_config->mutable_services()->erase(it); + it = pb_config->mutable_services()->begin() + i; + } else { + ++it; + ++i; + } + } + i = 0; + for (auto it = pb_config->mutable_anomalydetections()->begin(); + it != pb_config->mutable_anomalydetections()->end();) { + if (!it->obj().register_()) { + pb_config->mutable_anomalydetections()->erase(it); + it = pb_config->mutable_anomalydetections()->begin() + i; + } else { + ++it; + ++i; + } + } +} + +/** + * Parse the directory configuration. + * + * @param[in] path The directory path. + */ +void parser::_parse_directory_configuration(const std::string& path, + State* pb_config) { + for (auto& entry : std::filesystem::directory_iterator(path)) { + if (entry.is_regular_file() && entry.path().extension() == ".cfg") + _parse_object_definitions(entry.path().string(), pb_config); + } +} + +/** + * Parse the global configuration file. + * + * @param[in] path The configuration path. 
+ */ +void parser::_parse_global_configuration(const std::string& path, + State* pb_config) { + _logger->info("Reading main configuration file '{}'.", path); + + std::string content = read_file_content(path); + + pb_config->set_cfg_main(path); + _current_line = 0; + _current_path = path; + + auto tab{absl::StrSplit(content, '\n')}; + state_helper* cfg_helper = + static_cast(_pb_helper[pb_config].get()); + for (auto it = tab.begin(); it != tab.end(); ++it) { + std::string_view l = absl::StripAsciiWhitespace(*it); + if (l.empty() || l[0] == '#') + continue; + std::pair p = + absl::StrSplit(l, absl::MaxSplits('=', 1)); + p.first = absl::StripTrailingAsciiWhitespace(p.first); + p.second = absl::StripLeadingAsciiWhitespace(p.second); + bool retval = false; + /* particular cases with hook */ + retval = cfg_helper->hook(p.first, p.second); + if (!retval) { + if (!cfg_helper->set_global(p.first, p.second)) + _logger->error("Unable to parse '{}' key with value '{}'", p.first, + p.second); + } + } +} + +/** + * @brief Parse objects files (services.cfg, hosts.cfg, timeperiods.cfg... + * + * This function almost uses protobuf reflection to set values but it may fail + * because of the syntax used in these files that can be a little different + * from the message format. + * + * Two mechanisms are used to complete the reflection. + * * A hastable named correspondence is used in case of several + * keys to access to the same value. This is, for example, the case for + * host_id which is historically also named _HOST_ID. 
+ * * A std::functioninfo("Processing object config file '{}'", path); + + std::string content = read_file_content(path); + + auto tab{absl::StrSplit(content, '\n')}; + std::string ll; + bool append_to_previous_line = false; + std::unique_ptr msg; + std::unique_ptr msg_helper; + + int current_line = 1; + std::string type; + + for (auto it = tab.begin(); it != tab.end(); ++it, current_line++) { + std::string_view l = absl::StripAsciiWhitespace(*it); + if (l.empty() || l[0] == '#' || l[0] == ';') + continue; + + /* Multiline */ + if (append_to_previous_line) { + if (l[l.size() - 1] == '\\') { + ll.append(l.data(), l.size() - 1); + continue; + } else { + ll.append(l.data(), l.size()); + append_to_previous_line = false; + l = ll; + } + } else if (l[l.size() - 1] == '\\') { + ll = std::string(l.data(), l.size() - 1); + append_to_previous_line = true; + continue; + } + + if (msg) { + if (l.empty()) + continue; + /* is it time to close the definition? */ + if (l == "}") { + const Descriptor* desc = msg->GetDescriptor(); + const FieldDescriptor* f = desc->FindFieldByName("obj"); + const Reflection* refl = msg->GetReflection(); + if (f) { + const Object& obj = + *static_cast(&refl->GetMessage(*msg, f)); + auto otype = msg_helper->otype(); + _pb_helper[msg.get()] = std::move(msg_helper); + if (!obj.name().empty()) { + pb_map_object& tmpl = _pb_templates[otype]; + auto it = tmpl.find(obj.name()); + if (it != tmpl.end()) + throw msg_fmt( + "Parsing of '{}' failed in cfg file: {} already exists", type, + obj.name()); + if (!obj.register_()) + tmpl[obj.name()] = std::move(msg); + else { + auto copy = std::unique_ptr(msg->New()); + copy->CopyFrom(*msg); + _pb_helper[copy.get()] = + message_helper::clone(*_pb_helper[msg.get()], copy.get()); + tmpl[obj.name()] = std::move(copy); + } + } + if (obj.register_()) { + switch (otype) { + case message_helper::contact: + pb_config->mutable_contacts()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::host: + 
pb_config->mutable_hosts()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::service: + pb_config->mutable_services()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::anomalydetection: + pb_config->mutable_anomalydetections()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::hostdependency: + pb_config->mutable_hostdependencies()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::servicedependency: + pb_config->mutable_servicedependencies()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::timeperiod: + pb_config->mutable_timeperiods()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::command: + pb_config->mutable_commands()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::hostgroup: + pb_config->mutable_hostgroups()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::servicegroup: + pb_config->mutable_servicegroups()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::tag: + pb_config->mutable_tags()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::contactgroup: + pb_config->mutable_contactgroups()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::connector: + pb_config->mutable_connectors()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::severity: + pb_config->mutable_severities()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::serviceescalation: + pb_config->mutable_serviceescalations()->AddAllocated( + static_cast(msg.release())); + break; + case message_helper::hostescalation: + pb_config->mutable_hostescalations()->AddAllocated( + static_cast(msg.release())); + break; + default: + _logger->critical("Attempt to add an object of unknown type"); + } + } + } + msg = nullptr; + } else { + /* Main part 
where keys/values are read */ + /* ------------------------------------ */ + size_t pos = l.find_first_of(" \t"); + std::string_view key = l.substr(0, pos); + if (pos != std::string::npos) { + l.remove_prefix(pos); + l = absl::StripLeadingAsciiWhitespace(l); + } else + l = {}; + + bool retval = false; + /* particular cases with hook */ + retval = msg_helper->hook(key, l); + + if (!retval) { + /* Classical part */ + if (!msg_helper->set(key, l)) { + if (!msg_helper->insert_customvariable(key, l)) + throw msg_fmt( + "Unable to parse '{}' key with value '{}' in message of type " + "'{}'", + key, l, type); + } + } + } + } else { + if (!absl::StartsWith(l, "define") || !std::isspace(l[6])) + throw msg_fmt( + "Parsing of object definition failed in file '{}' at line {}: " + "Unexpected start definition", + path, current_line); + /* Let's remove the first 6 characters ("define") */ + l = absl::StripLeadingAsciiWhitespace(l.substr(6)); + if (l.empty() || l[l.size() - 1] != '{') + throw msg_fmt( + "Parsing of object definition failed in file '{}' at line {}; " + "unexpected start definition", + path, current_line); + l = absl::StripTrailingAsciiWhitespace(l.substr(0, l.size() - 1)); + type = std::string(l.data(), l.size()); + if (type == "contact") { + msg = std::make_unique(); + msg_helper = + std::make_unique(static_cast(msg.get())); + } else if (type == "host") { + msg = std::make_unique(); + msg_helper = + std::make_unique(static_cast(msg.get())); + } else if (type == "service") { + msg = std::make_unique(); + msg_helper = + std::make_unique(static_cast(msg.get())); + } else if (type == "anomalydetection") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "hostdependency") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "servicedependency") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type 
== "timeperiod") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "command") { + msg = std::make_unique(); + msg_helper = + std::make_unique(static_cast(msg.get())); + } else if (type == "hostgroup") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "servicegroup") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "tag") { + msg = std::make_unique(); + msg_helper = std::make_unique(static_cast(msg.get())); + } else if (type == "contactgroup") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "connector") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "severity") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "serviceescalation") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else if (type == "hostescalation") { + msg = std::make_unique(); + msg_helper = std::make_unique( + static_cast(msg.get())); + } else { + _logger->error("Type '{}' not yet supported by the parser", type); + assert(1 == 18); + } + } + } +} + +/** + * Parse the resource file. + * + * @param[in] path The resource file path. 
+ */ +void parser::_parse_resource_file(const std::string& path, State* pb_config) { + _logger->info("Reading resource file '{}'", path); + + std::string content = read_file_content(path); + + auto tab{absl::StrSplit(content, '\n')}; + int current_line = 1; + for (auto it = tab.begin(); it != tab.end(); ++it, current_line++) { + std::string_view l = absl::StripLeadingAsciiWhitespace(*it); + if (l.empty() || l[0] == '#' || l[0] == ';') + continue; + std::pair p = + absl::StrSplit(l, absl::MaxSplits('=', 1)); + p.first = absl::StripTrailingAsciiWhitespace(p.first); + p.second = absl::StripLeadingAsciiWhitespace(p.second); + if (p.first.size() >= 3 && p.first[0] == '$' && + p.first[p.first.size() - 1] == '$') { + p.first = p.first.substr(1, p.first.size() - 2); + (*pb_config + ->mutable_users())[std::string(p.first.data(), p.first.size())] = + std::string(p.second.data(), p.second.size()); + } else + throw msg_fmt("Invalid user key '{}'", p.first); + } +} + +/** + * @brief For each type of object in the State, templates are resolved that is + * to say, children inherite from parents properties. + * + * @param pb_config The State containing all the object to handle. + * @param err The config warnings/errors counter. 
+ */ +void parser::_resolve_template(State* pb_config, error_cnt& err) { + for (Command& c : *pb_config->mutable_commands()) + _resolve_template(_pb_helper[&c], _pb_templates[message_helper::command]); + + for (Connector& c : *pb_config->mutable_connectors()) + _resolve_template(_pb_helper[&c], _pb_templates[message_helper::connector]); + + for (Contact& c : *pb_config->mutable_contacts()) + _resolve_template(_pb_helper[&c], _pb_templates[message_helper::contact]); + + for (Contactgroup& cg : *pb_config->mutable_contactgroups()) + _resolve_template(_pb_helper[&cg], + _pb_templates[message_helper::contactgroup]); + + for (Host& h : *pb_config->mutable_hosts()) + _resolve_template(_pb_helper[&h], _pb_templates[message_helper::host]); + + for (Service& s : *pb_config->mutable_services()) + _resolve_template(_pb_helper[&s], _pb_templates[message_helper::service]); + + for (Anomalydetection& a : *pb_config->mutable_anomalydetections()) + _resolve_template(_pb_helper[&a], + _pb_templates[message_helper::anomalydetection]); + + for (Serviceescalation& se : *pb_config->mutable_serviceescalations()) + _resolve_template(_pb_helper[&se], + _pb_templates[message_helper::serviceescalation]); + + for (Hostescalation& he : *pb_config->mutable_hostescalations()) + _resolve_template(_pb_helper[&he], + _pb_templates[message_helper::hostescalation]); + + for (const Command& c : pb_config->commands()) + _pb_helper.at(&c)->check_validity(err); + + for (const Contact& c : pb_config->contacts()) + _pb_helper.at(&c)->check_validity(err); + + for (const Contactgroup& cg : pb_config->contactgroups()) + _pb_helper.at(&cg)->check_validity(err); + + for (const Host& h : pb_config->hosts()) + _pb_helper.at(&h)->check_validity(err); + + for (const Hostdependency& hd : pb_config->hostdependencies()) + _pb_helper.at(&hd)->check_validity(err); + + for (const Hostescalation& he : pb_config->hostescalations()) + _pb_helper.at(&he)->check_validity(err); + + for (const Hostgroup& hg : 
pb_config->hostgroups()) + _pb_helper.at(&hg)->check_validity(err); + + for (const Service& s : pb_config->services()) + _pb_helper.at(&s)->check_validity(err); + + for (const Hostdependency& hd : pb_config->hostdependencies()) + _pb_helper.at(&hd)->check_validity(err); + + for (const Servicedependency& sd : pb_config->servicedependencies()) + _pb_helper.at(&sd)->check_validity(err); + + for (const Servicegroup& sg : pb_config->servicegroups()) + _pb_helper.at(&sg)->check_validity(err); + + for (const Timeperiod& t : pb_config->timeperiods()) + _pb_helper.at(&t)->check_validity(err); + + for (const Anomalydetection& a : pb_config->anomalydetections()) + _pb_helper.at(&a)->check_validity(err); + + for (const Tag& t : pb_config->tags()) + _pb_helper.at(&t)->check_validity(err); + + for (const Servicegroup& sg : pb_config->servicegroups()) + _pb_helper.at(&sg)->check_validity(err); + + for (const Severity& sv : pb_config->severities()) + _pb_helper.at(&sv)->check_validity(err); + + for (const Tag& t : pb_config->tags()) + _pb_helper.at(&t)->check_validity(err); + + for (const Serviceescalation& se : pb_config->serviceescalations()) + _pb_helper.at(&se)->check_validity(err); + + for (const Hostescalation& he : pb_config->hostescalations()) + _pb_helper.at(&he)->check_validity(err); + + for (const Connector& c : pb_config->connectors()) + _pb_helper.at(&c)->check_validity(err); +} + +/** + * @brief Resolvers a message given by its helper and using the given templates. + * + * @param msg_helper The message helper. + * @param tmpls The templates to use. 
+ */ +void parser::_resolve_template(std::unique_ptr& msg_helper, + const pb_map_object& tmpls) { + if (msg_helper->resolved()) + return; + Message* msg = msg_helper->mut_obj(); + + msg_helper->resolve(); + const Descriptor* desc = msg->GetDescriptor(); + const FieldDescriptor* f = desc->FindFieldByName("obj"); + const Reflection* refl = msg->GetReflection(); + if (!f) + return; + + Object* obj = static_cast(refl->MutableMessage(msg, f)); + for (const std::string& u : obj->use()) { + auto it = tmpls.find(u); + if (it == tmpls.end()) + throw msg_fmt("Cannot merge object of type '{}'", u); + _resolve_template(_pb_helper[it->second.get()], tmpls); + _merge(msg_helper, it->second.get()); + } +} + +/** + * @brief For each unchanged field in the Protobuf object stored in msg_helper, + * we copy the corresponding field from tmpl. This is the key for the + * inheritence with cfg files. + * + * @param msg_helper A message_help holding a protobuf message + * @param tmpl A template of the same type as the on in the msg_helper + */ +void parser::_merge(std::unique_ptr& msg_helper, + Message* tmpl) { + Message* msg = msg_helper->mut_obj(); + const Descriptor* desc = msg->GetDescriptor(); + const Reflection* refl = msg->GetReflection(); + std::string tmp_str; + + for (int i = 0; i < desc->field_count(); ++i) { + const FieldDescriptor* f = desc->field(i); + if (f->name() != "obj") { + /* Optional? If not defined in template, we continue. 
*/ + const auto* oof = f->containing_oneof(); + if (oof) { + if (!refl->GetOneofFieldDescriptor(*tmpl, oof)) + continue; + } + + if ((oof && !refl->GetOneofFieldDescriptor(*msg, oof)) || + !msg_helper->changed(f->index())) { + if (f->is_repeated()) { + switch (f->cpp_type()) { + case FieldDescriptor::CPPTYPE_STRING: { + size_t count = refl->FieldSize(*tmpl, f); + for (size_t j = 0; j < count; ++j) { + const std::string& s = + refl->GetRepeatedStringReference(*tmpl, f, j, &tmp_str); + size_t count_msg = refl->FieldSize(*msg, f); + std::string tmp_str1; + bool found = false; + for (size_t k = 0; k < count_msg; ++k) { + const std::string& s1 = + refl->GetRepeatedStringReference(*msg, f, k, &tmp_str1); + if (s1 == s) { + found = true; + break; + } + } + if (!found) + refl->AddString(msg, f, s); + } + msg_helper->set_changed(f->index()); + } break; + case FieldDescriptor::CPPTYPE_MESSAGE: { + size_t count = refl->FieldSize(*tmpl, f); + for (size_t j = 0; j < count; ++j) { + const Message& m = refl->GetRepeatedMessage(*tmpl, f, j); + const Descriptor* d = m.GetDescriptor(); + size_t count_msg = refl->FieldSize(*msg, f); + bool found = false; + for (size_t k = 0; k < count_msg; ++k) { + const Message& m1 = refl->GetRepeatedMessage(*msg, f, k); + const Descriptor* d1 = m1.GetDescriptor(); + if (d && d1) { + if (d->name() == "PairUint64_32" && + d1->name() == "PairUint64_32") { + const PairUint64_32& p = + static_cast(m); + const PairUint64_32& p1 = + static_cast(m1); + if (p.first() == p1.first() && + p.second() == p1.second()) { + found = true; + break; + } + } else if (d->name() == "CustomVariable" && + d1->name() == "CustomVariable") { + const CustomVariable& cv = + static_cast(m); + const CustomVariable& cv1 = + static_cast(m1); + if (cv.name() == cv1.name()) { + _logger->info("same name"); + found = true; + break; + } + } else { + assert("not good at all" == nullptr); + } + } + } + if (!found) { + Message* new_m = refl->AddMessage(msg, f); + new_m->CopyFrom(m); + } + 
} + msg_helper->set_changed(f->index()); + } break; + default: + _logger->error( + "Repeated type f->cpp_type = {} not managed in the " + "inheritence.", + static_cast(f->cpp_type())); + assert(124 == 294); + } + } else { + switch (f->cpp_type()) { + case FieldDescriptor::CPPTYPE_STRING: + refl->SetString(msg, f, refl->GetString(*tmpl, f)); + msg_helper->set_changed(f->index()); + break; + case FieldDescriptor::CPPTYPE_BOOL: + refl->SetBool(msg, f, refl->GetBool(*tmpl, f)); + msg_helper->set_changed(f->index()); + break; + case FieldDescriptor::CPPTYPE_INT32: + refl->SetInt32(msg, f, refl->GetInt32(*tmpl, f)); + msg_helper->set_changed(f->index()); + break; + case FieldDescriptor::CPPTYPE_UINT32: + refl->SetUInt32(msg, f, refl->GetUInt32(*tmpl, f)); + msg_helper->set_changed(f->index()); + break; + case FieldDescriptor::CPPTYPE_UINT64: + refl->SetUInt64(msg, f, refl->GetUInt64(*tmpl, f)); + msg_helper->set_changed(f->index()); + break; + case FieldDescriptor::CPPTYPE_ENUM: + refl->SetEnum(msg, f, refl->GetEnum(*tmpl, f)); + msg_helper->set_changed(f->index()); + break; + case FieldDescriptor::CPPTYPE_MESSAGE: { + Message* m = refl->MutableMessage(msg, f); + const Descriptor* d = m->GetDescriptor(); + + if (d && d->name() == "StringSet") { + StringSet* orig_set = + static_cast(refl->MutableMessage(tmpl, f)); + StringSet* set = + static_cast(refl->MutableMessage(msg, f)); + if (set->additive()) { + for (auto& v : orig_set->data()) { + bool found = false; + for (auto& s : *set->mutable_data()) { + if (s == v) { + found = true; + break; + } + } + if (!found) + set->add_data(v); + } + } else if (set->data().empty()) + *set->mutable_data() = orig_set->data(); + + } else if (d && d->name() == "StringList") { + StringList* orig_lst = + static_cast(refl->MutableMessage(tmpl, f)); + StringList* lst = + static_cast(refl->MutableMessage(msg, f)); + if (lst->additive()) { + for (auto& v : orig_lst->data()) + lst->add_data(v); + } else if (lst->data().empty()) + 
*lst->mutable_data() = orig_lst->data(); + } + msg_helper->set_changed(f->index()); + } break; + + default: + _logger->error("Entry '{}' of type {} not managed in merge", + f->name(), f->type_name()); + assert(123 == 293); + } + } + } + } + } +} diff --git a/common/engine_conf/parser.hh b/common/engine_conf/parser.hh new file mode 100644 index 00000000000..35a3656e90f --- /dev/null +++ b/common/engine_conf/parser.hh @@ -0,0 +1,103 @@ +/** + * Copyright 2011-2013,2017-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#ifndef CCE_CONFIGURATION_PARSER_HH +#define CCE_CONFIGURATION_PARSER_HH + +#include "common/engine_conf/message_helper.hh" +#include "state_helper.hh" +// #include "host.hh" + +namespace com::centreon::engine::configuration { + +/** + * @brief Each instance of a pb_map_object is about one type of message, for + * example it contains only commands. It is just a map containing commands + * indexed by their name. + */ +using pb_map_object = + absl::flat_hash_map>; + +/** + * @brief A Protobuf message has only predefined default values, 0 for integers, + * empty for an array, etc. And we cannot change these default values. Because + * we still work with cfg files, we also have some tricks when reading values, + * some arrays are stored as strings, some bitfields are also stored as strings, + * etc. So we need a helper to proceed in these operations. 
This is what is a + * message_helper. Each protobuf configuration message has its own helper. The + * map below makes the relation between a new message and its helper. + */ +using pb_map_helper = + absl::flat_hash_map>; + +class parser { + std::shared_ptr _logger; + + /** + * @brief An array of pb_map_objects. At index object_type::command we get all + * the templates of commands, at index object_type::service we get all the + * templates of services, etc. + */ + std::array + _pb_templates; + + /** + * @brief The map of helpers of all the configuration objects parsed by this + * parser. + */ + pb_map_helper _pb_helper; + + void _merge(std::unique_ptr& msg_helper, Message* tmpl); + void _cleanup(State* pb_config); + + public: + parser(); + parser(const parser&) = delete; + parser& operator=(const parser&) = delete; + ~parser() noexcept = default; + void parse(const std::string& path, State* config, error_cnt& err); + + private: + /** + * Apply parse method into list. + * + * @param[in] lst The list to apply action. + * @param[in] pfunc The method to apply. 
+ */ + template + void _apply(const L& lst, + State* pb_config, + void (parser::*pfunc)(const std::string&, State*)) { + for (auto& f : lst) + (this->*pfunc)(f, pb_config); + } + void _parse_directory_configuration(std::string const& path, + State* pb_config); + void _parse_global_configuration(const std::string& path, State* pb_config); + void _parse_object_definitions(const std::string& path, State* pb_config); + void _parse_resource_file(std::string const& path, State* pb_config); + void _resolve_template(State* pb_config, error_cnt& err); + void _resolve_template(std::unique_ptr& msg_helper, + const pb_map_object& tmpls); + + unsigned int _current_line; + std::string _current_path; +}; +} // namespace com::centreon::engine::configuration + +#endif // !CCE_CONFIGURATION_PARSER_HH diff --git a/common/engine_conf/service_helper.cc b/common/engine_conf/service_helper.cc new file mode 100644 index 00000000000..dc8ef517612 --- /dev/null +++ b/common/engine_conf/service_helper.cc @@ -0,0 +1,286 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "service_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/message_helper.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Service object. 
+ * + * @param obj The Service object on which this helper works. The helper is not + * the owner of this object. + */ +service_helper::service_helper(Service* obj) + : message_helper(object_type::service, + obj, + { + {"host", "host_name"}, + {"hosts", "host_name"}, + {"_SERVICE_ID", "service_id"}, + {"description", "service_description"}, + {"service_groups", "servicegroups"}, + {"contact_groups", "contactgroups"}, + {"normal_check_interval", "check_interval"}, + {"retry_check_interval", "retry_interval"}, + {"active_checks_enabled", "checks_active"}, + {"passive_checks_enabled", "checks_passive"}, + {"severity", "severity_id"}, + }, + Service::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Service objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool service_helper::hook(std::string_view key, const std::string_view& value) { + Service* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + if (key == "contactgroups") { + fill_string_group(obj->mutable_contactgroups(), value); + return true; + } else if (key == "contacts") { + fill_string_group(obj->mutable_contacts(), value); + return true; + } else if (key == "flap_detection_options") { + uint16_t options(action_svc_none); + auto values = absl::StrSplit(value, ','); + for (auto it = values.begin(); it != values.end(); ++it) { + std::string_view v = absl::StripAsciiWhitespace(*it); + if (v == "o" || v == "ok") + options |= action_svc_ok; + else if (v == "w" || v == "warning") + options |= action_svc_warning; + else if (v == "u" || v == "unknown") + options |= action_svc_unknown; + else if (v == "c" || v == "critical") + options |= action_svc_critical; + else if (v == "n" || v == "none") + options |= action_svc_none; + else if (v == "a" || v == "all") + options = action_svc_ok | action_svc_warning | action_svc_unknown | + action_svc_critical; + else + return false; + } + obj->set_flap_detection_options(options); + return true; + } else if (key == "initial_state") { + ServiceStatus initial_state; + if (value == "o" || value == "ok") + initial_state = ServiceStatus::state_ok; + else if (value == "w" || value == "warning") + initial_state = ServiceStatus::state_warning; + else if (value == "u" || value == "unknown") + initial_state = ServiceStatus::state_unknown; + else if (value == "c" || value == "critical") + initial_state = ServiceStatus::state_critical; + else + return false; + obj->set_initial_state(initial_state); + return true; + } else if (key == "notification_options") { + uint16_t options(action_svc_none); + if (fill_service_notification_options(&options, value)) { + obj->set_notification_options(options); + return true; + } else + return false; + } else if (key == "servicegroups") { + fill_string_group(obj->mutable_servicegroups(), value); + return true; + } else if (key == "stalking_options") { + uint16_t options(action_svc_none); + auto values = 
absl::StrSplit(value, ','); + for (auto it = values.begin(); it != values.end(); ++it) { + std::string_view v = absl::StripAsciiWhitespace(*it); + if (v == "u" || v == "unknown") + options |= action_svc_unknown; + else if (v == "o" || v == "ok") + options |= action_svc_ok; + else if (v == "w" || v == "warning") + options |= action_svc_warning; + else if (v == "c" || v == "critical") + options |= action_svc_critical; + else if (v == "n" || v == "none") + options = action_svc_none; + else if (v == "a" || v == "all") + options = action_svc_ok | action_svc_unknown | action_svc_warning | + action_svc_critical; + else + return false; + } + obj->set_stalking_options(options); + return true; + } else if (key == "category_tags") { + auto tags{absl::StrSplit(value, ',')}; + bool ret = true; + + for (auto it = obj->tags().begin(); it != obj->tags().end();) { + if (it->second() == TagType::tag_servicecategory) + it = obj->mutable_tags()->erase(it); + else + ++it; + } + + for (auto& tag : tags) { + uint64_t id; + bool parse_ok; + parse_ok = absl::SimpleAtoi(tag, &id); + if (parse_ok) { + auto t = obj->add_tags(); + t->set_first(id); + t->set_second(TagType::tag_servicecategory); + } else { + ret = false; + } + } + return ret; + } else if (key == "group_tags") { + auto tags{absl::StrSplit(value, ',')}; + bool ret = true; + + for (auto it = obj->tags().begin(); it != obj->tags().end();) { + if (it->second() == TagType::tag_servicegroup) + it = obj->mutable_tags()->erase(it); + else + ++it; + } + + for (auto& tag : tags) { + uint64_t id; + bool parse_ok; + parse_ok = absl::SimpleAtoi(tag, &id); + if (parse_ok) { + auto t = obj->add_tags(); + t->set_first(id); + t->set_second(TagType::tag_servicegroup); + } else { + ret = false; + } + } + return ret; + } + return false; +} + +/** + * @brief Check the validity of the Service object. + * + * @param err An error counter. 
+ */ +void service_helper::check_validity(error_cnt& err) const { + const Service* o = static_cast(obj()); + + if (o->obj().register_()) { + if (o->service_description().empty()) { + err.config_errors++; + throw msg_fmt("Services must have a non-empty description"); + } + if (o->check_command().empty()) { + err.config_errors++; + throw msg_fmt("Service '{}' has an empty check command", + o->service_description()); + } + if (o->host_name().empty()) { + err.config_errors++; + throw msg_fmt("Service '{}' must contain one host name", + o->service_description()); + } + } +} + +/** + * @brief Initializer of the Service object, in other words set its default + * values. + */ +void service_helper::_init() { + Service* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_acknowledgement_timeout(0); + obj->set_checks_active(true); + obj->set_checks_passive(true); + obj->set_check_freshness(0); + obj->set_check_interval(5); + obj->set_event_handler_enabled(true); + obj->set_first_notification_delay(0); + obj->set_flap_detection_enabled(true); + obj->set_flap_detection_options(action_svc_ok | action_svc_warning | + action_svc_unknown | action_svc_critical); + obj->set_freshness_threshold(0); + obj->set_high_flap_threshold(0); + obj->set_initial_state(ServiceStatus::state_ok); + obj->set_is_volatile(false); + obj->set_low_flap_threshold(0); + obj->set_max_check_attempts(3); + obj->set_notifications_enabled(true); + obj->set_notification_interval(0); + obj->set_notification_options(action_svc_ok | action_svc_warning | + action_svc_critical | action_svc_unknown | + action_svc_flapping | action_svc_downtime); + obj->set_obsess_over_service(true); + obj->set_process_perf_data(true); + obj->set_retain_nonstatus_information(true); + obj->set_retain_status_information(true); + obj->set_retry_interval(1); + obj->set_stalking_options(action_svc_none); +} + +/** + * @brief If the provided key/value have their parsing to fail previously, + * it is possible 
they are a customvariable. A customvariable name has its + * name starting with an underscore. This method checks the possibility to + * store a customvariable in the given object and stores it if possible. + * + * @param key The name of the customvariable. + * @param value Its value as a string. + * + * @return True if the customvariable has been well stored. + */ +bool service_helper::insert_customvariable(std::string_view key, + std::string_view value) { + if (key[0] != '_') + return false; + + key.remove_prefix(1); + + Service* obj = static_cast(mut_obj()); + auto* cvs = obj->mutable_customvariables(); + for (auto& c : *cvs) { + if (c.name() == key) { + c.set_value(value.data(), value.size()); + return true; + } + } + auto new_cv = cvs->Add(); + new_cv->set_name(key.data(), key.size()); + new_cv->set_value(value.data(), value.size()); + return true; +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/service_helper.hh b/common/engine_conf/service_helper.hh new file mode 100644 index 00000000000..3104c0b2601 --- /dev/null +++ b/common/engine_conf/service_helper.hh @@ -0,0 +1,43 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_SERVICE +#define CCE_CONFIGURATION_SERVICE + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +class service_helper : public message_helper { + void _init(); + + public: + service_helper(Service* obj); + ~service_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; + + bool insert_customvariable(std::string_view key, + std::string_view value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_SERVICE */ diff --git a/common/engine_conf/servicedependency_helper.cc b/common/engine_conf/servicedependency_helper.cc new file mode 100644 index 00000000000..91e9c44226d --- /dev/null +++ b/common/engine_conf/servicedependency_helper.cc @@ -0,0 +1,199 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/servicedependency_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +size_t servicedependency_key(const Servicedependency& sd) { + return absl::HashOf(sd.dependency_period(), sd.dependency_type(), + sd.hosts().data(0), sd.service_description().data(0), + sd.dependent_hosts().data(0), + sd.dependent_service_description().data(0), + sd.execution_failure_options(), sd.inherits_parent(), + sd.notification_failure_options()); +} + +/** + * @brief Constructor from a Servicedependency object. + * + * @param obj The Servicedependency object on which this helper works. The + * helper is not the owner of this object. + */ +servicedependency_helper::servicedependency_helper(Servicedependency* obj) + : message_helper( + object_type::servicedependency, + obj, + { + {"servicegroup", "servicegroups"}, + {"servicegroup_name", "servicegroups"}, + {"hostgroup", "hostgroups"}, + {"hostgroup_name", "hostgroups"}, + {"host", "hosts"}, + {"host_name", "hosts"}, + {"master_host", "hosts"}, + {"master_host_name", "hosts"}, + {"description", "service_description"}, + {"master_description", "service_description"}, + {"master_service_description", "service_description"}, + {"dependent_servicegroup", "dependent_servicegroups"}, + {"dependent_servicegroup_name", "dependent_servicegroups"}, + {"dependent_hostgroup", "dependent_hostgroups"}, + {"dependent_hostgroup_name", "dependent_hostgroups"}, + {"dependent_host", "dependent_hosts"}, + {"dependent_host_name", "dependent_hosts"}, + {"dependent_description", "dependent_service_description"}, + {"execution_failure_criteria", "execution_failure_options"}, + {"notification_failure_criteria", "notification_failure_options"}, + }, + Servicedependency::descriptor()->field_count()) { + _init(); +} + +/** + * 
@brief For several keys, the parser of Servicedependency objects has a + * particular behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool servicedependency_helper::hook(std::string_view key, + const std::string_view& value) { + Servicedependency* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... */ + key = validate_key(key); + + if (key == "execution_failure_options" || + key == "notification_failure_options") { + uint32_t options = action_sd_none; + auto arr = absl::StrSplit(value, ','); + for (auto& v : arr) { + std::string_view vv = absl::StripAsciiWhitespace(v); + if (vv == "o" || vv == "ok") + options |= action_sd_ok; + else if (vv == "u" || vv == "unknown") + options |= action_sd_unknown; + else if (vv == "w" || vv == "warning") + options |= action_sd_warning; + else if (vv == "c" || vv == "critical") + options |= action_sd_critical; + else if (vv == "p" || vv == "pending") + options |= action_sd_pending; + else if (vv == "n" || vv == "none") + options = action_sd_none; + else if (vv == "a" || vv == "all") + options = action_sd_ok | action_sd_warning | action_sd_critical | + action_sd_pending; + else + return false; + } + if (key[0] == 'e') + obj->set_execution_failure_options(options); + else + obj->set_notification_failure_options(options); + return true; + } else if (key == "dependent_hostgroups") { + fill_string_group(obj->mutable_dependent_hostgroups(), value); + return true; + } else if (key == "dependent_hosts") { + fill_string_group(obj->mutable_dependent_hosts(), value); + return true; + } else if (key == "dependent_servicegroups") { + fill_string_group(obj->mutable_dependent_servicegroups(), value); + return true; + } else if (key == "dependent_service_description") { + fill_string_group(obj->mutable_dependent_service_description(), value); + 
return true; + } else if (key == "hostgroups") { + fill_string_group(obj->mutable_hostgroups(), value); + return true; + } else if (key == "hosts") { + fill_string_group(obj->mutable_hosts(), value); + return true; + } else if (key == "servicegroups") { + fill_string_group(obj->mutable_servicegroups(), value); + return true; + } else if (key == "service_description") { + fill_string_group(obj->mutable_service_description(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Servicedependency object. + * + * @param err An error counter. + */ +void servicedependency_helper::check_validity(error_cnt& err) const { + const Servicedependency* o = static_cast(obj()); + + /* Check base service(s). */ + if (o->servicegroups().data().empty()) { + if (o->service_description().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Service dependency is not attached to any service or service group " + "(properties 'service_description' or 'servicegroup_name', " + "respectively)"); + } else if (o->hosts().data().empty() && o->hostgroups().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Service dependency is not attached to any host or host group " + "(properties 'host_name' or 'hostgroup_name', respectively)"); + } + } + + /* Check dependent service(s). 
*/ + if (o->dependent_servicegroups().data().empty()) { + if (o->dependent_service_description().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Service dependency is not attached to " + "any dependent service or dependent service group " + "(properties 'dependent_service_description' or " + "'dependent_servicegroup_name', respectively)"); + } else if (o->dependent_hosts().data().empty() && + o->dependent_hostgroups().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Service dependency is not attached to " + "any dependent host or dependent host group (properties " + "'dependent_host_name' or 'dependent_hostgroup_name', " + "respectively)"); + } + } +} + +/** + * @brief Initializer of the Servicedependency object, in other words set its + * default values. + */ +void servicedependency_helper::_init() { + Servicedependency* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_execution_failure_options(action_sd_none); + obj->set_inherits_parent(false); + obj->set_notification_failure_options(action_sd_none); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/servicedependency_helper.hh b/common/engine_conf/servicedependency_helper.hh new file mode 100644 index 00000000000..affa3b1df08 --- /dev/null +++ b/common/engine_conf/servicedependency_helper.hh @@ -0,0 +1,42 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_SERVICEDEPENDENCY +#define CCE_CONFIGURATION_SERVICEDEPENDENCY + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +size_t servicedependency_key(const Servicedependency& sd); + +class servicedependency_helper : public message_helper { + void _init(); + + public: + servicedependency_helper(Servicedependency* obj); + ~servicedependency_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_SERVICEDEPENDENCY */ diff --git a/common/engine_conf/serviceescalation_helper.cc b/common/engine_conf/serviceescalation_helper.cc new file mode 100644 index 00000000000..0247bb5be80 --- /dev/null +++ b/common/engine_conf/serviceescalation_helper.cc @@ -0,0 +1,152 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/serviceescalation_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +size_t serviceescalation_key(const Serviceescalation& se) { + return absl::HashOf(se.hosts().data(0), se.service_description().data(0), + // se.contactgroups(), + se.escalation_options(), se.escalation_period(), + se.first_notification(), se.last_notification(), + se.notification_interval()); +} + +/** + * @brief Constructor from a Serviceescalation object. + * + * @param obj The Serviceescalation object on which this helper works. The + * helper is not the owner of this object. + */ +serviceescalation_helper::serviceescalation_helper(Serviceescalation* obj) + : message_helper(object_type::serviceescalation, + obj, + { + {"host", "hosts"}, + {"host_name", "hosts"}, + {"description", "service_description"}, + {"servicegroup", "servicegroups"}, + {"servicegroup_name", "servicegroups"}, + {"hostgroup", "hostgroups"}, + {"hostgroup_name", "hostgroups"}, + {"contact_groups", "contactgroups"}, + }, + Serviceescalation::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Serviceescalation objects has a + * particular behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool serviceescalation_helper::hook(std::string_view key, + const std::string_view& value) { + Serviceescalation* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + + if (key == "escalation_options") { + uint32_t options = action_he_none; + auto arr = absl::StrSplit(value, ','); + for (auto& v : arr) { + std::string_view vv = absl::StripAsciiWhitespace(v); + if (vv == "w" || vv == "warning") + options |= action_se_warning; + else if (vv == "u" || vv == "unknown") + options |= action_se_unknown; + else if (vv == "c" || vv == "critical") + options |= action_se_critical; + else if (vv == "r" || vv == "recovery") + options |= action_se_recovery; + else if (vv == "n" || vv == "none") + options = action_se_none; + else if (vv == "a" || vv == "all") + options = action_se_warning | action_se_unknown | action_se_critical | + action_se_recovery; + else + return false; + } + obj->set_escalation_options(options); + return true; + } else if (key == "contactgroups") { + fill_string_group(obj->mutable_contactgroups(), value); + return true; + } else if (key == "hostgroups") { + fill_string_group(obj->mutable_hostgroups(), value); + return true; + } else if (key == "hosts") { + fill_string_group(obj->mutable_hosts(), value); + return true; + } else if (key == "servicegroups") { + fill_string_group(obj->mutable_servicegroups(), value); + return true; + } else if (key == "service_description") { + fill_string_group(obj->mutable_service_description(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Serviceescalation object. + * + * @param err An error counter. 
+ */ +void serviceescalation_helper::check_validity(error_cnt& err) const { + const Serviceescalation* o = static_cast(obj()); + + if (o->servicegroups().data().empty()) { + if (o->service_description().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Service escalation is not attached to " + "any service or service group (properties " + "'service_description' and 'servicegroup_name', " + "respectively)"); + } else if (o->hosts().data().empty() && o->hostgroups().data().empty()) { + err.config_errors++; + throw msg_fmt( + "Service escalation is not attached to " + "any host or host group (properties 'host_name' or " + "'hostgroup_name', respectively)"); + } + } +} + +/** + * @brief Initializer of the Serviceescalation object, in other words set its + * default values. + */ +void serviceescalation_helper::_init() { + Serviceescalation* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->set_escalation_options(action_se_none); + obj->set_first_notification(-2); + obj->set_last_notification(-2); + obj->set_notification_interval(0); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/serviceescalation_helper.hh b/common/engine_conf/serviceescalation_helper.hh new file mode 100644 index 00000000000..b161bd27c06 --- /dev/null +++ b/common/engine_conf/serviceescalation_helper.hh @@ -0,0 +1,42 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_SERVICEESCALATION +#define CCE_CONFIGURATION_SERVICEESCALATION + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +size_t serviceescalation_key(const Serviceescalation& se); + +class serviceescalation_helper : public message_helper { + void _init(); + + public: + serviceescalation_helper(Serviceescalation* obj); + ~serviceescalation_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_SERVICEESCALATION */ diff --git a/common/engine_conf/servicegroup_helper.cc b/common/engine_conf/servicegroup_helper.cc new file mode 100644 index 00000000000..0ff2c23560f --- /dev/null +++ b/common/engine_conf/servicegroup_helper.cc @@ -0,0 +1,82 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/servicegroup_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Servicegroup object. + * + * @param obj The Servicegroup object on which this helper works. The helper is + * not the owner of this object. + */ +servicegroup_helper::servicegroup_helper(Servicegroup* obj) + : message_helper(object_type::servicegroup, + obj, + {}, + Servicegroup::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Servicegroup objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool servicegroup_helper::hook(std::string_view key, + const std::string_view& value) { + Servicegroup* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... */ + key = validate_key(key); + if (key == "members") { + fill_pair_string_group(obj->mutable_members(), value); + return true; + } else if (key == "servicegroup_members") { + fill_string_group(obj->mutable_servicegroup_members(), value); + return true; + } + return false; +} + +/** + * @brief Check the validity of the Servicegroup object. + * + * @param err An error counter. 
+ */ +void servicegroup_helper::check_validity(error_cnt& err) const { + const Servicegroup* o = static_cast(obj()); + + if (o->servicegroup_name().empty()) { + err.config_errors++; + throw msg_fmt("Service group has no name (property 'servicegroup_name')"); + } +} + +void servicegroup_helper::_init() { + Servicegroup* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/servicegroup_helper.hh b/common/engine_conf/servicegroup_helper.hh new file mode 100644 index 00000000000..39230a3b9a0 --- /dev/null +++ b/common/engine_conf/servicegroup_helper.hh @@ -0,0 +1,40 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_SERVICEGROUP +#define CCE_CONFIGURATION_SERVICEGROUP + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +class servicegroup_helper : public message_helper { + void _init(); + + public: + servicegroup_helper(Servicegroup* obj); + ~servicegroup_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_SERVICEGROUP */ diff --git a/common/engine_conf/severity_helper.cc b/common/engine_conf/severity_helper.cc new file mode 100644 index 00000000000..086415045d0 --- /dev/null +++ b/common/engine_conf/severity_helper.cc @@ -0,0 +1,114 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/severity_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "common/engine_conf/state.pb.h" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Severity object. + * + * @param obj The Severity object on which this helper works. 
The helper is not + * the owner of this object. + */ +severity_helper::severity_helper(Severity* obj) + : message_helper(object_type::severity, + obj, + { + {"severity_id", "id"}, + {"severity_level", "level"}, + {"severity_icon_id", "icon_id"}, + {"severity_type", "type"}, + }, + Severity::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Severity objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool severity_helper::hook(std::string_view key, + const std::string_view& value) { + Severity* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... */ + key = validate_key(key); + + if (key == "id" || key == "severity_id") { + uint64_t id; + if (absl::SimpleAtoi(value, &id)) + obj->mutable_key()->set_id(id); + else + return false; + return true; + } else if (key == "type" || key == "severity_type") { + if (value == "host") + obj->mutable_key()->set_type(SeverityType::host); + else if (value == "service") + obj->mutable_key()->set_type(SeverityType::service); + else + return false; + return true; + } + return false; +} + +/** + * @brief Check the validity of the Severity object. + * + * @param err An error counter. 
+ */ +void severity_helper::check_validity(error_cnt& err) const { + const Severity* o = static_cast(obj()); + + if (o->severity_name().empty()) + throw msg_fmt("Severity has no name (property 'severity_name')"); + if (o->key().id() == 0) { + err.config_errors++; + throw msg_fmt( + "Severity id must not be less than 1 (property 'severity_id')"); + } + if (o->level() == 0) { + err.config_errors++; + throw msg_fmt("Severity level must not be less than 1 (property 'level')"); + } + if (o->key().type() == SeverityType::none) { + err.config_errors++; + throw msg_fmt("Severity type must be one of 'service' or 'host'"); + } +} + +/** + * @brief Initializer of the Severity object, in other words set its default + * values. + */ +void severity_helper::_init() { + Severity* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->mutable_key()->set_id(0); + obj->mutable_key()->set_type(SeverityType::none); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/severity_helper.hh b/common/engine_conf/severity_helper.hh new file mode 100644 index 00000000000..ef54828d091 --- /dev/null +++ b/common/engine_conf/severity_helper.hh @@ -0,0 +1,41 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_SEVERITY +#define CCE_CONFIGURATION_SEVERITY + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +class severity_helper : public message_helper { + void _init(); + + public: + using key_type = std::pair; + severity_helper(Severity* obj); + ~severity_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_SEVERITY */ diff --git a/common/engine_conf/state.proto b/common/engine_conf/state.proto new file mode 100644 index 00000000000..a773c32c128 --- /dev/null +++ b/common/engine_conf/state.proto @@ -0,0 +1,770 @@ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +syntax = "proto3"; + +package com.centreon.engine.configuration; + +enum DateType { + us = 0; // U.S. 
(MM-DD-YYYY HH:MM:SS)
+  euro = 1;            // European (DD-MM-YYYY HH:MM:SS)
+  iso8601 = 2;         // ISO8601 (YYYY-MM-DD HH:MM:SS)
+  strict_iso8601 = 3;  // ISO8601 (YYYY-MM-DDTHH:MM:SS)
+}
+
+message InterCheckDelay {
+  enum IcdType {
+    none = 0;   // no inter-check delay
+    dumb = 1;   // dumb delay of 1 second
+    smart = 2;  // smart delay
+    user = 3;   // user-specified delay
+  }
+  IcdType type = 1;
+  double user_value = 2;
+}
+
+message InterleaveFactor {
+  enum IFType {
+    ilf_user = 0;   // user-specified interleave factor
+    ilf_smart = 1;  // smart interleave
+  }
+  IFType type = 1;
+  int32 user_value = 2;
+}
+
+enum LogLevel {
+  off = 0;
+  critical = 1;
+  error = 2;
+  warning = 3;
+  info = 4;
+  debug = 5;
+  trace = 6;
+}
+
+message State {
+  string cfg_main = 1;
+  repeated string cfg_file = 2;
+  repeated string resource_file = 3;
+  int32 instance_heartbeat_interval = 4;
+  bool check_service_freshness = 5;
+  bool enable_flap_detection = 6;
+  string rpc_listen_address = 7;
+  uint32 grpc_port = 8;
+  map<string, string> users = 9;
+  repeated string cfg_dir = 10;
+  string state_retention_file = 11;
+  repeated string broker_module = 12;
+  string broker_module_directory = 13;
+  bool enable_macros_filter = 14;
+  StringSet macros_filter = 15;
+
+  bool log_v2_enabled = 16;
+  bool log_legacy_enabled = 17;
+  bool use_syslog = 18;
+  string log_v2_logger = 19;
+  string log_file = 20;
+  string debug_file = 21;
+  uint64 debug_level = 22;
+  uint32 debug_verbosity = 23;
+  uint32 max_debug_file_size = 24;
+  bool log_pid = 25;
+  bool log_file_line = 26;
+  int32 log_flush_period = 27;
+  LogLevel log_level_checks = 28;
+  LogLevel log_level_commands = 29;
+  LogLevel log_level_comments = 30;
+  LogLevel log_level_config = 31;
+  LogLevel log_level_downtimes = 32;
+  LogLevel log_level_eventbroker = 33;
+  LogLevel log_level_events = 34;
+  LogLevel log_level_external_command = 35;
+  LogLevel log_level_functions = 36;
+  LogLevel log_level_macros = 37;
+  LogLevel log_level_notifications = 38;
+  LogLevel log_level_process = 39;
+ 
LogLevel log_level_runtime = 40; + LogLevel log_level_otl = 41; + string global_host_event_handler = 42; + string global_service_event_handler = 43; + string illegal_object_chars = 44; + string illegal_output_chars = 45; + uint32 interval_length = 46; + string ochp_command = 47; + string ocsp_command = 48; + string use_timezone = 49; + bool accept_passive_host_checks = 50; + bool accept_passive_service_checks = 51; + int32 additional_freshness_latency = 52; + uint32 cached_host_check_horizon = 53; + bool check_external_commands = 54; + bool check_host_freshness = 55; + uint32 check_reaper_interval = 56; + bool enable_event_handlers = 57; + bool enable_notifications = 58; + bool execute_host_checks = 59; + bool execute_service_checks = 60; + uint32 max_host_check_spread = 61; + uint32 max_service_check_spread = 62; + uint32 notification_timeout = 63; + bool obsess_over_hosts = 64; + bool obsess_over_services = 65; + bool process_performance_data = 66; + bool soft_state_dependencies = 67; + bool use_large_installation_tweaks = 68; + string admin_email = 69; + string admin_pager = 70; + bool allow_empty_hostgroup_assignment = 71; + string command_file = 72; + string status_file = 73; + string poller_name = 74; + uint32 poller_id = 75; + bool auto_reschedule_checks = 76; + uint32 auto_rescheduling_interval = 77; + uint32 auto_rescheduling_window = 78; + uint32 cached_service_check_horizon = 79; + bool check_orphaned_hosts = 80; + bool check_orphaned_services = 81; + int32 command_check_interval = 82; + bool command_check_interval_is_seconds = 83; + bool enable_environment_macros = 84; + uint32 event_broker_options = 85; + uint32 event_handler_timeout = 86; + int32 external_command_buffer_slots = 87; + float high_host_flap_threshold = 88; + float high_service_flap_threshold = 89; + int32 host_check_timeout = 90; + uint32 host_freshness_check_interval = 91; + uint32 service_freshness_check_interval = 92; + bool log_event_handlers = 93; + bool log_external_commands = 94; 
+ bool log_notifications = 95; + bool log_passive_checks = 96; + bool log_host_retries = 97; + bool log_service_retries = 98; + uint32 max_log_file_size = 99; + float low_host_flap_threshold = 100; + float low_service_flap_threshold = 101; + uint32 max_parallel_service_checks = 102; + uint32 ochp_timeout = 103; + uint32 ocsp_timeout = 104; + int32 perfdata_timeout = 105; + uint32 retained_host_attribute_mask = 106; + uint32 retained_process_host_attribute_mask = 107; + uint32 retained_contact_host_attribute_mask = 108; + uint32 retained_contact_service_attribute_mask = 109; + bool retain_state_information = 110; + uint32 retention_scheduling_horizon = 111; + uint32 retention_update_interval = 112; + uint32 service_check_timeout = 113; + float sleep_time = 114; + uint32 status_update_interval = 115; + uint32 time_change_threshold = 116; + bool use_regexp_matches = 117; + bool use_retained_program_state = 118; + bool use_retained_scheduling_info = 119; + bool use_setpgid = 120; + bool use_true_regexp_matching = 121; + DateType date_format = 122; + InterCheckDelay host_inter_check_delay_method = 123; + InterCheckDelay service_inter_check_delay_method = 124; + InterleaveFactor service_interleave_factor_method = 125; + bool enable_predictive_host_dependency_checks = 126; + bool enable_predictive_service_dependency_checks = 127; + bool send_recovery_notifications_anyways = 128; + bool host_down_disable_service_checks = 129; + + repeated Command commands = 130; + repeated Connector connectors = 131; + repeated Contact contacts = 132; + repeated Contactgroup contactgroups = 133; + repeated Hostdependency hostdependencies = 134; + repeated Hostescalation hostescalations = 135; + repeated Hostgroup hostgroups = 136; + repeated Host hosts = 137; + repeated Servicedependency servicedependencies = 138; + repeated Serviceescalation serviceescalations = 139; + repeated Servicegroup servicegroups = 140; + repeated Service services = 141; + repeated Anomalydetection 
anomalydetections = 142;
+  repeated Timeperiod timeperiods = 143;
+  repeated Severity severities = 144;
+  repeated Tag tags = 145;
+  map<string, string> user = 146;
+}
+
+message Value {
+  oneof value {
+    bool value_b = 1;
+    int32 value_i32 = 2;
+    uint32 value_u32 = 3;
+    string value_str = 4;
+    Timerange value_tr = 5;
+    Daterange value_dr = 6;
+    Timeperiod value_tp = 7;
+    Connector value_cn = 8;
+    Command value_co = 9;
+    CustomVariable value_cv = 10;
+    Contact value_ct = 11;
+  }
+}
+
+message Key {
+  oneof key {
+    int32 i32 = 1;
+    string str = 2;
+  }
+}
+
+message Path {
+  repeated Key key = 1;
+}
+
+message PathWithValue {
+  Path path = 1;
+  Value val = 2;
+}
+
+message PathWithPair {
+  Path path = 1;
+  Value val1 = 2;
+  Value val2 = 3;
+}
+
+message DiffState {
+  repeated PathWithValue to_add = 1;
+  repeated Path to_remove = 2;
+  repeated PathWithValue to_modify = 3;
+}
+
+message CustomVariable {
+  string name = 1;
+  string value = 2;
+  bool is_sent = 3;
+  // bool modified = 4;
+}
+
+enum HostStatus {
+  state_up = 0;
+  state_down = 1;
+  state_unreachable = 2;
+}
+
+enum ServiceStatus {
+  state_ok = 0;
+  state_warning = 1;
+  state_critical = 2;
+  state_unknown = 3;
+}
+
+enum SeverityType {
+  service = 0;
+  host = 1;
+  none = 2;
+}
+
+message Object {
+  string name = 1;
+  bool register = 2;
+  repeated string use = 3;
+}
+
+message Point2d {
+  int32 x = 1;
+  int32 y = 2;
+}
+
+message Point3d {
+  double x = 1;
+  double y = 2;
+  double z = 3;
+}
+
+message KeyType {
+  uint64 id = 1;
+  uint32 type = 2;
+}
+
+message DaysArray {
+  repeated Timerange sunday = 1;
+  repeated Timerange monday = 2;
+  repeated Timerange tuesday = 3;
+  repeated Timerange wednesday = 4;
+  repeated Timerange thursday = 5;
+  repeated Timerange friday = 6;
+  repeated Timerange saturday = 7;
+}
+
+message Timerange {
+  uint64 range_start = 1;
+  uint64 range_end = 2;
+}
+
+message Daterange {
+  enum TypeRange {
+    calendar_date = 0;
+    month_date = 1;
+    month_day = 2;
+    month_week_day = 3;
+    week_day = 4;
+    none = 
5; // Instead of -1 in original config + } + TypeRange type = 1; + int32 syear = 2; // Start year. + int32 smon = 3; // Start month. + // Start day of month (may 3rd, last day in feb). + int32 smday = 4; + int32 swday = 5; // Start day of week (thursday). + // Start weekday offset (3rd thursday, last monday in jan). + int32 swday_offset = 6; + int32 eyear = 7; + int32 emon = 8; + int32 emday = 9; + int32 ewday = 10; + int32 ewday_offset = 11; + int32 skip_interval = 12; + repeated Timerange timerange = 13; +} + +message ExceptionArray { + repeated Daterange calendar_date = 1; + repeated Daterange month_date = 2; + repeated Daterange month_day = 3; + repeated Daterange month_week_day = 4; + repeated Daterange week_day = 5; +} + +message PairStringSet { + message Pair { + string first = 1; + string second = 2; + } + repeated Pair data = 1; + bool additive = 2; +} + +message PairUint64_32 { + uint64 first = 1; + uint32 second = 2; +} + +enum DependencyKind { + unknown = 0; + notification_dependency = 1; + execution_dependency = 2; +} + +enum ActionServiceOn { + action_svc_none = 0; + action_svc_ok = 1; // (1 << 0) + action_svc_warning = 2; // (1 << 1) + action_svc_unknown = 4; // (1 << 2) + action_svc_critical = 8; // (1 << 3) + action_svc_flapping = 16; // (1 << 4) + action_svc_downtime = 32; // (1 << 5) +} + +enum ActionHostOn { + action_hst_none = 0; + action_hst_up = 1; // (1 << 0) + action_hst_down = 2; // (1 << 1) + action_hst_unreachable = 4; // (1 << 2) + action_hst_flapping = 8; // (1 << 3) + action_hst_downtime = 16; // (1 << 4) +} + +enum ActionHostEscalationOn { + action_he_none = 0; + action_he_down = 1; // (1 << 0) + action_he_unreachable = 2; // (1 << 1) + action_he_recovery = 4; // (1 << 2) +} + +enum ActionServiceEscalationOn { + action_se_none = 0; + action_se_unknown = 1; // (1 << 1) + action_se_warning = 2; // (1 << 2) + action_se_critical = 4; // (1 << 3) + action_se_pending = 8; // (1 << 4) + action_se_recovery = 16; // (1 << 5) +} + +enum 
ActionServiceDependencyOn { + action_sd_none = 0; + action_sd_ok = 1; // (1 << 0) + action_sd_unknown = 2; // (1 << 1) + action_sd_warning = 4; // (1 << 2) + action_sd_critical = 8; // (1 << 3) + action_sd_pending = 16; // (1 << 4) +} + +enum ActionHostDependencyOn { + action_hd_none = 0; + action_hd_up = 1; // (1 << 0) + action_hd_down = 2; // (1 << 1) + action_hd_unreachable = 4; // (1 << 2) + action_hd_pending = 8; // (1 << 3) +} + +enum TagType { + tag_servicegroup = 0; + tag_hostgroup = 1; + tag_servicecategory = 2; + tag_hostcategory = 3; + tag_none = 255; // in legacy configuration, this was -1 +} + +message StringList { + repeated string data = 1; + bool additive = 2; +} + +message StringSet { + repeated string data = 1; + bool additive = 2; +} + +message Anomalydetection { + Object obj = 1; + int32 acknowledgement_timeout = 2; // Optional - Default value: 0 + string action_url = 3; + bool status_change = 4; // Optional - Default value: false + bool checks_active = 5; // Optional - Default value: true + bool checks_passive = 6; // Optional - Default value: true + string metric_name = 7; + string thresholds_file = 8; + bool check_freshness = 9; // Optional - Default value: 0 + uint32 check_interval = 10; // Optional - Default value: 5 + StringSet contactgroups = 11; + StringSet contacts = 12; + repeated CustomVariable customvariables = 13; + string display_name = 14; + string event_handler = 15; + bool event_handler_enabled = 16; // Optional - Default value: true + uint32 first_notification_delay = 17; // Optional - Default value: 0 + bool flap_detection_enabled = 18; // Optional - Default value: true + uint32 flap_detection_options = + 19; // Optional - Default value: action_svc_ok | action_svc_warning + // |action_svc_unknown | action_svc_critical + uint32 freshness_threshold = 20; // Optional - Default value: 0 + uint32 high_flap_threshold = 21; // Optional - Default value: 0 + string host_name = 22; + string icon_image = 23; + string icon_image_alt = 24; 
+ ServiceStatus initial_state = + 25; // - Default value: ServiceStatus::state_ok + bool is_volatile = 26; // Optional - Default value: false + uint32 low_flap_threshold = 27; // Optional - Default value: 0 + uint32 max_check_attempts = 28; // Optional - Default value: 3 + string notes = 29; + string notes_url = 30; + bool notifications_enabled = 31; // Optional - Default value: true + uint32 notification_interval = 32; // Optional - Default value: 0 + uint32 notification_options = + 33; // Optional - Default value: action_svc_ok | action_svc_warning + // |action_svc_critical | action_svc_unknown |action_svc_flapping | + // action_svc_downtime + optional string notification_period = 34; // Optional + bool obsess_over_service = 35; // Optional - Default value: true + bool process_perf_data = 36; // Optional - Default value: true + bool retain_nonstatus_information = 37; // Optional - Default value: true + bool retain_status_information = 38; // Optional - Default value: true + uint32 retry_interval = 39; // Optional - Default value: 1 + optional uint32 recovery_notification_delay = 40; // Optional + StringSet servicegroups = 41; + string service_description = 42; + uint64 host_id = 43; + uint64 service_id = 44; + uint64 internal_id = 45; + uint64 dependent_service_id = 46; + uint32 stalking_options = 47; // Optional - Default value: action_svc_none + optional string timezone = 48; // Optional + optional uint64 severity_id = 49; // Optional + optional uint64 icon_id = 50; // Optional + repeated PairUint64_32 tags = 51; + double sensitivity = 52; +} + +message Command { + Object obj = 1; + string command_line = 2; + string command_name = 3; + string connector = 4; +} + +message Connector { + Object obj = 1; + string connector_line = 2; + string connector_name = 3; +} + +message Contact { + Object obj = 1; + repeated string address = 2; + string alias = 3; + bool can_submit_commands = 4; // Optional - Default value: true + StringSet contactgroups = 5; + string 
contact_name = 6; + repeated CustomVariable customvariables = 7; + string email = 8; + bool host_notifications_enabled = 9; // Optional - Default value: true + StringList host_notification_commands = 10; + uint32 host_notification_options = + 11; // Optional - Default value: action_hst_none + string host_notification_period = 12; + bool retain_nonstatus_information = 13; // Optional - Default value: true + bool retain_status_information = 14; // Optional - Default value: true + string pager = 15; + StringList service_notification_commands = 16; + uint32 service_notification_options = + 17; // Optional - Default value: action_svc_none + string service_notification_period = 18; + bool service_notifications_enabled = 19; // Optional - Default value: true + optional string timezone = 20; // Optional +} + +message Contactgroup { + Object obj = 1; + string alias = 2; + StringSet contactgroup_members = 3; + string contactgroup_name = 4; + StringSet members = 5; +} + +message Host { + Object obj = 1; + optional int32 acknowledgement_timeout = 2; // Optional + string action_url = 3; + string address = 4; + string alias = 5; + bool checks_active = 6; // Optional - Default value: true + bool checks_passive = 7; // Optional - Default value: true + string check_command = 8; + bool check_freshness = 9; // Optional - Default value: false + uint32 check_interval = 10; // Optional - Default value: 5 + string check_period = 11; + StringSet contactgroups = 12; + StringSet contacts = 13; + optional Point2d coords_2d = 14; // Optional + optional Point3d coords_3d = 15; // Optional + repeated CustomVariable customvariables = 16; + string display_name = 17; + string event_handler = 18; + bool event_handler_enabled = 19; // Optional - Default value: true + uint32 first_notification_delay = 20; // Optional - Default value: 0 + bool flap_detection_enabled = 21; // Optional - Default value: true + uint32 flap_detection_options = + 22; // Optional - Default value: action_hst_up 
|action_hst_down + // |action_hst_unreachable + uint32 freshness_threshold = 23; // Optional - Default value: 0 + uint32 high_flap_threshold = 24; // Optional - Default value: 0 + StringSet hostgroups = 25; + uint64 host_id = 26; + string host_name = 27; + string icon_image = 28; + string icon_image_alt = 29; + HostStatus initial_state = 30; // - Default value: HostStatus::state_up + uint32 low_flap_threshold = 31; // Optional - Default value: 0 + uint32 max_check_attempts = 32; // Optional - Default value: 3 + string notes = 33; + string notes_url = 34; + bool notifications_enabled = 35; // Optional - Default value: true + uint32 notification_interval = 36; // Optional - Default value: 0 + uint32 notification_options = + 37; // Optional - Default value: action_hst_up | action_hst_down + // |action_hst_unreachable |action_hst_flapping |action_hst_downtime + string notification_period = 38; + bool obsess_over_host = 39; // Optional - Default value: true + StringSet parents = 40; + bool process_perf_data = 41; // Optional - Default value: true + bool retain_nonstatus_information = 42; // Optional - Default value: true + bool retain_status_information = 43; // Optional - Default value: true + uint32 retry_interval = 44; // Optional - Default value: 1 + optional uint32 recovery_notification_delay = 45; // Optional + uint32 stalking_options = 46; // Optional - Default value: action_hst_none + string statusmap_image = 47; + optional string timezone = 48; // Optional + string vrml_image = 49; + optional uint64 severity_id = 50; // Optional + optional uint64 icon_id = 51; // Optional + repeated PairUint64_32 tags = 52; +} + +message Hostdependency { + Object obj = 1; + string dependency_period = 2; + DependencyKind dependency_type = 3; + StringSet dependent_hostgroups = 4; + StringSet dependent_hosts = 5; + uint32 execution_failure_options = + 6; // Optional - Default value: action_hd_none + StringSet hostgroups = 7; + StringSet hosts = 8; + bool inherits_parent = 9; // 
Optional - Default value: false + uint32 notification_failure_options = + 10; // Optional - Default value: action_hd_none +} + +message Hostescalation { + Object obj = 1; + StringSet contactgroups = 2; + uint32 escalation_options = 3; // Optional - Default value: action_he_none + optional string escalation_period = 4; // Optional + uint32 first_notification = 5; // Optional - Default value: -2 + StringSet hostgroups = 6; + StringSet hosts = 7; + uint32 last_notification = 8; // Optional - Default value: -2 + uint32 notification_interval = 9; // Optional - Default value: 0 +} + +message Hostgroup { + Object obj = 1; + string action_url = 2; + string alias = 3; + uint32 hostgroup_id = 4; + string hostgroup_name = 5; + StringSet members = 6; + string notes = 7; + string notes_url = 8; +} + +message Service { + Object obj = 1; + int32 acknowledgement_timeout = 2; // Optional - Default value: 0 + string action_url = 3; + bool checks_active = 4; // Optional - Default value: true + bool checks_passive = 5; // Optional - Default value: true + string check_command = 6; + bool check_command_is_important = 7; + bool check_freshness = 8; // Optional - Default value: 0 + uint32 check_interval = 9; // Optional - Default value: 5 + string check_period = 10; + StringSet contactgroups = 11; + StringSet contacts = 12; + repeated CustomVariable customvariables = 13; + string display_name = 14; + string event_handler = 15; + bool event_handler_enabled = 16; // Optional - Default value: true + uint32 first_notification_delay = 17; // Optional - Default value: 0 + bool flap_detection_enabled = 18; // Optional - Default value: true + uint32 flap_detection_options = + 19; // Optional - Default value: action_svc_ok |action_svc_warning + // |action_svc_unknown |action_svc_critical + uint32 freshness_threshold = 20; // Optional - Default value: 0 + uint32 high_flap_threshold = 21; // Optional - Default value: 0 + string host_name = 22; + string icon_image = 23; + string icon_image_alt = 24; 
+ ServiceStatus initial_state = + 25; // - Default value: ServiceStatus::state_ok + bool is_volatile = 26; // Optional - Default value: false + uint32 low_flap_threshold = 27; // Optional - Default value: 0 + uint32 max_check_attempts = 28; // Optional - Default value: 3 + string notes = 29; + string notes_url = 30; + bool notifications_enabled = 31; // Optional - Default value: true + uint32 notification_interval = 32; // Optional - Default value: 0 + uint32 notification_options = + 33; // Optional - Default value: action_svc_ok | action_svc_warning | + // action_svc_critical | action_svc_unknown |action_svc_flapping | + // action_svc_downtime + optional string notification_period = 34; // Optional + bool obsess_over_service = 35; // Optional - Default value: true + bool process_perf_data = 36; // Optional - Default value: true + bool retain_nonstatus_information = 37; // Optional - Default value: true + bool retain_status_information = 38; // Optional - Default value: true + uint32 retry_interval = 39; // Optional - Default value: 1 + optional uint32 recovery_notification_delay = 40; // Optional + StringSet servicegroups = 41; + string service_description = 42; + uint64 host_id = 43; + uint64 service_id = 44; + uint32 stalking_options = 45; // Optional - Default value: action_svc_none + optional string timezone = 46; // Optional + optional uint64 severity_id = 47; // Optional + optional uint64 icon_id = 48; // Optional + repeated PairUint64_32 tags = 49; +} + +message Servicedependency { + Object obj = 1; + string dependency_period = 2; + DependencyKind dependency_type = 3; + StringList dependent_hostgroups = 4; + StringList dependent_hosts = 5; + StringList dependent_servicegroups = 6; + StringList dependent_service_description = 7; + uint32 execution_failure_options = + 8; // Optional - Default value: action_sd_none + StringList hostgroups = 9; + StringList hosts = 10; + bool inherits_parent = 11; // Optional - Default value: false + uint32 
notification_failure_options = + 12; // Optional - Default value: action_sd_none + StringList servicegroups = 13; + StringList service_description = 14; +} + +message Serviceescalation { + Object obj = 1; + StringSet contactgroups = 2; + uint32 escalation_options = 3; // Optional - Default value: action_se_none + optional string escalation_period = 4; // Optional + uint32 first_notification = 5; // Optional - Default value: -2 + StringList hostgroups = 6; + StringList hosts = 7; + uint32 last_notification = 8; // Optional - Default value: -2 + uint32 notification_interval = 9; // Optional - Default value: 0 + StringList servicegroups = 10; + StringList service_description = 11; +} + +message Servicegroup { + Object obj = 1; + string action_url = 2; + string alias = 3; + PairStringSet members = 4; + string notes = 5; + string notes_url = 6; + uint32 servicegroup_id = 7; + StringSet servicegroup_members = 8; + string servicegroup_name = 9; +} + +message Severity { + Object obj = 1; + KeyType key = 2; // - Default value: 0, -1 + uint32 level = 3; + uint64 icon_id = 4; + string severity_name = 5; +} + +message Tag { + Object obj = 1; + KeyType key = 2; // - Default value: 0, -1 + string tag_name = 3; +} + +message Timeperiod { + Object obj = 1; + string alias = 2; + ExceptionArray exceptions = 3; + StringSet exclude = 4; + string timeperiod_name = 5; + DaysArray timeranges = 6; +} diff --git a/common/engine_conf/state_helper.cc b/common/engine_conf/state_helper.cc new file mode 100644 index 00000000000..a48afd55e44 --- /dev/null +++ b/common/engine_conf/state_helper.cc @@ -0,0 +1,492 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/state_helper.hh" +#include +#include +#include +#include "com/centreon/engine/events/sched_info.hh" +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; +using ::google::protobuf::Descriptor; +using ::google::protobuf::FieldDescriptor; +using ::google::protobuf::Reflection; + +extern sched_info scheduling_info; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a State object. + * + * @param obj The State object on which this helper works. The helper is not the + * owner of this object. 
+ */ +state_helper::state_helper(State* obj) + : message_helper( + object_type::state, + obj, + { + {"check_for_orphaned_hosts", "check_orphaned_hosts"}, + {"check_for_orphaned_services", "check_orphaned_services"}, + {"check_result_reaper_frequency", "check_reaper_interval"}, + {"illegal_macro_output_chars", "illegal_output_chars"}, + {"illegal_object_name_chars", "illegal_object_chars"}, + {"max_concurrent_checks", "max_parallel_service_checks"}, + {"rpc_port", "grpc_port"}, + {"service_interleave_factor", "service_interleave_factor_method"}, + {"service_reaper_frequency", "check_reaper_interval"}, + {"use_agressive_host_checking", "use_aggressive_host_checking"}, + {"use_regexp_matching", "use_regexp_matches"}, + {"xcddefault_comment_file", "comment_file"}, + {"xdddefault_downtime_file", "downtime_file"}, + }, + State::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of State objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool state_helper::hook(std::string_view key, const std::string_view& value) { + State* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + + if (key.substr(0, 10) == "log_level_") { + if (value == "off" || value == "critical" || value == "error" || + value == "warning" || value == "info" || value == "debug" || + value == "trace") { + return set_global(key, value); + } else + throw msg_fmt( + "Log level '{}' has value '{}' but it cannot be a different string " + "than off, critical, error, warning, info, debug or trace", + key, value); + } else if (key == "date_format") { + if (value == "euro") + obj->set_date_format(DateType::euro); + else if (value == "iso8601") + obj->set_date_format(DateType::iso8601); + else if (value == "strict-iso8601") + obj->set_date_format(DateType::strict_iso8601); + else if (value == "us") + obj->set_date_format(DateType::us); + else + return false; + return true; + } else if (key == "host_inter_check_delay_method") { + if (value == "n") + obj->mutable_host_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_none); + else if (value == "d") + obj->mutable_host_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_dumb); + else if (value == "s") + obj->mutable_host_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_smart); + else { + obj->mutable_host_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_user); + double user_value; + if (!absl::SimpleAtod(value, &user_value) || user_value <= 0.0) + throw msg_fmt( + "Invalid value for host_inter_check_delay_method, must be one of " + "'n' (none), 'd' (dumb), 's' (smart) or a stricly positive value " + "({} provided)", + user_value); + obj->mutable_host_inter_check_delay_method()->set_user_value(user_value); + } + return true; + } else if (key == "service_inter_check_delay_method") { + if (value == "n") + obj->mutable_service_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_none); + else if (value == "d") + obj->mutable_service_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_dumb); + else if (value == "s") + 
obj->mutable_service_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_smart); + else { + obj->mutable_service_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_user); + double user_value; + if (!absl::SimpleAtod(value, &user_value) || user_value <= 0.0) + throw msg_fmt( + "Invalid value for service_inter_check_delay_method, must be one " + "of 'n' (none), 'd' (dumb), 's' (smart) or a stricly positive " + "value ({} provided)", + user_value); + obj->mutable_service_inter_check_delay_method()->set_user_value( + user_value); + } + return true; + } else if (key == "command_check_interval") { + std::string_view v; + if (value[value.size() - 1] == 's') { + obj->set_command_check_interval_is_seconds(true); + v = value.substr(0, value.size() - 1); + } else { + obj->set_command_check_interval_is_seconds(false); + v = value; + } + int32_t res; + if (absl::SimpleAtoi(v, &res)) { + obj->set_command_check_interval(res); + return true; + } else { + throw msg_fmt( + "command_check_interval is an integer representing a duration " + "between two consecutive external command checks. This number can be " + "a number of 'time units' or a number of seconds. 
For the latter, " + "you must append a 's' after the number: the current incorrect value " + "is: '{}'", + fmt::string_view(value.data(), value.size())); + return false; + } + } else if (key == "service_interleave_factor_method") { + if (value == "s") + obj->mutable_service_interleave_factor_method()->set_type( + InterleaveFactor_IFType_ilf_smart); + else { + obj->mutable_service_interleave_factor_method()->set_type( + InterleaveFactor_IFType_ilf_user); + int32_t res; + if (!absl::SimpleAtoi(value, &res) || res < 1) + res = 1; + obj->mutable_service_interleave_factor_method()->set_user_value(res); + } + return true; + } else if (key == "check_reaper_interval") { + int32_t res; + if (!absl::SimpleAtoi(value, &res) || res == 0) + throw msg_fmt( + "check_reaper_interval must be a strictly positive integer (current " + "value '{}'", + fmt::string_view(value.data(), value.size())); + else + obj->set_check_reaper_interval(res); + return true; + } else if (key == "event_broker_options") { + if (value != "-1") { + uint32_t res; + if (absl::SimpleAtoi(value, &res)) + obj->set_event_broker_options(res); + else + throw msg_fmt( + "event_broker_options must be a positive integer or '-1' and not " + "'{}'", + fmt::string_view(value.data(), value.size())); + } else + obj->set_event_broker_options(static_cast(-1)); + return true; + } + return false; +} + +/** + * @brief Initializer of the State object, in other words set its default + * values. 
+ */ +void state_helper::_init() { + State* obj = static_cast(mut_obj()); + obj->set_accept_passive_host_checks(true); + obj->set_accept_passive_service_checks(true); + obj->set_additional_freshness_latency(15); + obj->set_admin_email(""); + obj->set_admin_pager(""); + obj->set_allow_empty_hostgroup_assignment(false); + obj->set_auto_reschedule_checks(false); + obj->set_auto_rescheduling_interval(30); + obj->set_auto_rescheduling_window(180); + obj->set_cached_host_check_horizon(15); + obj->set_cached_service_check_horizon(15); + obj->set_check_external_commands(true); + obj->set_check_host_freshness(false); + obj->set_check_orphaned_hosts(true); + obj->set_check_orphaned_services(true); + obj->set_check_reaper_interval(10); + obj->set_check_service_freshness(true); + obj->set_command_check_interval(-1); + obj->set_command_file(DEFAULT_COMMAND_FILE); + obj->set_date_format(DateType::us); + obj->set_debug_file(DEFAULT_DEBUG_FILE); + obj->set_debug_level(0); + obj->set_debug_verbosity(1); + obj->set_enable_environment_macros(false); + obj->set_enable_event_handlers(true); + obj->set_enable_flap_detection(false); + obj->set_enable_macros_filter(false); + obj->set_enable_notifications(true); + obj->set_enable_predictive_host_dependency_checks(true); + obj->set_enable_predictive_service_dependency_checks(true); + obj->set_event_broker_options(std::numeric_limits::max()); + obj->set_event_handler_timeout(30); + obj->set_execute_host_checks(true); + obj->set_execute_service_checks(true); + obj->set_external_command_buffer_slots(4096); + obj->set_global_host_event_handler(""); + obj->set_global_service_event_handler(""); + obj->set_high_host_flap_threshold(30.0); + obj->set_high_service_flap_threshold(30.0); + obj->set_host_check_timeout(30); + obj->set_host_freshness_check_interval(60); + obj->mutable_host_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_smart); + obj->set_illegal_object_chars(""); + obj->set_illegal_output_chars("`~$&|'\"<>"); + 
obj->set_interval_length(60); + obj->set_log_event_handlers(true); + obj->set_log_external_commands(true); + obj->set_log_file(DEFAULT_LOG_FILE); + obj->set_log_host_retries(false); + obj->set_log_notifications(true); + obj->set_log_passive_checks(true); + obj->set_log_pid(true); + obj->set_log_service_retries(false); + obj->set_low_host_flap_threshold(20.0); + obj->set_low_service_flap_threshold(20.0); + obj->set_max_debug_file_size(1000000); + obj->set_max_host_check_spread(5); + obj->set_max_log_file_size(0); + obj->set_max_parallel_service_checks(0); + obj->set_max_service_check_spread(5); + obj->set_notification_timeout(30); + obj->set_obsess_over_hosts(false); + obj->set_obsess_over_services(false); + obj->set_ochp_command(""); + obj->set_ochp_timeout(15); + obj->set_ocsp_command(""); + obj->set_ocsp_timeout(15); + obj->set_perfdata_timeout(5); + obj->set_poller_name("unknown"); + obj->set_rpc_listen_address("localhost"); + obj->set_process_performance_data(false); + obj->set_retained_contact_host_attribute_mask(0L); + obj->set_retained_contact_service_attribute_mask(0L); + obj->set_retained_host_attribute_mask(0L); + obj->set_retained_process_host_attribute_mask(0L); + obj->set_retain_state_information(true); + obj->set_retention_scheduling_horizon(900); + obj->set_retention_update_interval(60); + obj->set_service_check_timeout(60); + obj->set_service_freshness_check_interval(60); + obj->mutable_service_inter_check_delay_method()->set_type( + InterCheckDelay_IcdType_smart); + obj->mutable_service_interleave_factor_method()->set_type( + InterleaveFactor_IFType_ilf_smart); + obj->set_sleep_time(0.5); + obj->set_soft_state_dependencies(false); + obj->set_state_retention_file(DEFAULT_RETENTION_FILE); + obj->set_status_file(DEFAULT_STATUS_FILE); + obj->set_status_update_interval(60); + obj->set_time_change_threshold(900); + obj->set_use_large_installation_tweaks(false); + obj->set_instance_heartbeat_interval(30); + obj->set_use_regexp_matches(false); + 
obj->set_use_retained_program_state(true); + obj->set_use_retained_scheduling_info(false); + obj->set_use_setpgid(true); + obj->set_use_syslog(true); + obj->set_log_v2_enabled(true); + obj->set_log_legacy_enabled(true); + obj->set_log_v2_logger("file"); + obj->set_log_level_functions(LogLevel::error); + obj->set_log_level_config(LogLevel::info); + obj->set_log_level_events(LogLevel::info); + obj->set_log_level_checks(LogLevel::info); + obj->set_log_level_notifications(LogLevel::error); + obj->set_log_level_eventbroker(LogLevel::error); + obj->set_log_level_external_command(LogLevel::error); + obj->set_log_level_commands(LogLevel::error); + obj->set_log_level_downtimes(LogLevel::error); + obj->set_log_level_comments(LogLevel::error); + obj->set_log_level_macros(LogLevel::error); + obj->set_log_level_process(LogLevel::info); + obj->set_log_level_runtime(LogLevel::error); + obj->set_use_timezone(""); + obj->set_use_true_regexp_matching(false); +} + +/** + * @brief Given the helper to a State protobuf message (so we also have access + * to the message itself) and a key/value pair, this function searches the + * field key and applies to it the value. As we work here on the State message, + * that's the reason of the name of this function. + * + * @param helper The State helper. + * @param key The field name to look for. + * @param value The value to apply. + * + * @return True on success. 
+ */ +bool state_helper::set_global(const std::string_view& key, + const std::string_view& value) { + State* msg = static_cast(mut_obj()); + const Descriptor* desc = msg->GetDescriptor(); + const FieldDescriptor* f; + const Reflection* refl; + + f = desc->FindFieldByName(std::string(key.data(), key.size())); + if (f == nullptr) { + auto it = correspondence().find(key); + if (it != correspondence().end()) + f = desc->FindFieldByName(it->second); + if (f == nullptr) + return false; + } + refl = msg->GetReflection(); + switch (f->type()) { + case FieldDescriptor::TYPE_BOOL: { + bool val; + if (absl::SimpleAtob(value, &val)) { + refl->SetBool(static_cast(msg), f, val); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_INT32: { + int32_t val; + if (absl::SimpleAtoi(value, &val)) { + refl->SetInt32(static_cast(msg), f, val); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_UINT32: { + uint32_t val; + if (absl::SimpleAtoi(value, &val)) { + refl->SetUInt32(static_cast(msg), f, val); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_UINT64: { + uint64_t val; + if (absl::SimpleAtoi(value, &val)) { + refl->SetUInt64(static_cast(msg), f, val); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_FLOAT: { + float val; + if (absl::SimpleAtof(value, &val)) { + refl->SetFloat(static_cast(msg), f, val); + return true; + } else + return false; + } break; + case FieldDescriptor::TYPE_STRING: + if (f->is_repeated()) { + refl->AddString(static_cast(msg), f, + std::string(value.data(), value.size())); + } else { + refl->SetString(static_cast(msg), f, + std::string(value.data(), value.size())); + } + return true; + case FieldDescriptor::TYPE_ENUM: { + auto* v = f->enum_type()->FindValueByName( + std::string(value.data(), value.size())); + if (v) + refl->SetEnumValue(msg, f, v->number()); + else + return false; + } break; + case FieldDescriptor::TYPE_MESSAGE: + if 
(!f->is_repeated()) { + Message* m = refl->MutableMessage(msg, f); + const Descriptor* d = m->GetDescriptor(); + + if (d && d->name() == "StringSet") { + StringSet* set = + static_cast(refl->MutableMessage(msg, f)); + fill_string_group(set, value); + return true; + } else if (d && d->name() == "StringList") { + StringList* lst = + static_cast(refl->MutableMessage(msg, f)); + fill_string_group(lst, value); + return true; + } + } + default: + return false; + } + return true; +} + +bool state_helper::apply_extended_conf( + const std::string& file_path, + const rapidjson::Document& json_doc, + const std::shared_ptr& logger) { + bool retval = true; + for (rapidjson::Value::ConstMemberIterator member_iter = + json_doc.MemberBegin(); + member_iter != json_doc.MemberEnd(); ++member_iter) { + const std::string_view field_name = member_iter->name.GetString(); + try { + switch (member_iter->value.GetType()) { + case rapidjson::Type::kNumberType: { + std::string value_str; + if (member_iter->value.IsInt64()) { + int64_t value = member_iter->value.GetInt64(); + value_str = fmt::to_string(value); + } else if (member_iter->value.IsDouble()) { + double value = member_iter->value.GetDouble(); + value_str = fmt::to_string(value); + } + set_global(field_name, value_str); + } break; + case rapidjson::Type::kStringType: { + const std::string_view field_value = member_iter->value.GetString(); + set_global(field_name, field_value); + } break; + case rapidjson::Type::kFalseType: + set_global(field_name, "false"); + break; + case rapidjson::Type::kTrueType: + set_global(field_name, "true"); + break; + case rapidjson::Type::kNullType: + set_global(field_name, ""); + break; + default: + logger->error( + "The field '{}' in the file '{}' can not be converted as a " + "string", + field_name, file_path); + retval = false; + } + } catch (const std::exception& e) { + logger->error( + "The field '{}' in the file '{}' can not be converted as a string", + field_name, file_path); + retval = false; + } 
+ } + return retval; +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/state_helper.hh b/common/engine_conf/state_helper.hh new file mode 100644 index 00000000000..ae3a4a172ea --- /dev/null +++ b/common/engine_conf/state_helper.hh @@ -0,0 +1,45 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_STATE +#define CCE_CONFIGURATION_STATE + +#include + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +class state_helper : public message_helper { + void _init(); + + public: + state_helper(State* obj); + ~state_helper() noexcept = default; + + bool hook(std::string_view key, const std::string_view& value) override; + bool apply_extended_conf(const std::string& file_path, + const rapidjson::Document& json_doc, + const std::shared_ptr& logger); + bool set_global(const std::string_view& key, const std::string_view& value); +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_STATE */ diff --git a/common/engine_conf/tag_helper.cc b/common/engine_conf/tag_helper.cc new file mode 100644 index 00000000000..bb5cfecc6d4 --- /dev/null +++ b/common/engine_conf/tag_helper.cc @@ -0,0 +1,110 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/tag_helper.hh" + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Tag object. + * + * @param obj The Tag object on which this helper works. The helper is not the + * owner of this object. + */ +tag_helper::tag_helper(Tag* obj) + : message_helper(object_type::tag, + obj, + { + {"tag_id", "id"}, + {"tag_type", "type"}, + }, + Tag::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Tag objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool tag_helper::hook(std::string_view key, const std::string_view& value) { + Tag* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + + if (key == "id" || key == "tag_id") { + uint64_t id; + if (absl::SimpleAtoi(value, &id)) + obj->mutable_key()->set_id(id); + else + return false; + return true; + } else if (key == "type" || key == "tag_type") { + if (value == "hostcategory") + obj->mutable_key()->set_type(tag_hostcategory); + else if (value == "servicecategory") + obj->mutable_key()->set_type(tag_servicecategory); + else if (value == "hostgroup") + obj->mutable_key()->set_type(tag_hostgroup); + else if (value == "servicegroup") + obj->mutable_key()->set_type(tag_servicegroup); + else + return false; + return true; + } + return false; +} + +/** + * @brief Check the validity of the Tag object. + * + * @param err An error counter. + */ +void tag_helper::check_validity(error_cnt& err) const { + const Tag* o = static_cast(obj()); + + if (o->tag_name().empty()) { + ++err.config_errors; + throw msg_fmt("Tag has no name (property 'tag_name')"); + } + if (o->key().id() == 0) { + ++err.config_errors; + throw msg_fmt("Tag '{}' has a null id", o->tag_name()); + } + if (o->key().type() == static_cast(-1)) { + ++err.config_errors; + throw msg_fmt("Tag type must be specified"); + } +} + +/** + * @brief Initializer of the Tag object, in other words set its default values. + */ +void tag_helper::_init() { + Tag* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); + obj->mutable_key()->set_id(0); + obj->mutable_key()->set_type(-1); +} +} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/tag_helper.hh b/common/engine_conf/tag_helper.hh new file mode 100644 index 00000000000..2b1e02fe93c --- /dev/null +++ b/common/engine_conf/tag_helper.hh @@ -0,0 +1,40 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_TAG +#define CCE_CONFIGURATION_TAG + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +class tag_helper : public message_helper { + void _init(); + + public: + tag_helper(Tag* obj); + ~tag_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_TAG */ diff --git a/common/engine_conf/timeperiod_helper.cc b/common/engine_conf/timeperiod_helper.cc new file mode 100644 index 00000000000..63e77406b88 --- /dev/null +++ b/common/engine_conf/timeperiod_helper.cc @@ -0,0 +1,589 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include "common/engine_conf/timeperiod_helper.hh" + +#include + +#include "com/centreon/exceptions/msg_fmt.hh" + +using com::centreon::exceptions::msg_fmt; + +namespace com::centreon::engine::configuration { + +/** + * @brief Constructor from a Timeperiod object. + * + * @param obj The Timeperiod object on which this helper works. The helper is + * not the owner of this object. + */ +timeperiod_helper::timeperiod_helper(Timeperiod* obj) + : message_helper(object_type::timeperiod, + obj, + {}, + Timeperiod::descriptor()->field_count()) { + _init(); +} + +/** + * @brief For several keys, the parser of Timeperiod objects has a particular + * behavior. These behaviors are handled here. + * @param key The key to parse. + * @param value The value corresponding to the key + */ +bool timeperiod_helper::hook(std::string_view key, + const std::string_view& value) { + Timeperiod* obj = static_cast(mut_obj()); + /* Since we use key to get back the good key value, it is faster to give key + * by copy to the method. We avoid one key allocation... 
*/ + key = validate_key(key); + auto get_timerange = [](const std::string_view& value, auto* day) -> bool { + auto arr = absl::StrSplit(value, ','); + for (auto& d : arr) { + std::pair v = absl::StrSplit(d, '-'); + Timerange tr; + std::pair p = + absl::StrSplit(v.first, ':'); + uint32_t h, m; + if (!absl::SimpleAtoi(p.first, &h) || !absl::SimpleAtoi(p.second, &m)) + return false; + tr.set_range_start(h * 3600 + m * 60); + p = absl::StrSplit(v.second, ':'); + if (!absl::SimpleAtoi(p.first, &h) || !absl::SimpleAtoi(p.second, &m)) + return false; + tr.set_range_end(h * 3600 + m * 60); + day->Add(std::move(tr)); + } + return true; + }; + + bool retval = false; + if (key == "exclude") { + fill_string_group(obj->mutable_exclude(), value); + return true; + } else { + if (key == "sunday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_sunday()); + else if (key == "monday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_monday()); + else if (key == "tuesday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_tuesday()); + else if (key == "wednesday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_wednesday()); + else if (key == "thursday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_thursday()); + else if (key == "friday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_friday()); + else if (key == "saturday") + retval = + get_timerange(value, obj->mutable_timeranges()->mutable_saturday()); + if (!retval) { + std::string line{absl::StrFormat("%s %s", key, value)}; + retval = _add_week_day(key, value) || _add_calendar_date(line) || + _add_other_date(line); + } + } + return retval; +} + +/** + * Add a week day. + * + * @param[in] key The week day. + * @param[in] value The range. + * + * @return True on success, otherwise false. 
 + */
+bool timeperiod_helper::_add_week_day(std::string_view key,
+ std::string_view value) {
+ Timeperiod* obj = static_cast(mut_obj());
+ unsigned int day_id;
+ if (!_get_day_id(key, day_id))
+ return false;
+
+ google::protobuf::RepeatedPtrField* d;
+ switch (day_id) {
+ case 0:
+ d = obj->mutable_timeranges()->mutable_sunday();
+ break;
+ case 1:
+ d = obj->mutable_timeranges()->mutable_monday();
+ break;
+ case 2:
+ d = obj->mutable_timeranges()->mutable_tuesday();
+ break;
+ case 3:
+ d = obj->mutable_timeranges()->mutable_wednesday();
+ break;
+ case 4:
+ d = obj->mutable_timeranges()->mutable_thursday();
+ break;
+ case 5:
+ d = obj->mutable_timeranges()->mutable_friday();
+ break;
+ case 6:
+ d = obj->mutable_timeranges()->mutable_saturday();
+ break;
+ }
+ if (!_build_timeranges(value, *d))
+ return false;
+
+ return true;
+}
+
+/**
+ * Add a calendar date.
+ *
+ * @param[in] line The line to parse.
+ *
+ * @return True on success, otherwise false.
+ */
+bool timeperiod_helper::_add_calendar_date(const std::string& line) {
+ int32_t ret = 0;
+ int32_t pos = 0;
+ bool fill_missing = false;
+ uint32_t month_start = 0;
+ uint32_t month_end = 0;
+ uint32_t month_day_start = 0;
+ uint32_t month_day_end = 0;
+ uint32_t year_start = 0;
+ uint32_t year_end = 0;
+ uint32_t skip_interval = 0;
+
+ if ((ret = sscanf(line.c_str(), "%4u-%2u-%2u - %4u-%2u-%2u / %u %n",
+ &year_start, &month_start, &month_day_start, &year_end,
+ &month_end, &month_day_end, &skip_interval, &pos)) == 7)
+ fill_missing = false;
+ else if ((ret = sscanf(line.c_str(), "%4u-%2u-%2u - %4u-%2u-%2u %n",
+ &year_start, &month_start, &month_day_start, &year_end,
+ &month_end, &month_day_end, &pos)) == 6)
+ fill_missing = false;
+ else if ((ret = sscanf(line.c_str(), "%4u-%2u-%2u / %u %n", &year_start,
+ &month_start, &month_day_start, &skip_interval,
+ &pos)) == 4)
+ fill_missing = true;
+ else if ((ret = sscanf(line.c_str(), "%4u-%2u-%2u %n", &year_start,
+ &month_start, &month_day_start, &pos)) == 3)
+ 
fill_missing = true; + + if (ret) { + if (fill_missing) { + year_end = year_start; + month_end = month_start; + month_day_end = month_day_start; + } + + Timeperiod* obj = static_cast(mut_obj()); + auto* range = obj->mutable_exceptions()->add_calendar_date(); + range->set_type(Daterange_TypeRange_calendar_date); + if (!_build_timeranges(line.substr(pos), *range->mutable_timerange())) + return false; + + range->set_syear(year_start); + range->set_smon(month_start - 1); + range->set_smday(month_day_start); + range->set_eyear(year_end); + range->set_emon(month_end - 1); + range->set_emday(month_day_end); + range->set_skip_interval(skip_interval); + + return true; + } + return false; +} + +/** + * Build timerange from new line. + * + * @param[in] line The line to parse. + * @param[out] timeranges The list to fill. + * + * @return True on success, otherwise false. + */ +bool timeperiod_helper::_build_timeranges( + std::string_view line, + google::protobuf::RepeatedPtrField& timeranges) { + auto timeranges_str = absl::StrSplit(line, ','); + for (auto tr : timeranges_str) { + tr = absl::StripAsciiWhitespace(tr); + std::size_t pos(tr.find('-')); + if (pos == std::string::npos) + return false; + time_t start_time; + if (!_build_time_t(tr.substr(0, pos), start_time)) + return false; + time_t end_time; + if (!_build_time_t(tr.substr(pos + 1), end_time)) + return false; + Timerange* t = timeranges.Add(); + t->set_range_start(start_time); + t->set_range_end(end_time); + } + return true; +} + +/** + * Build time_t from timerange configuration. + * + * @param[in] time_str The time to parse (format 00:00-12:00). + * @param[out] ret The value to fill. + * + * @return True on success, otherwise false. 
+ */ +bool timeperiod_helper::_build_time_t(std::string_view time_str, time_t& ret) { + std::size_t pos(time_str.find(':')); + if (pos == std::string::npos) + return false; + unsigned long hours; + if (!absl::SimpleAtoi(time_str.substr(0, pos), &hours)) + return false; + unsigned long minutes; + if (!absl::SimpleAtoi(time_str.substr(pos + 1), &minutes)) + return false; + ret = hours * 3600 + minutes * 60; + return true; +} + +/** + * @brief Check the validity of the Timeperiod object. + * + * @param err An error counter. + */ +void timeperiod_helper::check_validity(error_cnt& err) const { + const Timeperiod* o = static_cast(obj()); + + if (o->timeperiod_name().empty()) { + err.config_errors++; + throw msg_fmt("Time period has no name (property 'timeperiod_name')"); + } +} + +void timeperiod_helper::_init() { + Timeperiod* obj = static_cast(mut_obj()); + obj->mutable_obj()->set_register_(true); +} + +/** + * Add other date. + * + * @param[in] line The line to parse. + * + * @return True on success, otherwise false. 
+ */ +bool timeperiod_helper::_add_other_date(const std::string& line) { + int pos = 0; + Daterange::TypeRange type = Daterange_TypeRange_none; + uint32_t month_start = 0; + uint32_t month_end = 0; + int32_t month_day_start = 0; + int32_t month_day_end = 0; + uint32_t skip_interval = 0; + uint32_t week_day_start = 0; + uint32_t week_day_end = 0; + int32_t week_day_start_offset = 0; + int32_t week_day_end_offset = 0; + char buffer[4][4096]; + + if (line.size() > 1024) + return false; + + if (sscanf(line.c_str(), "%[a-z] %d %[a-z] - %[a-z] %d %[a-z] / %u %n", + buffer[0], &week_day_start_offset, buffer[1], buffer[2], + &week_day_end_offset, buffer[3], &skip_interval, &pos) == 7) { + // wednesday 1 january - thursday 2 july / 3 + if (_get_day_id(buffer[0], week_day_start) && + _get_month_id(buffer[1], month_start) && + _get_day_id(buffer[2], week_day_end) && + _get_month_id(buffer[3], month_end)) + type = Daterange_TypeRange_month_week_day; + } else if (sscanf(line.c_str(), "%[a-z] %d - %[a-z] %d / %u %n", buffer[0], + &month_day_start, buffer[1], &month_day_end, &skip_interval, + &pos) == 5) { + // monday 2 - thursday 3 / 2 + if (_get_day_id(buffer[0], week_day_start) && + _get_day_id(buffer[1], week_day_end)) { + week_day_start_offset = month_day_start; + week_day_end_offset = month_day_end; + type = Daterange_TypeRange_week_day; + } + // february 1 - march 15 / 3 + else if (_get_month_id(buffer[0], month_start) && + _get_month_id(buffer[1], month_end)) + type = Daterange_TypeRange_month_date; + // day 4 - 6 / 2 + else if (!strcmp(buffer[0], "day") && !strcmp(buffer[1], "day")) + type = Daterange_TypeRange_month_day; + } else if (sscanf(line.c_str(), "%[a-z] %d - %d / %u %n", buffer[0], + &month_day_start, &month_day_end, &skip_interval, + &pos) == 4) { + // thursday 2 - 4 + if (_get_day_id(buffer[0], week_day_start)) { + week_day_start_offset = month_day_start; + week_day_end = week_day_start; + week_day_end_offset = month_day_end; + type = 
Daterange_TypeRange_week_day; + } + // february 3 - 5 + else if (_get_month_id(buffer[0], month_start)) { + month_end = month_start; + type = Daterange_TypeRange_month_date; + } + // day 1 - 4 + else if (!strcmp(buffer[0], "day")) + type = Daterange_TypeRange_month_day; + } else if (sscanf(line.c_str(), "%[a-z] %d %[a-z] - %[a-z] %d %[a-z] %n", + buffer[0], &week_day_start_offset, buffer[1], buffer[2], + &week_day_end_offset, buffer[3], &pos) == 6) { + // wednesday 1 january - thursday 2 july + if (_get_day_id(buffer[0], week_day_start) && + _get_month_id(buffer[1], month_start) && + _get_day_id(buffer[2], week_day_end) && + _get_month_id(buffer[3], month_end)) + type = Daterange_TypeRange_month_week_day; + } else if (sscanf(line.c_str(), "%[a-z] %d - %d %n", buffer[0], + &month_day_start, &month_day_end, &pos) == 3) { + // thursday 2 - 4 + if (_get_day_id(buffer[0], week_day_start)) { + week_day_start_offset = month_day_start; + week_day_end = week_day_start; + week_day_end_offset = month_day_end; + type = Daterange_TypeRange_week_day; + } + // february 3 - 5 + else if (_get_month_id(buffer[0], month_start)) { + month_end = month_start; + type = Daterange_TypeRange_month_date; + } + // day 1 - 4 + else if (!strcmp(buffer[0], "day")) + type = Daterange_TypeRange_month_day; + } else if (sscanf(line.c_str(), "%[a-z] %d - %[a-z] %d %n", buffer[0], + &month_day_start, buffer[1], &month_day_end, &pos) == 4) { + // monday 2 - thursday 3 + if (_get_day_id(buffer[0], week_day_start) && + _get_day_id(buffer[1], week_day_end)) { + week_day_start_offset = month_day_start; + week_day_end_offset = month_day_end; + type = Daterange_TypeRange_week_day; + } + // february 1 - march 15 + else if (_get_month_id(buffer[0], month_start) && + _get_month_id(buffer[1], month_end)) + type = Daterange_TypeRange_month_date; + // day 1 - day 5 + else if (!strcmp(buffer[0], "day") && !strcmp(buffer[1], "day")) + type = Daterange_TypeRange_month_day; + } else if (sscanf(line.c_str(), "%[a-z] %d 
%[a-z] %n", buffer[0], + &week_day_start_offset, buffer[1], &pos) == 3) { + // thursday 3 february + if (_get_day_id(buffer[0], week_day_start) && + _get_month_id(buffer[1], month_start)) { + month_end = month_start; + week_day_end = week_day_start; + week_day_end_offset = week_day_start_offset; + type = Daterange_TypeRange_month_week_day; + } + } else if (sscanf(line.c_str(), "%[a-z] %d %n", buffer[0], &month_day_start, + &pos) == 2) { + // thursday 2 + if (_get_day_id(buffer[0], week_day_start)) { + week_day_start_offset = month_day_start; + week_day_end = week_day_start; + week_day_end_offset = week_day_start_offset; + type = Daterange_TypeRange_week_day; + } + // february 3 + else if (_get_month_id(buffer[0], month_start)) { + month_end = month_start; + month_day_end = month_day_start; + type = Daterange_TypeRange_month_date; + } + // day 1 + else if (!strcmp(buffer[0], "day")) { + month_day_end = month_day_start; + type = Daterange_TypeRange_month_day; + } + } + + if (type != Daterange_TypeRange_none) { + Timeperiod* obj = static_cast(mut_obj()); + Daterange* range = nullptr; + switch (type) { + case Daterange_TypeRange_month_day: { + range = obj->mutable_exceptions()->add_month_day(); + range->set_type(type); + range->set_smday(month_day_start); + range->set_emday(month_day_end); + } break; + case Daterange_TypeRange_month_week_day: { + range = obj->mutable_exceptions()->add_month_week_day(); + range->set_type(type); + range->set_smon(month_start); + range->set_swday(week_day_start); + range->set_swday_offset(week_day_start_offset); + range->set_emon(month_end); + range->set_ewday(week_day_end); + range->set_ewday_offset(week_day_end_offset); + } break; + case Daterange_TypeRange_week_day: { + range = obj->mutable_exceptions()->add_week_day(); + range->set_type(type); + range->set_swday(week_day_start); + range->set_swday_offset(week_day_start_offset); + range->set_ewday(week_day_end); + range->set_ewday_offset(week_day_end_offset); + } break; + case 
Daterange_TypeRange_month_date: { + range = obj->mutable_exceptions()->add_month_date(); + range->set_type(type); + range->set_smon(month_start); + range->set_smday(month_day_start); + range->set_emon(month_end); + range->set_emday(month_day_end); + } break; + default: + return false; + } + range->set_skip_interval(skip_interval); + + if (!_build_timeranges(line.substr(pos), *range->mutable_timerange())) + return false; + + return true; + } + return false; +} + +/** + * Get the week day id. + * + * @param[in] name The week day name. + * @param[out] id The id to fill. + * + * @return True on success, otherwise false. + */ +bool timeperiod_helper::_get_day_id(std::string_view name, uint32_t& id) { + static const absl::flat_hash_map days = { + {"sunday", 0}, {"monday", 1}, {"tuesday", 2}, {"wednesday", 3}, + {"thursday", 4}, {"friday", 5}, {"saturday", 6}, + }; + auto found = days.find(name); + if (found != days.end()) { + id = found->second; + return true; + } else + return false; +} + +/** + * Get the month id. + * + * @param[in] name The month name. + * @param[out] id The id to fill. + * + * @return True on success, otherwise false. 
 + */
+bool timeperiod_helper::_get_month_id(std::string_view name, uint32_t& id) {
+ static const absl::flat_hash_map months = {
+ {"january", 0}, {"february", 1}, {"march", 2}, {"april", 3},
+ {"may", 4}, {"june", 5}, {"july", 6}, {"august", 7},
+ {"september", 8}, {"october", 9}, {"november", 10}, {"december", 11},
+ };
+ auto found = months.find(name);
+ if (found != months.end()) {
+ id = found->second;
+ return true;
+ } else
+ return false;
+}
+
+std::string daterange_to_str(const Daterange& dr) {
+ static const std::array days{
+ "sunday", "monday", "tuesday", "wednesday",
+ "thursday", "friday", "saturday"};
+ static const std::array months{
+ "january", "february", "march", "april", "may", "june",
+ "july", "august", "september", "october", "november", "december"};
+ std::string retval;
+ switch (dr.type()) {
+ case Daterange_TypeRange_calendar_date: {
+ retval = fmt::format("{:02}-{:02}-{:02}", dr.syear(),
+ dr.smon() + 1, dr.smday());
+ if (dr.syear() != dr.eyear() || dr.smon() != dr.emon() ||
+ dr.smday() != dr.emday())
+ retval = fmt::format("{} - {:02}-{:02}-{:02} / {}", retval, dr.eyear(),
+ dr.emon() + 1, dr.emday(), dr.skip_interval());
+ } break;
+ case Daterange_TypeRange_month_date: {
+ retval = fmt::format("{} {}", months[dr.smon()], dr.smday());
+ if (dr.smon() != dr.emon())
+ retval =
+ fmt::format("{} - {} {}", retval, months[dr.emon()], dr.emday());
+ else if (dr.smday() != dr.emday())
+ retval = fmt::format("{} - {}", retval, dr.emday());
+ if (dr.skip_interval())
+ retval = fmt::format("{} / {}", retval, dr.skip_interval());
+ } break;
+ case Daterange_TypeRange_month_day: {
+ retval = fmt::format("day {}", dr.smday());
+ if (dr.smday() != dr.emday())
+ retval = fmt::format("{} - {}", retval, dr.emday());
+ if (dr.skip_interval())
+ retval = fmt::format("{} / {}", retval, dr.skip_interval());
+ } break;
+ case Daterange_TypeRange_month_week_day: {
+ retval = fmt::format("{} {} {}", days[dr.swday()], dr.swday_offset(),
+ 
 months[dr.smon()]);
+ if (dr.swday() != dr.ewday() || dr.swday_offset() != dr.ewday_offset() ||
+ dr.smon() != dr.emon())
+ retval = fmt::format("{} - {} {} {}", retval, days[dr.ewday()],
+ dr.ewday_offset(), months[dr.emon()]);
+ if (dr.skip_interval())
+ retval = fmt::format("{} / {}", retval, dr.skip_interval());
+ } break;
+ case Daterange_TypeRange_week_day: {
+ retval = fmt::format("{} {}", days[dr.swday()], dr.swday_offset());
+ if (dr.swday() != dr.ewday() || dr.swday_offset() != dr.ewday_offset())
+ retval = fmt::format("{} - {} {}", retval, days[dr.ewday()],
+ dr.ewday_offset());
+ if (dr.skip_interval())
+ retval = fmt::format("{} / {}", retval, dr.skip_interval());
+ } break;
+ default:
+ assert("should not arrive" == nullptr);
+ }
+ std::vector timeranges_str;
+ for (auto& t : dr.timerange()) {
+ uint32_t start_hours(t.range_start() / 3600);
+ uint32_t start_minutes((t.range_start() % 3600) / 60);
+ uint32_t end_hours(t.range_end() / 3600);
+ uint32_t end_minutes((t.range_end() % 3600) / 60);
+ timeranges_str.emplace_back(fmt::format("{:02}:{:02}-{:02}:{:02}",
+ start_hours, start_minutes,
+ end_hours, end_minutes));
+ }
+ retval = fmt::format("{} {}", retval, fmt::join(timeranges_str, ", "));
+ return retval;
+}
+} // namespace com::centreon::engine::configuration diff --git a/common/engine_conf/timeperiod_helper.hh b/common/engine_conf/timeperiod_helper.hh new file mode 100644 index 00000000000..e56c999bbb9 --- /dev/null +++ b/common/engine_conf/timeperiod_helper.hh @@ -0,0 +1,52 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#ifndef CCE_CONFIGURATION_TIMEPERIOD +#define CCE_CONFIGURATION_TIMEPERIOD + +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/state.pb.h" + +namespace com::centreon::engine::configuration { + +class timeperiod_helper : public message_helper { + void _init(); + bool _add_week_day(std::string_view key, std::string_view value); + bool _add_calendar_date(const std::string& line); + bool _add_other_date(const std::string& line); + bool _build_timeranges( + std::string_view line, + google::protobuf::RepeatedPtrField& timeranges); + bool _build_time_t(std::string_view time_str, time_t& ret); + bool _get_day_id(std::string_view name, uint32_t& id); + bool _get_month_id(std::string_view name, uint32_t& id); + + public: + timeperiod_helper(Timeperiod* obj); + ~timeperiod_helper() noexcept = default; + void check_validity(error_cnt& err) const override; + + bool hook(std::string_view key, const std::string_view& value) override; +}; + +std::string daterange_to_str(const Daterange& dr); + +} // namespace com::centreon::engine::configuration + +#endif /* !CCE_CONFIGURATION_TIMEPERIOD */ diff --git a/common/engine_legacy_conf/CMakeLists.txt b/common/engine_legacy_conf/CMakeLists.txt index 843412b0fcb..f7fb36ea749 100644 --- a/common/engine_legacy_conf/CMakeLists.txt +++ b/common/engine_legacy_conf/CMakeLists.txt @@ -16,37 +16,39 @@ # For more information : contact@centreon.com # -add_library( - engine_legacy_conf STATIC - anomalydetection.cc - command.cc - connector.cc 
- contact.cc - contactgroup.cc - customvariable.cc - daterange.cc - group.cc - host.cc - hostdependency.cc - hostescalation.cc - hostgroup.cc - object.cc - parser.cc - point_2d.cc - point_3d.cc - service.cc - servicedependency.cc - serviceescalation.cc - servicegroup.cc - severity.cc - state.cc - tag.cc - timeperiod.cc -) +if(LEGACY_ENGINE) + add_library( + engine_legacy_conf STATIC + anomalydetection.cc + command.cc + connector.cc + contact.cc + contactgroup.cc + customvariable.cc + daterange.cc + group.cc + host.cc + hostdependency.cc + hostescalation.cc + hostgroup.cc + object.cc + parser.cc + point_2d.cc + point_3d.cc + service.cc + servicedependency.cc + serviceescalation.cc + servicegroup.cc + severity.cc + state.cc + tag.cc + timeperiod.cc) -add_dependencies(engine_legacy_conf pb_neb_lib) -include_directories(${CMAKE_SOURCE_DIR}/common/inc) + add_dependencies(engine_legacy_conf pb_neb_lib pb_bam_lib) + include_directories(${CMAKE_SOURCE_DIR}/common/inc) -target_precompile_headers(engine_legacy_conf PRIVATE ${CMAKE_SOURCE_DIR}/common/precomp_inc/precomp.hh) -target_link_libraries(engine_legacy_conf log_v2 absl::any absl::log absl::base - absl::bits) + target_precompile_headers(engine_legacy_conf PRIVATE + ${CMAKE_SOURCE_DIR}/common/precomp_inc/precomp.hh) + target_link_libraries(engine_legacy_conf log_v2 absl::any absl::log + absl::base absl::bits) +endif() diff --git a/common/engine_legacy_conf/host.hh b/common/engine_legacy_conf/host.hh index 0c1257443bf..1c5d61b0632 100644 --- a/common/engine_legacy_conf/host.hh +++ b/common/engine_legacy_conf/host.hh @@ -28,9 +28,7 @@ using com::centreon::common::opt; -namespace com::centreon::engine { - -namespace configuration { +namespace com::centreon::engine::configuration { class host : public object { public: @@ -223,11 +221,9 @@ class host : public object { std::set> _tags; }; -typedef std::shared_ptr host_ptr; +using host_ptr = std::shared_ptr; typedef std::list list_host; using set_host = std::set; -} // 
namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_HOST_HH diff --git a/common/engine_legacy_conf/object.hh b/common/engine_legacy_conf/object.hh index db155f96e87..1cd5b4903fe 100644 --- a/common/engine_legacy_conf/object.hh +++ b/common/engine_legacy_conf/object.hh @@ -23,6 +23,10 @@ typedef std::list list_string; typedef std::set set_string; +#ifndef LEGACY_CONF +#error This file should not be included. +#endif + namespace com::centreon::engine { namespace configuration { diff --git a/common/engine_legacy_conf/parser.hh b/common/engine_legacy_conf/parser.hh index 4d1f2d424b9..44c095534ce 100644 --- a/common/engine_legacy_conf/parser.hh +++ b/common/engine_legacy_conf/parser.hh @@ -22,10 +22,9 @@ #include #include "file_info.hh" #include "state.hh" +#include "host.hh" -namespace com::centreon::engine { - -namespace configuration { +namespace com::centreon::engine::configuration { class parser { std::shared_ptr _logger; @@ -63,7 +62,8 @@ class parser { void _apply(std::list const& lst, void (parser::*pfunc)(std::string const&)); file_info const& _get_file_info(object* obj) const; - void _get_hosts_by_hostgroups(hostgroup const& hostgroups, list_host& hosts); + void _get_hosts_by_hostgroups(const hostgroup& hostgroups, + list_host& hosts); void _get_hosts_by_hostgroups_name(set_string const& lst_group, list_host& hosts); template @@ -95,8 +95,6 @@ class parser { static store _store[]; std::array _templates; }; -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_PARSER_HH diff --git a/common/grpc/CMakeLists.txt b/common/grpc/CMakeLists.txt index ac154d422f4..e2371114e20 100644 --- a/common/grpc/CMakeLists.txt +++ b/common/grpc/CMakeLists.txt @@ -33,5 +33,4 @@ target_include_directories(centreon_grpc PRIVATE ${INC_DIR}) target_precompile_headers(centreon_grpc REUSE_FROM 
centreon_common) -set_target_properties(centreon_grpc PROPERTIES COMPILE_FLAGS "-fPIC") - +set_target_properties(centreon_grpc PROPERTIES POSITION_INDEPENDENT_CODE ON) diff --git a/common/http/src/http_connection.cc b/common/http/src/http_connection.cc index f17ea0b5904..a964a78c730 100644 --- a/common/http/src/http_connection.cc +++ b/common/http/src/http_connection.cc @@ -84,7 +84,7 @@ void connection_base::gest_keepalive(const response_ptr& resp) { if (std::regex_search(keep_alive_info->value().begin(), keep_alive_info->value().end(), res, keep_alive_time_out_r)) { - uint second_duration; + unsigned int second_duration; if (absl::SimpleAtoi(res[1].str(), &second_duration)) { _keep_alive_end = system_clock::now() + std::chrono::seconds(second_duration); diff --git a/common/inc/com/centreon/common/utf8.hh b/common/inc/com/centreon/common/utf8.hh new file mode 100644 index 00000000000..a74dd6321cd --- /dev/null +++ b/common/inc/com/centreon/common/utf8.hh @@ -0,0 +1,49 @@ +/** + * Copyright 2023 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCCM_UTF8_HH +#define CCCM_UTF8_HH + +namespace com::centreon::common { + +/** + * @brief This function works almost like the resize method but takes care + * of the UTF-8 encoding and avoids to cut a string in the middle of a + * character. This function assumes the string to be UTF-8 encoded. + * + * @param str A string to truncate. 
+ * @param s The desired size, maybe the resulting string will contain less + * characters. + * + * @return a reference to the string str. + */ +template +fmt::string_view truncate_utf8(const T& str, size_t s) { + if (s >= str.size()) + return fmt::string_view(str); + if (s > 0) + while ((str[s] & 0xc0) == 0x80) + s--; + return fmt::string_view(str.data(), s); +} + +std::string check_string_utf8(const std::string_view& str) noexcept; +size_t adjust_size_utf8(const std::string& str, size_t s); +} // namespace com::centreon::common + +#endif diff --git a/common/log_v2/centreon_file_sink.hh b/common/log_v2/centreon_file_sink.hh new file mode 100644 index 00000000000..d8a1693af9d --- /dev/null +++ b/common/log_v2/centreon_file_sink.hh @@ -0,0 +1,102 @@ +/** + * Copyright(c) 2015-present, Gabi Melman & spdlog contributors. + * Distributed under the MIT License (http://opensource.org/licenses/MIT) + * + * This file is copied from basic_file_sink{-inl.h,.h} + * The goal here is just to add a method `reopen()` using the file_helper mutex. 
+ */ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace spdlog { +namespace sinks { +/* + * Trivial file sink with single file as target + */ +template +class centreon_file_sink final : public base_sink { + public: + explicit centreon_file_sink(const filename_t& filename, + bool truncate = false, + const file_event_handlers& event_handlers = {}); + const filename_t& filename() const; + void reopen(); + + protected: + void sink_it_(const details::log_msg& msg) override; + void flush_() override; + + private: + details::file_helper file_helper_; +}; + +using centreon_file_sink_mt = centreon_file_sink; +using centreon_file_sink_st = centreon_file_sink; + +template +SPDLOG_INLINE centreon_file_sink::centreon_file_sink( + const filename_t& filename, + bool truncate, + const file_event_handlers& event_handlers) + : file_helper_{event_handlers} { + file_helper_.open(filename, truncate); +} + +template +SPDLOG_INLINE const filename_t& centreon_file_sink::filename() const { + return file_helper_.filename(); +} + +template +SPDLOG_INLINE void centreon_file_sink::reopen() { + std::lock_guard lock(base_sink::mutex_); + file_helper_.reopen(false); +} + +template +SPDLOG_INLINE void centreon_file_sink::sink_it_( + const details::log_msg& msg) { + memory_buf_t formatted; + base_sink::formatter_->format(msg, formatted); + file_helper_.write(formatted); +} + +template +SPDLOG_INLINE void centreon_file_sink::flush_() { + file_helper_.flush(); +} +} // namespace sinks + +// +// factory functions +// +template +inline std::shared_ptr basic_logger_mt( + const std::string& logger_name, + const filename_t& filename, + bool truncate = false, + const file_event_handlers& event_handlers = {}) { + return Factory::template create( + logger_name, filename, truncate, event_handlers); +} + +template +inline std::shared_ptr basic_logger_st( + const std::string& logger_name, + const filename_t& filename, + bool truncate = false, + const 
file_event_handlers& event_handlers = {}) { + return Factory::template create( + logger_name, filename, truncate, event_handlers); +} + +} // namespace spdlog diff --git a/common/log_v2/log_v2.cc b/common/log_v2/log_v2.cc index 94e857bc736..086e8ac0963 100644 --- a/common/log_v2/log_v2.cc +++ b/common/log_v2/log_v2.cc @@ -21,12 +21,12 @@ #include #include #include -#include #include #include #include #include #include +#include "centreon_file_sink.hh" #include #include @@ -37,7 +37,7 @@ using namespace spdlog; log_v2* log_v2::_instance = nullptr; -const std::array log_v2::_logger_name = { +constexpr std::array logger_name{ "core", "config", "bam", @@ -67,7 +67,7 @@ const std::array log_v2::_logger_name = { "comments", "macros", "runtime", - "otel"}; + "otl"}; /** * @brief this function is passed to grpc in order to log grpc layer's events to @@ -200,8 +200,8 @@ void log_v2::set_flush_interval(uint32_t second_flush_interval) { */ log_v2::logger_id log_v2::get_id(const std::string& name) const noexcept { uint32_t retval; - for (retval = 0; retval < _logger_name.size(); retval++) { - if (_logger_name[retval] == name) + for (retval = 0; retval < logger_name.size(); retval++) { + if (logger_name[retval] == name) return static_cast(retval); } return LOGGER_SIZE; @@ -227,7 +227,7 @@ void log_v2::create_loggers(config::logger_type typ, size_t length) { my_sink = std::make_shared( _file_path, _current_max_size, 99); else - my_sink = std::make_shared(_file_path); + my_sink = std::make_shared(_file_path); } break; case config::logger_type::LOGGER_SYSLOG: my_sink = std::make_shared(_log_name, 0, 0, true); @@ -239,7 +239,8 @@ void log_v2::create_loggers(config::logger_type typ, size_t length) { for (int32_t id = 0; id < LOGGER_SIZE; id++) { std::shared_ptr logger; - logger = std::make_shared(_logger_name[id], my_sink); + logger = std::make_shared( + std::string(logger_name[id].data(), logger_name[id].size()), my_sink); if (_log_pid) { if (_log_source) logger->set_pattern( @@ 
-252,7 +253,10 @@ void log_v2::create_loggers(config::logger_type typ, size_t length) { else logger->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%n] [%l] %v"); } - logger->set_level(level::level_enum::info); + if (id > 1) + logger->set_level(level::level_enum::err); + else + logger->set_level(level::level_enum::info); spdlog::register_logger(logger); _loggers[id] = std::move(logger); @@ -299,7 +303,7 @@ void log_v2::apply(const config& log_conf) { my_sink = std::make_shared( _file_path, log_conf.max_size(), 99); else - my_sink = std::make_shared(_file_path); + my_sink = std::make_shared(_file_path); } break; case config::logger_type::LOGGER_SYSLOG: my_sink = @@ -314,7 +318,7 @@ void log_v2::apply(const config& log_conf) { std::vector sinks; /* Little hack to include the broker sink to engine loggers. */ - auto& name = _logger_name[id]; + auto& name = logger_name[id]; if (log_conf.loggers_with_custom_sinks().contains(name)) sinks = log_conf.custom_sinks(); @@ -359,7 +363,7 @@ void log_v2::apply(const config& log_conf) { spdlog::flush_every(_flush_interval); /* This is for all loggers, a slave will overwrite the master configuration */ for (int32_t id = 0; id < LOGGER_SIZE; id++) { - auto& name = _logger_name[id]; + auto& name = logger_name[id]; if (log_conf.loggers().contains(name)) { auto logger = _loggers[id]; level::level_enum lvl = level::from_str(log_conf.loggers().at(name)); @@ -370,6 +374,13 @@ void log_v2::apply(const config& log_conf) { logger->flush_on(lvl); } } + + for (auto& s : _loggers[0]->sinks()) { + spdlog::sinks::centreon_file_sink_mt* file_sink = + dynamic_cast(s.get()); + if (file_sink) + file_sink->reopen(); + } } /** @@ -380,10 +391,10 @@ void log_v2::apply(const config& log_conf) { * @return a boolean. 
*/ bool log_v2::contains_logger(std::string_view logger) const { - absl::flat_hash_set loggers; - for (auto& n : _logger_name) - loggers.insert(n); - return loggers.contains(logger); + for (auto& n : logger_name) + if (n == logger) + return true; + return false; } /** diff --git a/common/log_v2/log_v2.hh b/common/log_v2/log_v2.hh index efcec015be5..adfe70d1333 100644 --- a/common/log_v2/log_v2.hh +++ b/common/log_v2/log_v2.hh @@ -82,7 +82,7 @@ class log_v2 { COMMENTS = 26, MACROS = 27, RUNTIME = 28, - OTEL = 29, + OTL = 29, LOGGER_SIZE }; @@ -91,9 +91,7 @@ class log_v2 { std::string _log_name; std::chrono::seconds _flush_interval; std::string _file_path; - const static std::array _logger_name; std::array, LOGGER_SIZE> _loggers; - std::atomic _current_log_type; size_t _current_max_size = 0U; bool _log_pid = false; bool _log_source = false; diff --git a/common/precomp_inc/precomp.hh b/common/precomp_inc/precomp.hh index d7b064ec501..227f2533caa 100644 --- a/common/precomp_inc/precomp.hh +++ b/common/precomp_inc/precomp.hh @@ -47,8 +47,10 @@ #include #include +#ifndef _WINDOWS #include #include +#endif #include #include "com/centreon/exceptions/msg_fmt.hh" diff --git a/common/process/CMakeLists.txt b/common/process/CMakeLists.txt index cf33af66177..f79bbaaa657 100644 --- a/common/process/CMakeLists.txt +++ b/common/process/CMakeLists.txt @@ -16,14 +16,14 @@ # For more information : contact@centreon.com # -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${PROJECT_SOURCE_DIR}/process/inc) add_definitions(-DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) add_definitions(${spdlog_DEFINITIONS}) add_library( centreon_process STATIC # Sources. 
- process.cc) + src/process.cc) target_precompile_headers(centreon_process REUSE_FROM centreon_common) diff --git a/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh b/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh new file mode 100644 index 00000000000..79dc1eac355 --- /dev/null +++ b/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh @@ -0,0 +1,275 @@ +#ifndef CENTREON_POSIX_PROCESS_LAUNCHER_HH +#define CENTREON_POSIX_PROCESS_LAUNCHER_HH + +#include +#include + +namespace boost::process::v2::posix { + +struct centreon_posix_default_launcher; + +struct centreon_process_stdio { + boost::process::v2::detail::process_input_binding in; + boost::process::v2::detail::process_output_binding out; + boost::process::v2::detail::process_error_binding err; + + error_code on_exec_setup(centreon_posix_default_launcher& launcher, + const filesystem::path&, + const char* const*) { + if (::dup2(in.fd, in.target) == -1) + return error_code(errno, system_category()); + + if (::dup2(out.fd, out.target) == -1) + return error_code(errno, system_category()); + + if (::dup2(err.fd, err.target) == -1) + return error_code(errno, system_category()); + + return error_code{}; + }; +}; + +/** + * This class is a copy of posix::default_launcher + * as io_context::notify_fork can hang on child process and as we don't care + * about child process in asio as we will do an exec, it's removed + */ +struct centreon_posix_default_launcher { + /// The pointer to the environment forwarded to the subprocess. + const char* const* env = ::environ; + /// The pid of the subprocess - will be assigned after fork. + int pid = -1; + + /// The whitelist for file descriptors. 
+ std::vector fd_whitelist = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}; + + centreon_posix_default_launcher() = default; + + template + auto operator()( + ExecutionContext& context, + const typename std::enable_if< + std::is_convertible< + ExecutionContext&, + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context&>::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... inits) + -> basic_process { + error_code ec; + auto proc = (*this)(context, ec, executable, std::forward(args), + std::forward(inits)...); + + if (ec) + v2::detail::throw_error(ec, "centreon_posix_default_launcher"); + + return proc; + } + + template + auto operator()( + ExecutionContext& context, + error_code& ec, + const typename std::enable_if< + std::is_convertible< + ExecutionContext&, + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context&>::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... inits) + -> basic_process { + return (*this)(context.get_executor(), executable, std::forward(args), + std::forward(inits)...); + } + + template + auto operator()( + Executor exec, + const typename std::enable_if< + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution::is_executor< + Executor>::value || + BOOST_PROCESS_V2_ASIO_NAMESPACE::is_executor::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... inits) -> basic_process { + error_code ec; + auto proc = + (*this)(std::move(exec), ec, executable, std::forward(args), + std::forward(inits)...); + + if (ec) + v2::detail::throw_error(ec, "centreon_posix_default_launcher"); + + return proc; + } + + template + auto operator()( + Executor exec, + error_code& ec, + const typename std::enable_if< + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution::is_executor< + Executor>::value || + BOOST_PROCESS_V2_ASIO_NAMESPACE::is_executor::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... 
inits) -> basic_process { + auto argv = this->build_argv_(executable, std::forward(args)); + { + pipe_guard pg; + if (::pipe(pg.p)) { + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + return basic_process{exec}; + } + if (::fcntl(pg.p[1], F_SETFD, FD_CLOEXEC)) { + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + return basic_process{exec}; + } + ec = detail::on_setup(*this, executable, argv, inits...); + if (ec) { + detail::on_error(*this, executable, argv, ec, inits...); + return basic_process(exec); + } + fd_whitelist.push_back(pg.p[1]); + + auto& ctx = BOOST_PROCESS_V2_ASIO_NAMESPACE::query( + exec, BOOST_PROCESS_V2_ASIO_NAMESPACE::execution::context); + ctx.notify_fork( + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_prepare); + pid = ::fork(); + if (pid == -1) { + ctx.notify_fork( + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_parent); + detail::on_fork_error(*this, executable, argv, ec, inits...); + detail::on_error(*this, executable, argv, ec, inits...); + + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + return basic_process{exec}; + } else if (pid == 0) { + ::close(pg.p[0]); + /** + * ctx.notify_fork calls epoll_reactor::notify_fork which locks + * registered_descriptors_mutex_ An issue occurs when + * registered_descriptors_mutex_ is locked by another thread at fork + * timepoint. In such a case, child process starts with + * registered_descriptors_mutex_ already locked and both child and + * parent process will hang. 
+ */ + // ctx.notify_fork(BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_child); + ec = detail::on_exec_setup(*this, executable, argv, inits...); + if (!ec) { + close_all_fds(ec); + } + if (!ec) + ::execve(executable.c_str(), const_cast(argv), + const_cast(env)); + + ignore_unused(::write(pg.p[1], &errno, sizeof(int))); + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + detail::on_exec_error(*this, executable, argv, ec, inits...); + ::exit(EXIT_FAILURE); + return basic_process{exec}; + } + + ctx.notify_fork( + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_parent); + ::close(pg.p[1]); + pg.p[1] = -1; + int child_error{0}; + int count = -1; + while ((count = ::read(pg.p[0], &child_error, sizeof(child_error))) == + -1) { + int err = errno; + if ((err != EAGAIN) && (err != EINTR)) { + BOOST_PROCESS_V2_ASSIGN_EC(ec, err, system_category()) + break; + } + } + if (count != 0) + BOOST_PROCESS_V2_ASSIGN_EC(ec, child_error, system_category()) + + if (ec) { + detail::on_error(*this, executable, argv, ec, inits...); + return basic_process{exec}; + } + } + basic_process proc(exec, pid); + detail::on_success(*this, executable, argv, ec, inits...); + return proc; + } + + protected: + void ignore_unused(std::size_t) {} + void close_all_fds(error_code& ec) { + std::sort(fd_whitelist.begin(), fd_whitelist.end()); + detail::close_all(fd_whitelist, ec); + fd_whitelist = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}; + } + + struct pipe_guard { + int p[2]; + pipe_guard() : p{-1, -1} {} + + ~pipe_guard() { + if (p[0] != -1) + ::close(p[0]); + if (p[1] != -1) + ::close(p[1]); + } + }; + + // if we need to allocate something + std::vector argv_buffer_; + std::vector argv_; + + template + const char* const* build_argv_( + const filesystem::path& pt, + const Args& args, + typename std::enable_if< + std::is_convertible())), + cstring_ref>::value>::type* = nullptr) { + const auto arg_cnt = std::distance(std::begin(args), std::end(args)); + argv_.reserve(arg_cnt + 
2); + argv_.push_back(pt.native().data()); + for (auto&& arg : args) + argv_.push_back(arg.c_str()); + + argv_.push_back(nullptr); + return argv_.data(); + } + + const char* const* build_argv_(const filesystem::path&, const char** argv) { + return argv; + } + + template + const char* const* build_argv_( + const filesystem::path& pt, + const Args& args, + typename std::enable_if< + !std::is_convertible())), + cstring_ref>::value>::type* = nullptr) { + const auto arg_cnt = std::distance(std::begin(args), std::end(args)); + argv_.reserve(arg_cnt + 2); + argv_buffer_.reserve(arg_cnt); + argv_.push_back(pt.native().data()); + + using char_type = + typename decay()))[0])>::type; + + for (basic_string_view arg : args) + argv_buffer_.push_back( + v2::detail::conv_string(arg.data(), arg.size())); + + for (auto&& arg : argv_buffer_) + argv_.push_back(arg.c_str()); + + argv_.push_back(nullptr); + return argv_.data(); + } +}; + +} // namespace boost::process::v2::posix + +#endif diff --git a/common/inc/com/centreon/common/process.hh b/common/process/inc/com/centreon/common/process/process.hh similarity index 71% rename from common/inc/com/centreon/common/process.hh rename to common/process/inc/com/centreon/common/process/process.hh index caca6a1dbc9..06a6799bd3b 100644 --- a/common/inc/com/centreon/common/process.hh +++ b/common/process/inc/com/centreon/common/process/process.hh @@ -26,6 +26,33 @@ namespace detail { struct boost_process; } // namespace detail +namespace detail { +template +class mutex; + +template +class lock; + +template <> +class mutex : public absl::Mutex {}; + +template <> +class lock : public absl::MutexLock { + public: + lock(absl::Mutex* mut) : absl::MutexLock(mut) {} +}; + +template <> +class mutex {}; + +template <> +class lock { + public: + lock(mutex* dummy_mut) {} +}; + +} // namespace detail + /** * @brief This class allow to exec a process asynchronously. * It's a base class. 
If you want to get stdin and stdout returned data, you @@ -37,22 +64,23 @@ struct boost_process; * When completion methods like on_stdout_read are called, _protect is already * locked */ -class process : public std::enable_shared_from_this { + +template +class process : public std::enable_shared_from_this> { + using std::enable_shared_from_this>::shared_from_this; std::string _exe_path; std::vector _args; - std::deque> _stdin_write_queue - ABSL_GUARDED_BY(_protect); - bool _write_pending ABSL_GUARDED_BY(_protect) = false; + std::deque> _stdin_write_queue; + bool _write_pending; - std::shared_ptr _proc ABSL_GUARDED_BY(_protect); + std::shared_ptr _proc; int _exit_status = 0; - absl::Mutex _protect; + detail::mutex _protect; - void stdin_write_no_lock(const std::shared_ptr& data) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(_protect); + void stdin_write_no_lock(const std::shared_ptr& data); void stdin_write(const std::shared_ptr& data); void stdout_read(); @@ -62,22 +90,18 @@ class process : public std::enable_shared_from_this { std::shared_ptr _io_context; std::shared_ptr _logger; - char _stdout_read_buffer[0x1000] ABSL_GUARDED_BY(_protect); - char _stderr_read_buffer[0x1000] ABSL_GUARDED_BY(_protect); + char _stdout_read_buffer[0x1000]; + char _stderr_read_buffer[0x1000]; virtual void on_stdout_read(const boost::system::error_code& err, - size_t nb_read) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(_protect); + size_t nb_read); virtual void on_stderr_read(const boost::system::error_code& err, - size_t nb_read) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(_protect); + size_t nb_read); virtual void on_process_end(const boost::system::error_code& err, - int raw_exit_status) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(_protect); + int raw_exit_status); - virtual void on_stdin_write(const boost::system::error_code& err) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(_protect); + virtual void on_stdin_write(const boost::system::error_code& err); public: template @@ -108,7 +132,7 @@ class process : public std::enable_shared_from_this { 
template void write_to_stdin(const string_class& content); - void start_process(); + void start_process(bool enable_stdin); void kill(); @@ -126,12 +150,13 @@ class process : public std::enable_shared_from_this { * @param arg_begin iterator to first argument * @param arg_end iterator after the last argument */ +template template -process::process(const std::shared_ptr& io_context, - const std::shared_ptr& logger, - const std::string_view& exe_path, - string_iterator arg_begin, - string_iterator arg_end) +process::process(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& exe_path, + string_iterator arg_begin, + string_iterator arg_end) : _exe_path(exe_path), _args(arg_begin, arg_end), _io_context(io_context), @@ -146,11 +171,13 @@ process::process(const std::shared_ptr& io_context, * @param exe_path path of executable without argument * @param args container of arguments */ +template template -process::process(const std::shared_ptr& io_context, - const std::shared_ptr& logger, - const std::string_view& exe_path, - const args_container& args) +process::process( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& exe_path, + const args_container& args) : _exe_path(exe_path), _args(args), _io_context(io_context), @@ -166,11 +193,13 @@ process::process(const std::shared_ptr& io_context, * @param exe_path path of executable without argument * @param args brace of arguments {"--flag1", "arg1", "-c", "arg2"} */ +template template -process::process(const std::shared_ptr& io_context, - const std::shared_ptr& logger, - const std::string_view& exe_path, - const std::initializer_list& args) +process::process( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& exe_path, + const std::initializer_list& args) : _exe_path(exe_path), _io_context(io_context), _logger(logger) { _args.reserve(args.size()); for (const auto& str : args) { @@ -185,8 +214,9 @@ 
process::process(const std::shared_ptr& io_context, * can be used to construct a std::string * @param content */ +template template -void process::write_to_stdin(const string_class& content) { +void process::write_to_stdin(const string_class& content) { stdin_write(std::make_shared(content)); } diff --git a/common/process/process.cc b/common/process/src/process.cc similarity index 51% rename from common/process/process.cc rename to common/process/src/process.cc index 9e0282b38fb..6036a0fca19 100644 --- a/common/process/process.cc +++ b/common/process/src/process.cc @@ -1,27 +1,31 @@ -/* +/** * Copyright 2024 Centreon * - * This file is part of Centreon Engine. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. + * http://www.apache.org/licenses/LICENSE-2.0 * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . 
+ * For more information : contact@centreon.com */ -#include #include #include -#include "process.hh" +#include "com/centreon/common/process/process.hh" + +#if !defined(BOOST_PROCESS_V2_WINDOWS) +#include "com/centreon/common/process/detail/centreon_posix_process_launcher.hh" +#endif + +#include namespace proc = boost::process::v2; @@ -32,20 +36,100 @@ namespace com::centreon::common::detail { * */ struct boost_process { +#if defined(BOOST_PROCESS_V2_WINDOWS) + /** + * @brief Construct a new boost process object + * stdin of the child process is managed + * + * @param io_context + * @param exe_path absolute or relative exe path + * @param args arguments of the command + */ boost_process(asio::io_context& io_context, const std::string& exe_path, const std::vector& args) - : stdout(io_context), - stderr(io_context), - stdin(io_context), + : stdout_pipe(io_context), + stderr_pipe(io_context), + stdin_pipe(io_context), proc(io_context, exe_path, args, - proc::process_stdio{stdin, stdout, stderr}) {} + proc::process_stdio{stdin_pipe, stdout_pipe, stderr_pipe}) {} + + /** + * @brief Construct a new boost process object + * stdin of the child process is not managed + * + * @param io_context + * @param logger + * @param cmd_line cmd line split (the first element is the path of the + * executable) + * @param no_stdin (not used) + */ + boost_process(asio::io_context& io_context, + const std::string& exe_path, + const std::vector& args, + bool no_stdin) + : stdout_pipe(io_context), + stderr_pipe(io_context), + stdin_pipe(io_context), + proc(io_context, + exe_path, + args, + proc::process_stdio{{}, stdout_pipe, stderr_pipe}) {} + +#else + /** + * @brief Construct a new boost process object + * stdin of the child process is managed + * + * @param io_context + * @param exe_path absolute or relative exe path + * @param args arguments of the command + */ + boost_process(asio::io_context& io_context, + const std::string& exe_path, + const std::vector& args) + : 
stdout_pipe(io_context), + stderr_pipe(io_context), + stdin_pipe(io_context), + proc(proc::posix::centreon_posix_default_launcher()( + io_context.get_executor(), + exe_path, + args, + proc::posix::centreon_process_stdio{stdin_pipe, stdout_pipe, + stderr_pipe})) {} - asio::readable_pipe stdout; - asio::readable_pipe stderr; - asio::writable_pipe stdin; + /** + * @brief Construct a new boost process object + * stdin of the child process is not managed + * + * @param io_context + * @param logger + * @param cmd_line cmd line split (the first element is the path of the + * executable) + * @param no_stdin (not used) + */ + boost_process(asio::io_context& io_context, + const std::string& exe_path, + const std::vector& args, + bool no_stdin) + : stdout_pipe(io_context), + stderr_pipe(io_context), + stdin_pipe(io_context), + proc(proc::posix::centreon_posix_default_launcher()( + io_context, + exe_path, + args, + proc::posix::centreon_process_stdio{{}, + stdout_pipe, + stderr_pipe})) {} + +#endif + + asio::readable_pipe stdout_pipe; + asio::readable_pipe stderr_pipe; + asio::writable_pipe stdin_pipe; proc::process proc; }; } // namespace com::centreon::common::detail @@ -60,11 +144,17 @@ using namespace com::centreon::common; * @param cmd_line cmd line split (the first element is the path of the * executable) */ -process::process(const std::shared_ptr& io_context, - const std::shared_ptr& logger, - const std::string_view& cmd_line) +template +process::process( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& cmd_line) : _io_context(io_context), _logger(logger) { +#ifdef _WINDOWS + auto split_res = boost::program_options::split_winmain(std::string(cmd_line)); +#else auto split_res = boost::program_options::split_unix(std::string(cmd_line)); +#endif if (split_res.begin() == split_res.end()) { SPDLOG_LOGGER_ERROR(_logger, "empty command line:\"{}\"", cmd_line); throw exceptions::msg_fmt("empty command line:\"{}\"", cmd_line); @@ 
-82,20 +172,28 @@ process::process(const std::shared_ptr& io_context, * In this function, we start child process and stdout, stderr asynchronous read * we also start an asynchronous read on process fd to be aware of child process * termination + * + * @param enable_stdin On Windows set it to false if you doesn't want to write + * on child stdin */ -void process::start_process() { +template +void process::start_process(bool enable_stdin) { SPDLOG_LOGGER_DEBUG(_logger, "start process: {}", _exe_path); - absl::MutexLock l(&_protect); + detail::lock l(&_protect); _stdin_write_queue.clear(); _write_pending = false; try { - _proc = - std::make_shared(*_io_context, _exe_path, _args); + _proc = enable_stdin ? std::make_shared( + *_io_context, _exe_path, _args) + : std::make_shared( + *_io_context, _exe_path, _args, false); + SPDLOG_LOGGER_TRACE(_logger, "process started: {} pid: {}", _exe_path, + _proc->proc.id()); _proc->proc.async_wait( [me = shared_from_this(), current = _proc]( const boost::system::error_code& err, int raw_exit_status) { - absl::MutexLock l(&me->_protect); + detail::lock l(&me->_protect); if (current != me->_proc) { return; } @@ -115,8 +213,9 @@ void process::start_process() { * @param err * @param raw_exit_status end status of the process */ -void process::on_process_end(const boost::system::error_code& err, - int raw_exit_status) { +template +void process::on_process_end(const boost::system::error_code& err, + int raw_exit_status) { if (err) { SPDLOG_LOGGER_ERROR(_logger, "fail async_wait of {}: {}", _exe_path, err.message()); @@ -132,12 +231,17 @@ void process::on_process_end(const boost::system::error_code& err, * @brief kill child process * */ -void process::kill() { - absl::MutexLock l(&_protect); +template +void process::kill() { + detail::lock l(&_protect); if (_proc) { + SPDLOG_LOGGER_INFO(_logger, "kill process"); boost::system::error_code err; _proc->proc.terminate(err); - _proc.reset(); + if (err) { + SPDLOG_LOGGER_INFO(_logger, "fail to 
kill {}: {}", _exe_path, + err.message()); + } } } @@ -147,8 +251,9 @@ void process::kill() { * * @param data */ -void process::stdin_write(const std::shared_ptr& data) { - absl::MutexLock l(&_protect); +template +void process::stdin_write(const std::shared_ptr& data) { + detail::lock l(&_protect); stdin_write_no_lock(data); } @@ -158,7 +263,9 @@ void process::stdin_write(const std::shared_ptr& data) { * * @param data */ -void process::stdin_write_no_lock(const std::shared_ptr& data) { +template +void process::stdin_write_no_lock( + const std::shared_ptr& data) { if (!_proc) { SPDLOG_LOGGER_ERROR(_logger, "stdin_write process {} not started", _exe_path); @@ -169,11 +276,11 @@ void process::stdin_write_no_lock(const std::shared_ptr& data) { } else { try { _write_pending = true; - _proc->stdin.async_write_some( + _proc->stdin_pipe.async_write_some( asio::buffer(*data), [me = shared_from_this(), caller = _proc, data]( const boost::system::error_code& err, size_t nb_written) { - absl::MutexLock l(&me->_protect); + detail::lock l(&me->_protect); if (caller != me->_proc) { return; } @@ -195,7 +302,8 @@ void process::stdin_write_no_lock(const std::shared_ptr& data) { * * @param err */ -void process::on_stdin_write(const boost::system::error_code& err) { +template +void process::on_stdin_write(const boost::system::error_code& err) { _write_pending = false; if (err) { @@ -222,14 +330,15 @@ void process::on_stdin_write(const boost::system::error_code& err) { * @brief asynchronous read from child process stdout * */ -void process::stdout_read() { +template +void process::stdout_read() { if (_proc) { try { - _proc->stdout.async_read_some( + _proc->stdout_pipe.async_read_some( asio::buffer(_stdout_read_buffer), [me = shared_from_this(), caller = _proc]( const boost::system::error_code& err, size_t nb_read) { - absl::MutexLock l(&me->_protect); + detail::lock l(&me->_protect); if (caller != me->_proc) { return; } @@ -237,7 +346,7 @@ void process::stdout_read() { }); } catch 
(const std::exception& e) { _io_context->post([me = shared_from_this(), caller = _proc]() { - absl::MutexLock l(&me->_protect); + detail::lock l(&me->_protect); me->on_stdout_read(std::make_error_code(std::errc::broken_pipe), 0); }); } @@ -252,15 +361,16 @@ void process::stdout_read() { * @param err * @param nb_read */ -void process::on_stdout_read(const boost::system::error_code& err, - size_t nb_read) { +template +void process::on_stdout_read(const boost::system::error_code& err, + size_t nb_read) { if (err) { - if (err == asio::error::eof) { + if (err == asio::error::eof || err == asio::error::broken_pipe) { SPDLOG_LOGGER_DEBUG(_logger, "fail read from stdout of process {}: {}", _exe_path, err.message()); } else { - SPDLOG_LOGGER_ERROR(_logger, "fail read from stdout of process {}: {}", - _exe_path, err.message()); + SPDLOG_LOGGER_ERROR(_logger, "fail read from stdout of process {}: {} {}", + _exe_path, err.value(), err.message()); } return; } @@ -273,14 +383,15 @@ void process::on_stdout_read(const boost::system::error_code& err, * @brief asynchronous read from child process stderr * */ -void process::stderr_read() { +template +void process::stderr_read() { if (_proc) { try { - _proc->stderr.async_read_some( + _proc->stderr_pipe.async_read_some( asio::buffer(_stderr_read_buffer), [me = shared_from_this(), caller = _proc]( const boost::system::error_code& err, size_t nb_read) { - absl::MutexLock l(&me->_protect); + detail::lock l(&me->_protect); if (caller != me->_proc) { return; } @@ -288,7 +399,7 @@ void process::stderr_read() { }); } catch (const std::exception& e) { _io_context->post([me = shared_from_this(), caller = _proc]() { - absl::MutexLock l(&me->_protect); + detail::lock l(&me->_protect); me->on_stderr_read(std::make_error_code(std::errc::broken_pipe), 0); }); } @@ -303,15 +414,16 @@ void process::stderr_read() { * @param err * @param nb_read */ -void process::on_stderr_read(const boost::system::error_code& err, - size_t nb_read) { +template +void 
process::on_stderr_read(const boost::system::error_code& err, + size_t nb_read) { if (err) { - if (err == asio::error::eof) { + if (err == asio::error::eof || err == asio::error::broken_pipe) { SPDLOG_LOGGER_DEBUG(_logger, "fail read from stderr of process {}: {}", _exe_path, err.message()); } else { - SPDLOG_LOGGER_ERROR(_logger, "fail read from stderr of process {}: {}", - _exe_path, err.message()); + SPDLOG_LOGGER_ERROR(_logger, "fail read from stderr of process {}: {} {}", + _exe_path, err.value(), err.message()); } } else { SPDLOG_LOGGER_TRACE(_logger, " process: {} read from stdout: {}", _exe_path, @@ -319,3 +431,11 @@ void process::on_stderr_read(const boost::system::error_code& err, stderr_read(); } } + +namespace com::centreon::common { + +template class process; + +template class process; + +} // namespace com::centreon::common \ No newline at end of file diff --git a/common/src/perfdata.cc b/common/src/perfdata.cc index fab01b147f5..80945b75950 100644 --- a/common/src/perfdata.cc +++ b/common/src/perfdata.cc @@ -16,6 +16,7 @@ * For more information : contact@centreon.com */ +#include #include #include "perfdata.hh" @@ -203,6 +204,8 @@ std::list perfdata::parse_perfdata( uint32_t service_id, const char* str, const std::shared_ptr& logger) { + absl::flat_hash_set metric_name; + std::string_view current_name; std::list retval; auto id = [host_id, service_id] { if (host_id || service_id) @@ -280,6 +283,15 @@ std::list perfdata::parse_perfdata( if (end - s + 1 > 0) { p._name.assign(s, end - s + 1); + current_name = std::string_view(s, end - s + 1); + + if (metric_name.contains(current_name)) { + logger->warn( + "storage: The metric '{}' appears several times in the output " + "\"{}\": you will lose any new occurence of this metric", + p.name(), str); + error = true; + } } else { logger->error("In service {}, metric name empty before '{}...'", id(), fmt::string_view(s, 10)); @@ -363,7 +375,8 @@ std::list perfdata::parse_perfdata( p.max()); // Append to list. 
- retval.emplace_back(std::move(p)); + metric_name.insert(current_name); + retval.push_back(std::move(p)); // Skip whitespaces. while (isspace(*tmp)) diff --git a/common/src/rapidjson_helper.cc b/common/src/rapidjson_helper.cc index 5663af59b64..2252cdc262f 100644 --- a/common/src/rapidjson_helper.cc +++ b/common/src/rapidjson_helper.cc @@ -345,7 +345,7 @@ const rapidjson::Value& rapidjson_helper::get_member( */ rapidjson::Document rapidjson_helper::read_from_file( const std::string_view& path) { - FILE* to_close = fopen(path.data(), "r+b"); + FILE* to_close = fopen(path.data(), "r"); if (!to_close) { throw exceptions::msg_fmt("Fail to read file '{}' : {}", path, strerror(errno)); diff --git a/common/src/utf8.cc b/common/src/utf8.cc new file mode 100644 index 00000000000..7ef6ebed5ed --- /dev/null +++ b/common/src/utf8.cc @@ -0,0 +1,275 @@ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +/** + * @brief Checks if the string given as parameter is a real UTF-8 string. + * If it is not, it tries to convert it to UTF-8. Encodings correctly changed + * are ISO-8859-15 and CP-1252. + * + * @param str The string to check + * + * @return The string itself or a new string converted to UTF-8. The output + * string should always be an UTF-8 string. 
+ */ + +#include "utf8.hh" + +std::string com::centreon::common::check_string_utf8( + const std::string_view& str) noexcept { + std::string_view::const_iterator it; + for (it = str.begin(); it != str.end();) { + uint32_t val = (*it & 0xff); + if ((val & 0x80) == 0) { + ++it; + continue; + } + val = (val << 8) | (*(it + 1) & 0xff); + if ((val & 0xe0c0) == 0xc080) { + val &= 0x1e00; + if (val == 0) + break; + it += 2; + continue; + } + + val = (val << 8) | (*(it + 2) & 0xff); + if ((val & 0xf0c0c0) == 0xe08080) { + val &= 0xf2000; + if (val == 0 || val == 0xd2000) + break; + it += 3; + continue; + } + + val = (val << 8) | (*(it + 3) & 0xff); + if ((val & 0xf8c0c0c0) == 0xF0808080) { + val &= 0x7300000; + if (val == 0 || val > 0x4000000) + break; + it += 4; + continue; + } + break; + } + + if (it == str.end()) + return std::string(str); + + /* Not an UTF-8 string */ + bool is_cp1252 = true, is_iso8859 = true; + auto itt = it; + + auto iso8859_to_utf8 = [&str, &it]() -> std::string { + /* Strings are both cp1252 and iso8859-15 */ + std::string out; + std::size_t d = it - str.begin(); + out.reserve(d + 2 * (str.size() - d)); + out = str.substr(0, d); + while (it != str.end()) { + uint8_t c = static_cast(*it); + if (c < 128) + out.push_back(c); + else if (c <= 160) + out.push_back('_'); + else { + switch (c) { + case 0xa4: + out.append("€"); + break; + case 0xa6: + out.append("Š"); + break; + case 0xa8: + out.append("š"); + break; + case 0xb4: + out.append("Ž"); + break; + case 0xb8: + out.append("ž"); + break; + case 0xbc: + out.append("Œ"); + break; + case 0xbd: + out.append("œ"); + break; + case 0xbe: + out.append("Ÿ"); + break; + default: + out.push_back(0xc0 | c >> 6); + out.push_back((c & 0x3f) | 0x80); + break; + } + } + ++it; + } + return out; + }; + do { + uint8_t c = *itt; + /* not ISO-8859-15 */ + if (c > 126 && c < 160) + is_iso8859 = false; + /* not cp1252 */ + if (c & 128) + if (c == 129 || c == 141 || c == 143 || c == 144 || c == 155) + is_cp1252 = false; 
+ if (!is_cp1252) + return iso8859_to_utf8(); + else if (!is_iso8859) { + std::string out; + std::size_t d = it - str.begin(); + out.reserve(d + 3 * (str.size() - d)); + out = str.substr(0, d); + while (it != str.end()) { + c = *it; + if (c < 128) + out.push_back(c); + else { + switch (c) { + case 128: + out.append("€"); + break; + case 129: + case 141: + case 143: + case 144: + case 157: + out.append("_"); + break; + case 130: + out.append("‚"); + break; + case 131: + out.append("ƒ"); + break; + case 132: + out.append("„"); + break; + case 133: + out.append("…"); + break; + case 134: + out.append("†"); + break; + case 135: + out.append("‡"); + break; + case 136: + out.append("ˆ"); + break; + case 137: + out.append("‰"); + break; + case 138: + out.append("Š"); + break; + case 139: + out.append("‹"); + break; + case 140: + out.append("Œ"); + break; + case 142: + out.append("Ž"); + break; + case 145: + out.append("‘"); + break; + case 146: + out.append("’"); + break; + case 147: + out.append("“"); + break; + case 148: + out.append("”"); + break; + case 149: + out.append("•"); + break; + case 150: + out.append("–"); + break; + case 151: + out.append("—"); + break; + case 152: + out.append("˜"); + break; + case 153: + out.append("™"); + break; + case 154: + out.append("š"); + break; + case 155: + out.append("›"); + break; + case 156: + out.append("œ"); + break; + case 158: + out.append("ž"); + break; + case 159: + out.append("Ÿ"); + break; + default: + out.push_back(0xc0 | c >> 6); + out.push_back((c & 0x3f) | 0x80); + break; + } + } + ++it; + } + return out; + } + ++itt; + } while (itt != str.end()); + assert(is_cp1252 == is_iso8859); + return iso8859_to_utf8(); +} + +/** + * @brief This function adjusts the given integer s so that the str string may + * be cut at this length and still be a UTF-8 string (we don't want to cut it + * in a middle of a character). + * + * This function assumes the string to be UTF-8 encoded. + * + * @param str A string to truncate. 
+ * @param s The desired size, maybe the resulting string will contain less + * characters. + * + * @return The newly computed size. + */ +size_t com::centreon::common::adjust_size_utf8(const std::string& str, + size_t s) { + if (s >= str.size()) + return str.size(); + if (s == 0) + return s; + else { + while ((str[s] & 0xc0) == 0x80) + s--; + return s; + } +} diff --git a/common/tests/CMakeLists.txt b/common/tests/CMakeLists.txt index fd673759850..d44100b1313 100644 --- a/common/tests/CMakeLists.txt +++ b/common/tests/CMakeLists.txt @@ -16,16 +16,27 @@ # For more information : contact@centreon.com # -add_executable(ut_common - process_stat_test.cc - hex_dump_test.cc - log_v2/log_v2.cc - node_allocator_test.cc - perfdata_test.cc - process_test.cc - rapidjson_helper_test.cc - test_main.cc - ${TESTS_SOURCES}) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_executable(ut_common + process_stat_test.cc + hex_dump_test.cc + log_v2/log_v2.cc + node_allocator_test.cc + perfdata_test.cc + process_test.cc + rapidjson_helper_test.cc + test_main.cc + utf8_test.cc + ${TESTS_SOURCES}) +else() + add_executable(ut_common + perfdata_test.cc + process_test.cc + test_main_win.cc + utf8_test.cc + ${TESTS_SOURCES}) +endif() set_target_properties( ut_common @@ -42,32 +53,60 @@ if(WITH_COVERAGE) set(GCOV gcov) endif() + +file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/echo.bat + DESTINATION ${CMAKE_BINARY_DIR}/tests) +file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/bad_script.bat + DESTINATION ${CMAKE_BINARY_DIR}/tests) + add_test(NAME tests COMMAND ut_common) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + target_link_libraries( + ut_common + PRIVATE centreon_common + centreon_http + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + re2::re2 + log_v2 + crypto + ssl + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + absl::any + absl::log + absl::base + absl::bits + fmt::fmt pthread) + + add_dependencies(ut_common centreon_common centreon_http) + 
+else() + target_link_libraries( + ut_common + PRIVATE centreon_common + centreon_process + Boost::program_options + re2::re2 + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + absl::any + absl::log + absl::base + absl::bits + fmt::fmt) + + add_dependencies(ut_common centreon_common) + +endif() -target_link_libraries( - ut_common - PRIVATE centreon_common - centreon_http - centreon_process - -L${Boost_LIBRARY_DIR_RELEASE} - boost_program_options - re2::re2 - log_v2 - crypto - ssl - GTest::gtest - GTest::gtest_main - GTest::gmock - GTest::gmock_main - absl::any - absl::log - absl::base - absl::bits - fmt::fmt pthread) - -add_dependencies(ut_common centreon_common centreon_http) set_property(TARGET ut_common PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/common/tests/log_v2/log_v2.cc b/common/tests/log_v2/log_v2.cc index 66ab44d67be..31e00f4906a 100644 --- a/common/tests/log_v2/log_v2.cc +++ b/common/tests/log_v2/log_v2.cc @@ -61,10 +61,12 @@ TEST_F(TestLogV2, LoggerUpdated) { const auto& core_logger = log_v2::instance().get(log_v2::CORE); ASSERT_EQ(core_logger->level(), spdlog::level::info); testing::internal::CaptureStdout(); - core_logger->info("First log"); - core_logger->debug("First debug log"); config cfg("/tmp/test.log", config::logger_type::LOGGER_STDOUT, 0, false, false); + cfg.set_level("core", "info"); + log_v2::instance().apply(cfg); + core_logger->info("First log"); + core_logger->debug("First debug log"); cfg.set_level("core", "debug"); log_v2::instance().apply(cfg); ASSERT_EQ(core_logger->level(), spdlog::level::debug); diff --git a/common/tests/perfdata_test.cc b/common/tests/perfdata_test.cc index 487300251bd..bab234f9522 100644 --- a/common/tests/perfdata_test.cc +++ b/common/tests/perfdata_test.cc @@ -246,6 +246,58 @@ TEST_F(PerfdataParser, Simple2) { ASSERT_TRUE(expected == *it); } +TEST_F(PerfdataParser, SeveralIdenticalMetrics) { + // Parse perfdata. 
+ std::list list{common::perfdata::parse_perfdata( + 0, 0, "'et'=18.00%;15:;10:;0;100 other=15 et=13.00%", _logger)}; + + // Assertions. + ASSERT_EQ(list.size(), 2u); + std::list::const_iterator it = list.begin(); + perfdata expected; + expected.name("et"); + expected.value_type(perfdata::gauge); + expected.value(18.0); + expected.unit("%"); + expected.warning(std::numeric_limits::infinity()); + expected.warning_low(15.0); + expected.critical(std::numeric_limits::infinity()); + expected.critical_low(10.0); + expected.min(0.0); + expected.max(100.0); + ASSERT_TRUE(expected == *it); + ++it; + ASSERT_EQ(it->name(), std::string_view("other")); + ASSERT_EQ(it->value(), 15); + ASSERT_EQ(it->value_type(), perfdata::gauge); +} + +TEST_F(PerfdataParser, ComplexSeveralIdenticalMetrics) { + // Parse perfdata. + std::list list{common::perfdata::parse_perfdata( + 0, 0, "'d[foo]'=18.00%;15:;10:;0;100 other=15 a[foo]=13.00%", _logger)}; + + // Assertions. + ASSERT_EQ(list.size(), 2u); + std::list::const_iterator it = list.begin(); + perfdata expected; + expected.name("foo"); + expected.value_type(perfdata::derive); + expected.value(18.0); + expected.unit("%"); + expected.warning(std::numeric_limits::infinity()); + expected.warning_low(15.0); + expected.critical(std::numeric_limits::infinity()); + expected.critical_low(10.0); + expected.min(0.0); + expected.max(100.0); + ASSERT_TRUE(expected == *it); + ++it; + ASSERT_EQ(it->name(), std::string_view("other")); + ASSERT_EQ(it->value(), 15); + ASSERT_EQ(it->value_type(), perfdata::gauge); +} + TEST_F(PerfdataParser, Complex1) { // Parse perfdata. 
std::list list{perfdata::parse_perfdata( diff --git a/common/tests/process_test.cc b/common/tests/process_test.cc index 449e000d3bc..325524a406a 100644 --- a/common/tests/process_test.cc +++ b/common/tests/process_test.cc @@ -19,11 +19,18 @@ #include #include -#include "pool.hh" -#include "process.hh" +#include "com/centreon/common/process/process.hh" using namespace com::centreon::common; +#ifdef _WINDOWS +#define ECHO_PATH "tests\\echo.bat" +#define END_OF_LINE "\r\n" +#else +#define ECHO_PATH "/bin/echo" +#define END_OF_LINE "\n" +#endif + extern std::shared_ptr g_io_context; static std::shared_ptr _logger = @@ -37,7 +44,8 @@ class process_test : public ::testing::Test { } }; -class process_wait : public process { +class process_wait : public process<> { + std::mutex _cond_m; std::condition_variable _cond; std::string _stdout; std::string _stderr; @@ -45,20 +53,25 @@ class process_wait : public process { bool _stderr_eof = false; bool _process_ended = false; - void _notify() { - if (_stdout_eof && _stderr_eof && _process_ended) { - _cond.notify_one(); - } + public: + void reset_end() { + std::lock_guard l(_cond_m); + _stdout_eof = false; + _stderr_eof = false; + _process_ended = false; } - public: void on_stdout_read(const boost::system::error_code& err, size_t nb_read) override { if (!err) { - _stdout += std::string_view(_stdout_read_buffer, nb_read); - } else if (err == asio::error::eof) { + std::string_view line(_stdout_read_buffer, nb_read); + _stdout += line; + SPDLOG_LOGGER_DEBUG(_logger, "read from stdout: {}", line); + } else if (err == asio::error::eof || err == asio::error::broken_pipe) { + std::unique_lock l(_cond_m); _stdout_eof = true; - _notify(); + l.unlock(); + _cond.notify_one(); } process::on_stdout_read(err, nb_read); } @@ -66,10 +79,14 @@ class process_wait : public process { void on_stderr_read(const boost::system::error_code& err, size_t nb_read) override { if (!err) { - _stderr += std::string_view(_stderr_read_buffer, nb_read); - } else if 
(err == asio::error::eof) { + std::string_view line(_stderr_read_buffer, nb_read); + _stderr += line; + SPDLOG_LOGGER_DEBUG(_logger, "read from stderr: {}", line); + } else if (err == asio::error::eof || err == asio::error::broken_pipe) { + std::unique_lock l(_cond_m); _stderr_eof = true; - _notify(); + l.unlock(); + _cond.notify_one(); } process::on_stderr_read(err, nb_read); } @@ -77,8 +94,11 @@ class process_wait : public process { void on_process_end(const boost::system::error_code& err, int raw_exit_status) override { process::on_process_end(err, raw_exit_status); + SPDLOG_LOGGER_DEBUG(_logger, "process end"); + std::unique_lock l(_cond_m); _process_ended = true; - _notify(); + l.unlock(); + _cond.notify_one(); } template @@ -97,20 +117,20 @@ class process_wait : public process { const std::string& get_stderr() const { return _stderr; } void wait() { - std::mutex dummy; - std::unique_lock l(dummy); - _cond.wait(l); + std::unique_lock l(_cond_m); + _cond.wait(l, + [this] { return _process_ended && _stderr_eof && _stdout_eof; }); } }; TEST_F(process_test, echo) { using namespace std::literals; std::shared_ptr to_wait( - new process_wait(g_io_context, _logger, "/bin/echo", {"hello"s})); - to_wait->start_process(); + new process_wait(g_io_context, _logger, ECHO_PATH, {"hello"s})); + to_wait->start_process(true); to_wait->wait(); ASSERT_EQ(to_wait->get_exit_status(), 0); - ASSERT_EQ(to_wait->get_stdout(), "hello\n"); + ASSERT_EQ(to_wait->get_stdout(), "hello" END_OF_LINE); ASSERT_EQ(to_wait->get_stderr(), ""); } @@ -118,14 +138,19 @@ TEST_F(process_test, throw_on_error) { using namespace std::literals; std::shared_ptr to_wait( new process_wait(g_io_context, _logger, "turlututu", {"hello"s})); - ASSERT_THROW(to_wait->start_process(), std::exception); + ASSERT_THROW(to_wait->start_process(true), std::exception); } TEST_F(process_test, script_error) { using namespace std::literals; +#ifdef _WINDOWS + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, 
"tests\\\\bad_script.bat")); +#else std::shared_ptr to_wait( new process_wait(g_io_context, _logger, "/bin/sh", {"taratata"s})); - to_wait->start_process(); +#endif + to_wait->start_process(true); to_wait->wait(); ASSERT_NE(to_wait->get_exit_status(), 0); ASSERT_EQ(to_wait->get_stdout(), ""); @@ -134,19 +159,37 @@ TEST_F(process_test, script_error) { TEST_F(process_test, call_start_several_time) { std::shared_ptr to_wait( - new process_wait(g_io_context, _logger, "/bin/echo", {"hello"})); + new process_wait(g_io_context, _logger, ECHO_PATH, {"hello"})); std::string expected; for (int ii = 0; ii < 10; ++ii) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - to_wait->start_process(); - expected += "hello\n"; + to_wait->reset_end(); + to_wait->start_process(true); + to_wait->wait(); + expected += "hello" END_OF_LINE; } - to_wait->wait(); ASSERT_EQ(to_wait->get_exit_status(), 0); ASSERT_EQ(to_wait->get_stdout(), expected); ASSERT_EQ(to_wait->get_stderr(), ""); } +TEST_F(process_test, call_start_several_time_no_args) { + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, ECHO_PATH " hello")); + std::string expected; + for (int ii = 0; ii < 10; ++ii) { + to_wait->reset_end(); + to_wait->start_process(true); + to_wait->wait(); + expected += "hello" END_OF_LINE; + } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + ASSERT_EQ(to_wait->get_exit_status(), 0); + ASSERT_EQ(to_wait->get_stdout(), expected); + ASSERT_EQ(to_wait->get_stderr(), ""); +} + +#ifndef _WINDOWS + TEST_F(process_test, stdin_to_stdout) { ::remove("toto.sh"); std::ofstream script("toto.sh"); @@ -155,7 +198,7 @@ TEST_F(process_test, stdin_to_stdout) { std::shared_ptr loopback( new process_wait(g_io_context, _logger, "/bin/sh toto.sh")); - loopback->start_process(); + loopback->start_process(true); std::string expected; for (unsigned ii = 0; ii < 10; ++ii) { @@ -174,7 +217,7 @@ TEST_F(process_test, shell_stdin_to_stdout) { std::shared_ptr loopback( new 
process_wait(g_io_context, _logger, "/bin/sh")); - loopback->start_process(); + loopback->start_process(true); std::string expected; for (unsigned ii = 0; ii < 10; ++ii) { @@ -188,3 +231,5 @@ TEST_F(process_test, shell_stdin_to_stdout) { std::this_thread::sleep_for(std::chrono::milliseconds(500)); ASSERT_EQ(expected, loopback->get_stdout()); } + +#endif diff --git a/common/tests/scripts/bad_script.bat b/common/tests/scripts/bad_script.bat new file mode 100644 index 00000000000..41297daaf43 --- /dev/null +++ b/common/tests/scripts/bad_script.bat @@ -0,0 +1,3 @@ +@echo off + +fzeurnezirfrf diff --git a/common/tests/scripts/echo.bat b/common/tests/scripts/echo.bat new file mode 100644 index 00000000000..8efa2965191 --- /dev/null +++ b/common/tests/scripts/echo.bat @@ -0,0 +1 @@ +@echo %* \ No newline at end of file diff --git a/common/tests/test_main_win.cc b/common/tests/test_main_win.cc new file mode 100644 index 00000000000..936fbda07b0 --- /dev/null +++ b/common/tests/test_main_win.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +std::shared_ptr g_io_context( + std::make_shared()); + +class CentreonEngineEnvironment : public testing::Environment { + public: + void TearDown() override { return; } +}; + +/** + * Tester entry point. + * + * @param[in] argc Argument count. 
+ * @param[in] argv Argument values. + * + * @return 0 on success, any other value on failure. + */ +int main(int argc, char* argv[]) { + // GTest initialization. + testing::InitGoogleTest(&argc, argv); + + auto _worker{asio::make_work_guard(*g_io_context)}; + + // Set specific environment. + testing::AddGlobalTestEnvironment(new CentreonEngineEnvironment()); + + std::thread asio_thread([] { g_io_context->run(); }); + // Run all tests. + int ret = RUN_ALL_TESTS(); + g_io_context->stop(); + asio_thread.join(); + spdlog::shutdown(); + return ret; +} diff --git a/common/tests/utf8_test.cc b/common/tests/utf8_test.cc new file mode 100644 index 00000000000..98376f390ce --- /dev/null +++ b/common/tests/utf8_test.cc @@ -0,0 +1,215 @@ +/** + * Copyright 2024 Centreon + * Licensed under the Apache License, Version 2.0(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "utf8.hh" + +using namespace com::centreon::common; + +/* + * Given a string encoded in ISO-8859-15 and CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, simple) { + std::string txt("L'acc\350s \340 l'h\364tel est encombr\351"); + ASSERT_EQ(check_string_utf8(txt), "L'accès à l'hôtel est encombré"); +} + +/* + * Given a string encoded in UTF-8 + * Then the check_string_utf8 function returns itself. 
+ */ +TEST(string_check_utf8, utf8) { + std::string txt("L'accès à l'hôtel est encombré"); + ASSERT_EQ(check_string_utf8(txt), "L'accès à l'hôtel est encombré"); +} + +/* + * Given a string encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, cp1252) { + std::string txt("Le ticket co\xfbte 12\x80\n"); + ASSERT_EQ(check_string_utf8(txt), "Le ticket coûte 12€\n"); +} + +/* + * Given a string encoded in ISO-8859-15 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, iso8859) { + std::string txt("Le ticket co\xfbte 12\xa4\n"); + ASSERT_EQ(check_string_utf8(txt), "Le ticket coûte 12€\n"); +} + +/* + * Given a string encoded in ISO-8859-15 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, iso8859_cpx) { + std::string txt("\xa4\xa6\xa8\xb4\xb8\xbc\xbd\xbe"); + ASSERT_EQ(check_string_utf8(txt), "€ŠšŽžŒœŸ"); +} + +/* + * Given a string encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, cp1252_cpx) { + std::string txt("\x80\x95\x82\x89\x8a"); + ASSERT_EQ(check_string_utf8(txt), "€•‚‰Š"); +} + +/* + * Given a string badly encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8 and replaces bad + * characters into '_'. + */ +TEST(string_check_utf8, whatever_as_cp1252) { + std::string txt; + for (uint8_t c = 32; c < 255; c++) + if (c != 127) + txt.push_back(c); + std::string result( + " !\"#$%&'()*+,-./" + "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" + "abcdefghijklmnopqrstuvwxyz{|}~€_‚ƒ„…†‡ˆ‰Š‹Œ_Ž__‘’“”•–—˜™š›œ_" + "žŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäå" + "æçèéêëìíîïðñòóôõö÷øùúûüýþ"); + ASSERT_EQ(check_string_utf8(txt), result); +} + +/* + * Given a string badly encoded in ISO-8859-15 + * Then the check_string_utf8 function converts it to UTF-8 and replaces bad + * characters into '_'. 
+ */ +TEST(string_check_utf8, whatever_as_iso8859) { + /* Construction of a string that is not cp1252 so it should be considered as + * iso8859-15 */ + std::string txt; + for (uint8_t c = 32; c < 255; c++) { + if (c == 32) + txt.push_back(0x81); + if (c != 127) + txt.push_back(c); + } + std::string result( + "_ " + "!\"#$%&'()*+,-./" + "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" + "abcdefghijklmnopqrstuvwxyz{|}~_________________________________" + "¡¢£€¥Š§š©ª«¬­®¯°±²³Žµ¶·ž¹º»ŒœŸ¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçè" + "éêëìíîïðñòóôõö÷øùúûüýþ"); + ASSERT_EQ(check_string_utf8(txt), result); +} + +/* + * In case of a string containing multiple encoding, the resulting string should + * be an UTF-8 string. Here we have a string beginning with UTF-8 and finishing + * with cp1252. The resulting string is good and is UTF-8 only encoded. + */ +TEST(string_check_utf8, utf8_and_cp1252) { + std::string txt( + "\xc3\xa9\xc3\xa7\xc3\xa8\xc3\xa0\xc3\xb9\xc3\xaf\xc3\xab\x7e\x23\x0a\xe9" + "\xe7\xe8\xe0\xf9\xef\xeb\x7e\x23\x0a"); + std::string result("éçèàùïë~#\néçèàùïë~#\n"); + ASSERT_EQ(check_string_utf8(txt), result); +} + +/* A check coming from windows with characters from the cmd console */ +TEST(string_check_utf8, strange_string) { + std::string txt( + "WARNING - [Triggered by _ItemCount>0] - 1 event(s) of Severity Level: " + "\"Error\", were recorded in the last 24 hours from the Application " + "Event Log. (List is on next line. Fields shown are - " + "Logfile:TimeGenerated:EventId:EventCode:SeverityLevel:Type:SourceName:" + "Message)|'Event " + "Count'=1;0;50;\nApplication:20200806000001.000000-000:3221243278:17806:" + "Erreur:MSSQLSERVER:╔chec de la nÚgociation SSPI avec le code " + "d'erreurá0x8009030c lors de l'Útablissement d'une connexion avec une " + "sÚcuritÚ intÚgrÚeá; la connexion a ÚtÚ fermÚe. 
[CLIENTá: X.X.X.X]"); + ASSERT_EQ(check_string_utf8(txt), txt); +} + +/* A check coming from windows with characters from the cmd console */ +TEST(string_check_utf8, chinese) { + std::string txt("超级杀手死亡检查"); + ASSERT_EQ(check_string_utf8(txt), txt); +} + +/* A check coming from windows with characters from the cmd console */ +TEST(string_check_utf8, vietnam) { + std::string txt( + "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong " + "chinese 告警数量 output puté! | '告警数量'=42\navé dé long ouput oçi " + "还有中国人! Hái yǒu zhòng guó rén!"); + ASSERT_EQ(check_string_utf8(txt), txt); +} + +TEST(truncate, nominal1) { + std::string str("foobar"); + ASSERT_EQ(truncate_utf8(str, 3), "foo"); +} + +TEST(truncate, nominal2) { + std::string str("foobar"); + ASSERT_EQ(truncate_utf8(str, 0), ""); +} + +TEST(truncate, nominal3) { + std::string str("foobar 超级杀手死亡检查"); + ASSERT_EQ(truncate_utf8(str, 1000), "foobar 超级杀手死亡检查"); +} + +TEST(truncate, utf8_1) { + std::string str("告警数量"); + for (size_t i = 0; i <= str.size(); i++) { + fmt::string_view tmp(str); + fmt::string_view res(truncate_utf8(tmp, i)); + std::string tmp1(check_string_utf8(std::string(res.data(), res.size()))); + ASSERT_EQ(res, tmp1); + } +} + +TEST(adjust_size_utf8, nominal1) { + std::string str("foobar"); + ASSERT_EQ(fmt::string_view(str.data(), adjust_size_utf8(str, 3)), + fmt::string_view("foo")); +} + +TEST(adjust_size_utf8, nominal2) { + std::string str("foobar"); + ASSERT_EQ(fmt::string_view(str.data(), adjust_size_utf8(str, 0)), ""); +} + +TEST(adjust_size_utf8, nominal3) { + std::string str("foobar 超级杀手死亡检查"); + ASSERT_EQ(fmt::string_view(str.data(), adjust_size_utf8(str, 1000)), str); +} + +TEST(adjust_size_utf8, utf8_1) { + std::string str("告警数量"); + for (size_t i = 0; i <= str.size(); i++) { + fmt::string_view sv(str.data(), adjust_size_utf8(str, i)); + std::string tmp( + check_string_utf8(std::string(sv.data(), 
sv.data() + sv.size()))); + ASSERT_EQ(sv.size(), tmp.size()); + } +} diff --git a/custom-triplets/x64-windows.cmake b/custom-triplets/x64-windows.cmake new file mode 100644 index 00000000000..2d6d206970c --- /dev/null +++ b/custom-triplets/x64-windows.cmake @@ -0,0 +1,6 @@ +set(VCPKG_TARGET_ARCHITECTURE x64) +set(VCPKG_CRT_LINKAGE static) +set(VCPKG_LIBRARY_LINKAGE static) + +#set(VCPKG_CMAKE_SYSTEM_NAME windows) +set(VCPKG_BUILD_TYPE release) diff --git a/engine/CMakeLists.txt b/engine/CMakeLists.txt index fe556d5bbfc..56b06efcddf 100644 --- a/engine/CMakeLists.txt +++ b/engine/CMakeLists.txt @@ -47,7 +47,8 @@ set(PRECOMP_HEADER "${PROJECT_SOURCE_DIR}/precomp_inc/precomp.hh") set(ENGINE_MODULES_DIR "${CMAKE_INSTALL_PREFIX}/lib64/centreon-engine/") -include_directories("${INC_DIR}" "${INC_DIR}/compatibility" "${CMAKE_SOURCE_DIR}/common/inc") +include_directories("${INC_DIR}" "${INC_DIR}/compatibility" + "${CMAKE_SOURCE_DIR}/common/inc") link_directories(${CMAKE_SOURCE_DIR}/build/centreon-clib/) @@ -480,73 +481,140 @@ add_subdirectory(src/macros) add_subdirectory(modules) add_subdirectory(src/retention) add_subdirectory(enginerpc) -include_directories(enginerpc - ${CMAKE_SOURCE_DIR}/common/src - ${CMAKE_SOURCE_DIR}/common/inc) +include_directories(enginerpc ${CMAKE_SOURCE_DIR}/common/src + ${CMAKE_SOURCE_DIR}/common/inc) + +# centenginestats target. +add_executable(centenginestats "${SRC_DIR}/centenginestats.cc") +add_dependencies(centenginestats centreon_clib) +target_link_libraries(centenginestats centreon_clib fmt::fmt) +target_precompile_headers(centenginestats PRIVATE ${PRECOMP_HEADER}) # Library engine target. -add_library(cce_core ${LIBRARY_TYPE} ${FILES}) -add_dependencies(cce_core engine_rpc) -add_dependencies(cce_core centreon_clib) -add_dependencies(cce_core pb_neb_lib) - -target_precompile_headers(cce_core PRIVATE ${PRECOMP_HEADER}) - -# Link target with required libraries. 
-target_link_libraries( - cce_core - ${MATH_LIBRARIES} - ${PTHREAD_LIBRARIES} - ${SOCKET_LIBRARIES} - centreon_clib - engine_legacy_conf - fmt::fmt - spdlog::spdlog) - -# centengine target. -add_executable(centengine "${SRC_DIR}/main.cc") -set_property(TARGET centengine PROPERTY ENABLE_EXPORTS "1") -add_dependencies(centengine centreon_clib) - -# Link centengine with required libraries. -target_link_libraries( - centengine - -L${PROTOBUF_LIB_DIR} - "-rdynamic" - centreon_clib - log_v2 - "-Wl,-whole-archive" - enginerpc - centreon_grpc - centreon_http - -L${Boost_LIBRARY_DIR_RELEASE} - boost_url - cce_core - gRPC::grpc++ - boost_program_options - protobuf - "-Wl,--no-whole-archive" - gRPC::gpr - gRPC::grpc - gRPC::grpc++_alts - absl::any - absl::log - absl::base - absl::bits - crypto - ssl - ${c-ares_LIBS} - z - ryml::ryml - stdc++fs - dl) +if(LEGACY_ENGINE) + add_library(cce_core ${LIBRARY_TYPE} ${FILES}) + add_dependencies(cce_core engine_rpc centreon_clib pb_neb_lib) + + target_precompile_headers(cce_core PRIVATE ${PRECOMP_HEADER}) + + # Link target with required libraries. + target_link_libraries( + cce_core + ${MATH_LIBRARIES} + ${PTHREAD_LIBRARIES} + ${SOCKET_LIBRARIES} + centreon_clib + engine_legacy_conf + fmt::fmt + spdlog::spdlog) + + # centengine target. + + # centengine target. + add_executable(centengine "${SRC_DIR}/main.cc") + set_property(TARGET centengine PROPERTY ENABLE_EXPORTS "1") + add_dependencies(centengine centreon_clib) + + # Link centengine with required libraries. 
+ target_link_libraries( + centengine + -L${PROTOBUF_LIB_DIR} + "-rdynamic" + centreon_clib + log_v2 + "-Wl,-whole-archive" + enginerpc_legacy + centreon_grpc + centreon_http + centreon_common + -L${Boost_LIBRARY_DIR_RELEASE} + boost_url + cce_core + gRPC::grpc++ + boost_program_options + "-Wl,--no-whole-archive" + gRPC::gpr + gRPC::grpc + gRPC::grpc++_alts + absl::any + absl::log + absl::base + absl::bits + crypto + ssl + ${c-ares_LIBS} + z + ryml::ryml + stdc++fs + dl) -# centenginestats target. -add_executable("centenginestats" "${SRC_DIR}/centenginestats.cc") -add_dependencies("centenginestats" centreon_clib) -target_link_libraries("centenginestats" centreon_clib fmt::fmt) -target_precompile_headers(centenginestats PRIVATE ${PRECOMP_HEADER}) +install( + TARGETS centengine centenginestats + DESTINATION "${CMAKE_INSTALL_FULL_SBINDIR}" + COMPONENT "runtime") +else() + add_library(cce_core ${LIBRARY_TYPE} ${FILES}) + add_dependencies(cce_core engine_rpc centreon_clib pb_neb_lib) + + target_precompile_headers(cce_core PRIVATE ${PRECOMP_HEADER}) + + # Link target with required libraries. + target_link_libraries( + cce_core + ${MATH_LIBRARIES} + ${PTHREAD_LIBRARIES} + ${SOCKET_LIBRARIES} + centreon_clib + engine_conf + fmt::fmt + spdlog::spdlog) + + # centengine target. + + add_executable(centengine "${SRC_DIR}/main.cc") + set_property(TARGET centengine PROPERTY ENABLE_EXPORTS "1") + add_dependencies(centengine centreon_clib) + + # Link centengine with required libraries. 
+ target_link_libraries( + centengine + -L${PROTOBUF_LIB_DIR} + "-rdynamic" + centreon_clib + log_v2 + "-Wl,-whole-archive" + enginerpc + centreon_grpc + centreon_http + centreon_common + -L${Boost_LIBRARY_DIR_RELEASE} + boost_url + cce_core + gRPC::grpc++ + boost_program_options + protobuf + "-Wl,--no-whole-archive" + gRPC::gpr + gRPC::grpc + gRPC::grpc++_alts + absl::any + absl::log + absl::base + absl::bits + crypto + ssl + ${c-ares_LIBS} + z + ryml::ryml + stdc++fs + dl) + +install( + TARGETS centengine centenginestats + DESTINATION "${CMAKE_INSTALL_FULL_SBINDIR}" + COMPONENT "runtime") +endif() # Unit tests. add_subdirectory(tests) @@ -556,10 +624,6 @@ add_subdirectory(tests) # # Install rules. -install( - TARGETS "centengine" "centenginestats" - DESTINATION "${CMAKE_INSTALL_FULL_SBINDIR}" - COMPONENT "runtime") # Create directories. if(CREATE_FILES) diff --git a/engine/enginerpc/CMakeLists.txt b/engine/enginerpc/CMakeLists.txt index 80534a5d33e..c5fb6725ba8 100644 --- a/engine/enginerpc/CMakeLists.txt +++ b/engine/enginerpc/CMakeLists.txt @@ -37,9 +37,9 @@ add_custom_command( OUTPUT ${ENGINERPC_DIR}/engine.grpc.pb.cc ${ENGINERPC_DIR}/engine.grpc.pb.h COMMAND ${Protobuf_PROTOC_EXECUTABLE} ARGS - --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} - --proto_path=${ENGINERPC_DIR} --proto_path=${CMAKE_SOURCE_DIR}/common/src - --grpc_out="${ENGINERPC_DIR}" ${ENGINERPC_DIR}/engine.proto + --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} --proto_path=${ENGINERPC_DIR} + --proto_path=${CMAKE_SOURCE_DIR}/common/src --grpc_out="${ENGINERPC_DIR}" + ${ENGINERPC_DIR}/engine.proto DEPENDS ${ENGINERPC_DIR}/engine.proto COMMENT "Generating interface files of the proto file (protobuf)" OUTPUT ${ENGINERPC_DIR}/engine.pb.cc ${ENGINERPC_DIR}/engine.pb.h @@ -47,7 +47,7 @@ add_custom_command( ${Protobuf_PROTOC_EXECUTABLE} ARGS --cpp_out="${ENGINERPC_DIR}" --proto_path=${ENGINERPC_DIR} --proto_path=${CMAKE_SOURCE_DIR}/common/src ${ENGINERPC_DIR}/engine.proto - WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_custom_target( engine_rpc @@ -59,22 +59,43 @@ add_library( ${ENGINERPC_DIR}/engine.grpc.pb.h ${ENGINERPC_DIR}/engine.pb.h) target_link_libraries(cerpc protobuf) -# mod_enginerpc target. -add_library( - ${ENGINERPC} STATIC - # Sources. - engine_impl.cc enginerpc.cc - # Headers. - "${INC_DIR}/engine_impl.hh" "${INC_DIR}/enginerpc.hh") +if(LEGACY_ENGINE) + # mod_enginerpc target. + add_library( + enginerpc_legacy STATIC + # Sources. + engine_impl.cc enginerpc.cc + # Headers. + "${INC_DIR}/engine_impl.hh" "${INC_DIR}/enginerpc.hh") + + add_dependencies(enginerpc_legacy centreon_common) + target_precompile_headers(enginerpc_legacy PRIVATE precomp_inc/precomp.hh) + + # Prettier name. + set_property( + TARGET enginerpc_legacy + PROPERTY PREFIX "" + PROPERTY POSITION_INDEPENDENT_CODE ON) + # Link target with libraries. + target_link_libraries(enginerpc_legacy cerpc centreon_common spdlog::spdlog) +else() + # mod_enginerpc target. + add_library( + ${ENGINERPC} STATIC + # Sources. + engine_impl.cc enginerpc.cc + # Headers. + "${INC_DIR}/engine_impl.hh" "${INC_DIR}/enginerpc.hh") -add_dependencies(${ENGINERPC} centreon_common) + add_dependencies(${ENGINERPC} centreon_common) -target_precompile_headers(${ENGINERPC} PRIVATE precomp_inc/precomp.hh) + target_precompile_headers(${ENGINERPC} PRIVATE precomp_inc/precomp.hh) -# Prettier name. -set_property( - TARGET ${ENGINERPC} - PROPERTY PREFIX "" - PROPERTY POSITION_INDEPENDENT_CODE ON) -# Link target with libraries. -target_link_libraries(${ENGINERPC} cerpc centreon_common spdlog::spdlog) + # Prettier name. + set_property( + TARGET ${ENGINERPC} + PROPERTY PREFIX "" + PROPERTY POSITION_INDEPENDENT_CODE ON) + # Link target with libraries. 
+ target_link_libraries(${ENGINERPC} cerpc centreon_common spdlog::spdlog) +endif() diff --git a/engine/enginerpc/engine_impl.cc b/engine/enginerpc/engine_impl.cc index 8c5e5b89b20..a96819137d6 100644 --- a/engine/enginerpc/engine_impl.cc +++ b/engine/enginerpc/engine_impl.cc @@ -493,8 +493,8 @@ grpc::Status engine_impl::GetHostGroupsCount( * @return Status::OK */ grpc::Status engine_impl::GetServiceDependenciesCount( - grpc::ServerContext* context __attribute__((unused)), - const ::google::protobuf::Empty* request __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], + const ::google::protobuf::Empty* request [[maybe_unused]], GenericValue* response) { auto fn = std::packaged_task([]() -> int32_t { return servicedependency::servicedependencies.size(); @@ -517,8 +517,8 @@ grpc::Status engine_impl::GetServiceDependenciesCount( * @return Status::OK */ grpc::Status engine_impl::GetHostDependenciesCount( - grpc::ServerContext* context __attribute__((unused)), - const ::google::protobuf::Empty* request __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], + const ::google::protobuf::Empty* request [[maybe_unused]], GenericValue* response) { auto fn = std::packaged_task( []() -> int32_t { return hostdependency::hostdependencies.size(); }); @@ -545,10 +545,10 @@ grpc::Status engine_impl::GetHostDependenciesCount( * @return Status::OK */ grpc::Status engine_impl::AddHostComment(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const EngineComment* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -595,10 +595,10 @@ grpc::Status engine_impl::AddHostComment(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::AddServiceComment(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const EngineComment* request, 
CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -654,10 +654,10 @@ grpc::Status engine_impl::AddServiceComment(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::DeleteComment(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const GenericValue* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { uint32_t comment_id = request->value(); std::string err; if (comment_id == 0) @@ -692,10 +692,10 @@ grpc::Status engine_impl::DeleteComment(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::DeleteAllHostComments(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const HostIdentifier* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -727,9 +727,9 @@ grpc::Status engine_impl::DeleteAllHostComments(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::DeleteAllServiceComments( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ServiceIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_service; @@ -761,9 +761,9 @@ grpc::Status engine_impl::DeleteAllServiceComments( * @return Status::OK */ grpc::Status engine_impl::RemoveHostAcknowledgement( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const HostIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = 
std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -801,9 +801,9 @@ grpc::Status engine_impl::RemoveHostAcknowledgement( * @return Status::OK */ grpc::Status engine_impl::RemoveServiceAcknowledgement( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ServiceIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_service; @@ -831,9 +831,9 @@ grpc::Status engine_impl::RemoveServiceAcknowledgement( } grpc::Status engine_impl::AcknowledgementHostProblem( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const EngineAcknowledgement* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -892,9 +892,9 @@ grpc::Status engine_impl::AcknowledgementHostProblem( } grpc::Status engine_impl::AcknowledgementServiceProblem( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const EngineAcknowledgement* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_service; @@ -975,7 +975,7 @@ grpc::Status engine_impl::AcknowledgementServiceProblem( * @return Status::OK */ grpc::Status engine_impl::ScheduleHostDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, CommandSuccess* response) { if (request->host_name().empty() || request->author().empty() || @@ -1047,9 +1047,9 @@ grpc::Status 
engine_impl::ScheduleHostDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleServiceDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty() || request->service_desc().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1121,9 +1121,9 @@ grpc::Status engine_impl::ScheduleServiceDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleHostServicesDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1195,9 +1195,9 @@ grpc::Status engine_impl::ScheduleHostServicesDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleHostGroupHostsDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_group_name().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1268,9 +1268,9 @@ grpc::Status engine_impl::ScheduleHostGroupHostsDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleHostGroupServicesDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - 
CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_group_name().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1351,9 +1351,9 @@ grpc::Status engine_impl::ScheduleHostGroupServicesDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleServiceGroupHostsDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->service_group_name().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1431,9 +1431,9 @@ grpc::Status engine_impl::ScheduleServiceGroupHostsDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleServiceGroupServicesDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->service_group_name().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1501,9 +1501,9 @@ grpc::Status engine_impl::ScheduleServiceGroupServicesDowntime( * @return Status::OK */ grpc::Status engine_impl::ScheduleAndPropagateHostDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty() || request->author().empty() || request->comment_data().empty()) return 
grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1575,9 +1575,9 @@ grpc::Status engine_impl::ScheduleAndPropagateHostDowntime( */ grpc::Status engine_impl::ScheduleAndPropagateTriggeredHostDowntime( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ScheduleDowntimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty() || request->author().empty() || request->comment_data().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1637,10 +1637,10 @@ grpc::Status engine_impl::ScheduleAndPropagateTriggeredHostDowntime( * @return Status::OK */ grpc::Status engine_impl::DeleteDowntime(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const GenericValue* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { uint32_t downtime_id = request->value(); std::string err; auto fn = @@ -1677,11 +1677,11 @@ grpc::Status engine_impl::DeleteDowntime(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::DeleteHostDowntimeFull( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const DowntimeCriterias* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; - auto fn = std::packaged_task([&err, request]() -> int32_t { + auto fn = std::packaged_task([request]() -> int32_t { std::list> dtlist; for (auto it = downtimes::downtime_manager::instance() .get_scheduled_downtimes() @@ -1744,9 +1744,9 @@ grpc::Status engine_impl::DeleteHostDowntimeFull( * @return Status::OK */ grpc::Status engine_impl::DeleteServiceDowntimeFull( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const DowntimeCriterias* request, - CommandSuccess* response 
__attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::list dtlist; @@ -1817,9 +1817,9 @@ grpc::Status engine_impl::DeleteServiceDowntimeFull( * @return Status::OK */ grpc::Status engine_impl::DeleteDowntimeByHostName( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const DowntimeHostIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { /*hostname must be defined to delete the downtime but not others arguments*/ std::string const& host_name = request->host_name(); if (host_name.empty()) @@ -1874,9 +1874,9 @@ grpc::Status engine_impl::DeleteDowntimeByHostName( * @return Status::OK */ grpc::Status engine_impl::DeleteDowntimeByHostGroupName( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const DowntimeHostGroupIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string const& host_group_name = request->host_group_name(); if (host_group_name.empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, @@ -1950,9 +1950,9 @@ grpc::Status engine_impl::DeleteDowntimeByHostGroupName( * @return Status::OK */ grpc::Status engine_impl::DeleteDowntimeByStartTimeComment( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const DowntimeStartTimeIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { time_t start_time; /*hostname must be defined to delete the downtime but not others arguments*/ if (!(request->has_start())) @@ -1999,10 +1999,10 @@ grpc::Status engine_impl::DeleteDowntimeByStartTimeComment( * @return Status::OK */ grpc::Status engine_impl::ScheduleHostCheck(grpc::ServerContext* context - 
__attribute__((unused)), + [[maybe_unused]], const HostCheckIdentifier* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { if (request->host_name().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "host_name must not be empty"); @@ -2046,9 +2046,9 @@ grpc::Status engine_impl::ScheduleHostCheck(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::ScheduleHostServiceCheck( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const HostCheckIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "host_name must not be empty"); @@ -2100,9 +2100,9 @@ grpc::Status engine_impl::ScheduleHostServiceCheck( * @return Status::OK */ grpc::Status engine_impl::ScheduleServiceCheck( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ServiceCheckIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "host_name must not be empty"); @@ -2152,10 +2152,10 @@ grpc::Status engine_impl::ScheduleServiceCheck( * @return Status::OK */ grpc::Status engine_impl::SignalProcess(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const EngineSignalProcess* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::unique_ptr evt; @@ -2198,9 +2198,9 @@ grpc::Status engine_impl::SignalProcess(grpc::ServerContext* context * @return Status::OK */ grpc::Status engine_impl::DelayHostNotification( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* 
context [[maybe_unused]], const HostDelayIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -2255,9 +2255,9 @@ grpc::Status engine_impl::DelayHostNotification( * @return Status::OK */ grpc::Status engine_impl::DelayServiceNotification( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ServiceDelayIdentifier* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_service; @@ -2308,10 +2308,10 @@ grpc::Status engine_impl::DelayServiceNotification( } grpc::Status engine_impl::ChangeHostObjectIntVar(grpc::ServerContext* context - __attribute__((unused)), + [[maybe_unused]], const ChangeObjectInt* request, CommandSuccess* response - __attribute__((unused))) { + [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -2419,9 +2419,9 @@ grpc::Status engine_impl::ChangeHostObjectIntVar(grpc::ServerContext* context } grpc::Status engine_impl::ChangeServiceObjectIntVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeObjectInt* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_service; @@ -2535,9 +2535,9 @@ grpc::Status engine_impl::ChangeServiceObjectIntVar( } grpc::Status engine_impl::ChangeContactObjectIntVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeContactObjectInt* request, - CommandSuccess* response 
__attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_contact; @@ -2595,9 +2595,9 @@ grpc::Status engine_impl::ChangeContactObjectIntVar( } grpc::Status engine_impl::ChangeHostObjectCharVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeObjectChar* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_host; @@ -2646,7 +2646,11 @@ grpc::Status engine_impl::ChangeHostObjectCharVar( /* update the variable */ switch (request->mode()) { case ChangeObjectChar_Mode_CHANGE_GLOBAL_EVENT_HANDLER: +#ifdef LEGACY_CONF config->global_host_event_handler(request->charval()); +#else + pb_config.set_global_host_event_handler(request->charval()); +#endif global_host_event_handler_ptr = cmd_found->second.get(); attr = MODATTR_EVENT_HANDLER_COMMAND; /* set the modified host attribute */ @@ -2711,9 +2715,9 @@ grpc::Status engine_impl::ChangeHostObjectCharVar( } grpc::Status engine_impl::ChangeServiceObjectCharVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeObjectChar* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { std::string err; auto fn = std::packaged_task([&err, request]() -> int32_t { std::shared_ptr temp_service; @@ -2764,7 +2768,11 @@ grpc::Status engine_impl::ChangeServiceObjectCharVar( /* update the variable */ if (request->mode() == ChangeObjectChar_Mode_CHANGE_GLOBAL_EVENT_HANDLER) { +#ifdef LEGACY_CONF config->global_service_event_handler(request->charval()); +#else + pb_config.set_global_service_event_handler(request->charval()); +#endif global_service_event_handler_ptr = cmd_found->second.get(); attr 
= MODATTR_EVENT_HANDLER_COMMAND; } else if (request->mode() == ChangeObjectChar_Mode_CHANGE_EVENT_HANDLER) { @@ -2824,9 +2832,9 @@ grpc::Status engine_impl::ChangeServiceObjectCharVar( } grpc::Status engine_impl::ChangeContactObjectCharVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeContactObjectChar* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->contact().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "contact must not be empty"); @@ -2899,9 +2907,9 @@ grpc::Status engine_impl::ChangeContactObjectCharVar( } grpc::Status engine_impl::ChangeHostObjectCustomVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeObjectCustomVar* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "host_name must not be empty"); @@ -2940,9 +2948,9 @@ grpc::Status engine_impl::ChangeHostObjectCustomVar( } grpc::Status engine_impl::ChangeServiceObjectCustomVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeObjectCustomVar* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if (request->host_name().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "host_name must not be empty"); @@ -2985,9 +2993,9 @@ grpc::Status engine_impl::ChangeServiceObjectCustomVar( } grpc::Status engine_impl::ChangeContactObjectCustomVar( - grpc::ServerContext* context __attribute__((unused)), + grpc::ServerContext* context [[maybe_unused]], const ChangeObjectCustomVar* request, - CommandSuccess* response __attribute__((unused))) { + CommandSuccess* response [[maybe_unused]]) { if 
(request->contact().empty()) return grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "contact must not be empty"); @@ -3034,9 +3042,9 @@ grpc::Status engine_impl::ChangeContactObjectCustomVar( * @return Status::OK */ grpc::Status engine_impl::ShutdownProgram( - grpc::ServerContext* context __attribute__((unused)), - const ::google::protobuf::Empty* request __attribute__((unused)), - ::google::protobuf::Empty* response __attribute__((unused))) { + grpc::ServerContext* context [[maybe_unused]], + const ::google::protobuf::Empty* request [[maybe_unused]], + ::google::protobuf::Empty* response [[maybe_unused]]) { auto fn = std::packaged_task([]() -> int32_t { exit(0); return 0; @@ -3070,9 +3078,9 @@ grpc::Status engine_impl::ShutdownProgram( } ::grpc::Status engine_impl::EnableHostAndChildNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::HostIdentifier* request, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN commands::processing::wrapper_enable_host_and_child_notifications( host_info.first.get()); @@ -3080,9 +3088,9 @@ ::grpc::Status engine_impl::EnableHostAndChildNotifications( } ::grpc::Status engine_impl::DisableHostAndChildNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::HostIdentifier* request, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN commands::processing::wrapper_disable_host_and_child_notifications( host_info.first.get()); @@ -3090,61 +3098,61 @@ ::grpc::Status engine_impl::DisableHostAndChildNotifications( } ::grpc::Status engine_impl::DisableHostNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::HostIdentifier* request, - 
::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN disable_host_notifications(host_info.first.get()); return grpc::Status::OK; } ::grpc::Status engine_impl::EnableHostNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::HostIdentifier* request, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { HOST_METHOD_BEGIN enable_host_notifications(host_info.first.get()); return grpc::Status::OK; } ::grpc::Status engine_impl::DisableNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::google::protobuf::Empty*, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { disable_all_notifications(); return grpc::Status::OK; } ::grpc::Status engine_impl::EnableNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::google::protobuf::Empty*, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { enable_all_notifications(); return grpc::Status::OK; } ::grpc::Status engine_impl::DisableServiceNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::ServiceIdentifier* request, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { SERV_METHOD_BEGIN disable_service_notifications(serv_info.first.get()); return grpc::Status::OK; } ::grpc::Status engine_impl::EnableServiceNotifications( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::ServiceIdentifier* request, - 
::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { SERV_METHOD_BEGIN enable_service_notifications(serv_info.first.get()); return grpc::Status::OK; } ::grpc::Status engine_impl::ChangeAnomalyDetectionSensitivity( - ::grpc::ServerContext* context, + ::grpc::ServerContext* context [[maybe_unused]], const ::com::centreon::engine::ChangeServiceNumber* serv_and_value, - ::com::centreon::engine::CommandSuccess* response) { + ::com::centreon::engine::CommandSuccess* response [[maybe_unused]]) { SPDLOG_LOGGER_DEBUG(external_command_logger, "{}({})", __FUNCTION__, serv_and_value->serv()); auto serv_info = get_serv(serv_and_value->serv()); @@ -3352,9 +3360,9 @@ grpc::Status engine_impl::GetProcessStats( * @return grpc::Status */ grpc::Status engine_impl::SendBench( - grpc::ServerContext* context, + grpc::ServerContext* context [[maybe_unused]], const com::centreon::engine::BenchParam* request, - google::protobuf::Empty* response) { + google::protobuf::Empty* response [[maybe_unused]]) { std::chrono::system_clock::time_point client_ts = std::chrono::system_clock::time_point::min(); diff --git a/engine/inc/com/centreon/engine/broker.hh b/engine/inc/com/centreon/engine/broker.hh index d958522d091..843c9589477 100644 --- a/engine/inc/com/centreon/engine/broker.hh +++ b/engine/inc/com/centreon/engine/broker.hh @@ -425,8 +425,8 @@ int broker_contact_notification_method_data( void broker_contact_status(int type, com::centreon::engine::contact* cntct); void broker_custom_variable(int type, void* data, - char const* varname, - char const* varvalue, + std::string_view&& varname, + std::string_view&& varvalue, struct timeval const* timestamp); void broker_downtime_data(int type, int attr, diff --git a/engine/inc/com/centreon/engine/check_result.hh b/engine/inc/com/centreon/engine/check_result.hh index 9d02da8ef3b..214fd08a82c 100644 --- a/engine/inc/com/centreon/engine/check_result.hh +++ 
b/engine/inc/com/centreon/engine/check_result.hh @@ -51,8 +51,6 @@ class check_result { return _object_check_type; } void set_object_check_type(enum check_source object_check_type); - uint64_t get_command_id() const { return _command_id; } - void set_command_id(uint64_t command_id) { _command_id = command_id; } inline notifier* get_notifier() { return _notifier; } void set_notifier(notifier* notifier); @@ -81,7 +79,6 @@ class check_result { private: enum check_source _object_check_type; // is this a service or a host check? - uint64_t _command_id; notifier* _notifier; // was this an active or passive service check? enum checkable::check_type _check_type; diff --git a/engine/inc/com/centreon/engine/checks/checker.hh b/engine/inc/com/centreon/engine/checks/checker.hh index 964a272f9cd..5419a86a248 100644 --- a/engine/inc/com/centreon/engine/checks/checker.hh +++ b/engine/inc/com/centreon/engine/checks/checker.hh @@ -23,9 +23,8 @@ #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/commands/command.hh" -namespace com::centreon::engine { +namespace com::centreon::engine::checks { -namespace checks { /** * @class checks check_result.hh * @brief Run object and reap the result. @@ -57,6 +56,9 @@ class checker : public commands::command_listener { void wait_completion(e_completion_filter filter = e_completion_filter::all); + template + void inspect_reap_partial(queue_handler&& handler) const; + private: checker(bool used_by_test); checker(checker const& right); @@ -66,7 +68,7 @@ class checker : public commands::command_listener { host::host_state _execute_sync(host* hst); /* A mutex to protect access on _waiting_check_result and _to_reap_partial */ - std::mutex _mut_reap; + mutable std::mutex _mut_reap; /* * Here is the list of prepared check results but with a command being * running. 
When the command will be finished, each check result is get back @@ -92,8 +94,19 @@ class checker : public commands::command_listener { std::condition_variable _finish_cond; bool _finished; }; -} // namespace checks +/** + * @brief allow to inspect _to_reap_partial + * + * @tparam queue_handler + * @param handler must have () (const std::deque &) + */ +template +void checker::inspect_reap_partial(queue_handler&& handler) const { + std::lock_guard lock(_mut_reap); + handler(_to_reap_partial); } +} // namespace com::centreon::engine::checks + #endif // !CCE_CHECKS_CHECKER_HH diff --git a/engine/inc/com/centreon/engine/commands/otel_connector.hh b/engine/inc/com/centreon/engine/commands/otel_connector.hh index 7a21e4c4589..bb0925efe7d 100644 --- a/engine/inc/com/centreon/engine/commands/otel_connector.hh +++ b/engine/inc/com/centreon/engine/commands/otel_connector.hh @@ -31,8 +31,7 @@ namespace com::centreon::engine::commands { * open telemetry request run command line configure converter who converts * data_points to result */ -class otel_connector : public command, - public std::enable_shared_from_this { +class otel_connector : public command { otel::host_serv_list::pointer _host_serv_list; public: @@ -43,16 +42,17 @@ class otel_connector : public command, static otel_connector_container _commands; std::shared_ptr _extractor; - std::shared_ptr _conv_conf; + std::shared_ptr _check_result_builder; std::shared_ptr _logger; void init(); public: - static void create(const std::string& connector_name, - const std::string& cmd_line, - commands::command_listener* listener); + static std::shared_ptr create( + const std::string& connector_name, + const std::string& cmd_line, + commands::command_listener* listener); static bool remove(const std::string& connector_name); @@ -62,6 +62,10 @@ class otel_connector : public command, static std::shared_ptr get_otel_connector( const std::string& connector_name); + static std::shared_ptr get_otel_connector_from_host_serv( + const 
std::string_view& host, + const std::string_view& serv); + static void clear(); static void init_all(); @@ -76,6 +80,11 @@ class otel_connector : public command, void update(const std::string& cmd_line); + void process_data_pts( + const std::string_view& host, + const std::string_view& serv, + const modules::opentelemetry::metric_to_datapoints& data_pts); + virtual uint64_t run(const std::string& processed_cmd, nagios_macros& macros, uint32_t timeout, diff --git a/engine/inc/com/centreon/engine/commands/otel_interface.hh b/engine/inc/com/centreon/engine/commands/otel_interface.hh index 7c26706c86e..0e6a6b18704 100644 --- a/engine/inc/com/centreon/engine/commands/otel_interface.hh +++ b/engine/inc/com/centreon/engine/commands/otel_interface.hh @@ -22,6 +22,10 @@ #include "com/centreon/engine/commands/result.hh" #include "com/centreon/engine/macros/defines.hh" +namespace com::centreon::engine::modules::opentelemetry { +class metric_to_datapoints; +} + namespace com::centreon::engine::commands::otel { /** @@ -66,14 +70,34 @@ class host_serv_list { const std::string& service_description); void remove(const std::string& host, const std::string& service_description); - bool contains(const std::string& host, - const std::string& service_description) const; + template + bool contains(const string_type& host, + const string_type& service_description) const; template host_serv_metric match(const host_set& hosts, const service_set& services) const; }; +/** + * @brief test if a host serv pair is contained in list + * + * @param host + * @param service_description + * @return true found + * @return false not found + */ +template +bool host_serv_list::contains(const string_type& host, + const string_type& service_description) const { + absl::ReaderMutexLock l(&_data_m); + auto host_search = _data.find(host); + if (host_search != _data.end()) { + return host_search->second.contains(service_description); + } + return false; +} + template host_serv_metric 
host_serv_list::match(const host_set& hosts, const service_set& services) const { @@ -111,13 +135,15 @@ class host_serv_extractor { virtual ~host_serv_extractor() = default; }; -class check_result_builder_config { +class otl_check_result_builder_base { public: - virtual ~check_result_builder_config() = default; + virtual ~otl_check_result_builder_base() = default; + virtual void process_data_pts( + const std::string_view& host, + const std::string_view& serv, + const modules::opentelemetry::metric_to_datapoints& data_pts) = 0; }; -using result_callback = std::function; - class open_telemetry_base; /** @@ -139,17 +165,8 @@ class open_telemetry_base const std::string& cmdline, const host_serv_list::pointer& host_serv_list) = 0; - virtual std::shared_ptr - create_check_result_builder_config(const std::string& cmd_line) = 0; - - virtual bool check( - const std::string& processed_cmd, - const std::shared_ptr& conv_conf, - uint64_t command_id, - nagios_macros& macros, - uint32_t timeout, - commands::result& res, - result_callback&& handler) = 0; + virtual std::shared_ptr + create_check_result_builder(const std::string& cmdline) = 0; }; }; // namespace com::centreon::engine::commands::otel diff --git a/engine/inc/com/centreon/engine/configuration/applier/anomalydetection.hh b/engine/inc/com/centreon/engine/configuration/applier/anomalydetection.hh index 7e8df53d199..e7d9ea80a2e 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/anomalydetection.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/anomalydetection.hh @@ -20,9 +20,13 @@ #define CCE_CONFIGURATION_APPLIER_ANOMALYDETECTION_HH #include "com/centreon/engine/configuration/applier/state.hh" -namespace com::centreon::engine { +#ifndef LEGACY_CONF +#include "common/engine_conf/anomalydetection_helper.hh" +#include "common/engine_conf/state.pb.h" +#endif + +namespace com::centreon::engine::configuration { -namespace configuration { // Forward declarations. 
class anomalydetection; class state; @@ -39,16 +43,24 @@ class anomalydetection { anomalydetection(const anomalydetection&) = delete; ~anomalydetection() noexcept = default; anomalydetection& operator=(const anomalydetection&) = delete; +#ifdef LEGACY_CONF void add_object(configuration::anomalydetection const& obj); void modify_object(configuration::anomalydetection const& obj); void remove_object(configuration::anomalydetection const& obj); void expand_objects(configuration::state& s); void resolve_object(configuration::anomalydetection const& obj, error_cnt& err); +#else + void add_object(const configuration::Anomalydetection& obj); + void modify_object(configuration::Anomalydetection* old_obj, + const configuration::Anomalydetection& new_obj); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Anomalydetection& obj, + error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_ANOMALYDETECTION_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/command.hh b/engine/inc/com/centreon/engine/configuration/applier/command.hh index e55cfdde7bf..056a673a7c3 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/command.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/command.hh @@ -20,6 +20,10 @@ #define CCE_CONFIGURATION_APPLIER_COMMAND_HH #include "com/centreon/engine/configuration/applier/state.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/command_helper.hh" +#endif + namespace com::centreon::engine { // Forward declarations. 
@@ -35,18 +39,28 @@ class state; namespace applier { class command { public: - command(); + command() = default; command(command const&) = delete; command& operator=(command const&) = delete; - ~command() noexcept; + ~command() noexcept = default; +#ifdef LEGACY_CONF void add_object(configuration::command const& obj); void expand_objects(configuration::state& s); void modify_object(configuration::command const& obj); void remove_object(configuration::command const& obj); void resolve_object(configuration::command const& obj, error_cnt& err); +#else + void add_object(const configuration::Command& obj); + void expand_objects(configuration::State& s); + void modify_object(configuration::Command* to_modify, + const configuration::Command& new_obj); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Command& obj, + error_cnt& err); +#endif }; } // namespace applier } // namespace configuration diff --git a/engine/inc/com/centreon/engine/configuration/applier/connector.hh b/engine/inc/com/centreon/engine/configuration/applier/connector.hh index ad82485acaf..95d06ff7638 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/connector.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/connector.hh @@ -20,6 +20,10 @@ #define CCE_CONFIGURATION_APPLIER_CONNECTOR_HH #include "com/centreon/engine/configuration/applier/state.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/connector_helper.hh" +#endif + namespace com::centreon::engine { namespace configuration { @@ -40,11 +44,21 @@ class connector { ~connector() noexcept = default; connector(const connector&) = delete; connector& operator=(const connector&) = delete; +#ifdef LEGACY_CONF void add_object(configuration::connector const& obj); void modify_object(const configuration::connector& obj); void remove_object(configuration::connector const& obj); void expand_objects(configuration::state& s); void resolve_object(configuration::connector const& obj, error_cnt& err); 
+#else + void add_object(const configuration::Connector& obj); + void modify_object(configuration::Connector* to_modify, + const configuration::Connector& new_obj); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Connector& obj, + error_cnt& err); +#endif }; } // namespace applier } // namespace configuration diff --git a/engine/inc/com/centreon/engine/configuration/applier/contact.hh b/engine/inc/com/centreon/engine/configuration/applier/contact.hh index 3ece9e9ac74..43d28250ecd 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/contact.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/contact.hh @@ -20,12 +20,16 @@ #define CCE_CONFIGURATION_APPLIER_CONTACT_HH #include "com/centreon/engine/configuration/applier/state.hh" -namespace com::centreon::engine { +#ifndef LEGACY_CONF +#include "common/engine_conf/contact_helper.hh" +#endif -namespace configuration { +namespace com::centreon::engine::configuration { // Forward declarations. 
+#ifdef LEGACY_CONF class contact; class state; +#endif namespace applier { class contact { @@ -42,15 +46,22 @@ class contact { contact(contact const&) = delete; contact& operator=(const contact&) = delete; +#ifdef LEGACY_CONF void add_object(const configuration::contact& obj); void modify_object(const configuration::contact& obj); void remove_object(const configuration::contact& obj); void expand_objects(configuration::state& s); void resolve_object(const configuration::contact& obj, error_cnt& err); +#else + void add_object(const configuration::Contact& obj); + void modify_object(configuration::Contact* to_modify, + const configuration::Contact& new_object); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Contact& obj, error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_CONTACT_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/contactgroup.hh b/engine/inc/com/centreon/engine/configuration/applier/contactgroup.hh index a17825bdb32..d264b9dd06e 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/contactgroup.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/contactgroup.hh @@ -20,16 +20,20 @@ #define CCE_CONFIGURATION_APPLIER_CONTACTGROUP_HH #include "com/centreon/engine/configuration/applier/state.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/contactgroup.hh" +#else +#include "common/engine_conf/contactgroup_helper.hh" +#endif -namespace com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { // Forward declarations. 
class state; namespace applier { class contactgroup { +#ifdef LEGACY_CONF typedef std::map resolved_set; @@ -38,6 +42,11 @@ class contactgroup { void _resolve_members(configuration::state& s, configuration::contactgroup const& obj); +#else + void _resolve_members(configuration::State& s, + configuration::Contactgroup & obj, + absl::flat_hash_set& resolved); +#endif public: /** @@ -50,15 +59,23 @@ class contactgroup { ~contactgroup() noexcept = default; contactgroup(const contactgroup&) = delete; contactgroup& operator=(const contactgroup&) = delete; +#ifdef LEGACY_CONF void add_object(configuration::contactgroup const& obj); void modify_object(configuration::contactgroup const& obj); void remove_object(configuration::contactgroup const& obj); void expand_objects(configuration::state& s); void resolve_object(configuration::contactgroup const& obj, error_cnt& err); +#else + void add_object(const configuration::Contactgroup& obj); + void modify_object(configuration::Contactgroup* to_modify, + const configuration::Contactgroup& new_object); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Contactgroup& obj, error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_CONTACTGROUP_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/globals.hh b/engine/inc/com/centreon/engine/configuration/applier/globals.hh index 1ee1d7eb431..6bcce7f005f 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/globals.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/globals.hh @@ -19,11 +19,14 @@ #ifndef CCE_CONFIGURATION_APPLIER_GLOBALS_HH #define CCE_CONFIGURATION_APPLIER_GLOBALS_HH +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/state_helper.hh" +#endif -namespace 
com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { namespace applier { /** * @class globals globals.hh @@ -42,13 +45,16 @@ class globals { void _set_global(char*& property, std::string const& value); public: +#ifdef LEGACY_CONF void apply(configuration::state& globals); +#else + void apply(configuration::State& globals); +#endif static globals& instance(); void clear(); }; } // namespace applier -} // namespace configuration -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_GLOBALS_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/host.hh b/engine/inc/com/centreon/engine/configuration/applier/host.hh index 51f0f5d1667..542831e671d 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/host.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/host.hh @@ -20,29 +20,41 @@ #define CCE_CONFIGURATION_APPLIER_HOST_HH #include "com/centreon/engine/configuration/applier/state.hh" -namespace com::centreon::engine { +#ifndef LEGACY_CONF +#include "common/engine_conf/host_helper.hh" +#endif -namespace configuration { +namespace com::centreon::engine::configuration { + +#ifdef LEGACY_CONF // Forward declarations. 
class host; class state; +#endif namespace applier { class host { public: - host(); - host(host const& right) = delete; - ~host() throw(); - host& operator=(host const& right) = delete; - void add_object(configuration::host const& obj); + host() = default; + host(host const&) = delete; + ~host() noexcept = default; + host& operator=(host const&) = delete; +#ifdef LEGACY_CONF + void add_object(const configuration::host& obj); void expand_objects(configuration::state& s); void modify_object(configuration::host const& obj); void remove_object(configuration::host const& obj); void resolve_object(configuration::host const& obj, error_cnt& err); +#else + void add_object(const configuration::Host& obj); + void expand_objects(configuration::State& s); + void modify_object(configuration::Host* old_obj, + const configuration::Host& new_obj); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Host& obj, error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_HOST_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/hostdependency.hh b/engine/inc/com/centreon/engine/configuration/applier/hostdependency.hh index 5c4f0278a96..a2f8092096f 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/hostdependency.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/hostdependency.hh @@ -21,6 +21,10 @@ #include "com/centreon/engine/configuration/applier/state.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/hostdependency_helper.hh" +#endif + namespace com::centreon::engine { namespace configuration { @@ -30,21 +34,32 @@ class state; namespace applier { class hostdependency { +#ifdef LEGACY_CONF void _expand_hosts(std::set const& hosts, std::set const& hostgroups, configuration::state& s, std::set& expanded); +#endif public: hostdependency() = default; 
hostdependency(const hostdependency&) = delete; ~hostdependency() noexcept = default; hostdependency& operator=(const hostdependency&) = delete; +#ifdef LEGACY_CONF void add_object(configuration::hostdependency const& obj); void modify_object(configuration::hostdependency const& obj); void remove_object(configuration::hostdependency const& obj); void expand_objects(configuration::state& s); void resolve_object(configuration::hostdependency const& obj, error_cnt& err); +#else + void add_object(const configuration::Hostdependency& obj); + void modify_object(configuration::Hostdependency* to_modify, + const configuration::Hostdependency& new_obj); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Hostdependency& obj, error_cnt& err); +#endif }; } // namespace applier } // namespace configuration diff --git a/engine/inc/com/centreon/engine/configuration/applier/hostescalation.hh b/engine/inc/com/centreon/engine/configuration/applier/hostescalation.hh index 36bc1f51f0f..e402c6140a8 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/hostescalation.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/hostescalation.hh @@ -19,6 +19,10 @@ #define CCE_CONFIGURATION_APPLIER_HOSTESCALATION_HH #include "com/centreon/engine/configuration/applier/state.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/hostescalation_helper.hh" +#endif + namespace com::centreon::engine { namespace configuration { @@ -40,11 +44,20 @@ class hostescalation { ~hostescalation() noexcept = default; hostescalation(hostescalation const&) = delete; hostescalation& operator=(hostescalation const&) = delete; +#ifdef LEGACY_CONF void add_object(const configuration::hostescalation& obj); void modify_object(configuration::hostescalation const& obj); void remove_object(configuration::hostescalation const& obj); void expand_objects(configuration::state& s); void resolve_object(configuration::hostescalation 
const& obj, error_cnt& err); +#else + void add_object(const configuration::Hostescalation& obj); + void modify_object(configuration::Hostescalation* old_obj, + const configuration::Hostescalation& new_obj); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Hostescalation& obj, error_cnt& err); +#endif }; } // namespace applier } // namespace configuration diff --git a/engine/inc/com/centreon/engine/configuration/applier/hostgroup.hh b/engine/inc/com/centreon/engine/configuration/applier/hostgroup.hh index 75815f1bff6..b5a9307ad54 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/hostgroup.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/hostgroup.hh @@ -20,28 +20,41 @@ #define CCE_CONFIGURATION_APPLIER_HOSTGROUP_HH #include "com/centreon/engine/configuration/applier/state.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/hostgroup.hh" +#else +#include "common/engine_conf/hostgroup_helper.hh" +#endif -namespace com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { // Forward declarations. 
class state; namespace applier { class hostgroup { public: - hostgroup(); - hostgroup(hostgroup const& right); - ~hostgroup() throw(); + hostgroup() = default; + hostgroup(hostgroup const&) = delete; + ~hostgroup() noexcept = default; hostgroup& operator=(hostgroup const& right) = delete; +#ifdef LEGACY_CONF void add_object(configuration::hostgroup const& obj); void expand_objects(configuration::state& s); void modify_object(configuration::hostgroup const& obj); void remove_object(configuration::hostgroup const& obj); void resolve_object(configuration::hostgroup const& obj, error_cnt& err); +#else + void add_object(const configuration::Hostgroup& obj); + void expand_objects(configuration::State& s); + void modify_object(configuration::Hostgroup* old_obj, + const configuration::Hostgroup& new_obj); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Hostgroup& obj, error_cnt& err); +#endif private: +#ifdef LEGACY_CONF typedef std::map resolved_set; @@ -49,10 +62,9 @@ class hostgroup { configuration::hostgroup const& obj); resolved_set _resolved; +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_HOSTGROUP_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/logging.hh b/engine/inc/com/centreon/engine/configuration/applier/logging.hh index ff0c90a2702..cf64a97e89b 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/logging.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/logging.hh @@ -22,7 +22,11 @@ #include "com/centreon/logging/file.hh" #include "com/centreon/logging/syslogger.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/state_helper.hh" +#endif namespace com::centreon::engine { @@ -36,21 +40,34 @@ namespace applier { */ class logging { public: +#ifdef LEGACY_CONF void 
apply(configuration::state& config); +#else + void apply(configuration::State& config); +#endif static logging& instance(); void clear(); private: logging(); +#ifdef LEGACY_CONF logging(configuration::state& config); +#else + logging(configuration::State& config); +#endif logging(logging const&); ~logging() throw(); logging& operator=(logging const&); void _add_stdout(); void _add_stderr(); void _add_syslog(); +#ifdef LEGACY_CONF void _add_log_file(configuration::state const& config); void _add_debug(configuration::state const& config); +#else + void _add_log_file(configuration::State const& config); + void _add_debug(configuration::State const& config); +#endif void _del_syslog(); void _del_log_file(); void _del_debug(); diff --git a/engine/inc/com/centreon/engine/configuration/applier/macros.hh b/engine/inc/com/centreon/engine/configuration/applier/macros.hh index fb8462b034f..a47919028d9 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/macros.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/macros.hh @@ -19,7 +19,11 @@ #ifndef CCE_CONFIGURATION_APPLIER_MACROS_HH #define CCE_CONFIGURATION_APPLIER_MACROS_HH +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/state_helper.hh" +#endif // Forward declaration. 
class nagios_macros; @@ -36,7 +40,11 @@ namespace applier { */ class macros { public: +#ifdef LEGACY_CONF void apply(configuration::state& config); +#else + void apply(configuration::State& config); +#endif static macros& instance(); void clear(); diff --git a/engine/inc/com/centreon/engine/configuration/applier/pb_difference.hh b/engine/inc/com/centreon/engine/configuration/applier/pb_difference.hh new file mode 100644 index 00000000000..457c9ed3e13 --- /dev/null +++ b/engine/inc/com/centreon/engine/configuration/applier/pb_difference.hh @@ -0,0 +1,153 @@ +/** + * Copyright 2023 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCE_CONFIGURATION_APPLIER_PB_DIFFERENCE_HH +#define CCE_CONFIGURATION_APPLIER_PB_DIFFERENCE_HH + +#include +#include +#include + +#include + +namespace com::centreon::engine { + +using MessageDifferencer = ::google::protobuf::util::MessageDifferencer; + +namespace configuration::applier { +/** + * @brief This class computes the difference between two "lists" of Protobuf + * configuration objects. They are not really lists but RepeatedPtrFields or + * similar things. + * + * When the class is instantiated, we can then call the parse() method to + * compare two lists, for example the older one and the new one. + * + * The result is composed of three attributes: + * * _added : objects that are in the new list and not in the old one. 
+ * * _deleted: objects in the old list but not in the new one. + * * _modified: objects that changed from the old list to the new one. + * + * @tparam T The Protobuf type to compare with pb_difference, for example + * configuration::Host, configuration::Service, etc... + * @tparam Key The key type used to store these objects, for example an + * std::string, an integer, etc... + * @tparam Container The container type used to store the objects, by default a + * RepeatedPtrField. + */ +template > +class pb_difference { + // What are the new objects + std::vector _added; + // What index to delete + std::vector> _deleted; + // A vector of pairs, the pointer to the old one and the new one. + std::vector> _modified; + + public: + /** + * @brief Default constructor. + */ + pb_difference() = default; + + /** + * @brief Destructor. + */ + ~pb_difference() noexcept = default; + pb_difference(const pb_difference&) = delete; + pb_difference& operator=(const pb_difference&) = delete; + const std::vector& added() const noexcept { return _added; } + const std::vector>& deleted() const noexcept { + return _deleted; + } + const std::vector>& modified() const noexcept { + return _modified; + } + + /** + * @brief The main function of pb_difference. It takes two iterators of the + * old list, two iterators of the new one, and also a function giving the key + * to recognize it. The function usually is connector_name(), + * timeperiod_name(),... but it can also be a lambda returning a pair of IDs + * (for example in the case of services). + * The key returned by this last function is important since two different + * objects with the same key represent a modification. + * + * @tparam Function + * @param old_list the container of the current object configurations, + * @param new_list the container of the new object configurations, + * @param f The function returning the key of each object. 
+ */ + template + void parse(Container& old_list, const Container& new_list, Function f) { + absl::flat_hash_map keys_values; + for (auto it = old_list.begin(); it != old_list.end(); ++it) { + T& item = *it; + static_assert(std::is_same::value || + std::is_same::value || + std::is_same::value, + "Invalid key function: it must match Key"); + keys_values[f(item)] = &item; + } + + absl::flat_hash_set new_keys; + for (auto it = new_list.begin(); it != new_list.end(); ++it) { + const T& item = *it; + auto inserted = new_keys.insert(f(item)); + if (!keys_values.contains(*inserted.first)) { + // New object to add + _added.push_back(&item); + } else { + // Object to modify or equal + if (!MessageDifferencer::Equals(item, *keys_values[f(item)])) { + // There are changes in this object + _modified.push_back(std::make_pair(keys_values[f(item)], &item)); + } + } + } + + ssize_t i = 0; + for (auto it = old_list.begin(); it != old_list.end(); ++it) { + const T& item = *it; + if (!new_keys.contains(f(item))) + _deleted.push_back({i, f(item)}); + ++i; + } + } + + void parse(Container& old_list, + const Container& new_list, + Key (T::*key)() const) { + std::function f = key; + parse>(old_list, new_list, f); + } + + void parse(Container& old_list, + const Container& new_list, + const Key& (T::*key)() const) { + std::function f = key; + parse>(old_list, new_list, f); + } +}; +} // namespace configuration::applier + +} // namespace com::centreon::engine + +#endif // !CCE_CONFIGURATION_APPLIER_PB_DIFFERENCE_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/scheduler.hh b/engine/inc/com/centreon/engine/configuration/applier/scheduler.hh index 87f4ad37c19..c7d61ab0d59 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/scheduler.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/scheduler.hh @@ -20,7 +20,12 @@ #include "com/centreon/engine/configuration/applier/difference.hh" #include "com/centreon/engine/exceptions/error.hh" +#ifdef 
LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "com/centreon/engine/configuration/applier/pb_difference.hh" +#include "common/engine_conf/state.pb.h" +#endif // Forward declaration. namespace com::centreon::engine { @@ -28,8 +33,7 @@ class host; class service; class timed_event; -namespace configuration { -namespace applier { +namespace configuration::applier { /** * @class scheduler scheduler.hh * @brief Simple configuration applier for scheduler class. @@ -38,10 +42,20 @@ namespace applier { */ class scheduler { public: +#ifdef LEGACY_CONF void apply(configuration::state& config, difference const& diff_hosts, difference const& diff_services, difference const& diff_anomalydetections); +#else + void apply(configuration::State& config, + const pb_difference& diff_hosts, + const pb_difference >& diff_services, + const pb_difference >& + diff_anomalydetections); +#endif static scheduler& instance(); void clear(); void remove_host(uint64_t host_id); @@ -52,6 +66,7 @@ class scheduler { scheduler(scheduler const&) = delete; ~scheduler() noexcept; scheduler& operator=(scheduler const&) = delete; +#ifdef LEGACY_CONF void _apply_misc_event(); void _calculate_host_inter_check_delay( configuration::state::inter_check_delay method); @@ -60,11 +75,22 @@ class scheduler { configuration::state::inter_check_delay method); void _calculate_service_interleave_factor( configuration::state::interleave_factor method); +#else + void _apply_misc_event(); + void _calculate_host_inter_check_delay( + const configuration::InterCheckDelay& method); + void _calculate_host_scheduling_params(); + void _calculate_service_inter_check_delay( + const configuration::InterCheckDelay& method); + void _calculate_service_interleave_factor( + const configuration::InterleaveFactor& method); +#endif void _calculate_service_scheduling_params(); timed_event* _create_misc_event(int type, time_t start, unsigned long interval, void* data = nullptr); +#ifdef LEGACY_CONF std::vector 
_get_hosts( set_host const& hst_added, bool throw_if_not_found = true); @@ -74,6 +100,17 @@ class scheduler { std::vector _get_services( set_service const& svc_cfg, bool throw_if_not_found = true); +#else + std::vector _get_hosts( + const std::vector& hst_ids, + bool throw_if_not_found); + std::vector _get_anomalydetections( + const std::vector >& ad_ids, + bool throw_if_not_found); + std::vector _get_services( + const std::vector >& ad_ids, + bool throw_if_not_found); +#endif void _remove_misc_event(timed_event*& evt); void _schedule_host_events( @@ -84,7 +121,11 @@ class scheduler { void _unschedule_service_events( std::vector const& services); +#ifdef LEGACY_CONF configuration::state* _config; +#else + configuration::State* _pb_config; +#endif timed_event* _evt_check_reaper; timed_event* _evt_command_check; timed_event* _evt_hfreshness_check; @@ -97,16 +138,11 @@ class scheduler { unsigned int _old_check_reaper_interval; int _old_command_check_interval; unsigned int _old_host_freshness_check_interval; - std::string _old_host_perfdata_file_processing_command; - unsigned int _old_host_perfdata_file_processing_interval; unsigned int _old_retention_update_interval; unsigned int _old_service_freshness_check_interval; - std::string _old_service_perfdata_file_processing_command; - unsigned int _old_service_perfdata_file_processing_interval; unsigned int _old_status_update_interval; }; -} // namespace applier -} // namespace configuration +} // namespace configuration::applier } // namespace com::centreon::engine diff --git a/engine/inc/com/centreon/engine/configuration/applier/service.hh b/engine/inc/com/centreon/engine/configuration/applier/service.hh index 6461ef6055c..dc204016458 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/service.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/service.hh @@ -20,35 +20,54 @@ #define CCE_CONFIGURATION_APPLIER_SERVICE_HH #include "com/centreon/engine/configuration/applier/state.hh" -namespace 
com::centreon::engine { +#ifndef LEGACY_CONF +#include "common/engine_conf/service_helper.hh" +#endif -namespace configuration { +namespace com::centreon::engine::configuration { + +#ifdef LEGACY_CONF // Forward declarations. class service; class state; +#endif namespace applier { class service { public: - service(); - service(service const& right); - ~service(); - service& operator=(service const& right); + service() = default; + service(service const&) = delete; + ~service() noexcept = default; + service& operator=(service const&) = delete; +#ifdef LEGACY_CONF void add_object(configuration::service const& obj); void expand_objects(configuration::state& s); void modify_object(configuration::service const& obj); void remove_object(configuration::service const& obj); void resolve_object(configuration::service const& obj, error_cnt& err); +#else + void add_object(const configuration::Service& obj); + void expand_objects(configuration::State& s); + void modify_object(configuration::Service* old_obj, + const configuration::Service& new_obj); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Service& obj, error_cnt& err); +#endif private: +#ifdef LEGACY_CONF void _expand_service_memberships(configuration::service& obj, configuration::state& s); void _inherits_special_vars(configuration::service& obj, configuration::state const& s); +#else + void _expand_service_memberships(configuration::Service& obj, + configuration::State& s); + void _inherits_special_vars(configuration::Service& obj, + const configuration::State& s); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_SERVICE_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/servicedependency.hh b/engine/inc/com/centreon/engine/configuration/applier/servicedependency.hh index 06b83ac0448..5f7ec05d839 100644 --- 
a/engine/inc/com/centreon/engine/configuration/applier/servicedependency.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/servicedependency.hh @@ -21,17 +21,23 @@ #include "com/centreon/engine/configuration/applier/state.hh" -namespace com::centreon::engine { +#ifndef LEGACY_CONF +#include "common/engine_conf/servicedependency_helper.hh" +#endif -namespace configuration { +namespace com::centreon::engine::configuration { + +#ifdef LEGACY_CONF // Forward declarations. class servicedependency; class state; +#endif size_t servicedependency_key(const servicedependency& sd); namespace applier { class servicedependency { +#ifdef LEGACY_CONF void _expand_services( std::list const& hst, std::list const& hg, @@ -39,22 +45,39 @@ class servicedependency { std::list const& sg, configuration::state& s, std::set>& expanded); +#else + void _expand_services( + const ::google::protobuf::RepeatedPtrField& hst, + const ::google::protobuf::RepeatedPtrField& hg, + const ::google::protobuf::RepeatedPtrField& svc, + const ::google::protobuf::RepeatedPtrField& sg, + configuration::State& s, + absl::flat_hash_set>& expanded); +#endif public: servicedependency() = default; ~servicedependency() noexcept = default; servicedependency(const servicedependency&) = delete; servicedependency& operator=(const servicedependency&) = delete; +#ifdef LEGACY_CONF void add_object(configuration::servicedependency const& obj); void modify_object(configuration::servicedependency const& obj); void expand_objects(configuration::state& s); void remove_object(configuration::servicedependency const& obj); void resolve_object(configuration::servicedependency const& obj, error_cnt& err); +#else + void add_object(const configuration::Servicedependency& obj); + void modify_object(configuration::Servicedependency* old_obj, + const configuration::Servicedependency& new_obj); + void expand_objects(configuration::State& s); + void remove_object(ssize_t idx); + void resolve_object(const 
configuration::Servicedependency& obj, + error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_SERVICEDEPENDENCY_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/serviceescalation.hh b/engine/inc/com/centreon/engine/configuration/applier/serviceescalation.hh index dfa3ec3d0be..07f1b40289a 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/serviceescalation.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/serviceescalation.hh @@ -20,15 +20,21 @@ #define CCE_CONFIGURATION_APPLIER_SERVICEESCALATION_HH #include "com/centreon/engine/configuration/applier/state.hh" -namespace com::centreon::engine { +#ifndef LEGACY_CONF +#include "common/engine_conf/serviceescalation_helper.hh" +#endif + +namespace com::centreon::engine::configuration { -namespace configuration { // Forward declarations. +#ifdef LEGACY_CONF class serviceescalation; class state; +#endif namespace applier { class serviceescalation { +#ifdef LEGACY_CONF void _expand_services( std::list const& hst, std::list const& hg, @@ -38,22 +44,31 @@ class serviceescalation { std::set >& expanded); void _inherits_special_vars(configuration::serviceescalation& obj, configuration::state const& s); +#endif public: serviceescalation() = default; serviceescalation(const serviceescalation&) = delete; ~serviceescalation() noexcept = default; serviceescalation& operator=(const serviceescalation&) = delete; +#ifdef LEGACY_CONF void add_object(const configuration::serviceescalation& obj); void modify_object(const configuration::serviceescalation& obj); void remove_object(const configuration::serviceescalation& obj); void expand_objects(configuration::state& s); void resolve_object(const configuration::serviceescalation& obj, error_cnt& err); +#else + void add_object(const configuration::Serviceescalation& obj); + void 
modify_object(configuration::Serviceescalation* old_obj, + const configuration::Serviceescalation& new_obj); + void remove_object(ssize_t idx); + void expand_objects(configuration::State& s); + void resolve_object(const configuration::Serviceescalation& obj, + error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_SERVICEESCALATION_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/servicegroup.hh b/engine/inc/com/centreon/engine/configuration/applier/servicegroup.hh index 7e5cb60bd96..a81540af111 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/servicegroup.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/servicegroup.hh @@ -20,13 +20,18 @@ #define CCE_CONFIGURATION_APPLIER_SERVICEGROUP_HH #include "com/centreon/engine/configuration/applier/state.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/servicegroup.hh" +#else +#include "common/engine_conf/servicegroup_helper.hh" +#endif -namespace com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { +#ifdef LEGACY_CONF // Forward declarations. 
class state; +#endif namespace applier { class servicegroup { @@ -35,13 +40,24 @@ class servicegroup { servicegroup(servicegroup const& right); ~servicegroup() throw(); servicegroup& operator=(servicegroup const& right); +#ifdef LEGACY_CONF void add_object(configuration::servicegroup const& obj); void expand_objects(configuration::state& s); void modify_object(configuration::servicegroup const& obj); void remove_object(configuration::servicegroup const& obj); void resolve_object(configuration::servicegroup const& obj, error_cnt& err); +#else + void add_object(const configuration::Servicegroup& obj); + void expand_objects(configuration::State& s); + void modify_object(configuration::Servicegroup* to_modify, + const configuration::Servicegroup& new_object); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Servicegroup& obj, + error_cnt& err); +#endif private: +#ifdef LEGACY_CONF typedef std::map resolved_set; @@ -50,10 +66,16 @@ class servicegroup { configuration::state const& s); resolved_set _resolved; +#else + void _resolve_members( + configuration::State& s, + configuration::Servicegroup* sg_conf, + absl::flat_hash_set& resolved, + const absl::flat_hash_map& + sg_by_name); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_SERVICEGROUP_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/severity.hh b/engine/inc/com/centreon/engine/configuration/applier/severity.hh index cd741a8014c..1371e7bea69 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/severity.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/severity.hh @@ -1,5 +1,5 @@ /* - * Copyright 2022 Centreon (https://www.centreon.com/) + * Copyright 2022-2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in 
compliance with the License. @@ -19,12 +19,16 @@ #ifndef CCE_CONFIGURATION_APPLIER_SEVERITY_HH #define CCE_CONFIGURATION_APPLIER_SEVERITY_HH +#ifndef LEGACY_CONF +#include "common/engine_conf/severity_helper.hh" +#endif -namespace com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { +#ifdef LEGACY_CONF class severity; class state; +#endif namespace applier { class severity { @@ -32,15 +36,22 @@ class severity { severity() = default; ~severity() noexcept = default; severity& operator=(const severity& other) = delete; +#ifdef LEGACY_CONF void add_object(const configuration::severity& obj); void expand_objects(configuration::state& s); void modify_object(const configuration::severity& obj); void remove_object(const configuration::severity& obj); void resolve_object(const configuration::severity& obj); +#else + void add_object(const configuration::Severity& obj); + void modify_object(configuration::Severity* to_modify, + const configuration::Severity& new_object); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Severity& obj, + error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_SEVERITY_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/state.hh b/engine/inc/com/centreon/engine/configuration/applier/state.hh index 62c4296d497..e9ce73491ba 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/state.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/state.hh @@ -20,7 +20,11 @@ #include "com/centreon/engine/configuration/applier/difference.hh" #include "com/centreon/engine/servicedependency.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "com/centreon/engine/configuration/applier/pb_difference.hh" +#endif namespace com::centreon::engine { @@ -46,17 +50,26 @@ namespace applier { */ class state { 
public: +#ifdef LEGACY_CONF void apply(configuration::state& new_cfg, error_cnt& err, retention::state* state = nullptr); void apply_log_config(configuration::state& new_cfg); +#else + void apply(configuration::State& new_cfg, + error_cnt& err, + retention::state* state = nullptr); + void apply_log_config(configuration::State& new_cfg); +#endif static state& instance(); void clear(); servicedependency_mmap const& servicedependencies() const throw(); servicedependency_mmap& servicedependencies() throw(); +#ifdef LEGACY_CONF servicedependency_mmap::iterator servicedependencies_find( configuration::servicedependency::key_type const& k); +#endif std::unordered_map& user_macros(); std::unordered_map::const_iterator user_macros_find( std::string const& key) const; @@ -85,6 +98,7 @@ class state { #endif state& operator=(state const&); +#ifdef LEGACY_CONF void _apply(configuration::state const& new_cfg, error_cnt& err); template void _apply(difference> const& diff, @@ -92,6 +106,16 @@ class state { void _apply(configuration::state& new_cfg, retention::state& state, error_cnt& err); +#else + void _apply(const configuration::State& new_cfg, error_cnt& err); + template + void _apply(const pb_difference& diff, + error_cnt& err); + void _apply(configuration::State& new_cfg, + retention::state& state, + error_cnt& err); +#endif +#ifdef LEGACY_CONF template void _expand(configuration::state& new_state, error_cnt& err); void _processing(configuration::state& new_cfg, @@ -99,9 +123,22 @@ class state { retention::state* state = nullptr); template void _resolve(std::set& cfg, error_cnt& err); +#else + template + void _expand(configuration::State& new_state, error_cnt& err); + void _processing(configuration::State& new_cfg, + error_cnt& err, + retention::state* state = nullptr); + template + void _resolve( + const ::google::protobuf::RepeatedPtrField& cfg, + error_cnt& err); +#endif std::mutex _apply_lock; +#ifdef LEGACY_CONF state* _config; +#endif processing_state _processing_state; 
servicedependency_mmap _servicedependencies; diff --git a/engine/inc/com/centreon/engine/configuration/applier/tag.hh b/engine/inc/com/centreon/engine/configuration/applier/tag.hh index 036b43e4edb..afad6cbd311 100644 --- a/engine/inc/com/centreon/engine/configuration/applier/tag.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/tag.hh @@ -1,4 +1,4 @@ -/* +/** * Copyright 2022 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,12 +19,18 @@ #ifndef CCE_CONFIGURATION_APPLIER_TAG_HH #define CCE_CONFIGURATION_APPLIER_TAG_HH +#ifdef LEGACY_CONF +#include "common/engine_legacy_conf/object.hh" +#else +#include "common/engine_conf/tag_helper.hh" +#endif -namespace com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { +#ifdef LEGACY_CONF class tag; class state; +#endif namespace applier { class tag { @@ -32,15 +38,22 @@ class tag { tag() = default; ~tag() noexcept = default; tag& operator=(const tag& other) = delete; +#ifdef LEGACY_CONF void add_object(const configuration::tag& obj); void expand_objects(configuration::state& s); void modify_object(const configuration::tag& obj); void remove_object(const configuration::tag& obj); - void resolve_object(const configuration::tag& obj); + void resolve_object(const configuration::tag& obj, error_cnt& err); +#else + void add_object(const configuration::Tag& obj); + void modify_object(configuration::Tag* to_modify, + const configuration::Tag& new_object); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Tag& obj, + error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_TAG_HH diff --git a/engine/inc/com/centreon/engine/configuration/applier/timeperiod.hh b/engine/inc/com/centreon/engine/configuration/applier/timeperiod.hh index 3a47437b3e0..a5bb0401bc2 100644 --- 
a/engine/inc/com/centreon/engine/configuration/applier/timeperiod.hh +++ b/engine/inc/com/centreon/engine/configuration/applier/timeperiod.hh @@ -21,13 +21,18 @@ #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/timeperiod.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/timeperiod_helper.hh" +#endif + // Forward declaration. -namespace com::centreon::engine { +namespace com::centreon::engine::configuration { -namespace configuration { +#ifdef LEGACY_CONF // Forward declarations. class state; class timeperiod; +#endif namespace applier { class timeperiod { @@ -46,15 +51,23 @@ class timeperiod { ~timeperiod() noexcept = default; timeperiod(const timeperiod&) = delete; timeperiod& operator=(const timeperiod&) = delete; +#ifdef LEGACY_CONF void add_object(const configuration::timeperiod& obj); void expand_objects(configuration::state& s); void modify_object(configuration::timeperiod const& obj); void remove_object(configuration::timeperiod const& obj); void resolve_object(configuration::timeperiod const& obj, error_cnt& err); +#else + void add_object(const configuration::Timeperiod& obj); + void expand_objects(configuration::State& s); + void modify_object(configuration::Timeperiod* to_modify, + const configuration::Timeperiod& new_object); + void remove_object(ssize_t idx); + void resolve_object(const configuration::Timeperiod& obj, + error_cnt& err); +#endif }; } // namespace applier -} // namespace configuration - -} // namespace com::centreon::engine +} // namespace com::centreon::engine::configuration #endif // !CCE_CONFIGURATION_APPLIER_TIMEPERIOD_HH diff --git a/engine/inc/com/centreon/engine/configuration/extended_conf.hh b/engine/inc/com/centreon/engine/configuration/extended_conf.hh index 28df0aa9958..161d3759b9c 100644 --- a/engine/inc/com/centreon/engine/configuration/extended_conf.hh +++ b/engine/inc/com/centreon/engine/configuration/extended_conf.hh @@ -20,6 +20,9 @@ #define 
CCE_CONFIGURATION_EXTENDED_STATE_HH #include "com/centreon/common/rapidjson_helper.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/state_helper.hh" +#endif namespace com::centreon::engine::configuration { @@ -45,7 +48,11 @@ class extended_conf { extended_conf& operator=(const extended_conf&) = delete; void reload(); +#ifdef LEGACY_CONF static void update_state(state& dest); +#else + static void update_state(State* pb_config); +#endif template static void load_all(file_path_iterator begin, file_path_iterator); diff --git a/engine/inc/com/centreon/engine/contact.hh b/engine/inc/com/centreon/engine/contact.hh index b9f3b26d9c7..fc6069a017d 100644 --- a/engine/inc/com/centreon/engine/contact.hh +++ b/engine/inc/com/centreon/engine/contact.hh @@ -61,7 +61,10 @@ class contact { // Base properties. std::string const& get_address(int index) const; std::vector const& get_addresses() const; - void set_addresses(std::vector const& addresses); +#ifdef LEGACY_CONF + void set_addresses(const std::vector& addresses); +#endif + void set_addresses(std::vector&& addresses); std::string const& get_alias() const; void set_alias(std::string const& alias); bool get_can_submit_commands() const; @@ -184,7 +187,7 @@ std::shared_ptr add_contact( std::string const& alias, std::string const& email, std::string const& pager, - std::array const& addresses, + std::vector&& addresses, std::string const& svc_notification_period, std::string const& host_notification_period, int notify_service_ok, diff --git a/engine/inc/com/centreon/engine/contactgroup.hh b/engine/inc/com/centreon/engine/contactgroup.hh index 66345a29e60..d56ba49ef55 100644 --- a/engine/inc/com/centreon/engine/contactgroup.hh +++ b/engine/inc/com/centreon/engine/contactgroup.hh @@ -24,6 +24,10 @@ #include #include +#ifndef LEGACY_CONF +#include "common/engine_conf/contactgroup_helper.hh" +#endif + /* Forward declaration. 
*/ namespace com::centreon::engine { class contact; @@ -46,8 +50,12 @@ namespace com::centreon::engine { class contactgroup { public: - contactgroup(); + contactgroup() = default; +#ifdef LEGACY_CONF contactgroup(configuration::contactgroup const& obj); +#else + contactgroup(const configuration::Contactgroup& obj); +#endif virtual ~contactgroup(); std::string const& get_name() const; std::string const& get_alias() const; diff --git a/engine/inc/com/centreon/engine/daterange.hh b/engine/inc/com/centreon/engine/daterange.hh index 6bf2967c507..a46e027b6de 100644 --- a/engine/inc/com/centreon/engine/daterange.hh +++ b/engine/inc/com/centreon/engine/daterange.hh @@ -22,6 +22,10 @@ #include "com/centreon/engine/common.hh" #include "com/centreon/engine/timerange.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/state.pb.h" +#endif + struct timeperiod_struct; namespace com::centreon::engine { @@ -45,6 +49,7 @@ class daterange { week_day = 4 }; +#ifdef LEGACY_CONF daterange(type_range type, int syear, int smon, @@ -58,6 +63,23 @@ class daterange { int ewday_offset, int skip_interval, const std::list& timeranges); +#else + daterange(type_range type, + int syear, + int smon, + int smday, + int swday, + int swday_offset, + int eyear, + int emon, + int emday, + int ewday, + int ewday_offset, + int skip_interval, + const google::protobuf::RepeatedPtrField& + timeranges); +#endif + daterange(type_range type); type_range get_type() const { return _type; } diff --git a/engine/inc/com/centreon/engine/globals.hh b/engine/inc/com/centreon/engine/globals.hh index bbe3ea115b5..274928a6a52 100644 --- a/engine/inc/com/centreon/engine/globals.hh +++ b/engine/inc/com/centreon/engine/globals.hh @@ -29,7 +29,11 @@ #include "com/centreon/engine/nebmods.hh" #include "com/centreon/engine/restart_stats.hh" #include "com/centreon/engine/utils.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/state.pb.h" +#endif #include 
"common/log_v2/log_v2.hh" /* Start/Restart statistics */ @@ -47,9 +51,14 @@ extern std::shared_ptr macros_logger; extern std::shared_ptr notifications_logger; extern std::shared_ptr process_logger; extern std::shared_ptr runtime_logger; +extern std::shared_ptr otel_logger; +#ifdef LEGACY_CONF extern com::centreon::engine::configuration::state* config; -extern char* config_file; +#else +extern com::centreon::engine::configuration::State pb_config; +#endif +extern std::string config_file; extern com::centreon::engine::commands::command* global_host_event_handler_ptr; extern com::centreon::engine::commands::command* diff --git a/engine/inc/com/centreon/engine/hostdependency.hh b/engine/inc/com/centreon/engine/hostdependency.hh index 6bef7d90e05..d770da3de63 100644 --- a/engine/inc/com/centreon/engine/hostdependency.hh +++ b/engine/inc/com/centreon/engine/hostdependency.hh @@ -20,7 +20,9 @@ #define CCE_OBJECTS_HOSTDEPENDENCY_HH #include "com/centreon/engine/dependency.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/hostdependency.hh" +#endif /* Forward declaration. 
*/ namespace com::centreon::engine { @@ -29,7 +31,7 @@ class hostdependency; class timeperiod; } // namespace com::centreon::engine -typedef std::unordered_multimap< +typedef absl::btree_multimap< std::string, std::shared_ptr> hostdependency_mmap; @@ -64,8 +66,13 @@ class hostdependency : public dependency { bool operator<(hostdependency const& obj) throw(); static hostdependency_mmap hostdependencies; +#ifdef LEGACY_CONF static hostdependency_mmap::iterator hostdependencies_find( - configuration::hostdependency const& k); + const configuration::hostdependency& k); +#else + static hostdependency_mmap::iterator hostdependencies_find( + const std::pair& key); +#endif host* master_host_ptr; host* dependent_host_ptr; diff --git a/engine/inc/com/centreon/engine/hostescalation.hh b/engine/inc/com/centreon/engine/hostescalation.hh index f5e24720c18..8243424cb64 100644 --- a/engine/inc/com/centreon/engine/hostescalation.hh +++ b/engine/inc/com/centreon/engine/hostescalation.hh @@ -32,7 +32,11 @@ typedef std::unordered_multimap< namespace com::centreon::engine { namespace configuration { +#ifdef LEGACY_CONF class hostescalation; +#else +class Hostescalation; +#endif } class hostescalation : public escalation { @@ -50,7 +54,11 @@ class hostescalation : public escalation { bool is_viable(int state, uint32_t notification_number) const override; void resolve(uint32_t& w, uint32_t& e) override; +#ifdef LEGACY_CONF bool matches(const configuration::hostescalation& obj) const; +#else + bool matches(const configuration::Hostescalation& obj) const; +#endif static hostescalation_mmap hostescalations; diff --git a/engine/inc/com/centreon/engine/macros/defines.hh b/engine/inc/com/centreon/engine/macros/defines.hh index 0d92a4634a7..601d65c71c9 100644 --- a/engine/inc/com/centreon/engine/macros/defines.hh +++ b/engine/inc/com/centreon/engine/macros/defines.hh @@ -101,8 +101,8 @@ #define MACRO_LOGFILE 69 #define MACRO_RESOURCEFILE 70 #define MACRO_COMMANDFILE 71 -#define 
MACRO_HOSTPERFDATAFILE 72 -#define MACRO_SERVICEPERFDATAFILE 73 +#define MACRO_HOSTPERFDATAFILE 72 // Not used anymore +#define MACRO_SERVICEPERFDATAFILE 73 // Not used anymore #define MACRO_HOSTACTIONURL 74 #define MACRO_HOSTNOTESURL 75 #define MACRO_HOSTNOTES 76 diff --git a/engine/inc/com/centreon/engine/nebstructs.hh b/engine/inc/com/centreon/engine/nebstructs.hh index 02073edc4eb..786e1f56b53 100644 --- a/engine/inc/com/centreon/engine/nebstructs.hh +++ b/engine/inc/com/centreon/engine/nebstructs.hh @@ -95,10 +95,10 @@ typedef struct nebstruct_comment_struct { /* Custom variable structure. */ typedef struct nebstruct_custom_variable_struct { int type; - struct timeval timestamp; - char* var_name; - char* var_value; - void* object_ptr; + struct timeval timestamp = {}; + std::string_view var_name; + std::string_view var_value; + void* object_ptr = nullptr; } nebstruct_custom_variable_data; /* Downtime data structure. */ diff --git a/engine/inc/com/centreon/engine/retention/applier/anomalydetection.hh b/engine/inc/com/centreon/engine/retention/applier/anomalydetection.hh index 2a1e7be6cc4..565c5ea8e61 100644 --- a/engine/inc/com/centreon/engine/retention/applier/anomalydetection.hh +++ b/engine/inc/com/centreon/engine/retention/applier/anomalydetection.hh @@ -28,22 +28,38 @@ class anomalydetection; // Forward declaration. 
namespace configuration { +#ifdef LEGACY_CONF class state; -} +#else +class State; +#endif +} // namespace configuration namespace retention { namespace applier { class anomalydetection { public: +#ifdef LEGACY_CONF static void apply(configuration::state const& config, list_anomalydetection const& lst, bool scheduling_info_is_ok); - +#else + static void apply(const configuration::State& config, + const list_anomalydetection& lst, + bool scheduling_info_is_ok); +#endif private: +#ifdef LEGACY_CONF static void _update(configuration::state const& config, retention::anomalydetection const& state, com::centreon::engine::anomalydetection& obj, bool scheduling_info_is_ok); +#else + static void _update(const configuration::State& config, + const retention::anomalydetection& state, + com::centreon::engine::anomalydetection& obj, + bool scheduling_info_is_ok); +#endif }; } // namespace applier } // namespace retention diff --git a/engine/inc/com/centreon/engine/retention/applier/contact.hh b/engine/inc/com/centreon/engine/retention/applier/contact.hh index f0cdcaad4db..e0148a36f0a 100644 --- a/engine/inc/com/centreon/engine/retention/applier/contact.hh +++ b/engine/inc/com/centreon/engine/retention/applier/contact.hh @@ -28,23 +28,37 @@ class contact; // Forward declaration. 
namespace configuration { +#ifdef LEGACY_CONF class state; -} +#else +class State; +#endif +} // namespace configuration namespace retention { namespace applier { class contact { public: +#ifdef LEGACY_CONF void apply(configuration::state const& config, list_contact const& lst); +#else + void apply(const configuration::State& config, list_contact const& lst); +#endif private: +#ifdef LEGACY_CONF void _update(configuration::state const& config, retention::contact const& state, com::centreon::engine::contact* obj); +#else + void _update(const configuration::State& config, + const retention::contact& state, + com::centreon::engine::contact* obj); +#endif }; } // namespace applier } // namespace retention -} +} // namespace com::centreon::engine #endif // !CCE_RETENTION_APPLIER_CONTACT_HH diff --git a/engine/inc/com/centreon/engine/retention/applier/host.hh b/engine/inc/com/centreon/engine/retention/applier/host.hh index d24b74b5919..735d24a9705 100644 --- a/engine/inc/com/centreon/engine/retention/applier/host.hh +++ b/engine/inc/com/centreon/engine/retention/applier/host.hh @@ -35,19 +35,31 @@ namespace retention { namespace applier { class host { public: +#ifdef LEGACY_CONF void apply(configuration::state const& config, list_host const& lst, bool scheduling_info_is_ok); - +#else + void apply(const configuration::State& config, + list_host const& lst, + bool scheduling_info_is_ok); +#endif private: +#ifdef LEGACY_CONF void _update(configuration::state const& config, retention::host const& state, engine::host& obj, bool scheduling_info_is_ok); +#else + void _update(const configuration::State& config, + const retention::host& state, + engine::host& obj, + bool scheduling_info_is_ok); +#endif }; } // namespace applier } // namespace retention -} +} // namespace com::centreon::engine #endif // !CCE_RETENTION_APPLIER_HOST_HH diff --git a/engine/inc/com/centreon/engine/retention/applier/program.hh b/engine/inc/com/centreon/engine/retention/applier/program.hh index 
f79fea775f8..0e88bcae65c 100644 --- a/engine/inc/com/centreon/engine/retention/applier/program.hh +++ b/engine/inc/com/centreon/engine/retention/applier/program.hh @@ -26,14 +26,22 @@ namespace com::centreon::engine { // Forward declaration. namespace configuration { +#ifdef LEGACY_CONF class state; +#else +class State; +#endif } namespace retention { namespace applier { class program { public: +#ifdef LEGACY_CONF void apply(configuration::state& config, retention::program const& obj); +#else + void apply(configuration::State& config, retention::program const& obj); +#endif private: bool _find_command(std::string const& command_line); diff --git a/engine/inc/com/centreon/engine/retention/applier/service.hh b/engine/inc/com/centreon/engine/retention/applier/service.hh index ee0365bf8ee..473902d7ad5 100644 --- a/engine/inc/com/centreon/engine/retention/applier/service.hh +++ b/engine/inc/com/centreon/engine/retention/applier/service.hh @@ -29,13 +29,18 @@ class service; // Forward declaration. 
namespace configuration { +#ifdef LEGACY_CONF class state; -} +#else +class State; +#endif +} // namespace configuration namespace retention { namespace applier { class service { public: +#ifdef LEGACY_CONF static void apply(configuration::state const& config, list_service const& lst, bool scheduling_info_is_ok); @@ -44,10 +49,20 @@ class service { retention::service const& state, com::centreon::engine::service& obj, bool scheduling_info_is_ok); +#else + static void apply(const configuration::State& config, + const list_service& lst, + bool scheduling_info_is_ok); + + static void update(const configuration::State& config, + const retention::service& state, + com::centreon::engine::service& obj, + bool scheduling_info_is_ok); +#endif }; } // namespace applier } // namespace retention -} +} // namespace com::centreon::engine #endif // !CCE_RETENTION_APPLIER_SERVICE_HH diff --git a/engine/inc/com/centreon/engine/retention/applier/state.hh b/engine/inc/com/centreon/engine/retention/applier/state.hh index 114ed26718c..62753be73ea 100644 --- a/engine/inc/com/centreon/engine/retention/applier/state.hh +++ b/engine/inc/com/centreon/engine/retention/applier/state.hh @@ -26,14 +26,22 @@ namespace com::centreon::engine { // Forward declaration. 
namespace configuration { +#ifdef LEGACY_CONF class state; +#else +class State; +#endif } namespace retention { namespace applier { class state { public: +#ifdef LEGACY_CONF void apply(configuration::state& config, retention::state const& state); +#else + void apply(configuration::State& config, retention::state const& state); +#endif }; } // namespace applier } // namespace retention diff --git a/engine/inc/com/centreon/engine/service.hh b/engine/inc/com/centreon/engine/service.hh index c2c1e853c7f..a6b4d84ee44 100644 --- a/engine/inc/com/centreon/engine/service.hh +++ b/engine/inc/com/centreon/engine/service.hh @@ -38,12 +38,55 @@ class servicegroup; class serviceescalation; } // namespace com::centreon::engine +/** + * @brief pair with host_name in first and serv in second + * + */ +using host_serv_pair = std::pair; + +/** + * @brief This struct is used to lookup in a host_serv_pair indexed container + * with a std::pair + * + */ +struct host_serv_hash_eq { + using is_transparent = void; + using host_serv_string_view = std::pair; + + size_t operator()(const host_serv_pair& to_hash) const { + return absl::Hash()(to_hash); + } + size_t operator()(const host_serv_string_view& to_hash) const { + return absl::Hash()(to_hash); + } + + bool operator()(const host_serv_pair& left, + const host_serv_pair& right) const { + return left == right; + } + bool operator()(const host_serv_pair& left, + const host_serv_string_view& right) const { + return left.first == right.first && left.second == right.second; + } + bool operator()(const host_serv_string_view& left, + const host_serv_pair& right) const { + return left.first == right.first && left.second == right.second; + } + bool operator()(const host_serv_string_view& left, + const host_serv_string_view& right) const { + return left == right; + } +}; + using service_map = - absl::flat_hash_map, - std::shared_ptr>; -using service_map_unsafe = - absl::flat_hash_map, - com::centreon::engine::service*>; + absl::flat_hash_map, + 
host_serv_hash_eq, + host_serv_hash_eq>; +using service_map_unsafe = absl::flat_hash_map; using service_id_map = absl::btree_map, std::shared_ptr>; diff --git a/engine/inc/com/centreon/engine/servicedependency.hh b/engine/inc/com/centreon/engine/servicedependency.hh index 9b25112a4c6..a9ab85af2a4 100644 --- a/engine/inc/com/centreon/engine/servicedependency.hh +++ b/engine/inc/com/centreon/engine/servicedependency.hh @@ -21,7 +21,9 @@ #define CCE_OBJECTS_SERVICEDEPENDENCY_HH #include "com/centreon/engine/dependency.hh" #include "com/centreon/engine/hash.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/servicedependency.hh" +#endif /* Forward declaration. */ namespace com::centreon::engine { @@ -85,8 +87,13 @@ class servicedependency : public dependency { service* dependent_service_ptr; static servicedependency_mmap servicedependencies; +#ifdef LEGACY_CONF static servicedependency_mmap::iterator servicedependencies_find( configuration::servicedependency const& k); +#else + static servicedependency_mmap::iterator servicedependencies_find( + const std::tuple& key); +#endif }; }; // namespace com::centreon::engine diff --git a/engine/inc/com/centreon/engine/serviceescalation.hh b/engine/inc/com/centreon/engine/serviceescalation.hh index f634f32b83c..b1eaaa7da90 100644 --- a/engine/inc/com/centreon/engine/serviceescalation.hh +++ b/engine/inc/com/centreon/engine/serviceescalation.hh @@ -35,7 +35,11 @@ typedef std::unordered_multimap< namespace com::centreon::engine { namespace configuration { +#ifdef LEGACY_CONF class serviceescalation; +#else +class Serviceescalation; +#endif } class serviceescalation : public escalation { @@ -53,7 +57,11 @@ class serviceescalation : public escalation { std::string const& get_description() const; bool is_viable(int state, uint32_t notification_number) const override; void resolve(uint32_t& w, uint32_t& e) override; +#ifdef LEGACY_CONF bool matches(const configuration::serviceescalation& obj) const; +#else + bool 
matches(const configuration::Serviceescalation& obj) const; +#endif static serviceescalation_mmap serviceescalations; diff --git a/engine/inc/com/centreon/engine/string.hh b/engine/inc/com/centreon/engine/string.hh index 89c9bb5df44..7a47803cd05 100644 --- a/engine/inc/com/centreon/engine/string.hh +++ b/engine/inc/com/centreon/engine/string.hh @@ -29,7 +29,6 @@ #include #include - namespace com::centreon::engine { namespace string { @@ -205,6 +204,8 @@ std::string& remove_thresholds(std::string& perfdata) noexcept; void unescape(char* buffer); +void unescape(std::string& str); + /** * @brief this class is a thread safe replacement for my_strtok * An instance is not thread safe but sevaral instances can be used in different @@ -226,6 +227,6 @@ class c_strtok { } // namespace string -} +} // namespace com::centreon::engine #endif // !CCE_MISC_STRING_HH diff --git a/engine/inc/com/centreon/engine/timeperiod.hh b/engine/inc/com/centreon/engine/timeperiod.hh index 44e24f13061..8c0a041d9bd 100644 --- a/engine/inc/com/centreon/engine/timeperiod.hh +++ b/engine/inc/com/centreon/engine/timeperiod.hh @@ -21,6 +21,9 @@ #define CCE_OBJECTS_TIMEPERIOD_HH #include "com/centreon/engine/daterange.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/timeperiod_helper.hh" +#endif /* Forward declaration. 
*/ namespace com::centreon::engine { @@ -37,14 +40,29 @@ namespace com::centreon::engine { class timeperiod { public: +#ifdef LEGACY_CONF timeperiod(std::string const& name, std::string const& alias); +#else + timeperiod(const configuration::Timeperiod& obj); + void set_exclusions(const configuration::StringSet& exclusions); + void set_exceptions(const configuration::ExceptionArray& array); + void set_days(const configuration::DaysArray& array); +#endif - std::string const& get_name() const { return _name; }; - void set_name(std::string const& name); - std::string const get_alias() const { return _alias; }; - void set_alias(std::string const& alias); - timeperiodexclusion const& get_exclusions() const { return _exclusions; }; - timeperiodexclusion& get_exclusions() { return _exclusions; }; + std::string const& get_name() const { + return _name; + }; + void set_name(const std::string& name); + const std::string& get_alias() const { + return _alias; + }; + void set_alias(const std::string& alias); + const timeperiodexclusion& get_exclusions() const { + return _exclusions; + }; + timeperiodexclusion& get_exclusions() { + return _exclusions; + }; void get_next_valid_time_per_timeperiod(time_t preferred_time, time_t* invalid_time, bool notif_timeperiod); @@ -54,8 +72,8 @@ class timeperiod { void resolve(uint32_t& w, uint32_t& e); - bool operator==(timeperiod const& obj) throw(); - bool operator!=(timeperiod const& obj) throw(); + bool operator==(timeperiod const& obj) noexcept; + bool operator!=(timeperiod const& obj) noexcept; days_array days; exception_array exceptions; diff --git a/engine/inc/com/centreon/engine/version.hh.in b/engine/inc/com/centreon/engine/version.hh.in index c6f1dd8ede7..2438a09458c 100644 --- a/engine/inc/com/centreon/engine/version.hh.in +++ b/engine/inc/com/centreon/engine/version.hh.in @@ -21,9 +21,9 @@ #define CCE_VERSION_HH // Compile-time values. 
-#define CENTREON_ENGINE_VERSION_MAJOR @COLLECT_MAJOR@ -#define CENTREON_ENGINE_VERSION_MINOR @COLLECT_MINOR@ -#define CENTREON_ENGINE_VERSION_PATCH @COLLECT_PATCH@ +constexpr unsigned CENTREON_ENGINE_VERSION_MAJOR = @COLLECT_MAJOR@; +constexpr unsigned CENTREON_ENGINE_VERSION_MINOR = @COLLECT_MINOR@; +constexpr unsigned CENTREON_ENGINE_VERSION_PATCH = @COLLECT_PATCH@; #define CENTREON_ENGINE_VERSION_STRING "@CENTREON_ENGINE_VERSION@" #endif // !CCE_VERSION_HH diff --git a/engine/modules/CMakeLists.txt b/engine/modules/CMakeLists.txt index 6f6e0c8b3b7..fbf70d568c1 100644 --- a/engine/modules/CMakeLists.txt +++ b/engine/modules/CMakeLists.txt @@ -21,10 +21,6 @@ add_subdirectory("external_commands") include_directories(${CMAKE_SOURCE_DIR}/clib/inc) -set(EXTERNALCMD_MODULE - "${EXTERNALCMD_MODULE}" - PARENT_SCOPE) - # Benchmark module. add_subdirectory("bench") diff --git a/engine/modules/external_commands/CMakeLists.txt b/engine/modules/external_commands/CMakeLists.txt index 9a742cc7a53..de832e231ce 100644 --- a/engine/modules/external_commands/CMakeLists.txt +++ b/engine/modules/external_commands/CMakeLists.txt @@ -1,21 +1,19 @@ -## -## Copyright 2011-2013 Merethis -## -## This file is part of Centreon Engine. -## -## Centreon Engine is free software: you can redistribute it and/or -## modify it under the terms of the GNU General Public License version 2 -## as published by the Free Software Foundation. -## -## Centreon Engine is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## General Public License for more details. -## -## You should have received a copy of the GNU General Public License -## along with Centreon Engine. If not, see -## . -## +# +# Copyright 2011-2013 Merethis Copyright 2014-2024 Centreon +# +# This file is part of Centreon Engine. 
+# +# Centreon Engine is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published by +# the Free Software Foundation. +# +# Centreon Engine is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# Centreon Engine. If not, see . +# # Set directories. set(MODULE_DIR "${PROJECT_SOURCE_DIR}/modules/external_commands") @@ -26,27 +24,42 @@ set(INC_DIR "${MODULE_DIR}/inc/com/centreon/engine/modules/external_commands") include_directories(${MODULE_DIR}/inc ${CMAKE_SOURCE_DIR}/clib/inc) link_directories(${CMAKE_SOURCE_DIR}/build/centreon-clib/) -# mod_externalcmd target. -add_library( - externalcmd - SHARED +if(LEGACY_ENGINE) + # mod_externalcmd target. + add_library( + externalcmd SHARED + # Sources. + "${SRC_DIR}/main.cc" "${SRC_DIR}/utils.cc" + # Headers. + "${INC_DIR}/utils.hh") + set_property(TARGET "externalcmd" PROPERTY PREFIX "") + target_precompile_headers(externalcmd PRIVATE precomp_inc/precomp.hh) + add_dependencies(externalcmd centreon_clib pb_neb_lib) + target_link_libraries(externalcmd centreon_clib spdlog::spdlog) - # Sources. - "${SRC_DIR}/main.cc" - "${SRC_DIR}/utils.cc" + # Install rule. + install( + TARGETS externalcmd + DESTINATION "${ENGINE_MODULES_DIR}" + COMPONENT "runtime") +else() + # mod_externalcmd target. + add_library( + externalcmd SHARED + # Sources. + "${SRC_DIR}/main.cc" "${SRC_DIR}/utils.cc" + # Headers. + "${INC_DIR}/utils.hh") - # Headers. - "${INC_DIR}/utils.hh" -) -# Prettier name. -set_property(TARGET "externalcmd" PROPERTY PREFIX "") -target_precompile_headers("externalcmd" PRIVATE precomp_inc/precomp.hh) + # Prettier name. 
+ set_property(TARGET "externalcmd" PROPERTY PREFIX "") + target_precompile_headers("externalcmd" PRIVATE precomp_inc/precomp.hh) -set(EXTERNALCMD_MODULE "${EXTERNALCMD_MODULE}" PARENT_SCOPE) -add_dependencies(externalcmd centreon_clib pb_neb_lib) -target_link_libraries(externalcmd centreon_clib spdlog::spdlog) + add_dependencies(externalcmd centreon_clib pb_neb_lib) + target_link_libraries(externalcmd centreon_clib spdlog::spdlog) -# Install rule. -install(TARGETS "externalcmd" - DESTINATION "${ENGINE_MODULES_DIR}" - COMPONENT "runtime") + install( + TARGETS "externalcmd" + DESTINATION "${ENGINE_MODULES_DIR}" + COMPONENT "runtime") +endif() diff --git a/engine/modules/external_commands/src/utils.cc b/engine/modules/external_commands/src/utils.cc index db249d10003..6a77e64028d 100644 --- a/engine/modules/external_commands/src/utils.cc +++ b/engine/modules/external_commands/src/utils.cc @@ -1,23 +1,22 @@ /** * Copyright 2011-2013 Merethis - * Copyright 2020-2021 Centreon + * Copyright 2020-2024 Centreon * - * This file is part of Centreon Engine. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. + * http://www.apache.org/licenses/LICENSE-2.0 * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . */ - #include "com/centreon/engine/modules/external_commands/utils.hh" #include "com/centreon/engine/commands/processing.hh" #include "com/centreon/engine/common.hh" @@ -42,8 +41,15 @@ int open_command_file(void) { struct stat st; /* if we're not checking external commands, don't do anything */ - if (config->check_external_commands() == false) +#ifdef LEGACY_CONF + if (!config->check_external_commands()) + return OK; + const std::string& command_file{config->command_file()}; +#else + if (!pb_config.check_external_commands()) return OK; + const std::string& command_file{pb_config.command_file()}; +#endif /* the command file was already created */ if (command_file_created) @@ -53,15 +59,13 @@ int open_command_file(void) { umask(S_IWOTH); /* use existing FIFO if possible */ - if (!(stat(config->command_file().c_str(), &st) != -1 && - (st.st_mode & S_IFIFO))) { + if (!(stat(command_file.c_str(), &st) != -1 && (st.st_mode & S_IFIFO))) { /* create the external command file as a named pipe (FIFO) */ - if (mkfifo(config->command_file().c_str(), - S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) != 0) { + if (mkfifo(command_file.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) != + 0) { engine_logger(log_runtime_error, basic) - << "Error: Could not create external command file '" - << config->command_file() << "' as named pipe: (" << errno << ") -> " - << strerror(errno) + << "Error: Could not create external command file '" << command_file + << "' as named pipe: (" << errno << ") -> " << strerror(errno) << ". 
If this file already exists and " "you are sure that another copy of Centreon Engine is not " "running, " @@ -72,7 +76,7 @@ int open_command_file(void) { "you are sure that another copy of Centreon Engine is not " "running, " "you should delete this file.", - config->command_file(), errno, strerror(errno)); + command_file, errno, strerror(errno)); return ERROR; } } @@ -80,8 +84,7 @@ int open_command_file(void) { /* open the command file for reading (non-blocked) - O_TRUNC flag cannot be * used due to errors on some systems */ /* NOTE: file must be opened read-write for poll() to work */ - if ((command_file_fd = - open(config->command_file().c_str(), O_RDWR | O_NONBLOCK)) < 0) { + if ((command_file_fd = open(command_file.c_str(), O_RDWR | O_NONBLOCK)) < 0) { engine_logger(log_runtime_error, basic) << "Error: Could not open external command file for reading " "via open(): (" @@ -145,7 +148,7 @@ int open_command_file(void) { fclose(command_file_fp); /* delete the named pipe */ - unlink(config->command_file().c_str()); + unlink(command_file.c_str()); return ERROR; } @@ -159,8 +162,13 @@ int open_command_file(void) { /* closes the external command file FIFO and deletes it */ int close_command_file(void) { /* if we're not checking external commands, don't do anything */ - if (config->check_external_commands() == false) +#ifdef LEGACY_CONF + if (!config->check_external_commands()) return OK; +#else + if (!pb_config.check_external_commands()) + return OK; +#endif /* the command file wasn't created or was already cleaned up */ if (command_file_created == false) @@ -255,8 +263,13 @@ static void command_file_worker_thread() { select(0, nullptr, nullptr, nullptr, &tv); } +#ifdef LEGACY_CONF external_command_buffer.set_capacity( config->external_command_buffer_slots()); +#else + external_command_buffer.set_capacity( + pb_config.external_command_buffer_slots()); +#endif /* process all commands in the file (named pipe) if there's some space in * the buffer */ diff --git 
a/engine/modules/opentelemetry/CMakeLists.txt b/engine/modules/opentelemetry/CMakeLists.txt index 16d1976fdc3..f6a8b94974f 100644 --- a/engine/modules/opentelemetry/CMakeLists.txt +++ b/engine/modules/opentelemetry/CMakeLists.txt @@ -67,9 +67,9 @@ ${SRC_DIR}/centreon_agent/agent.pb.cc ${SRC_DIR}/centreon_agent/agent_check_result_builder.cc ${SRC_DIR}/centreon_agent/agent_config.cc ${SRC_DIR}/centreon_agent/agent_impl.cc +${SRC_DIR}/centreon_agent/agent_reverse_client.cc ${SRC_DIR}/centreon_agent/agent_service.cc -${SRC_DIR}/data_point_fifo.cc -${SRC_DIR}/data_point_fifo_container.cc +${SRC_DIR}/centreon_agent/to_agent_connector.cc ${SRC_DIR}/grpc_config.cc ${SRC_DIR}/host_serv_extractor.cc ${SRC_DIR}/open_telemetry.cc @@ -88,6 +88,8 @@ target_precompile_headers(opentelemetry PRIVATE precomp_inc/precomp.hh) # set(EXTERNALCMD_MODULE "${EXTERNALCMD_MODULE}" PARENT_SCOPE) target_link_libraries(opentelemetry spdlog::spdlog + -L${PROTOBUF_LIB_DIR} + protobuf -L${Boost_LIBRARY_DIR_RELEASE} boost_program_options) diff --git a/engine/modules/opentelemetry/doc/opentelemetry.md b/engine/modules/opentelemetry/doc/opentelemetry.md index 4e5867924f5..73568749bb3 100644 --- a/engine/modules/opentelemetry/doc/opentelemetry.md +++ b/engine/modules/opentelemetry/doc/opentelemetry.md @@ -4,8 +4,8 @@ Engine can receive open telemetry data on a grpc server A new module is added opentelemetry It works like that: * metrics are received -* extractors tries to extract host name and service description for each otl_data_point. On success, otl_data_point are pushed on fifos indexed by host, service -* a service that used these datas wants to do a check. The cmd line identifies the otl_check_result_builder that will construct check result from host service otl_data_point fifos. If converter achieves to build a result from metrics, it returns right now, if it doesn't, a handler will be called as soon as needed metrics will be available or timeout expires. 
+* extractors try to extract host name and service description for each otl_data_point. +* On success, it searches a check_result_builder used by the passive otel service. Then the check_result_builder converts otl_data_point into check_result and updates the passive service. ### open telemetry request The proto is organized like that @@ -115,11 +115,9 @@ The proto is organized like that ### Concepts and classes * otl_data_point: otl_data_point is the smallest unit of received request, otl_data_point class contains otl_data_point protobuf object and all his parents (resource, scope, metric) * host serv extractors: When we receive otel metrics, we must extract host and service, this is his job. It can be configurable in order for example to search host name in otl_data_point attribute or in scope. host serv extractors also contains host serv allowed. This list is updated by register_host_serv command method -* otl_data_point fifo: a container that contains data points indexed by timestamp -* otl_data_point fifo container: fifos indexed by host service * otel_connector: a fake connector that is used to make the link between engine and otel module * otl_server: a grpc server that accept otel collector incoming connections -* otl_check_result_builder: This short lived object is created each time engine wants to do a check. His final class as his configuration is done from the command line of the check. His job is to create a check result from otl_data_point fifo container datas. It's destroyed when he achieved to create a check result or when timeout expires. +* otl_check_result_builder: Its final class, like its configuration, is determined from the command line of the check. Its job is to create a check result from otl_data_point. * host_serv_list: in order to extract host and service, an host_serv extractor must known allowed host service pairs. As otel_connector may be notified of host service using it by register_host_serv method while otel module is not yet loaded. 
This object shared between otel_connector and host_serv_extractor is actualized from otel_connector::register_host_serv. ### How engine access to otl object @@ -128,16 +126,9 @@ Object used by both otel module and engine are inherited from these interfaces. Engine only knows a singleton of the interface open_telemetry_base. This singleton is initialized at otl module loading. ### How to configure it -We use a fake connector. When configuration is loaded, if a connector command line begins with "open_telemetry", we create an otel_connector. Arguments following "open_telemetry" are used to create an host service extractor. If otel module is loaded, we create extractor, otherwise, the otel_connector initialization will be done at otel module loading. -So user has to build one connector by host serv extractor configuration. -Then commands can use these fake connectors (class otel_connector) to run checks. - -### How a service do a check -When otel_connector::run is called, it calls the check method of open_telemetry singleton. -The check method of open_telemetry object will use command line passed to run to create an otl_check_result_builder object that has to convert metrics to check result. -The open_telemetry call sync_build_result_from_metrics, if it can't achieve to build a result, otl_check_result_builder is stored in a container. -When a metric of a waiting service is received, async_build_result_from_metrics of otl_check_result_builder is called. -In open_telemetry object, a second timer is also used to call async_time_out of otl_check_result_builder on timeout expire. +We use a fake connector. When configuration is loaded, if a connector command line begins with "open_telemetry", we create an otel_connector. Arguments following "open_telemetry" are used to create an host service extractor and a check_result_builder. If otel module is loaded, we create extractor, otherwise, the otel_connector initialization will be done at otel module loading. 
+So user has to build one connector by host serv extractor, check_result_builder configuration. +Then received otel data_points will be converted in check_result. ### other configuration other configuration parameters are stored in a dedicated json file. The path of this file is passed as argument in centengine.cfg @@ -209,6 +200,8 @@ An example of configuration: ``` ### centreon monitoring agent + +#### agent connects to engine Even if all protobuf objects are opentelemetry objects, grpc communication is made in streaming mode. It is more efficient, it allows reverse connection (engine can connect to an agent running in a DMZ) and Engine can send configuration on each config update. You can find all grpc definitions are agent/proto/agent.proto. @@ -360,4 +353,32 @@ Configuration of agent is divided in two parts: The first part is owned by agent protobuf service (agent_service.cc), the second is build by a common code shared with telegraf server (conf_helper.hh) -So when centengine receives a HUP signal, opentelemetry::reload check configuration changes on each established connection and update also agent service conf part1 which is used to configure future incoming connections. \ No newline at end of file +So when centengine receives a HUP signal, opentelemetry::reload check configuration changes on each established connection and update also agent service conf part1 which is used to configure future incoming connections. + +#### engine connects to agent + +##### configuration +Each agent has its own grpc configuration. Each object in this array is a grpc configuration object like those we can find in Agent or server + +An example: +```json +{ + "max_length_grpc_log": 0, + "centreon_agent": { + "check_interval": 10, + "export_period": 15, + "reverse_connections": [ + { + "host": "127.0.0.1", + "port": 4317 + } + ] + } +} +``` + +#### classes +From this configuration an agent_reverse_client object maintains a list of endpoints engine has to connect to. 
It manages also agent list updates. +It contains a map of to_agent_connector indexed by config. +The role to_agent_connector is to maintain an alive connection to agent (agent_connection class). It owns an agent_connection class and recreates it in case of network failure. +Agent_connection holds a weak_ptr to agent_connection to warn it about connection failure. \ No newline at end of file diff --git a/engine/modules/opentelemetry/doc/otel_configuration.odg b/engine/modules/opentelemetry/doc/otel_configuration.odg new file mode 100644 index 00000000000..c14e698328b Binary files /dev/null and b/engine/modules/opentelemetry/doc/otel_configuration.odg differ diff --git a/engine/modules/opentelemetry/doc/otel_configuration.pdf b/engine/modules/opentelemetry/doc/otel_configuration.pdf new file mode 100644 index 00000000000..3bff7928002 Binary files /dev/null and b/engine/modules/opentelemetry/doc/otel_configuration.pdf differ diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh index adcee312878..c2b403979a0 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh @@ -91,25 +91,13 @@ namespace com::centreon::engine::modules::opentelemetry::centreon_agent { * */ class agent_check_result_builder : public otl_check_result_builder { - protected: - bool _build_result_from_metrics(metric_name_to_fifo& fifos, - commands::result& res) override; - public: agent_check_result_builder(const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const 
std::shared_ptr& logger) - : otl_check_result_builder(cmd_line, - command_id, - host, - service, - timeout, - std::move(handler), - logger) {} + : otl_check_result_builder(cmd_line, logger) {} + + bool build_result_from_metrics(const metric_to_datapoints& data_pts, + check_result& res) override; }; } // namespace com::centreon::engine::modules::opentelemetry::centreon_agent diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh new file mode 100644 index 00000000000..cc02b91e8af --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh @@ -0,0 +1,62 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_REVERSE_CLIENT_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_REVERSE_CLIENT_HH + +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +class to_agent_connector; + +class agent_reverse_client { + protected: + std::shared_ptr _io_context; + agent_config::pointer _conf; + const metric_handler _metric_handler; + std::shared_ptr _logger; + + using config_to_client = absl::btree_map, + grpc_config_compare>; + absl::Mutex _agents_m; + config_to_client _agents ABSL_GUARDED_BY(_agents_m); + + virtual config_to_client::iterator _create_new_client_connection( + const grpc_config::pointer& agent_endpoint, + const agent_config::pointer& agent_conf) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(_agents_m); + + virtual void _shutdown_connection(config_to_client::const_iterator to_delete); + + public: + agent_reverse_client( + const std::shared_ptr& io_context, + const metric_handler& handler, + const std::shared_ptr& logger); + + virtual ~agent_reverse_client(); + + void update(const agent_config::pointer& new_conf); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent + +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh new file mode 100644 index 00000000000..3fc016aebb9 --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh @@ -0,0 +1,78 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_CLIENT_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_CLIENT_HH + +#include "centreon_agent/agent.grpc.pb.h" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" + +#include "com/centreon/common/grpc/grpc_client.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +class agent_connection; + +/** + * @brief this class is used in case of reverse connection + * it maintains one connection to agent server and reconnect in case of failure + * + */ +class to_agent_connector + : public common::grpc::grpc_client_base, + public std::enable_shared_from_this { + std::shared_ptr _io_context; + metric_handler _metric_handler; + agent_config::pointer _conf; + + bool _alive; + std::unique_ptr _stub; + + absl::Mutex _connection_m; + std::shared_ptr _connection ABSL_GUARDED_BY(_connection_m); + + public: + to_agent_connector(const grpc_config::pointer& agent_endpoint_conf, + const std::shared_ptr& io_context, + const agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + virtual ~to_agent_connector(); + + virtual void start(); + + static std::shared_ptr load( + const grpc_config::pointer& agent_endpoint_conf, + const std::shared_ptr& io_context, + const agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + void 
refresh_agent_configuration_if_needed( + const agent_config::pointer& new_conf); + + virtual void shutdown(); + + void on_error(); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent + +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh index c3a0456eeae..a2bbb242c08 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh @@ -41,15 +41,17 @@ bool get_otel_commands(const std::string& host_name, command_handler&& handler, const std::shared_ptr& logger) { auto use_otl_command = [](const checkable& to_test) -> bool { - if (to_test.get_check_command_ptr()->get_type() == - commands::command::e_type::otel) - return true; - if (to_test.get_check_command_ptr()->get_type() == - commands::command::e_type::forward) { - return std::static_pointer_cast( - to_test.get_check_command_ptr()) - ->get_sub_command() - ->get_type() == commands::command::e_type::otel; + if (to_test.get_check_command_ptr()) { + if (to_test.get_check_command_ptr()->get_type() == + commands::command::e_type::otel) + return true; + if (to_test.get_check_command_ptr()->get_type() == + commands::command::e_type::forward) { + return std::static_pointer_cast( + to_test.get_check_command_ptr()) + ->get_sub_command() + ->get_type() == commands::command::e_type::otel; + } } return false; }; diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo.hh deleted file mode 100644 index bf78b223b7b..00000000000 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo.hh +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 
2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ -#ifndef CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_HH -#define CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_HH - -#include "otl_data_point.hh" - -namespace com::centreon::engine::modules::opentelemetry { - -/** - * @brief This class is a multiset of opentelemetry otl_data_point ordered by - * nano_timestamp - * - */ -class data_point_fifo { - struct time_unix_nano_compare { - /** - * @brief mandatory for heterogenous search (abseil or standard associative - * (C++20)) - * https://en.cppreference.com/w/cpp/utility/functional - * - */ - using is_transparent = void; - bool operator()(const otl_data_point& left, - const otl_data_point& right) const { - return left.get_nano_timestamp() < right.get_nano_timestamp(); - } - bool operator()(const otl_data_point& left, - uint64_t nano_timestamp_right) const { - return left.get_nano_timestamp() < nano_timestamp_right; - } - bool operator()(uint64_t nano_timestamp_left, - const otl_data_point& right) const { - return nano_timestamp_left < right.get_nano_timestamp(); - } - }; - - public: - using container = - absl::btree_multiset; - - private: - static time_t _second_datapoint_expiry; - static size_t _max_size; - - container _fifo; - - public: - const container& get_fifo() const { return _fifo; } - - bool empty() const { return _fifo.empty(); } - - void clear() { _fifo.clear(); } - - size_t size() const { return 
_fifo.size(); } - - void add_data_point(const otl_data_point& data_pt); - - void clean(); - - void clean_oldest(uint64_t expiry); - - static void update_fifo_limit(time_t second_datapoint_expiry, - size_t max_size); -}; - -using metric_name_to_fifo = absl::flat_hash_map; - -} // namespace com::centreon::engine::modules::opentelemetry - -#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh deleted file mode 100644 index 7406ea65648..00000000000 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * - * http://www.apache.org/licenses/LICENSE-2.0 * You may obtain a copy of the - License at - - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * For more information : contact@centreon.com - */ -#ifndef CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_CONTAINER_HH -#define CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_CONTAINER_HH - -#include "data_point_fifo.hh" - -namespace com::centreon::engine::modules::opentelemetry { - -/** - * @brief This class is a - * map host_serv -> map metric -> data_point_fifo (list of data_points) - * - */ -class data_point_fifo_container { - public: - private: - /** - * @brief - * metrics are ordered like this: - * => metric1 => data_points list - * => metric2 => data_points list - * - */ - using host_serv_to_metrics = absl::flat_hash_map; - - host_serv_to_metrics _data; - - static metric_name_to_fifo _empty; - - std::mutex _data_m; - - public: - void clean(); - - static void clean_empty_fifos(metric_name_to_fifo& to_clean); - - void add_data_point(const std::string_view& host, - const std::string_view& service, - const std::string_view& metric, - const otl_data_point& data_pt); - - const metric_name_to_fifo& get_fifos(const std::string& host, - const std::string& service) const; - - metric_name_to_fifo& get_fifos(const std::string& host, - const std::string& service); - - void lock() { _data_m.lock(); } - - void unlock() { _data_m.unlock(); } - - void dump(std::string& output) const; -}; - -} // namespace com::centreon::engine::modules::opentelemetry - -namespace fmt { -template <> -struct formatter< - com::centreon::engine::modules::opentelemetry::data_point_fifo_container> - : formatter { - template - auto format(const com::centreon::engine::modules::opentelemetry:: - data_point_fifo_container& cont, - FormatContext& ctx) const -> decltype(ctx.out()) { - std::string output; - cont.dump(output); - return formatter::format(output, ctx); - } -}; - -} // namespace fmt - -#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh index 
ee2d82e7ef3..b30ba4664b3 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh @@ -22,7 +22,7 @@ #include "com/centreon/engine/commands/otel_interface.hh" -#include "data_point_fifo_container.hh" +#include "centreon_agent/agent_reverse_client.hh" #include "host_serv_extractor.hh" #include "otl_check_result_builder.hh" #include "otl_config.hh" @@ -45,55 +45,22 @@ class otl_server; * */ class open_telemetry : public commands::otel::open_telemetry_base { - asio::system_timer _second_timer; std::shared_ptr _otl_server; std::shared_ptr _telegraf_conf_server; + std::unique_ptr _agent_reverse_client; using cmd_line_to_extractor_map = absl::btree_map>; cmd_line_to_extractor_map _extractors; - data_point_fifo_container _fifo; std::string _config_file_path; std::unique_ptr _conf; std::shared_ptr _logger; - struct host_serv_getter { - using result_type = host_serv; - const result_type& operator()( - const std::shared_ptr& node) const { - return node->get_host_serv(); - } - }; - - struct time_out_getter { - using result_type = std::chrono::system_clock::time_point; - result_type operator()( - const std::shared_ptr& node) const { - return node->get_time_out(); - } - }; - - /** - * @brief when check can't return data right now, we have no metrics in fifo, - * converter is stored in this container. 
It's indexed by host,serv and by - * timeout - * - */ - using waiting_converter = boost::multi_index::multi_index_container< - std::shared_ptr, - boost::multi_index::indexed_by< - boost::multi_index::hashed_non_unique, - boost::multi_index::ordered_non_unique>>; - - waiting_converter _waiting; - std::shared_ptr _io_context; mutable std::mutex _protect; void _forward_to_broker(const std::vector& unknown); - void _second_timer_handler(); - void _create_telegraf_conf_server( const telegraf::conf_server_config::pointer& conf); @@ -101,9 +68,7 @@ class open_telemetry : public commands::otel::open_telemetry_base { virtual void _create_otl_server( const grpc_config::pointer& server_conf, const centreon_agent::agent_config::pointer& agent_conf); - void _on_metric(const metric_request_ptr& metric); void _reload(); - void _start_second_timer(); void _shutdown(); public: @@ -129,21 +94,14 @@ class open_telemetry : public commands::otel::open_telemetry_base { static void unload(const std::shared_ptr& logger); - bool check(const std::string& processed_cmd, - const std::shared_ptr& - conv_conf, - uint64_t command_id, - nagios_macros& macros, - uint32_t timeout, - commands::result& res, - commands::otel::result_callback&& handler) override; + void on_metric(const metric_request_ptr& metric); std::shared_ptr create_extractor( const std::string& cmdline, const commands::otel::host_serv_list::pointer& host_serv_list) override; - std::shared_ptr - create_check_result_builder_config(const std::string& cmd_line) override; + std::shared_ptr + create_check_result_builder(const std::string& cmdline) override; }; } // namespace com::centreon::engine::modules::opentelemetry diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh index 71b44670c3a..1f2f32a8b6e 100644 --- 
a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh @@ -19,35 +19,40 @@ #ifndef CCE_MOD_OTL_CHECK_RESULT_BUILDER_HH #define CCE_MOD_OTL_CHECK_RESULT_BUILDER_HH +#include "com/centreon/engine/check_result.hh" + #include "com/centreon/engine/commands/otel_interface.hh" -#include "data_point_fifo.hh" +#include "otl_data_point.hh" namespace com::centreon::engine::modules::opentelemetry { -class data_point_fifo_container; - /** - * @brief converter are asynchronous object created on each check - * In order to not parse command line on each check, we parse it once and then - * create a converter config that will be used to create converter + * @brief compare data_points with nano_timestamp * */ -class check_result_builder_config - : public commands::otel::check_result_builder_config { - public: - enum class converter_type { - nagios_check_result_builder, - centreon_agent_check_result_builder - }; +struct otl_data_point_pointer_compare { + using is_transparent = void; - private: - const converter_type _type; + bool operator()(const otl_data_point& left, + const otl_data_point& right) const { + return left.get_nano_timestamp() < right.get_nano_timestamp(); + } - public: - check_result_builder_config(converter_type conv_type) : _type(conv_type) {} - converter_type get_type() const { return _type; } + bool operator()(const otl_data_point& left, uint64_t right) const { + return left.get_nano_timestamp() < right; + } + + bool operator()(uint64_t left, const otl_data_point& right) const { + return left < right.get_nano_timestamp(); + } }; +class metric_to_datapoints + : public absl::flat_hash_map< + std::string_view, + absl::btree_multiset> {}; + /** * @brief The goal of this converter is to convert otel metrics in result * This object is synchronous and asynchronous @@ -57,67 +62,32 @@ class 
check_result_builder_config * */ class otl_check_result_builder - : public std::enable_shared_from_this { + : public commands::otel::otl_check_result_builder_base { const std::string _cmd_line; - const uint64_t _command_id; - const std::pair _host_serv; - const std::chrono::system_clock::time_point _timeout; - const commands::otel::result_callback _callback; protected: std::shared_ptr _logger; - virtual bool _build_result_from_metrics(metric_name_to_fifo&, - commands::result& res) = 0; - public: otl_check_result_builder(const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger); virtual ~otl_check_result_builder() = default; const std::string& get_cmd_line() const { return _cmd_line; } - uint64_t get_command_id() const { return _command_id; } - - const std::string& get_host_name() const { return _host_serv.first; } - const std::string& get_service_description() const { - return _host_serv.second; - } - - const std::pair& get_host_serv() const { - return _host_serv; - } - - std::chrono::system_clock::time_point get_time_out() const { - return _timeout; - } - - bool sync_build_result_from_metrics(data_point_fifo_container& data_pts, - commands::result& res); - - bool async_build_result_from_metrics(data_point_fifo_container& data_pts); - void async_time_out(); - virtual void dump(std::string& output) const; + void process_data_pts(const std::string_view& host, + const std::string_view& serv, + const metric_to_datapoints& data_pts) override; + static std::shared_ptr create( const std::string& cmd_line, - const std::shared_ptr& conf, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger); - static std::shared_ptr - create_check_result_builder_config(const std::string& 
cmd_line); + virtual bool build_result_from_metrics(const metric_to_datapoints& data_pts, + check_result& res) = 0; }; } // namespace com::centreon::engine::modules::opentelemetry diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh index 5b87b0db2fb..6b124c4276c 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh @@ -34,12 +34,6 @@ class otl_config { bool _json_grpc_log = false; // if true, otel object are logged in json // format instead of protobuf debug format - // this two attributes are limits used by otel otl_data_point fifos - // if fifo size exceed _max_fifo_size, oldest data_points are removed - // Also, data_points older than _second_fifo_expiry are removed from fifos - unsigned _second_fifo_expiry; - size_t _max_fifo_size; - public: otl_config(const std::string_view& file_path, asio::io_context& io_context); @@ -56,9 +50,6 @@ class otl_config { int get_max_length_grpc_log() const { return _max_length_grpc_log; } bool get_json_grpc_log() const { return _json_grpc_log; } - unsigned get_second_fifo_expiry() const { return _second_fifo_expiry; } - size_t get_max_fifo_size() const { return _max_fifo_size; } - bool operator==(const otl_config& right) const; inline bool operator!=(const otl_config& right) const { diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh index bad1bc2236e..76c79038413 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh @@ -45,45 +45,6 @@ struct 
initialized_data_class : public data_class { } }; -/** - * @brief pair with host_name in first and serv in second - * - */ -using host_serv = std::pair; - -/** - * @brief This struct is used to lookup in a host_serv indexed container with - * a std::pair - * - */ -struct host_serv_hash_eq { - using is_transparent = void; - using host_serv_string_view = std::pair; - - size_t operator()(const host_serv& to_hash) const { - return absl::Hash()(to_hash); - } - size_t operator()(const host_serv_string_view& to_hash) const { - return absl::Hash()(to_hash); - } - - bool operator()(const host_serv& left, const host_serv& right) const { - return left == right; - } - bool operator()(const host_serv& left, - const host_serv_string_view& right) const { - return left.first == right.first && left.second == right.second; - } - bool operator()(const host_serv_string_view& left, - const host_serv& right) const { - return left.first == right.first && left.second == right.second; - } - bool operator()(const host_serv_string_view& left, - const host_serv_string_view& right) const { - return left == right; - } -}; - using metric_request_ptr = std::shared_ptr<::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>; @@ -122,6 +83,7 @@ class otl_data_point { ::opentelemetry::proto::common::v1::KeyValue>& _data_point_attributes; const ::google::protobuf::RepeatedPtrField< ::opentelemetry::proto::metrics::v1::Exemplar>& _exemplars; + uint64_t _start_nano_timestamp; uint64_t _nano_timestamp; data_point_type _type; double _value; @@ -173,6 +135,7 @@ class otl_data_point { return _data_point; } + uint64_t get_start_nano_timestamp() const { return _start_nano_timestamp; } uint64_t get_nano_timestamp() const { return _nano_timestamp; } data_point_type get_type() { return _type; } diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh 
b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh index 77bcd34b533..f07a618e6ba 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh @@ -92,25 +92,13 @@ namespace com::centreon::engine::modules::opentelemetry::telegraf { * */ class nagios_check_result_builder : public otl_check_result_builder { - protected: - bool _build_result_from_metrics(metric_name_to_fifo& fifos, - commands::result& res) override; - public: nagios_check_result_builder(const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger) - : otl_check_result_builder(cmd_line, - command_id, - host, - service, - timeout, - std::move(handler), - logger) {} + : otl_check_result_builder(cmd_line, logger) {} + + bool build_result_from_metrics(const metric_to_datapoints& data_pts, + check_result& res) override; }; } // namespace com::centreon::engine::modules::opentelemetry::telegraf diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc index 769869ea12e..4eb528115f6 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc @@ -16,8 +16,6 @@ * For more information : contact@centreon.com */ -#include "data_point_fifo_container.hh" - #include "otl_check_result_builder.hh" #include "centreon_agent/agent_check_result_builder.hh" @@ -127,59 +125,60 @@ void perf_data::append_to_string(std::string* to_append) { * @return true * @return false */ -bool 
agent_check_result_builder::_build_result_from_metrics( - metric_name_to_fifo& fifos, - commands::result& res) { +bool agent_check_result_builder::build_result_from_metrics( + const metric_to_datapoints& data_pts, + check_result& res) { // first we search last state timestamp from status uint64_t last_time = 0; - for (auto& metric_to_fifo : fifos) { - if (metric_to_fifo.first == "status") { - auto& fifo = metric_to_fifo.second.get_fifo(); - if (!fifo.empty()) { - const auto& last_sample = *fifo.rbegin(); - last_time = last_sample.get_nano_timestamp(); - res.exit_code = last_sample.get_value(); - // output of plugins is stored in description metric field - res.output = last_sample.get_metric().description(); - metric_to_fifo.second.clean_oldest(last_time); - } - break; - } - } - if (!last_time) { + auto status_metric = data_pts.find("status"); + if (status_metric == data_pts.end()) { return false; } - res.command_id = get_command_id(); - res.exit_status = process::normal; - res.end_time = res.start_time = - timestamp(last_time / 1000000000, (last_time / 1000) % 1000000); + const auto& last_sample = status_metric->second.rbegin(); + last_time = last_sample->get_nano_timestamp(); + res.set_return_code(last_sample->get_value()); + + // output of plugins is stored in description metric field + std::string output = last_sample->get_metric().description(); + + res.set_finish_time( + {.tv_sec = static_cast(last_time / 1000000000), + .tv_usec = static_cast((last_time / 1000) % 1000000)}); + + if (last_sample->get_start_nano_timestamp() > 0) { + res.set_start_time( + {.tv_sec = static_cast(last_sample->get_start_nano_timestamp() / + 1000000000), + .tv_usec = static_cast( + (last_sample->get_start_nano_timestamp() / 1000) % 1000000)}); + } else { + res.set_start_time(res.get_finish_time()); + } - res.output.push_back('|'); + output.push_back('|'); - for (auto& metric_to_fifo : fifos) { - if (metric_to_fifo.first == "status") + for (const auto& metric_to_data_pt : data_pts) { 
+ if (metric_to_data_pt.first == "status") continue; - auto& fifo = metric_to_fifo.second.get_fifo(); - auto data_pt_search = fifo.find(last_time); - if (data_pt_search != fifo.end()) { - res.output.push_back(' '); + auto data_pt_search = metric_to_data_pt.second.find(last_time); + if (data_pt_search != metric_to_data_pt.second.end()) { + output.push_back(' '); const otl_data_point& data_pt = *data_pt_search; - absl::StrAppend(&res.output, metric_to_fifo.first, "=", + absl::StrAppend(&output, metric_to_data_pt.first, "=", data_pt.get_value(), data_pt.get_metric().unit(), ";"); - // all other metric value (warning_lt, critical_gt, min... are stored in - // exemplars) + // all other metric value (warning_lt, critical_gt, min... are stored + // in exemplars) detail::perf_data to_append; for (const auto& exemplar : data_pt.get_exemplars()) { to_append.apply_exemplar(exemplar); } - to_append.append_to_string(&res.output); + to_append.append_to_string(&output); } - metric_to_fifo.second.clean_oldest(last_time); } - data_point_fifo_container::clean_empty_fifos(fifos); + res.set_output(output); return true; } diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc index d5cbce16780..0d49927f5c7 100644 --- a/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc @@ -78,7 +78,7 @@ agent_config::agent_config(const rapidjson::Value& json_config_v) { _max_concurrent_checks = file_content.get_unsigned("max_concurrent_checks", 100); _export_period = file_content.get_unsigned("export_period", 60); - _check_timeout = file_content.get_unsigned("_check_timeout", 30); + _check_timeout = file_content.get_unsigned("check_timeout", 30); if (file_content.has_member("reverse_connections")) { const auto& reverse_array = file_content.get_member("reverse_connections"); diff --git 
a/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc new file mode 100644 index 00000000000..7c38cee5ad4 --- /dev/null +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc @@ -0,0 +1,127 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "centreon_agent/agent_reverse_client.hh" +#include "centreon_agent/to_agent_connector.hh" + +using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; + +/** + * @brief Construct a new agent reverse client::agent reverse client object + * + * @param io_context + * @param handler handler that will process received metrics + * @param logger + */ +agent_reverse_client::agent_reverse_client( + const std::shared_ptr& io_context, + const metric_handler& handler, + const std::shared_ptr& logger) + : _io_context(io_context), _metric_handler(handler), _logger(logger) {} + +/** + * @brief Destroy the agent reverse client::agent reverse client object + * it also shutdown all connectors + * + */ +agent_reverse_client::~agent_reverse_client() { + absl::MutexLock l(&_agents_m); + for (auto& conn : _agents) { + conn.second->shutdown(); + } + _agents.clear(); +} + +/** + * @brief update agent list by doing a symmetric difference + * + * @param new_conf + */ +void agent_reverse_client::update(const 
agent_config::pointer& new_conf) { + absl::MutexLock l(&_agents_m); + + auto connection_iterator = _agents.begin(); + + if (!new_conf) { + while (connection_iterator != _agents.end()) { + _shutdown_connection(connection_iterator); + connection_iterator = _agents.erase(connection_iterator); + } + return; + } + + auto conf_iterator = new_conf->get_agent_grpc_reverse_conf().begin(); + + while (connection_iterator != _agents.end() && + conf_iterator != new_conf->get_agent_grpc_reverse_conf().end()) { + int compare_res = connection_iterator->first->compare(**conf_iterator); + if (compare_res > 0) { + connection_iterator = + _create_new_client_connection(*conf_iterator, new_conf); + ++connection_iterator; + ++conf_iterator; + } else if (compare_res < 0) { + _shutdown_connection(connection_iterator); + connection_iterator = _agents.erase(connection_iterator); + } else { + connection_iterator->second->refresh_agent_configuration_if_needed( + new_conf); + ++connection_iterator; + ++conf_iterator; + } + } + + while (connection_iterator != _agents.end()) { + _shutdown_connection(connection_iterator); + connection_iterator = _agents.erase(connection_iterator); + } + + for (; conf_iterator != new_conf->get_agent_grpc_reverse_conf().end(); + ++conf_iterator) { + _create_new_client_connection(*conf_iterator, new_conf); + } +} + +/** + * @brief create and start a new agent reversed connection + * + * @param agent_endpoint endpoint to connect + * @param new_conf global agent configuration + * @return agent_reverse_client::config_to_client::iterator iterator to the new + * element inserted + */ +agent_reverse_client::config_to_client::iterator +agent_reverse_client::_create_new_client_connection( + const grpc_config::pointer& agent_endpoint, + const agent_config::pointer& agent_conf) { + auto insert_res = _agents.try_emplace( + agent_endpoint, + to_agent_connector::load(agent_endpoint, _io_context, agent_conf, + _metric_handler, _logger)); + return insert_res.first; +} + +/** + * 
@brief only shutdown client connection, no container erase + * + * @param to_delete + */ +void agent_reverse_client::_shutdown_connection( + config_to_client::const_iterator to_delete) { + to_delete->second->shutdown(); +} diff --git a/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc b/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc new file mode 100644 index 00000000000..f8cce8607a9 --- /dev/null +++ b/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc @@ -0,0 +1,223 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "com/centreon/common/defer.hh" + +#include "centreon_agent/to_agent_connector.hh" + +#include "centreon_agent/agent_impl.hh" + +using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +/** + * @brief reverse connection to an agent + * + */ +class agent_connection + : public agent_impl<::grpc::ClientBidiReactor> { + std::weak_ptr _parent; + + std::string _peer; + ::grpc::ClientContext _context; + + public: + agent_connection(const std::shared_ptr& io_context, + const std::shared_ptr& parent, + const agent_config::pointer& conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + ::grpc::ClientContext& get_context() { return _context; } + + void on_error() override; + + void shutdown() override; + + const std::string& get_peer() const override { return _peer; } +}; + +/** + * @brief Construct a new agent connection::agent connection object + * + * @param io_context + * @param parent to_agent_connector that had created this object + * @param handler handler called on every metric received + * @param logger + */ +agent_connection::agent_connection( + const std::shared_ptr& io_context, + const std::shared_ptr& parent, + const agent_config::pointer& conf, + const metric_handler& handler, + const std::shared_ptr& logger) + : agent_impl<::grpc::ClientBidiReactor>( + io_context, + "reverse_client", + conf, + handler, + logger), + _parent(parent) { + _peer = parent->get_conf()->get_hostport(); +} + +/** + * @brief called by OnReadDone or OnWriteDone when ok = false + * + */ +void agent_connection::on_error() { + std::shared_ptr parent = _parent.lock(); + if (parent) { + parent->on_error(); + } +} + +/** + * @brief shutdown connection before delete + * + */ +void agent_connection::shutdown() { + absl::MutexLock l(&_protect); + if (_alive) { + _alive = false; + 
agent_impl<::grpc::ClientBidiReactor>::shutdown(); + RemoveHold(); + _context.TryCancel(); + } +} + +}; // namespace com::centreon::engine::modules::opentelemetry::centreon_agent +/** + * @brief Construct a new agent client::agent client object + * use to_agent_connector instead + * @param conf + * @param io_context + * @param handler handler that will process received metrics + * @param logger + */ +to_agent_connector::to_agent_connector( + const grpc_config::pointer& agent_endpoint_conf, + const std::shared_ptr& io_context, + const agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger) + : common::grpc::grpc_client_base(agent_endpoint_conf, logger), + _io_context(io_context), + _conf(agent_conf), + _metric_handler(handler), + _alive(true) { + _stub = std::move(agent::ReversedAgentService::NewStub(_channel)); +} + +/** + * @brief Destroy the to agent connector::to agent connector object + * shutdown connection + */ +to_agent_connector::~to_agent_connector() { + shutdown(); +} + +/** + * @brief construct an start a new client + * + * @param conf conf of the agent endpoint + * @param io_context + * @param handler handler that will process received metrics + * @param logger + * @return std::shared_ptr client created and started + */ +std::shared_ptr to_agent_connector::load( + const grpc_config::pointer& agent_endpoint_conf, + const std::shared_ptr& io_context, + const agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger) { + std::shared_ptr ret = + std::make_shared(agent_endpoint_conf, io_context, + agent_conf, handler, logger); + ret->start(); + return ret; +} + +/** + * @brief connect to agent and initialize exchange + * + */ +void to_agent_connector::start() { + absl::MutexLock l(&_connection_m); + if (!_alive) { + return; + } + SPDLOG_LOGGER_INFO(get_logger(), "connect to {}", get_conf()->get_hostport()); + if (_connection) { + _connection->shutdown(); + 
_connection.reset(); + } + _connection = std::make_shared( + _io_context, shared_from_this(), _conf, _metric_handler, get_logger()); + agent_connection::register_stream(_connection); + _stub->async()->Import(&_connection->get_context(), _connection.get()); + _connection->start_read(); + _connection->AddHold(); + _connection->StartCall(); +} + +/** + * @brief send conf to agent if something has changed (list of services, + * commands...) + * + */ +void to_agent_connector::refresh_agent_configuration_if_needed( + const agent_config::pointer& new_conf) { + absl::MutexLock l(&_connection_m); + if (_connection) { + _connection->calc_and_send_config_if_needed(new_conf); + } +} + +/** + * @brief shutdown configuration, once this method has been called, this object + * is dead and must be deleted + * + */ +void to_agent_connector::shutdown() { + absl::MutexLock l(&_connection_m); + if (_alive) { + SPDLOG_LOGGER_INFO(get_logger(), "shutdown client of {}", + get_conf()->get_hostport()); + if (_connection) { + _connection->shutdown(); + _connection.reset(); + } + _alive = false; + } +} + +/** + * @brief called by connection + * reconnection is delayed of 10 second + * + */ +void to_agent_connector::on_error() { + common::defer(_io_context, std::chrono::seconds(10), + [me = shared_from_this()] { me->start(); }); +} diff --git a/engine/modules/opentelemetry/src/data_point_fifo.cc b/engine/modules/opentelemetry/src/data_point_fifo.cc deleted file mode 100644 index 00e4bec9d58..00000000000 --- a/engine/modules/opentelemetry/src/data_point_fifo.cc +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "data_point_fifo.hh" - -using namespace com::centreon::engine::modules::opentelemetry; - -time_t data_point_fifo::_second_datapoint_expiry = 600; -size_t data_point_fifo::_max_size = 2; - -/** - * @brief opentelemetry fifo limits share a same value - * The goal of this isto fix these limits - * - * @param second_datapoint_expiry - * @param max_size - */ -void data_point_fifo::update_fifo_limit(time_t second_datapoint_expiry, - size_t max_size) { - _second_datapoint_expiry = second_datapoint_expiry; - _max_size = max_size; -} - -/** - * @brief add one data point to fifo - * - * @param data_pt - */ -void data_point_fifo::add_data_point(const otl_data_point& data_pt) { - clean(); - _fifo.insert(data_pt); -} - -/** - * @brief erase to older data points - * - */ -void data_point_fifo::clean() { - if (!_fifo.empty()) { - auto first = _fifo.begin(); - time_t expiry = time(nullptr) - _second_datapoint_expiry; - if (expiry < 0) { - expiry = 0; - } - - while (!_fifo.empty() && - first->get_nano_timestamp() / 1000000000 < expiry) { - first = _fifo.erase(first); - } - - if (_fifo.size() >= _max_size) { - _fifo.erase(first); - } - } -} - -/** - * @brief erase oldest element - * - * @param expiry data points older than this nano timestamp are erased - */ -void data_point_fifo::clean_oldest(uint64_t expiry) { - while (!_fifo.empty() && _fifo.begin()->get_nano_timestamp() < expiry) { - _fifo.erase(_fifo.begin()); - } -} diff --git a/engine/modules/opentelemetry/src/data_point_fifo_container.cc 
b/engine/modules/opentelemetry/src/data_point_fifo_container.cc deleted file mode 100644 index 112ffb271d5..00000000000 --- a/engine/modules/opentelemetry/src/data_point_fifo_container.cc +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "data_point_fifo_container.hh" - -using namespace com::centreon::engine::modules::opentelemetry; - -metric_name_to_fifo data_point_fifo_container::_empty; - -/** - * @brief clean olds data_points - * no need to lock mutex - */ -void data_point_fifo_container::clean() { - std::lock_guard l(_data_m); - for (auto serv_to_fifo_iter = _data.begin(); - !_data.empty() && serv_to_fifo_iter != _data.end();) { - for (auto& fifo : serv_to_fifo_iter->second) { - fifo.second.clean(); - } - if (serv_to_fifo_iter->second.empty()) { - auto to_erase = serv_to_fifo_iter++; - _data.erase(to_erase); - } else { - ++serv_to_fifo_iter; - } - } -} - -/** - * @brief erase empty fifos - * mutex of the owner of to_clean must be locked before call - * - * @param to_clean map metric_name -> fifos - */ -void data_point_fifo_container::clean_empty_fifos( - metric_name_to_fifo& to_clean) { - for (auto to_clean_iter = to_clean.begin(); - !to_clean.empty() && to_clean_iter != to_clean.end();) { - if (to_clean_iter->second.empty()) { - auto to_erase = to_clean_iter++; - to_clean.erase(to_erase); - } else { - ++to_clean_iter; - } 
- } -} - -/** - * @brief add a data point in the corresponding fifo - * mutex must be locked during returned data use - * - * @param data_pt otl_data_point to add - */ -void data_point_fifo_container::add_data_point(const std::string_view& host, - const std::string_view& service, - const std::string_view& metric, - const otl_data_point& data_pt) { - metric_name_to_fifo& fifos = _data[std::make_pair(host, service)]; - auto exist = fifos.find(metric); - if (exist == fifos.end()) { - exist = fifos.emplace(metric, data_point_fifo()).first; - } - exist->second.add_data_point(data_pt); -} - -/** - * @brief get all fifos of a service - * mutex must be locked during returned data use - * - * @param host - * @param service - * @return const metric_name_to_fifo& - */ -const metric_name_to_fifo& data_point_fifo_container::get_fifos( - const std::string& host, - const std::string& service) const { - auto exist = _data.find({host, service}); - return exist == _data.end() ? _empty : exist->second; -} - -/** - * @brief get all fifos of a service - * mutex must be locked during returned data use - * - * @param host - * @param service - * @return metric_name_to_fifo& - */ -metric_name_to_fifo& data_point_fifo_container::get_fifos( - const std::string& host, - const std::string& service) { - auto exist = _data.find({host, service}); - return exist == _data.end() ? 
_empty : exist->second; -} - -/** - * @brief debug output - * - * @param output string to log - */ -void data_point_fifo_container::dump(std::string& output) const { - output.push_back('{'); - for (const auto& host_serv : _data) { - output.push_back('"'); - output.append(host_serv.first.first); - output.push_back(','); - output.append(host_serv.first.second); - output.append("\":{"); - for (const auto& metric_to_fifo : host_serv.second) { - output.push_back('"'); - output.append(metric_to_fifo.first); - output.append("\":"); - absl::StrAppend(&output, metric_to_fifo.second.size()); - output.push_back(','); - } - output.append("},"); - } - output.push_back('}'); -} \ No newline at end of file diff --git a/engine/modules/opentelemetry/src/main.cc b/engine/modules/opentelemetry/src/main.cc index 54a63103f57..0c7ef3158f3 100644 --- a/engine/modules/opentelemetry/src/main.cc +++ b/engine/modules/opentelemetry/src/main.cc @@ -56,7 +56,7 @@ extern std::shared_ptr g_io_context; * @return 0 on success, any other value on failure. 
*/ extern "C" int nebmodule_deinit(int /*flags*/, int /*reason*/) { - open_telemetry::unload(log_v2::instance().get(log_v2::OTEL)); + open_telemetry::unload(log_v2::instance().get(log_v2::OTL)); return 0; } @@ -107,7 +107,7 @@ extern "C" int nebmodule_init(int flags, char const* args, void* handle) { throw msg_fmt("main: no configuration file provided"); open_telemetry::load(conf_file_path, g_io_context, - log_v2::instance().get(log_v2::OTEL)); + log_v2::instance().get(log_v2::OTL)); commands::otel_connector::init_all(); return 0; @@ -118,6 +118,6 @@ extern "C" int nebmodule_init(int flags, char const* args, void* handle) { * */ extern "C" int nebmodule_reload() { - open_telemetry::reload(log_v2::instance().get(log_v2::OTEL)); + open_telemetry::reload(log_v2::instance().get(log_v2::OTL)); return 0; } diff --git a/engine/modules/opentelemetry/src/open_telemetry.cc b/engine/modules/opentelemetry/src/open_telemetry.cc index a89910b7293..776cecfba11 100644 --- a/engine/modules/opentelemetry/src/open_telemetry.cc +++ b/engine/modules/opentelemetry/src/open_telemetry.cc @@ -20,8 +20,15 @@ #include "centreon_agent/agent_impl.hh" #include "com/centreon/common/http/https_connection.hh" -#include "com/centreon/engine/modules/opentelemetry/open_telemetry.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/service.hh" + +#include "com/centreon/engine/command_manager.hh" + +#include "open_telemetry.hh" + +#include "com/centreon/engine/commands/otel_connector.hh" #include "otl_fmt.hh" #include "otl_server.hh" @@ -37,8 +44,7 @@ open_telemetry::open_telemetry( const std::string_view config_file_path, const std::shared_ptr& io_context, const std::shared_ptr& logger) - : _second_timer(*io_context), - _config_file_path(config_file_path), + : _config_file_path(config_file_path), _logger(logger), _io_context(io_context) { SPDLOG_LOGGER_INFO(_logger, "load of open telemetry module"); @@ -88,10 +94,19 @@ void open_telemetry::_reload() { 
fmt::formatter<::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>::json_grpc_format = new_conf->get_json_grpc_log(); - data_point_fifo::update_fifo_limit(new_conf->get_second_fifo_expiry(), - new_conf->get_max_fifo_size()); _conf = std::move(new_conf); + + if (!_agent_reverse_client) { + _agent_reverse_client = + std::make_unique( + _io_context, + [me = shared_from_this()](const metric_request_ptr& request) { + me->on_metric(request); + }, + _logger); + } + _agent_reverse_client->update(_conf->get_centreon_agent_config()); } // push new configuration to connected agents centreon_agent::agent_impl<::grpc::ServerBidiReactor open_telemetry::load( _instance = std::make_shared(config_path, io_context, logger); instance()->_reload(); - instance()->_start_second_timer(); } return instance(); } @@ -141,7 +155,7 @@ void open_telemetry::_create_otl_server( _otl_server = otl_server::load( _io_context, server_conf, agent_conf, [me = shared_from_this()](const metric_request_ptr& request) { - me->_on_metric(request); + me->on_metric(request); }, _logger); } catch (const std::exception& e) { @@ -235,8 +249,6 @@ void open_telemetry::_shutdown() { if (to_shutdown) { to_shutdown->shutdown(std::chrono::seconds(10)); } - std::lock_guard l(_protect); - _second_timer.cancel(); } /** @@ -289,80 +301,10 @@ open_telemetry::create_extractor( } } -/** - * @brief converter is created for each check, so in order to not parse otel - * connector command line on each check , we create a - * check_result_builder_config object that is used to create converter it search - * the flag extractor - * - * @param cmd_line - * @return - * std::shared_ptr - */ std::shared_ptr< - com::centreon::engine::commands::otel::check_result_builder_config> -open_telemetry::create_check_result_builder_config( - const std::string& cmd_line) { - return otl_check_result_builder::create_check_result_builder_config(cmd_line); -} - -/** - * @brief simulate a check by reading in metrics fifos - * It 
creates an otel_converter, the first word of processed_cmd is the name - * of converter such as nagios_telegraf. Following parameters are used by - * converter - * - * @param processed_cmd converter type with arguments - * @param command_id command id - * @param macros - * @param timeout - * @param res filled if it returns true - * @param handler called later if it returns false - * @return true res is filled with a result - * @return false result will be passed to handler as soon as available or - * timeout - * @throw if converter type is unknown - */ -bool open_telemetry::check( - const std::string& processed_cmd, - const std::shared_ptr& - conv_config, - uint64_t command_id, - nagios_macros& macros, - uint32_t timeout, - commands::result& res, - commands::otel::result_callback&& handler) { - std::shared_ptr to_use; - try { - to_use = otl_check_result_builder::create( - processed_cmd, - std::static_pointer_cast(conv_config), - command_id, *macros.host_ptr, macros.service_ptr, - std::chrono::system_clock::now() + std::chrono::seconds(timeout), - std::move(handler), _logger); - } catch (const std::exception& e) { - SPDLOG_LOGGER_ERROR(_logger, "fail to create converter for {} : {}", - processed_cmd, e.what()); - throw; - }; - - bool res_available = to_use->sync_build_result_from_metrics(_fifo, res); - - if (res_available) { - SPDLOG_LOGGER_TRACE(_logger, "data available for command {} converter:{}", - command_id, *to_use); - return true; - } - - SPDLOG_LOGGER_TRACE( - _logger, "data unavailable for command {} timeout: {} converter:{}", - command_id, timeout, *to_use); - - // metrics not yet available = wait for data or until timeout - std::lock_guard l(_protect); - _waiting.insert(to_use); - - return false; + com::centreon::engine::commands::otel::otl_check_result_builder_base> +open_telemetry::create_check_result_builder(const std::string& cmdline) { + return otl_check_result_builder::create(cmdline, _logger); } /** @@ -374,7 +316,7 @@ bool open_telemetry::check( 
* * @param metrics collector request */ -void open_telemetry::_on_metric(const metric_request_ptr& metrics) { +void open_telemetry::on_metric(const metric_request_ptr& metrics) { std::vector unknown; { std::lock_guard l(_protect); @@ -384,13 +326,15 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { unknown.push_back(data_pt); }); } else { - waiting_converter::nth_index<0>::type& host_serv_index = - _waiting.get<0>(); - std::vector> to_notify; + std::shared_ptr, metric_to_datapoints>> + known_data_pt = std::make_shared< + absl::flat_hash_map, + metric_to_datapoints>>(); auto last_success = _extractors.begin(); otl_data_point::extract_data_points( - metrics, [this, &unknown, &last_success, &host_serv_index, - &to_notify](const otl_data_point& data_pt) { + metrics, [this, &unknown, &last_success, + known_data_pt](const otl_data_point& data_pt) { bool data_point_known = false; // we try all extractors and we begin with the last which has // achieved to extract host @@ -399,17 +343,10 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { last_success->second->extract_host_serv_metric(data_pt); if (!hostservmetric.host.empty()) { // match - _fifo.add_data_point(hostservmetric.host, - hostservmetric.service, - hostservmetric.metric, data_pt); - - // converters waiting this metric? 
- auto waiting = host_serv_index.equal_range( - host_serv{hostservmetric.host, hostservmetric.service}); - while (waiting.first != waiting.second) { - to_notify.push_back(*waiting.first); - waiting.first = host_serv_index.erase(waiting.first); - } + (*known_data_pt)[std::make_pair(hostservmetric.host, + hostservmetric.service)] + [data_pt.get_metric().name()] + .insert(data_pt); data_point_known = true; break; } @@ -424,16 +361,29 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { data_pt); // unknown metric => forward to broker } }); - SPDLOG_LOGGER_TRACE(_logger, "fifos:{}", _fifo); - // we wait that all request datas have been computed to give us more - // chance of converter success - for (auto to_callback : to_notify) { - if (!to_callback->async_build_result_from_metrics( - _fifo)) { // not enough data => repush in _waiting - _waiting.insert(to_callback); - } - } - SPDLOG_LOGGER_TRACE(_logger, "fifos:{}", _fifo); + + // we post all check results in the main thread + auto fn = std::packaged_task( + [known_data_pt, metrics, logger = _logger]() { + // for each host or service, we generate a result + for (const auto& host_serv_data : *known_data_pt) { + // get connector for this service + std::shared_ptr conn = + commands::otel_connector::get_otel_connector_from_host_serv( + host_serv_data.first.first, host_serv_data.first.second); + if (!conn) { + SPDLOG_LOGGER_ERROR( + logger, "no opentelemetry connector found for {}:{}", + host_serv_data.first.first, host_serv_data.first.second); + } else { + conn->process_data_pts(host_serv_data.first.first, + host_serv_data.first.second, + host_serv_data.second); + } + } + return OK; + }); + command_manager::instance().enqueue(std::move(fn)); } } if (!unknown.empty()) { @@ -442,51 +392,6 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { } } -/** - * @brief the second timer is used to handle converter timeouts - * - */ -void open_telemetry::_start_second_timer() { - std::lock_guard 
l(_protect); - _second_timer.expires_from_now(std::chrono::seconds(1)); - _second_timer.async_wait( - [me = shared_from_this()](const boost::system::error_code& err) { - if (!err) { - me->_second_timer_handler(); - } - }); -} - -/** - * @brief notify all timeouts - * - */ -void open_telemetry::_second_timer_handler() { - std::vector> to_notify; - { - std::lock_guard l(_protect); - std::chrono::system_clock::time_point now = - std::chrono::system_clock::now(); - waiting_converter::nth_index<1>::type& expiry_index = _waiting.get<1>(); - while (!_waiting.empty()) { - auto oldest = expiry_index.begin(); - if ((*oldest)->get_time_out() > now) { - break; - } - to_notify.push_back(*oldest); - expiry_index.erase(oldest); - } - } - - // notify all timeout - for (std::shared_ptr to_not : to_notify) { - SPDLOG_LOGGER_DEBUG(_logger, "time out: {}", *to_not); - to_not->async_time_out(); - } - - _start_second_timer(); -} - /** * @brief unknown metrics are directly forwarded to broker * diff --git a/engine/modules/opentelemetry/src/otl_check_result_builder.cc b/engine/modules/opentelemetry/src/otl_check_result_builder.cc index 517374773a5..3660d4fe73c 100644 --- a/engine/modules/opentelemetry/src/otl_check_result_builder.cc +++ b/engine/modules/opentelemetry/src/otl_check_result_builder.cc @@ -16,10 +16,14 @@ * For more information : contact@centreon.com */ +#include "com/centreon/engine/checks/checker.hh" #include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/notifier.hh" +#include "com/centreon/engine/service.hh" + #include "com/centreon/exceptions/msg_fmt.hh" -#include "data_point_fifo_container.hh" #include "otl_check_result_builder.hh" #include "centreon_agent/agent_check_result_builder.hh" @@ -35,157 +39,22 @@ using namespace com::centreon::engine::modules::opentelemetry; * object * * @param cmd_line - * @param command_id - * @param host - * @param service - * @param timeout - * @param handler called when mandatory 
metrics will be available * @param logger */ otl_check_result_builder::otl_check_result_builder( const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger) - : _cmd_line(cmd_line), - _command_id(command_id), - _host_serv{host.name(), service ? service->description() : ""}, - _timeout(timeout), - _callback(handler), - _logger(logger) {} - -/** - * @brief try to build a check result - * - * @param data_pts - * @param res - * @return true all mandatory metrics are available and a check_result is built - * @return false - */ -bool otl_check_result_builder::sync_build_result_from_metrics( - data_point_fifo_container& data_pts, - commands::result& res) { - std::lock_guard l(data_pts); - auto& fifos = data_pts.get_fifos(_host_serv.first, _host_serv.second); - if (!fifos.empty() && _build_result_from_metrics(fifos, res)) { - return true; - } - // no data available - return false; -} - -/** - * @brief called when data is received from otel - * clients - * - * @param data_pts - * @return true otl_check_result_builder has managed to create check result - * @return false - */ -bool otl_check_result_builder::async_build_result_from_metrics( - data_point_fifo_container& data_pts) { - commands::result res; - bool success = false; - { - std::lock_guard l(data_pts); - auto& fifos = data_pts.get_fifos(_host_serv.first, _host_serv.second); - success = !fifos.empty() && _build_result_from_metrics(fifos, res); - } - if (success) { - _callback(res); - } - return success; -} - -/** - * @brief called when no data is received before - * _timeout - * - */ -void otl_check_result_builder::async_time_out() { - commands::result res; - res.exit_status = process::timeout; - res.command_id = _command_id; - _callback(res); -} + : _cmd_line(cmd_line), _logger(logger) {} /** * @brief create a otl_converter_config from a command line - * 
first field identify type of config - * Example: - * @code {.c++} - * std::shared_ptr converter = - * otl_check_result_builder::create("--processor=nagios_telegraf - * --fifo_depth=5", conf, 5, *host, serv, timeout_point, [](const - * commads::result &res){}, _logger); - * @endcode * * @param cmd_line - * @param conf bean configuration object created by - * create_check_result_builder_config - * @param command_id - * @param host - * @param service - * @param timeout - * @param handler handler that will be called once we have all metrics mandatory - * to create a check_result * @return std::shared_ptr */ std::shared_ptr otl_check_result_builder::create( const std::string& cmd_line, - const std::shared_ptr& conf, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger) { - switch (conf->get_type()) { - case check_result_builder_config::converter_type:: - nagios_check_result_builder: - return std::make_shared( - cmd_line, command_id, host, service, timeout, std::move(handler), - logger); - case check_result_builder_config::converter_type:: - centreon_agent_check_result_builder: - return std::make_shared( - cmd_line, command_id, host, service, timeout, std::move(handler), - logger); - default: - SPDLOG_LOGGER_ERROR(logger, "unknown converter type:{}", cmd_line); - throw exceptions::msg_fmt("unknown converter type:{}", cmd_line); - } -} - -/** - * @brief debug infos - * - * @param output string to log - */ -void otl_check_result_builder::dump(std::string& output) const { - output = fmt::format( - "host:{}, service:{}, command_id={}, timeout:{} cmdline: \"{}\"", - _host_serv.first, _host_serv.second, _command_id, _timeout, _cmd_line); -} - -/** - * @brief create a otl_converter_config from a command line - * --processor flag identifies type of converter - * Example: - * @code {.c++} - * std::shared_ptr converter = - * 
otl_converter::create_check_result_builder_config("--processor=nagios_telegraf - * --fifo_depth=5"); - * - * @param cmd_line - * @return std::shared_ptr - */ -std::shared_ptr -otl_check_result_builder::create_check_result_builder_config( - const std::string& cmd_line) { static initialized_data_class desc( [](po::options_description& desc) { desc.add_options()("processor", po::value(), @@ -204,21 +73,75 @@ otl_check_result_builder::create_check_result_builder_config( } std::string extractor_type = vm["processor"].as(); if (extractor_type == "nagios_telegraf") { - return std::make_shared( - check_result_builder_config::converter_type:: - nagios_check_result_builder); + return std::make_shared(cmd_line, + logger); } else if (extractor_type == "centreon_agent") { - return std::make_shared( - check_result_builder_config::converter_type:: - centreon_agent_check_result_builder); + return std::make_shared( + cmd_line, logger); } else { throw exceptions::msg_fmt("unknown processor in {}", cmd_line); } } catch (const std::exception& e) { - SPDLOG_LOGGER_ERROR( - config_logger, - "fail to get opentelemetry converter configuration from {}: {}", - cmd_line, e.what()); + SPDLOG_LOGGER_ERROR(config_logger, + "fail to get opentelemetry check_result_builder " + "configuration from {}: {}", + cmd_line, e.what()); throw; } } + +/** + * @brief convert opentelemetry datas in check_result and post it to + * checks::checker::instance() Caution, this function must be called from engine + * main thread + * + * @param host + * @param serv empty if result of host check + * @param data_pts opentelemetry data points + */ +void otl_check_result_builder::process_data_pts( + const std::string_view& hst, + const std::string_view& serv, + const metric_to_datapoints& data_pts) { + check_source notifier_type = check_source::service_check; + notifier* host_or_serv = nullptr; + + if (serv.empty()) { + notifier_type = check_source::host_check; + auto found = host::hosts.find(hst); + if (found == 
host::hosts.end()) { + SPDLOG_LOGGER_ERROR(_logger, "unknow host: {}", hst); + return; + } + host_or_serv = found->second.get(); + } else { + auto found = service::services.find(std::make_pair(hst, serv)); + if (found == service::services.end()) { + SPDLOG_LOGGER_ERROR(_logger, "unknow service {} for host", serv, hst); + return; + } + host_or_serv = found->second.get(); + } + timeval zero = {0, 0}; + std::shared_ptr res = std::make_shared( + notifier_type, host_or_serv, checkable::check_type::check_passive, + CHECK_OPTION_NONE, false, 0, zero, zero, false, true, 0, ""); + if (build_result_from_metrics(data_pts, *res)) { + checks::checker::instance().add_check_result_to_reap(res); + } else { + SPDLOG_LOGGER_ERROR( + _logger, + "fail to convert opentelemetry datas in centreon check_result for host " + "{}, serv {}", + hst, serv); + } +} + +/** + * @brief debug infos + * + * @param output string to log + */ +void otl_check_result_builder::dump(std::string& output) const { + output = _cmd_line; +} diff --git a/engine/modules/opentelemetry/src/otl_config.cc b/engine/modules/opentelemetry/src/otl_config.cc index f0c62dda374..386615aaf19 100644 --- a/engine/modules/opentelemetry/src/otl_config.cc +++ b/engine/modules/opentelemetry/src/otl_config.cc @@ -47,16 +47,6 @@ static constexpr std::string_view _grpc_config_schema(R"( "description": "true if we log otl grpc object to json format", "type": "boolean" }, - "second_fifo_expiry": { - "description:": "lifetime of data points in fifos", - "type": "integer", - "min": 30 - }, - "max_fifo_size": { - "description:": "max number of data points in fifos", - "type": "integer", - "min": 1 - }, "otel_server": { "description": "otel grpc config", "type": "object" @@ -98,8 +88,6 @@ otl_config::otl_config(const std::string_view& file_path, file_content.validate(validator); _max_length_grpc_log = file_content.get_unsigned("max_length_grpc_log", 400); _json_grpc_log = file_content.get_bool("grpc_json_log", false); - _second_fifo_expiry 
= file_content.get_unsigned("second_fifo_expiry", 600); - _max_fifo_size = file_content.get_unsigned("max_fifo_size", 5); if (file_content.has_member("otel_server")) { try { _grpc_conf = @@ -158,9 +146,7 @@ bool otl_config::operator==(const otl_config& right) const { } bool ret = *_grpc_conf == *right._grpc_conf && _max_length_grpc_log == right._max_length_grpc_log && - _json_grpc_log == right._json_grpc_log && - _second_fifo_expiry == right._second_fifo_expiry && - _max_fifo_size == right._max_fifo_size; + _json_grpc_log == right._json_grpc_log; if (!ret) { return false; diff --git a/engine/modules/opentelemetry/src/otl_data_point.cc b/engine/modules/opentelemetry/src/otl_data_point.cc index 7e5273725f1..2cf7374cd3b 100644 --- a/engine/modules/opentelemetry/src/otl_data_point.cc +++ b/engine/modules/opentelemetry/src/otl_data_point.cc @@ -43,6 +43,7 @@ otl_data_point::otl_data_point( _data_point(data_pt), _data_point_attributes(data_pt.attributes()), _exemplars(data_pt.exemplars()), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::number) { _value = data_pt.as_double() ? 
data_pt.as_double() : data_pt.as_int(); @@ -61,6 +62,7 @@ otl_data_point::otl_data_point( _data_point(data_pt), _data_point_attributes(data_pt.attributes()), _exemplars(data_pt.exemplars()), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::histogram) { _value = data_pt.count(); @@ -80,6 +82,7 @@ otl_data_point::otl_data_point( _data_point(data_pt), _data_point_attributes(data_pt.attributes()), _exemplars(data_pt.exemplars()), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::exponential_histogram) { _value = data_pt.count(); @@ -98,6 +101,7 @@ otl_data_point::otl_data_point( _data_point(data_pt), _data_point_attributes(data_pt.attributes()), _exemplars(_empty_exemplars), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::summary) { _value = data_pt.count(); diff --git a/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc b/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc index e8515b2e217..5bf775b9f3b 100644 --- a/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc +++ b/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc @@ -16,7 +16,6 @@ * For more information : contact@centreon.com */ -#include "data_point_fifo_container.hh" #include "otl_check_result_builder.hh" #include "telegraf/nagios_check_result_builder.hh" @@ -153,47 +152,58 @@ static std::string_view get_nagios_telegraf_suffix( } /** - * @brief + * @brief fill a check_result from otel datas * - * @param fifos fifos indexed by metric_name such as check_icmp_critical_gt, - * check_icmp_state - * @return com::centreon::engine::commands::result + * @param data_pts + * @param res + * @return true if res is filled + * @return false */ -bool nagios_check_result_builder::_build_result_from_metrics( - 
metric_name_to_fifo& fifos, - commands::result& res) { +bool nagios_check_result_builder::build_result_from_metrics( + const metric_to_datapoints& data_pts, + check_result& res) { // first we search last state timestamp uint64_t last_time = 0; - for (auto& metric_to_fifo : fifos) { - if (get_nagios_telegraf_suffix(metric_to_fifo.first) == "state") { - auto& fifo = metric_to_fifo.second.get_fifo(); - if (!fifo.empty()) { - const auto& last_sample = *fifo.rbegin(); - last_time = last_sample.get_nano_timestamp(); - res.exit_code = last_sample.get_value(); - metric_to_fifo.second.clean_oldest(last_time); + for (const auto& metric_to_data_pts : data_pts) { + if (get_nagios_telegraf_suffix(metric_to_data_pts.first) == "state") { + const auto& last_sample = metric_to_data_pts.second.rbegin(); + last_time = last_sample->get_nano_timestamp(); + res.set_return_code(last_sample->get_value()); + + res.set_finish_time( + {.tv_sec = static_cast(last_time / 1000000000), + .tv_usec = static_cast((last_time / 1000) % 1000000)}); + + if (last_sample->get_start_nano_timestamp() > 0) { + res.set_start_time( + {.tv_sec = static_cast( + last_sample->get_start_nano_timestamp() / 1000000000), + .tv_usec = static_cast( + (last_sample->get_start_nano_timestamp() / 1000) % 1000000)}); + } else { + res.set_start_time(res.get_finish_time()); } break; } } + if (!last_time) { return false; } - res.command_id = get_command_id(); - res.exit_status = process::normal; - res.end_time = res.start_time = last_time / 1000000000; // construct perfdata list by perfdata name std::map perfs; - for (auto& metric_to_fifo : fifos) { - std::string_view suffix = get_nagios_telegraf_suffix(metric_to_fifo.first); - const data_point_fifo::container& data_points = - metric_to_fifo.second.get_fifo(); + for (const auto& metric_to_data_pts : data_pts) { + std::string_view suffix = + get_nagios_telegraf_suffix(metric_to_data_pts.first); + if (suffix == "state") { + continue; + } // we scan all data points for that 
metric (example check_icmp_critical_gt // can contain a data point for pl and another for rta) - auto data_pt_search = data_points.equal_range(last_time); + auto data_pt_search = metric_to_data_pts.second.equal_range(last_time); for (; data_pt_search.first != data_pt_search.second; ++data_pt_search.first) { const auto attributes = data_pt_search.first->get_data_point_attributes(); @@ -218,49 +228,53 @@ bool nagios_check_result_builder::_build_result_from_metrics( _logger); } } - metric_to_fifo.second.clean_oldest(last_time); } - data_point_fifo_container::clean_empty_fifos(fifos); + std::string output; // then format all in a string with format: // 'label'=value[UOM];[warn];[crit];[min];[max] - if (res.exit_code >= 0 && res.exit_code < 4) { - res.output = state_str[res.exit_code]; + if (res.get_return_code() >= 0 && res.get_return_code() < 4) { + output = state_str[res.get_return_code()]; } - res.output.push_back('|'); + output.push_back('|'); for (const auto& perf : perfs) { if (perf.second.val) { - absl::StrAppend(&res.output, perf.first, "=", *perf.second.val, + absl::StrAppend(&output, perf.first, "=", *perf.second.val, perf.second.unit, ";"); if (perf.second.warning_le) { - absl::StrAppend(&res.output, "@", *perf.second.warning_le, ":", + absl::StrAppend(&output, "@", *perf.second.warning_le, ":", *perf.second.warning_ge); } else if (perf.second.warning_lt) { - absl::StrAppend(&res.output, *perf.second.warning_lt, ":", + absl::StrAppend(&output, *perf.second.warning_lt, ":", *perf.second.warning_gt); } - res.output.push_back(';'); + output.push_back(';'); if (perf.second.critical_le) { - absl::StrAppend(&res.output, "@", *perf.second.critical_le, ":", + absl::StrAppend(&output, "@", *perf.second.critical_le, ":", *perf.second.critical_ge); } else if (perf.second.critical_lt) { - absl::StrAppend(&res.output, *perf.second.critical_lt, ":", + absl::StrAppend(&output, *perf.second.critical_lt, ":", *perf.second.critical_gt); } - res.output.push_back(';'); + 
output.push_back(';'); if (perf.second.min) { - absl::StrAppend(&res.output, *perf.second.min); + absl::StrAppend(&output, *perf.second.min); } - res.output.push_back(';'); + output.push_back(';'); if (perf.second.max) { - absl::StrAppend(&res.output, *perf.second.max); + absl::StrAppend(&output, *perf.second.max); } - res.output.push_back(' '); + output.push_back(' '); } } // remove last space - res.output.pop_back(); + if (*output.rbegin() == ' ') { + output.pop_back(); + } + + res.set_output(output); + return true; } diff --git a/engine/src/broker.cc b/engine/src/broker.cc index b6c25032068..8b5ae22dab3 100644 --- a/engine/src/broker.cc +++ b/engine/src/broker.cc @@ -1,23 +1,23 @@ /** -* Copyright 2002-2010 Ethan Galstad -* Copyright 2010 Nagios Core Development Team -* Copyright 2011-2013,2020 Centreon -* -* This file is part of Centreon Engine. -* -* Centreon Engine is free software: you can redistribute it and/or -* modify it under the terms of the GNU General Public License version 2 -* as published by the Free Software Foundation. -* -* Centreon Engine is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* General Public License for more details. -* -* You should have received a copy of the GNU General Public License -* along with Centreon Engine. If not, see -* . -*/ + * Copyright 2002-2010 Ethan Galstad + * Copyright 2010 Nagios Core Development Team + * Copyright 2011-2013,2020-2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ #include "com/centreon/engine/broker.hh" #include @@ -54,8 +54,13 @@ void broker_acknowledgement_data( int notify_contacts, int persistent_comment) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_ACKNOWLEDGEMENT_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_ACKNOWLEDGEMENT_DATA)) + return; +#endif // Fill struct with relevant data. host* temp_host(NULL); @@ -122,8 +127,13 @@ void broker_adaptive_contact_data( */ void broker_adaptive_severity_data(int type, void* data) { /* Config check. */ +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_ADAPTIVE_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_ADAPTIVE_DATA)) + return; +#endif /* Fill struct with relevant data. */ nebstruct_adaptive_severity_data ds; @@ -142,8 +152,13 @@ void broker_adaptive_severity_data(int type, void* data) { */ void broker_adaptive_tag_data(int type, void* data) { /* Config check. */ +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_ADAPTIVE_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_ADAPTIVE_DATA)) + return; +#endif /* Fill struct with relevant data. */ nebstruct_adaptive_tag_data ds; @@ -162,8 +177,13 @@ void broker_adaptive_tag_data(int type, void* data) { */ void broker_adaptive_dependency_data(int type, void* data) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_ADAPTIVE_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_ADAPTIVE_DATA)) + return; +#endif // Fill struct with relevant data. 
nebstruct_adaptive_dependency_data ds; @@ -205,8 +225,13 @@ void broker_adaptive_host_data(int type, host* hst, unsigned long modattr) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_ADAPTIVE_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_ADAPTIVE_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_adaptive_host_data ds; @@ -259,8 +284,13 @@ void broker_adaptive_service_data(int type, com::centreon::engine::service* svc, unsigned long modattr) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_ADAPTIVE_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_ADAPTIVE_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_adaptive_service_data ds; @@ -353,8 +383,13 @@ void broker_comment_data(int type, time_t expire_time, unsigned long comment_id) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_COMMENT_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_COMMENT_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_comment_data ds; @@ -455,8 +490,13 @@ int broker_contact_notification_method_data( */ void broker_contact_status(int type, contact* cntct) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_STATUS_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_STATUS_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_service_status_data ds; @@ -478,20 +518,26 @@ void broker_contact_status(int type, contact* cntct) { */ void broker_custom_variable(int type, void* data, - char const* varname, - char const* varvalue, + std::string_view&& varname, + std::string_view&& varvalue, struct timeval const* timestamp) { // Config check. 
+#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_CUSTOMVARIABLE_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_CUSTOMVARIABLE_DATA)) + return; +#endif // Fill struct with relevant data. - nebstruct_custom_variable_data ds; - ds.type = type; - ds.timestamp = get_broker_timestamp(timestamp); - ds.object_ptr = data; - ds.var_name = const_cast(varname); - ds.var_value = const_cast(varvalue); + nebstruct_custom_variable_data ds{ + .type = type, + .timestamp = get_broker_timestamp(timestamp), + .var_name = varname, + .var_value = varvalue, + .object_ptr = data, + }; // Make callback. neb_make_callbacks(NEBCALLBACK_CUSTOM_VARIABLE_DATA, &ds); @@ -532,8 +578,13 @@ void broker_downtime_data(int type, unsigned long downtime_id, struct timeval const* timestamp) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_DOWNTIME_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_DOWNTIME_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_downtime_data ds; @@ -570,8 +621,13 @@ void broker_external_command(int type, char* command_args, struct timeval const* timestamp) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_EXTERNALCOMMAND_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_EXTERNALCOMMAND_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_external_command_data ds; @@ -593,8 +649,13 @@ void broker_external_command(int type, */ void broker_group(int type, void* data) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_GROUP_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_GROUP_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_group_data ds; @@ -614,8 +675,13 @@ void broker_group(int type, void* data) { */ void broker_group_member(int type, void* object, void* group) { // Config check. 
+#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_GROUP_MEMBER_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_GROUP_MEMBER_DATA)) + return; +#endif // Fill struct will relevant data. nebstruct_group_member_data ds; @@ -644,8 +710,13 @@ int broker_host_check(int type, char const* cmdline, char* output) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_HOST_CHECKS)) return OK; +#else + if (!(pb_config.event_broker_options() & BROKER_HOST_CHECKS)) + return OK; +#endif if (!hst) return ERROR; @@ -674,8 +745,13 @@ int broker_host_check(int type, */ void broker_host_status(int type, host* hst) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_STATUS_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_STATUS_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_host_status_data ds; @@ -694,9 +770,15 @@ void broker_host_status(int type, host* hst) { */ void broker_log_data(char* data, time_t entry_time) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_LOGGED_DATA) || !config->log_legacy_enabled()) return; +#else + if (!(pb_config.event_broker_options() & BROKER_LOGGED_DATA) || + !pb_config.log_legacy_enabled()) + return; +#endif // Fill struct with relevant data. nebstruct_log_data ds; @@ -750,8 +832,13 @@ int broker_notification_data(int type [[maybe_unused]], */ void broker_program_state(int type, int flags) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_PROGRAM_STATE)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_PROGRAM_STATE)) + return; +#endif // Fill struct with relevant data. nebstruct_process_data ds; @@ -766,6 +853,7 @@ void broker_program_state(int type, int flags) { * Sends program status updates to broker. */ void broker_program_status() { +#ifdef LEGACY_CONF // Config check. 
if (!(config->event_broker_options() & BROKER_STATUS_DATA)) return; @@ -787,6 +875,29 @@ void broker_program_status() { // Make callbacks. neb_make_callbacks(NEBCALLBACK_PROGRAM_STATUS_DATA, &ds); +#else + // Config check. + if (!(pb_config.event_broker_options() & BROKER_STATUS_DATA)) + return; + + // Fill struct with relevant data. + nebstruct_program_status_data ds; + ds.last_command_check = last_command_check; + ds.notifications_enabled = pb_config.enable_notifications(); + ds.active_service_checks_enabled = pb_config.execute_service_checks(); + ds.passive_service_checks_enabled = pb_config.accept_passive_service_checks(); + ds.active_host_checks_enabled = pb_config.execute_host_checks(); + ds.passive_host_checks_enabled = pb_config.accept_passive_host_checks(); + ds.event_handlers_enabled = pb_config.enable_event_handlers(); + ds.flap_detection_enabled = pb_config.enable_flap_detection(); + ds.obsess_over_hosts = pb_config.obsess_over_hosts(); + ds.obsess_over_services = pb_config.obsess_over_services(); + ds.global_host_event_handler = pb_config.global_host_event_handler(); + ds.global_service_event_handler = pb_config.global_service_event_handler(); + + // Make callbacks. + neb_make_callbacks(NEBCALLBACK_PROGRAM_STATUS_DATA, &ds); +#endif } /** @@ -804,8 +915,13 @@ void broker_relation_data(int type, host* dep_hst, com::centreon::engine::service* dep_svc) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_RELATION_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_RELATION_DATA)) + return; +#endif if (!hst || !dep_hst) return; @@ -850,8 +966,13 @@ int broker_service_check(int type, int check_type, const char* cmdline) { // Config check. 
+#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_SERVICE_CHECKS)) return OK; +#else + if (!(pb_config.event_broker_options() & BROKER_SERVICE_CHECKS)) + return OK; +#endif if (!svc) return ERROR; @@ -880,8 +1001,13 @@ int broker_service_check(int type, */ void broker_service_status(int type, com::centreon::engine::service* svc) { // Config check. +#ifdef LEGACY_CONF if (!(config->event_broker_options() & BROKER_STATUS_DATA)) return; +#else + if (!(pb_config.event_broker_options() & BROKER_STATUS_DATA)) + return; +#endif // Fill struct with relevant data. nebstruct_service_status_data ds; diff --git a/engine/src/check_result.cc b/engine/src/check_result.cc index d94e2f4fe2e..a319290246f 100644 --- a/engine/src/check_result.cc +++ b/engine/src/check_result.cc @@ -27,7 +27,6 @@ using namespace com::centreon::engine; check_result::check_result() : _object_check_type{check_source::service_check}, - _command_id(0), _notifier{nullptr}, _check_type(checkable::check_type::check_passive), _check_options{0}, @@ -52,7 +51,6 @@ check_result::check_result(enum check_source object_check_type, int return_code, std::string output) : _object_check_type{object_check_type}, - _command_id(0), _notifier{notifier}, _check_type(check_type), _check_options{check_options}, @@ -124,8 +122,7 @@ void check_result::set_check_options(unsigned check_options) { namespace com::centreon::engine { std::ostream& operator<<(std::ostream& stream, const check_result& res) { - stream << "command_id=" << res.get_command_id() - << " timeout=" << res.get_early_timeout() + stream << " timeout=" << res.get_early_timeout() << " ok=" << res.get_exited_ok() << " ret_code=" << res.get_return_code() << " output:" << res.get_output(); diff --git a/engine/src/checks/checker.cc b/engine/src/checks/checker.cc index 6293272e1fa..235d9ff63e4 100644 --- a/engine/src/checks/checker.cc +++ b/engine/src/checks/checker.cc @@ -1,20 +1,22 @@ /** - * Copyright 2024 Centreon - * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ +* Copyright 1999-2010 Ethan Galstad +* Copyright 2011-2024 Centreon +* +* This file is part of Centreon Engine. +* +* Centreon Engine is free software: you can redistribute it and/or +* modify it under the terms of the GNU General Public License version 2 +* as published by the Free Software Foundation. +* +* Centreon Engine is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with Centreon Engine. If not, see +* . +*/ #include "com/centreon/engine/checks/checker.hh" @@ -38,12 +40,6 @@ using namespace com::centreon::engine::checks; checker* checker::_instance = nullptr; static constexpr time_t max_check_reaper_time = 30; -/************************************** - * * - * Public Methods * - * * - **************************************/ - /** * Get instance of the checker singleton. * @@ -397,7 +393,6 @@ void checker::finished(commands::result const& res) noexcept { result->set_exited_ok(res.exit_status == process::normal || res.exit_status == process::timeout); result->set_output(res.output); - result->set_command_id(res.command_id); // Queue check result. 
lock.lock(); @@ -519,10 +514,17 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { timeval start_cmd; timeval end_cmd{0, 0}; gettimeofday(&start_cmd, nullptr); +#ifdef LEGACY_CONF broker_system_command(NEBTYPE_SYSTEM_COMMAND_START, NEBFLAG_NONE, NEBATTR_NONE, start_cmd, end_cmd, 0, config->host_check_timeout(), false, 0, tmp_processed_cmd, nullptr, nullptr); +#else + broker_system_command(NEBTYPE_SYSTEM_COMMAND_START, NEBFLAG_NONE, + NEBATTR_NONE, start_cmd, end_cmd, 0, + pb_config.host_check_timeout(), false, 0, + tmp_processed_cmd, nullptr, nullptr); +#endif commands::result res; @@ -550,7 +552,11 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { } else { // Run command. try { +#ifdef LEGACY_CONF cmd->run(processed_cmd, *macros, config->host_check_timeout(), res); +#else + cmd->run(processed_cmd, *macros, pb_config.host_check_timeout(), res); +#endif } catch (std::exception const& e) { run_failure("(Execute command failed)"); @@ -575,28 +581,41 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { memset(&end_cmd, 0, sizeof(end_time)); end_cmd.tv_sec = res.end_time.to_seconds(); end_cmd.tv_usec = res.end_time.to_useconds() - end_cmd.tv_sec * 1000000ull; +#ifdef LEGACY_CONF broker_system_command(NEBTYPE_SYSTEM_COMMAND_END, NEBFLAG_NONE, NEBATTR_NONE, start_cmd, end_cmd, execution_time, config->host_check_timeout(), res.exit_status == process::timeout, res.exit_code, tmp_processed_cmd, res.output.c_str(), nullptr); +#else + broker_system_command(NEBTYPE_SYSTEM_COMMAND_END, NEBFLAG_NONE, NEBATTR_NONE, + start_cmd, end_cmd, execution_time, + pb_config.host_check_timeout(), + res.exit_status == process::timeout, res.exit_code, + tmp_processed_cmd, res.output.c_str(), nullptr); +#endif // Cleanup. clear_volatile_macros_r(macros); // If the command timed out. 
+#ifdef LEGACY_CONF +uint32_t host_check_timeout = config->host_check_timeout(); +#else +uint32_t host_check_timeout = pb_config.host_check_timeout(); +#endif if (res.exit_status == process::timeout) { res.output = fmt::format("Host check timed out after {} seconds", - config->host_check_timeout()); + host_check_timeout); engine_logger(log_runtime_warning, basic) << "Warning: Host check command '" << processed_cmd << "' for host '" - << hst->name() << "' timed out after " << config->host_check_timeout() + << hst->name() << "' timed out after " << host_check_timeout << " seconds"; SPDLOG_LOGGER_WARN( runtime_logger, "Warning: Host check command '{}' for host '{}' timed out after {} " "seconds", - processed_cmd, hst->name(), config->host_check_timeout()); + processed_cmd, hst->name(), host_check_timeout); } // Update values. diff --git a/engine/src/command_manager.cc b/engine/src/command_manager.cc index 1718b63442e..18207450d8a 100644 --- a/engine/src/command_manager.cc +++ b/engine/src/command_manager.cc @@ -83,8 +83,13 @@ int command_manager::process_passive_service_check( /* skip this service check result if we aren't accepting passive service * checks */ +#ifdef LEGACY_CONF if (!config->accept_passive_service_checks()) return ERROR; +#else + if (!pb_config.accept_passive_service_checks()) + return ERROR; +#endif /* make sure we have a reasonable return code */ if (return_code > 3) @@ -137,6 +142,12 @@ int command_manager::process_passive_service_check( if (!found->second->passive_checks_enabled()) return ERROR; + SPDLOG_LOGGER_DEBUG(runtime_logger, + "process_passive_service_check check_time={}, " + "host_name={}, service={}, return_code={}, output={}", + check_time, host_name, svc_description, return_code, + output); + timeval tv; gettimeofday(&tv, nullptr); @@ -168,8 +179,13 @@ int command_manager::process_passive_host_check(time_t check_time, const std::string* real_host_name = nullptr; /* skip this host check result if we aren't accepting passive host checks 
*/ +#ifdef LEGACY_CONF if (!config->accept_passive_service_checks()) return ERROR; +#else + if (!pb_config.accept_passive_service_checks()) + return ERROR; +#endif /* make sure we have a reasonable return code */ if (return_code > 2) @@ -282,12 +298,21 @@ int command_manager::get_stats(std::string const& request, Stats* response) { uint32_t used_external_command_buffer_slots = 0; uint32_t high_external_command_buffer_slots = 0; // get number f items in the command buffer +#ifdef LEGACY_CONF if (config->check_external_commands()) { used_external_command_buffer_slots = external_command_buffer.size(); high_external_command_buffer_slots = external_command_buffer.high(); } response->mutable_program_status()->set_total_external_command_buffer_slots( config->external_command_buffer_slots()); +#else + if (pb_config.check_external_commands()) { + used_external_command_buffer_slots = external_command_buffer.size(); + high_external_command_buffer_slots = external_command_buffer.high(); + } + response->mutable_program_status()->set_total_external_command_buffer_slots( + pb_config.external_command_buffer_slots()); +#endif response->mutable_program_status()->set_used_external_command_buffer_slots( used_external_command_buffer_slots); response->mutable_program_status()->set_high_external_command_buffer_slots( diff --git a/engine/src/commands/command.cc b/engine/src/commands/command.cc index 134c459a8d5..c1043c996aa 100644 --- a/engine/src/commands/command.cc +++ b/engine/src/commands/command.cc @@ -178,8 +178,12 @@ bool commands::command::gest_call_interval( caller_to_last_call_map::iterator group_search = _result_cache.find(caller); if (group_search != _result_cache.end()) { time_t now = time(nullptr); - if (group_search->second->launch_time + config->interval_length() >= - now && +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); +#else + uint32_t interval_length = pb_config.interval_length(); +#endif + if (group_search->second->launch_time + 
interval_length >= now && group_search->second->res) { // old check is too recent result_to_reuse = std::make_shared(*group_search->second->res); result_to_reuse->command_id = command_id; diff --git a/engine/src/commands/commands.cc b/engine/src/commands/commands.cc index ec6e7d53d1a..072e14870d4 100644 --- a/engine/src/commands/commands.cc +++ b/engine/src/commands/commands.cc @@ -55,8 +55,14 @@ int check_for_external_commands() { functions_logger->trace("check_for_external_commands()"); +#ifdef LEGACY_CONF + bool check_external_commands = config->check_external_commands(); +#else + bool check_external_commands = pb_config.check_external_commands(); +#endif + /* bail out if we shouldn't be checking for external commands */ - if (!config->check_external_commands()) + if (!check_external_commands) return ERROR; /* update last command check time */ @@ -497,9 +503,16 @@ void cmd_signal_process(int cmd, char* args) { int cmd_process_service_check_result(int cmd [[maybe_unused]], time_t check_time, char* args) { +#ifdef LEGACY_CONF + bool accept_passive_service_checks = config->accept_passive_service_checks(); +#else + bool accept_passive_service_checks = + pb_config.accept_passive_service_checks(); +#endif + /* skip this service check result if we aren't accepting passive service * checks */ - if (!config->accept_passive_service_checks()) + if (!accept_passive_service_checks) return ERROR; auto a{absl::StrSplit(args, absl::MaxSplits(';', 3))}; @@ -571,8 +584,8 @@ int cmd_process_service_check_result(int cmd [[maybe_unused]], ++ait; // replace \\n with \n - std::string output; - absl::CUnescape(*ait, &output); + std::string output(ait->data(), ait->size()); + string::unescape(output); timeval tv; gettimeofday(&tv, nullptr); @@ -608,9 +621,16 @@ int process_passive_service_check(time_t check_time, char const* output) { char const* real_host_name(nullptr); +#ifdef LEGACY_CONF + bool accept_passive_service_checks = config->accept_passive_service_checks(); +#else + bool 
accept_passive_service_checks = + pb_config.accept_passive_service_checks(); +#endif + /* skip this service check result if we aren't accepting passive service * checks */ - if (config->accept_passive_service_checks() == false) + if (!accept_passive_service_checks) return ERROR; /* make sure we have all required data */ @@ -738,8 +758,16 @@ int process_passive_host_check(time_t check_time, int return_code, char const* output) { char const* real_host_name(nullptr); + +#ifdef LEGACY_CONF + bool accept_passive_service_checks = config->accept_passive_service_checks(); +#else + bool accept_passive_service_checks = + pb_config.accept_passive_service_checks(); +#endif + /* skip this host check result if we aren't accepting passive host checks */ - if (!config->accept_passive_service_checks()) + if (!accept_passive_service_checks) return ERROR; /* make sure we have all required data */ @@ -1918,13 +1946,21 @@ int cmd_change_object_char_var(int cmd, char* args) { /* update the variable */ switch (cmd) { case CMD_CHANGE_GLOBAL_HOST_EVENT_HANDLER: +#ifdef LEGACY_CONF config->global_host_event_handler(temp_ptr); +#else + pb_config.set_global_host_event_handler(temp_ptr); +#endif global_host_event_handler_ptr = cmd_found->second.get(); attr = MODATTR_EVENT_HANDLER_COMMAND; break; case CMD_CHANGE_GLOBAL_SVC_EVENT_HANDLER: +#ifdef LEGACY_CONF config->global_service_event_handler(temp_ptr); +#else + pb_config.set_global_service_event_handler(temp_ptr); +#endif global_service_event_handler_ptr = cmd_found->second.get(); attr = MODATTR_EVENT_HANDLER_COMMAND; break; @@ -2255,8 +2291,14 @@ void enable_service_checks(service* svc) { void enable_all_notifications(void) { constexpr uint32_t attr = MODATTR_NOTIFICATIONS_ENABLED; +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif + /* bail out if we're already set... 
*/ - if (config->enable_notifications()) + if (enable_notifications) return; /* set the attribute modified flag */ @@ -2264,7 +2306,11 @@ void enable_all_notifications(void) { modified_service_process_attributes |= attr; /* update notification status */ +#ifdef LEGACY_CONF config->enable_notifications(true); +#else + pb_config.set_enable_notifications(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2280,8 +2326,14 @@ void enable_all_notifications(void) { void disable_all_notifications(void) { constexpr uint32_t attr = MODATTR_NOTIFICATIONS_ENABLED; +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif + /* bail out if we're already set... */ - if (config->enable_notifications() == false) + if (!enable_notifications) return; /* set the attribute modified flag */ @@ -2289,7 +2341,11 @@ void disable_all_notifications(void) { modified_service_process_attributes |= attr; /* update notification status */ +#ifdef LEGACY_CONF config->enable_notifications(false); +#else + pb_config.set_enable_notifications(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2701,15 +2757,25 @@ void remove_service_acknowledgement(service* svc) { void start_executing_service_checks(void) { constexpr uint32_t attr = MODATTR_ACTIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool execute_service_checks = config->execute_service_checks(); +#else + bool execute_service_checks = pb_config.execute_service_checks(); +#endif + /* bail out if we're already executing services */ - if (config->execute_service_checks()) + if (execute_service_checks) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the service check execution flag */ +#ifdef LEGACY_CONF config->execute_service_checks(true); +#else + 
pb_config.set_execute_service_checks(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2725,15 +2791,25 @@ void start_executing_service_checks(void) { void stop_executing_service_checks(void) { unsigned long attr = MODATTR_ACTIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool execute_service_checks = config->execute_service_checks(); +#else + bool execute_service_checks = pb_config.execute_service_checks(); +#endif + /* bail out if we're already not executing services */ - if (config->execute_service_checks() == false) + if (!execute_service_checks) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the service check execution flag */ +#ifdef LEGACY_CONF config->execute_service_checks(false); +#else + pb_config.set_execute_service_checks(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2749,15 +2825,26 @@ void stop_executing_service_checks(void) { void start_accepting_passive_service_checks(void) { constexpr uint32_t attr = MODATTR_PASSIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool accept_passive_service_checks = config->accept_passive_service_checks(); +#else + bool accept_passive_service_checks = + pb_config.accept_passive_service_checks(); +#endif + /* bail out if we're already accepting passive services */ - if (config->accept_passive_service_checks()) + if (accept_passive_service_checks) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the service check flag */ +#ifdef LEGACY_CONF config->accept_passive_service_checks(true); +#else + pb_config.set_accept_passive_service_checks(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2773,15 +2860,25 @@ void start_accepting_passive_service_checks(void) { void stop_accepting_passive_service_checks(void) 
{ constexpr uint32_t attr = MODATTR_PASSIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool accept_passive_service_checks = config->accept_passive_service_checks(); +#else + bool accept_passive_service_checks = pb_config.accept_passive_service_checks(); +#endif + /* bail out if we're already not accepting passive services */ - if (config->accept_passive_service_checks() == false) + if (!accept_passive_service_checks) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the service check flag */ +#ifdef LEGACY_CONF config->accept_passive_service_checks(false); +#else + pb_config.set_accept_passive_service_checks(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2835,15 +2932,25 @@ void disable_passive_service_checks(service* svc) { void start_executing_host_checks(void) { constexpr uint32_t attr = MODATTR_ACTIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool execute_host_checks = config->execute_host_checks(); +#else + bool execute_host_checks = pb_config.execute_host_checks(); +#endif + /* bail out if we're already executing hosts */ - if (config->execute_host_checks()) + if (execute_host_checks) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the host check execution flag */ +#ifdef LEGACY_CONF config->execute_host_checks(true); +#else + pb_config.set_execute_host_checks(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2859,15 +2966,24 @@ void start_executing_host_checks(void) { void stop_executing_host_checks(void) { constexpr uint32_t attr = MODATTR_ACTIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool execute_host_checks = config->execute_host_checks(); +#else + bool execute_host_checks = pb_config.execute_host_checks(); +#endif /* bail out if we're already not executing hosts */ - if (config->execute_host_checks() == false) + if 
(!execute_host_checks) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the host check execution flag */ - config->execute_host_checks(false); +#ifdef LEGACY_CONF + config->execute_host_checks(false); +#else + pb_config.set_execute_host_checks(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2883,15 +2999,25 @@ void stop_executing_host_checks(void) { void start_accepting_passive_host_checks(void) { constexpr uint32_t attr = MODATTR_PASSIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool accept_passive_host_checks = config->accept_passive_host_checks(); +#else + bool accept_passive_host_checks = pb_config.accept_passive_host_checks(); +#endif + /* bail out if we're already accepting passive hosts */ - if (config->accept_passive_host_checks()) + if (accept_passive_host_checks) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the host check flag */ +#ifdef LEGACY_CONF config->accept_passive_host_checks(true); +#else + pb_config.set_accept_passive_host_checks(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2907,15 +3033,25 @@ void start_accepting_passive_host_checks(void) { void stop_accepting_passive_host_checks(void) { constexpr uint32_t attr = MODATTR_PASSIVE_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool accept_passive_host_checks = config->accept_passive_host_checks(); +#else + bool accept_passive_host_checks = pb_config.accept_passive_host_checks(); +#endif + /* bail out if we're already not accepting passive hosts */ - if (config->accept_passive_host_checks() == false) + if (!accept_passive_host_checks) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the host check flag */ +#ifdef LEGACY_CONF config->accept_passive_host_checks(false); +#else + 
pb_config.set_accept_passive_host_checks(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2968,8 +3104,14 @@ void disable_passive_host_checks(host* hst) { void start_using_event_handlers(void) { constexpr uint32_t attr = MODATTR_EVENT_HANDLER_ENABLED; +#ifdef LEGACY_CONF + bool enable_event_handlers = config->enable_event_handlers(); +#else + bool enable_event_handlers = pb_config.enable_event_handlers(); +#endif + /* no change */ - if (config->enable_event_handlers()) + if (enable_event_handlers) return; /* set the attribute modified flag */ @@ -2977,7 +3119,11 @@ void start_using_event_handlers(void) { modified_service_process_attributes |= attr; /* set the event handler flag */ +#ifdef LEGACY_CONF config->enable_event_handlers(true); +#else + pb_config.set_enable_event_handlers(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -2993,8 +3139,14 @@ void start_using_event_handlers(void) { void stop_using_event_handlers(void) { constexpr uint32_t attr = MODATTR_EVENT_HANDLER_ENABLED; +#ifdef LEGACY_CONF + bool enable_event_handlers = config->enable_event_handlers(); +#else + bool enable_event_handlers = pb_config.enable_event_handlers(); +#endif + /* no change */ - if (config->enable_event_handlers() == false) + if (!enable_event_handlers) return; /* set the attribute modified flag */ @@ -3002,7 +3154,11 @@ void stop_using_event_handlers(void) { modified_service_process_attributes |= attr; /* set the event handler flag */ +#ifdef LEGACY_CONF config->enable_event_handlers(false); +#else + pb_config.set_enable_event_handlers(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3156,15 +3312,25 @@ void enable_host_checks(host* hst) { void start_obsessing_over_service_checks(void) { constexpr uint32_t attr = MODATTR_OBSESSIVE_HANDLER_ENABLED; 
+#ifdef LEGACY_CONF + bool obsess_over_services = config->obsess_over_services(); +#else + bool obsess_over_services = pb_config.obsess_over_services(); +#endif + /* no change */ - if (config->obsess_over_services()) + if (obsess_over_services) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the service obsession flag */ +#ifdef LEGACY_CONF config->obsess_over_services(true); +#else + pb_config.set_obsess_over_services(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3180,15 +3346,25 @@ void start_obsessing_over_service_checks(void) { void stop_obsessing_over_service_checks(void) { constexpr uint32_t attr = MODATTR_OBSESSIVE_HANDLER_ENABLED; +#ifdef LEGACY_CONF + bool obsess_over_services = config->obsess_over_services(); +#else + bool obsess_over_services = pb_config.obsess_over_services(); +#endif + /* no change */ - if (config->obsess_over_services() == false) + if (!obsess_over_services) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the service obsession flag */ +#ifdef LEGACY_CONF config->obsess_over_services(false); +#else + pb_config.set_obsess_over_services(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3204,15 +3380,25 @@ void stop_obsessing_over_service_checks(void) { void start_obsessing_over_host_checks(void) { unsigned long attr = MODATTR_OBSESSIVE_HANDLER_ENABLED; +#ifdef LEGACY_CONF + bool obsess_over_hosts = config->obsess_over_hosts(); +#else + bool obsess_over_hosts = pb_config.obsess_over_hosts(); +#endif + /* no change */ - if (config->obsess_over_hosts()) + if (obsess_over_hosts) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the host obsession flag */ +#ifdef LEGACY_CONF config->obsess_over_hosts(true); +#else + 
pb_config.set_obsess_over_hosts(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3228,15 +3414,25 @@ void start_obsessing_over_host_checks(void) { void stop_obsessing_over_host_checks(void) { constexpr uint32_t attr = MODATTR_OBSESSIVE_HANDLER_ENABLED; +#ifdef LEGACY_CONF + bool obsess_over_hosts = config->obsess_over_hosts(); +#else + bool obsess_over_hosts = pb_config.obsess_over_hosts(); +#endif + /* no change */ - if (config->obsess_over_hosts() == false) + if (!obsess_over_hosts) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the host obsession flag */ +#ifdef LEGACY_CONF config->obsess_over_hosts(false); +#else + pb_config.set_obsess_over_hosts(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3252,15 +3448,25 @@ void stop_obsessing_over_host_checks(void) { void enable_service_freshness_checks(void) { constexpr uint32_t attr = MODATTR_FRESHNESS_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool check_service_freshness = config->check_service_freshness(); +#else + bool check_service_freshness = pb_config.check_service_freshness(); +#endif + /* no change */ - if (config->check_service_freshness()) + if (check_service_freshness) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the freshness check flag */ +#ifdef LEGACY_CONF config->check_service_freshness(true); +#else + pb_config.set_check_service_freshness(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3276,15 +3482,25 @@ void enable_service_freshness_checks(void) { void disable_service_freshness_checks(void) { constexpr uint32_t attr = MODATTR_FRESHNESS_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool check_service_freshness = config->check_service_freshness(); +#else + bool check_service_freshness 
= pb_config.check_service_freshness(); +#endif + /* no change */ - if (config->check_service_freshness() == false) + if (!check_service_freshness) return; /* set the attribute modified flag */ modified_service_process_attributes |= attr; /* set the freshness check flag */ +#ifdef LEGACY_CONF config->check_service_freshness(false); +#else + pb_config.set_check_service_freshness(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3300,15 +3516,25 @@ void disable_service_freshness_checks(void) { void enable_host_freshness_checks(void) { constexpr uint32_t attr = MODATTR_FRESHNESS_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool check_host_freshness = config->check_host_freshness(); +#else + bool check_host_freshness = pb_config.check_host_freshness(); +#endif + /* no change */ - if (config->check_host_freshness()) + if (check_host_freshness) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the freshness check flag */ +#ifdef LEGACY_CONF config->check_host_freshness(true); +#else + pb_config.set_check_host_freshness(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3323,15 +3549,25 @@ void enable_host_freshness_checks(void) { void disable_host_freshness_checks(void) { constexpr uint32_t attr = MODATTR_FRESHNESS_CHECKS_ENABLED; +#ifdef LEGACY_CONF + bool check_host_freshness = config->check_host_freshness(); +#else + bool check_host_freshness = pb_config.check_host_freshness(); +#endif + /* no change */ - if (config->check_host_freshness() == false) + if (!check_host_freshness) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; /* set the freshness check flag */ +#ifdef LEGACY_CONF config->check_host_freshness(false); +#else + pb_config.set_check_host_freshness(false); +#endif /* send data to event broker */ 
broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3347,15 +3583,25 @@ void disable_host_freshness_checks(void) { void enable_performance_data(void) { constexpr uint32_t attr = MODATTR_PERFORMANCE_DATA_ENABLED; +#ifdef LEGACY_CONF + bool process_performance_data = config->process_performance_data(); +#else + bool process_performance_data = pb_config.process_performance_data(); +#endif + /* bail out if we're already set... */ - if (config->process_performance_data()) + if (process_performance_data) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; modified_service_process_attributes |= attr; +#ifdef LEGACY_CONF config->process_performance_data(true); +#else + pb_config.set_process_performance_data(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -3371,15 +3617,25 @@ void enable_performance_data(void) { void disable_performance_data(void) { constexpr uint32_t attr = MODATTR_PERFORMANCE_DATA_ENABLED; +#ifdef LEGACY_CONF + bool process_performance_data = config->process_performance_data(); +#else + bool process_performance_data = pb_config.process_performance_data(); +#endif + /* bail out if we're already set... 
*/ - if (config->process_performance_data() == false) + if (!process_performance_data) return; /* set the attribute modified flag */ modified_host_process_attributes |= attr; modified_service_process_attributes |= attr; +#ifdef LEGACY_CONF config->process_performance_data(false); +#else + pb_config.set_process_performance_data(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, diff --git a/engine/src/commands/connector.cc b/engine/src/commands/connector.cc index d95dc58ee6c..1b48116397f 100644 --- a/engine/src/commands/connector.cc +++ b/engine/src/commands/connector.cc @@ -73,9 +73,18 @@ connector::connector(const std::string& connector_name, } { UNIQUE_LOCK(lck, _lock); +#ifdef LEGACY_CONF _process.setpgid_on_exec(config->use_setpgid()); +#else + _process.setpgid_on_exec(pb_config.use_setpgid()); +#endif } - if (config->enable_environment_macros()) { +#ifdef LEGACY_CONF + bool enable_environment_macros = config->enable_environment_macros(); +#else + bool enable_environment_macros = pb_config.enable_environment_macros(); +#endif + if (enable_environment_macros) { engine_logger(log_runtime_warning, basic) << "Warning: Connector does not enable environment macros"; runtime_logger->warn( @@ -424,10 +433,17 @@ void connector::_connector_close() { _send_query_quit(); // Waiting connector quit. +#ifdef LEGACY_CONF bool is_timeout{ _cv_query.wait_for( lock, std::chrono::seconds(config->service_check_timeout())) == std::cv_status::timeout}; +#else + bool is_timeout{ + _cv_query.wait_for( + lock, std::chrono::seconds(pb_config.service_check_timeout())) == + std::cv_status::timeout}; +#endif if (is_timeout || !_query_quit_ok) { _process.kill(); if (is_timeout) { @@ -471,9 +487,15 @@ void connector::_connector_start() { _send_query_version(); // Waiting connector version, or 1 seconds. 
+#ifdef LEGACY_CONF bool is_timeout{!_cv_query.wait_for( lock, std::chrono::seconds(config->service_check_timeout()), [this] { return _version_set; })}; +#else + bool is_timeout{!_cv_query.wait_for( + lock, std::chrono::seconds(pb_config.service_check_timeout()), + [this] { return _version_set; })}; +#endif if (is_timeout || !_query_version_ok) { _process.kill(); diff --git a/engine/src/commands/otel_connector.cc b/engine/src/commands/otel_connector.cc index 44538b01e0f..6b8433e4d15 100644 --- a/engine/src/commands/otel_connector.cc +++ b/engine/src/commands/otel_connector.cc @@ -37,15 +37,17 @@ absl::flat_hash_map> * @param cmd_line * @param listener */ -void otel_connector::create(const std::string& connector_name, - const std::string& cmd_line, - commands::command_listener* listener) { +std::shared_ptr otel_connector::create( + const std::string& connector_name, + const std::string& cmd_line, + commands::command_listener* listener) { std::shared_ptr cmd( std::make_shared(connector_name, cmd_line, listener)); auto iter_res = _commands.emplace(connector_name, cmd); if (!iter_res.second) { iter_res.first->second = cmd; } + return cmd; } /** @@ -90,6 +92,26 @@ std::shared_ptr otel_connector::get_otel_connector( : std::shared_ptr(); } +/** + * @brief get otel command that is used by host serv + * Caution: This function must be called from engine main thread + * + * @param host + * @param serv + * @return std::shared_ptr null if not found + */ +std::shared_ptr +otel_connector::get_otel_connector_from_host_serv( + const std::string_view& host, + const std::string_view& serv) { + for (const auto& name_to_conn : _commands) { + if (name_to_conn.second->_host_serv_list->contains(host, serv)) { + return name_to_conn.second; + } + } + return {}; +} + /** * @brief erase all otel commands * @@ -122,7 +144,7 @@ otel_connector::otel_connector(const std::string& connector_name, commands::command_listener* listener) : command(connector_name, cmd_line, listener, e_type::otel), 
_host_serv_list(std::make_shared()), - _logger(log_v2::instance().get(log_v2::OTEL)) { + _logger(log_v2::instance().get(log_v2::OTL)) { init(); } @@ -155,62 +177,8 @@ uint64_t otel_connector::run(const std::string& processed_cmd, uint32_t timeout, const check_result::pointer& to_push_to_checker, const void* caller) { - std::shared_ptr otel = - otel::open_telemetry_base::instance(); - - if (!otel) { - SPDLOG_LOGGER_ERROR(_logger, - "open telemetry module not loaded for connector: {}", - get_name()); - throw exceptions::msg_fmt( - "open telemetry module not loaded for connector: {}", get_name()); - } - - uint64_t command_id(get_uniq_id()); - - if (!gest_call_interval(command_id, to_push_to_checker, caller)) { - return command_id; - } - - if (!_conv_conf) { - SPDLOG_LOGGER_ERROR( - _logger, "{} unable to do a check without a converter configuration", - get_name()); - throw exceptions::msg_fmt( - "{} unable to do a check without a converter configuration", - get_name()); - } - SPDLOG_LOGGER_TRACE( - _logger, - "otel_connector::async_run: connector='{}', command_id={}, " - "cmd='{}', timeout={}", - _name, command_id, processed_cmd, timeout); - - result res; - bool res_available = otel->check( - processed_cmd, _conv_conf, command_id, macros, timeout, res, - [me = shared_from_this(), command_id](const result& async_res) { - SPDLOG_LOGGER_TRACE( - me->_logger, "otel_connector async_run callback: connector='{}' {}", - me->_name, async_res); - me->update_result_cache(command_id, async_res); - if (me->_listener) { - (me->_listener->finished)(async_res); - } - }); - - if (res_available) { - SPDLOG_LOGGER_TRACE(_logger, - "otel_connector data available : connector='{}', " - "cmd='{}', {}", - _name, processed_cmd, res); - update_result_cache(command_id, res); - if (_listener) { - (_listener->finished)(res); - } - } - - return command_id; + SPDLOG_LOGGER_ERROR(_logger, "open telemetry services must be passive"); + throw exceptions::msg_fmt("open telemetry services must be 
passive"); } /** @@ -227,41 +195,25 @@ void otel_connector::run(const std::string& processed_cmd, nagios_macros& macros, uint32_t timeout, result& res) { - std::shared_ptr otel = - otel::open_telemetry_base::instance(); - if (!otel) { - SPDLOG_LOGGER_ERROR(_logger, - "open telemetry module not loaded for connector: {}", - get_name()); - throw exceptions::msg_fmt( - "open telemetry module not loaded for connector: {}", get_name()); - } - - uint64_t command_id(get_uniq_id()); - - SPDLOG_LOGGER_TRACE(_logger, - "otel_connector::sync_run: connector='{}', cmd='{}', " - "command_id={}, timeout={}", - _name, processed_cmd, command_id, timeout); - - std::condition_variable cv; - std::mutex cv_m; - - bool res_available = - otel->check(processed_cmd, _conv_conf, command_id, macros, timeout, res, - [&res, &cv](const result& async_res) { - res = async_res; - cv.notify_one(); - }); + SPDLOG_LOGGER_ERROR(_logger, "open telemetry services must be passive"); + throw exceptions::msg_fmt("open telemetry services must be passive"); +} - // no otl_data_point available => wait util available or timeout - if (!res_available) { - std::unique_lock l(cv_m); - cv.wait(l); - } - SPDLOG_LOGGER_TRACE( - _logger, "otel_connector::end sync_run: connector='{}', cmd='{}', {}", - _name, processed_cmd, res); +/** + * @brief convert opentelemetry datas in check_result and post it to + * checks::checker::instance() Caution, this function must be called from engine + * main thread + * + * @param host + * @param serv empty if result of host check + * @param data_pts opentelemetry data points + */ +void otel_connector::process_data_pts( + const std::string_view& host, + const std::string_view& serv, + const com::centreon::engine::modules::opentelemetry::metric_to_datapoints& + data_pts) { + _check_result_builder->process_data_pts(host, serv, data_pts); } /** @@ -288,12 +240,12 @@ void otel_connector::init() { get_name(), get_command_line()); } try { - if (!_conv_conf) { + if (!_check_result_builder) { 
std::shared_ptr otel = otel::open_telemetry_base::instance(); if (otel) { - _conv_conf = - otel->create_check_result_builder_config(get_command_line()); + _check_result_builder = + otel->create_check_result_builder(get_command_line()); } } } catch (const std::exception& e) { diff --git a/engine/src/commands/otel_interface.cc b/engine/src/commands/otel_interface.cc index 19d5559b1fb..b3e3fd67545 100644 --- a/engine/src/commands/otel_interface.cc +++ b/engine/src/commands/otel_interface.cc @@ -45,21 +45,3 @@ void host_serv_list::remove(const std::string& host, } } } - -/** - * @brief test if a host serv pair is contained in list - * - * @param host - * @param service_description - * @return true found - * @return false not found - */ -bool host_serv_list::contains(const std::string& host, - const std::string& service_description) const { - absl::ReaderMutexLock l(&_data_m); - auto host_search = _data.find(host); - if (host_search != _data.end()) { - return host_search->second.contains(service_description); - } - return false; -} diff --git a/engine/src/commands/processing.cc b/engine/src/commands/processing.cc index 173940589e0..4802232030c 100644 --- a/engine/src/commands/processing.cc +++ b/engine/src/commands/processing.cc @@ -848,13 +848,22 @@ bool processing::execute(const std::string& cmdstr) { // Log the external command. if (command_id == CMD_PROCESS_SERVICE_CHECK_RESULT || command_id == CMD_PROCESS_HOST_CHECK_RESULT) { +#ifdef LEGACY_CONF + bool log_passive_checks = config->log_passive_checks(); +#else + bool log_passive_checks = pb_config.log_passive_checks(); +#endif // Passive checks are logged in checks.c. 
- if (config->log_passive_checks()) { + if (log_passive_checks) { engine_logger(log_passive_check, basic) << "EXTERNAL COMMAND: " << command_name << ';' << args; checks_logger->info("EXTERNAL COMMAND: {};{}", command_name, args); } +#ifdef LEGACY_CONF } else if (config->log_external_commands()) { +#else + } else if (pb_config.log_external_commands()) { +#endif engine_logger(log_external_command, basic) << "EXTERNAL COMMAND: " << command_name << ';' << args; SPDLOG_LOGGER_INFO(external_command_logger, "EXTERNAL COMMAND: {};{}", @@ -903,9 +912,18 @@ void processing::_wrapper_read_state_information() { try { retention::state state; retention::parser p; - p.parse(config->state_retention_file(), state); +#ifdef LEGACY_CONF + const std::string& retention_file = config->state_retention_file(); +#else + const std::string& retention_file = pb_config.state_retention_file(); +#endif + p.parse(retention_file, state); retention::applier::state app_state; +#ifdef LEGACY_CONF app_state.apply(*config, state); +#else + app_state.apply(pb_config, state); +#endif } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) << "Error: could not load retention file: " << e.what(); @@ -915,7 +933,11 @@ void processing::_wrapper_read_state_information() { } void processing::_wrapper_save_state_information() { +#ifdef LEGACY_CONF retention::dump::save(config->state_retention_file()); +#else + retention::dump::save(pb_config.state_retention_file()); +#endif } void processing::wrapper_enable_host_and_child_notifications(host* hst) { diff --git a/engine/src/commands/raw.cc b/engine/src/commands/raw.cc index b196a80834b..b8d1dab2e2b 100644 --- a/engine/src/commands/raw.cc +++ b/engine/src/commands/raw.cc @@ -480,7 +480,12 @@ void raw::_build_custom_service_macro_environment(nagios_macros& macros, * @param[out] env The environment to fill. 
*/ void raw::_build_environment_macros(nagios_macros& macros, environment& env) { - if (config->enable_environment_macros()) { +#ifdef LEGACY_CONF + bool enable_environment_macros = config->enable_environment_macros(); +#else + bool enable_environment_macros = pb_config.enable_environment_macros(); +#endif + if (enable_environment_macros) { _build_macrosx_environment(macros, env); _build_argv_macro_environment(macros, env); _build_custom_host_macro_environment(macros, env); @@ -497,15 +502,20 @@ void raw::_build_environment_macros(nagios_macros& macros, environment& env) { * @param[out] env The environment to fill. */ void raw::_build_macrosx_environment(nagios_macros& macros, environment& env) { - for (uint32_t i(0); i < MACRO_X_COUNT; ++i) { +#ifdef LEGACY_CONF + bool use_large_installation_tweaks = config->use_large_installation_tweaks(); +#else + bool use_large_installation_tweaks = + pb_config.use_large_installation_tweaks(); +#endif + for (uint32_t i = 0; i < MACRO_X_COUNT; ++i) { int release_memory(0); // Need to grab macros? if (macros.x[i].empty()) { // Skip summary macro in lage instalation tweaks. - if ((i < MACRO_TOTALHOSTSUP) || - (i > MACRO_TOTALSERVICEPROBLEMSUNHANDLED) || - !config->use_large_installation_tweaks()) { + if (i < MACRO_TOTALHOSTSUP || i > MACRO_TOTALSERVICEPROBLEMSUNHANDLED || + !use_large_installation_tweaks) { grab_macrox_value_r(¯os, i, "", "", macros.x[i], &release_memory); } } @@ -537,11 +547,15 @@ process* raw::_get_free_process() { if (_processes_free.empty()) { /* Only the out stream is open */ process* p = new process(this, false, true, false); +#ifdef LEGACY_CONF p->setpgid_on_exec(config->use_setpgid()); +#else + p->setpgid_on_exec(pb_config.use_setpgid()); +#endif return p; } // Get a free process. 
- process* p(_processes_free.front()); + process* p = _processes_free.front(); _processes_free.pop_front(); return p; } diff --git a/engine/src/config.cc b/engine/src/config.cc index f91eb07a218..544d2be9a0d 100644 --- a/engine/src/config.cc +++ b/engine/src/config.cc @@ -24,7 +24,9 @@ #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/string.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/parser.hh" +#endif using namespace com::centreon::engine; using namespace com::centreon::engine::configuration::applier; diff --git a/engine/src/configuration/applier/anomalydetection.cc b/engine/src/configuration/applier/anomalydetection.cc index 78d062bc6f0..9feef230b3b 100644 --- a/engine/src/configuration/applier/anomalydetection.cc +++ b/engine/src/configuration/applier/anomalydetection.cc @@ -31,23 +31,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::downtimes; using namespace com::centreon::engine::configuration; -/** - * Check if the anomalydetection group name matches the configuration object. - */ -class servicegroup_name_comparator { - public: - servicegroup_name_comparator(std::string const& servicegroup_name) { - _servicegroup_name = servicegroup_name; - } - - bool operator()(std::shared_ptr sg) { - return _servicegroup_name == sg->servicegroup_name(); - } - - private: - std::string _servicegroup_name; -}; - +#ifdef LEGACY_CONF /** * Add new anomalydetection. * @@ -156,9 +140,106 @@ void applier::anomalydetection::add_object( if (it->second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); - broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, ad, - it->first.c_str(), it->second.value().c_str(), - &tv); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, ad, it->first, + it->second.value(), &tv); + } + } + + // Notify event broker. 
+ broker_adaptive_service_data(NEBTYPE_SERVICE_ADD, NEBFLAG_NONE, NEBATTR_NONE, + ad, MODATTR_ALL); +} +#else +/** + * @brief Add new anomalydetection. + * + * @param obj The new anomalydetection protobuf configuration to add into the + * monitoring engine. + */ +void applier::anomalydetection::add_object( + const configuration::Anomalydetection& obj) { + // Check anomalydetection. + if (!obj.host_id()) + throw engine_error() << fmt::format( + "No host_id available for the host '{} - unable to create " + "anomalydetection '{}'", + obj.host_name(), obj.service_description()); + + // Logging. + SPDLOG_LOGGER_DEBUG(config_logger, + "Creating new anomalydetection '{}' of host '{}'.", + obj.service_description(), obj.host_name()); + + // Add anomalydetection to the global configuration set. + auto* cfg_obj = pb_config.add_anomalydetections(); + cfg_obj->CopyFrom(obj); + + // Create anomalydetection. + engine::anomalydetection* ad{add_anomalydetection( + obj.host_id(), obj.service_id(), obj.host_name(), + obj.service_description(), obj.display_name(), obj.internal_id(), + obj.dependent_service_id(), obj.metric_name(), obj.thresholds_file(), + obj.status_change(), + static_cast(obj.initial_state()), + obj.max_check_attempts(), obj.check_interval(), obj.retry_interval(), + obj.notification_interval(), obj.first_notification_delay(), + obj.recovery_notification_delay(), obj.notification_period(), + static_cast(obj.notification_options() & action_svc_ok), + static_cast(obj.notification_options() & action_svc_unknown), + static_cast(obj.notification_options() & action_svc_warning), + static_cast(obj.notification_options() & action_svc_critical), + static_cast(obj.notification_options() & action_svc_flapping), + static_cast(obj.notification_options() & action_svc_downtime), + obj.notifications_enabled(), obj.is_volatile(), obj.event_handler(), + obj.event_handler_enabled(), obj.checks_active(), obj.checks_passive(), + obj.flap_detection_enabled(), obj.low_flap_threshold(), + 
obj.high_flap_threshold(), + static_cast(obj.flap_detection_options() & action_svc_ok), + static_cast(obj.flap_detection_options() & action_svc_warning), + static_cast(obj.flap_detection_options() & action_svc_unknown), + static_cast(obj.flap_detection_options() & action_svc_critical), + static_cast(obj.stalking_options() & action_svc_ok), + static_cast(obj.stalking_options() & action_svc_warning), + static_cast(obj.stalking_options() & action_svc_unknown), + static_cast(obj.stalking_options() & action_svc_critical), + obj.process_perf_data(), obj.check_freshness(), obj.freshness_threshold(), + obj.notes(), obj.notes_url(), obj.action_url(), obj.icon_image(), + obj.icon_image_alt(), obj.retain_status_information(), + obj.retain_nonstatus_information(), obj.obsess_over_service(), + obj.timezone(), obj.icon_id(), obj.sensitivity())}; + if (!ad) + throw engine_error() << "Could not register anomalydetection '" + << obj.service_description() << "' of host '" + << obj.host_name() << "'"; + ad->set_initial_notif_time(0); + engine::anomalydetection::services[{obj.host_name(), + obj.service_description()}] + ->set_host_id(obj.host_id()); + engine::anomalydetection::services[{obj.host_name(), + obj.service_description()}] + ->set_service_id(obj.service_id()); + ad->set_acknowledgement_timeout(obj.acknowledgement_timeout() * + pb_config.interval_length()); + ad->set_last_acknowledgement(0); + + // Add contacts. + for (auto& c : obj.contacts().data()) + ad->mut_contacts().insert({c, nullptr}); + + // Add contactgroups. + for (auto& cg : obj.contactgroups().data()) + ad->get_contactgroups().insert({cg, nullptr}); + + // Add custom variables. 
+ for (auto& cv : obj.customvariables()) { + engine::customvariable& c = ad->custom_variables[cv.name()]; + c.set_value(cv.value()); + c.set_sent(cv.is_sent()); + + if (c.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, ad, cv.name(), + cv.value(), &tv); } } @@ -166,7 +247,9 @@ void applier::anomalydetection::add_object( broker_adaptive_service_data(NEBTYPE_SERVICE_ADD, NEBFLAG_NONE, NEBATTR_NONE, ad, MODATTR_ALL); } +#endif +#ifdef LEGACY_CONF /** * Expand a anomalydetection object. * @@ -191,7 +274,31 @@ void applier::anomalydetection::expand_objects(configuration::state& s) { } s.anomalydetections() = std::move(new_ads); } +#else +/** + * Expand a anomalydetection object. + * + * @param[in,out] s State being applied. + */ +void applier::anomalydetection::expand_objects(configuration::State& s) { + std::list > expanded; + // Let's consider all the macros defined in s. + absl::flat_hash_set cvs; + for (auto& cv : s.macros_filter().data()) + cvs.emplace(cv); + + // Browse all anomalydetections. + for (auto& ad_cfg : *s.mutable_anomalydetections()) { + // Should custom variables be sent to broker ? + for (auto& cv : *ad_cfg.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + } +} +#endif +#ifdef LEGACY_CONF /** * Modified anomalydetection. 
* @@ -376,7 +483,7 @@ void applier::anomalydetection::modify_object( if (c.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_DELETE, s.get(), - c.first.c_str(), c.second.value().c_str(), &tv); + c.first, c.second.value(), &tv); } } s->custom_variables.clear(); @@ -388,16 +495,199 @@ void applier::anomalydetection::modify_object( if (c.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, s.get(), + c.first, c.second.value(), &tv); + } + } + } + + // Notify event broker. + broker_adaptive_service_data(NEBTYPE_SERVICE_UPDATE, NEBFLAG_NONE, + NEBATTR_NONE, s.get(), MODATTR_ALL); +} +#else +/** + * Modified anomalydetection. + * + * @param[in] obj The new anomalydetection to modify into the monitoring + * engine. + */ +void applier::anomalydetection::modify_object( + configuration::Anomalydetection* old_obj, + const configuration::Anomalydetection& new_obj) { + const std::string& host_name(old_obj->host_name()); + const std::string& service_description(old_obj->service_description()); + + // Logging. + SPDLOG_LOGGER_DEBUG(config_logger, + "Modifying new anomalydetection '{}' of host '{}'.", + service_description, host_name); + + // Find anomalydetection object. + service_id_map::iterator it_obj = + engine::anomalydetection::services_by_id.find( + {old_obj->host_id(), old_obj->service_id()}); + if (it_obj == engine::anomalydetection::services_by_id.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing anomalydetection object '{}' of host " + "'{}'", + service_description, host_name); + std::shared_ptr s = + std::static_pointer_cast(it_obj->second); + + // Modify properties. 
+ if (it_obj->second->get_hostname() != new_obj.host_name() || + it_obj->second->description() != new_obj.service_description()) { + engine::service::services.erase( + {it_obj->second->get_hostname(), it_obj->second->description()}); + engine::service::services.insert( + {{new_obj.host_name(), new_obj.service_description()}, it_obj->second}); + } + + s->set_hostname(new_obj.host_name()); + s->set_description(new_obj.service_description()); + s->set_display_name(new_obj.display_name()); + s->set_metric_name(new_obj.metric_name()); + s->set_thresholds_file(new_obj.thresholds_file()); + s->set_event_handler(new_obj.event_handler()); + s->set_event_handler_enabled(new_obj.event_handler_enabled()); + s->set_initial_state(static_cast( + new_obj.initial_state())); + s->set_check_interval(new_obj.check_interval()); + s->set_retry_interval(new_obj.retry_interval()); + s->set_max_attempts(new_obj.max_check_attempts()); + + s->set_notify_on( + (new_obj.notification_options() & action_svc_unknown ? notifier::unknown + : notifier::none) | + (new_obj.notification_options() & action_svc_warning ? notifier::warning + : notifier::none) | + (new_obj.notification_options() & action_svc_critical ? notifier::critical + : notifier::none) | + (new_obj.notification_options() & action_svc_ok ? notifier::ok + : notifier::none) | + (new_obj.notification_options() & action_svc_flapping + ? (notifier::flappingstart | notifier::flappingstop | + notifier::flappingdisabled) + : notifier::none) | + (new_obj.notification_options() & action_svc_downtime ? notifier::downtime + : notifier::none)); + + s->set_notification_interval( + static_cast(new_obj.notification_interval())); + s->set_first_notification_delay( + static_cast(new_obj.first_notification_delay())); + + s->add_stalk_on(new_obj.stalking_options() & action_svc_ok ? notifier::ok + : notifier::none); + s->add_stalk_on(new_obj.stalking_options() & action_svc_warning + ? 
notifier::warning + : notifier::none); + s->add_stalk_on(new_obj.stalking_options() & action_svc_unknown + ? notifier::unknown + : notifier::none); + s->add_stalk_on(new_obj.stalking_options() & action_svc_critical + ? notifier::critical + : notifier::none); + + s->set_notification_period(new_obj.notification_period()); + s->set_flap_detection_enabled(new_obj.flap_detection_enabled()); + s->set_low_flap_threshold(new_obj.low_flap_threshold()); + s->set_high_flap_threshold(new_obj.high_flap_threshold()); + + s->set_flap_detection_on(notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & action_svc_ok + ? notifier::ok + : notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & action_svc_warning + ? notifier::warning + : notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & action_svc_unknown + ? notifier::unknown + : notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & + action_svc_critical + ? 
notifier::critical + : notifier::none); + + s->set_process_performance_data( + static_cast(new_obj.process_perf_data())); + s->set_check_freshness(new_obj.check_freshness()); + s->set_freshness_threshold(new_obj.freshness_threshold()); + s->set_accept_passive_checks(new_obj.checks_passive()); + s->set_event_handler(new_obj.event_handler()); + s->set_checks_enabled(new_obj.checks_active()); + s->set_retain_status_information( + static_cast(new_obj.retain_status_information())); + s->set_retain_nonstatus_information( + static_cast(new_obj.retain_nonstatus_information())); + s->set_notifications_enabled(new_obj.notifications_enabled()); + s->set_obsess_over(new_obj.obsess_over_service()); + s->set_notes(new_obj.notes()); + s->set_notes_url(new_obj.notes_url()); + s->set_action_url(new_obj.action_url()); + s->set_icon_image(new_obj.icon_image()); + s->set_icon_image_alt(new_obj.icon_image_alt()); + s->set_is_volatile(new_obj.is_volatile()); + s->set_timezone(new_obj.timezone()); + s->set_host_id(new_obj.host_id()); + s->set_service_id(new_obj.service_id()); + s->set_acknowledgement_timeout(new_obj.acknowledgement_timeout() * + pb_config.interval_length()); + s->set_recovery_notification_delay(new_obj.recovery_notification_delay()); + + // Contacts. + if (!MessageDifferencer::Equals(new_obj.contacts(), old_obj->contacts())) { + // Delete old contacts. + s->mut_contacts().clear(); + + // Add contacts to host. + for (auto& contact_name : new_obj.contacts().data()) + s->mut_contacts().insert({contact_name, nullptr}); + } + + // Contact groups. + if (!MessageDifferencer::Equals(new_obj.contactgroups(), + old_obj->contactgroups())) { + // Delete old contact groups. + s->get_contactgroups().clear(); + + // Add contact groups to host. + for (auto& cg_name : new_obj.contactgroups().data()) + s->get_contactgroups().insert({cg_name, nullptr}); + } + + // Custom variables. 
+ if (!std::equal( + new_obj.customvariables().begin(), new_obj.customvariables().end(), + old_obj->customvariables().begin(), old_obj->customvariables().end(), + MessageDifferencer::Equals)) { + for (auto& c : s->custom_variables) { + if (c.second.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_DELETE, s.get(), c.first.c_str(), c.second.value().c_str(), &tv); } } + s->custom_variables.clear(); + + for (auto& c : new_obj.customvariables()) { + s->custom_variables[c.name()] = c.value(); + + if (c.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, s.get(), + c.name(), c.value(), &tv); + } + } } // Notify event broker. broker_adaptive_service_data(NEBTYPE_SERVICE_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, s.get(), MODATTR_ALL); } +#endif +#ifdef LEGACY_CONF /** * Remove old anomalydetection. * @@ -452,7 +742,55 @@ void applier::anomalydetection::remove_object( // Remove anomalydetection from the global configuration set. config->anomalydetections().erase(obj); } +#else +void applier::anomalydetection::remove_object(ssize_t idx) { + Anomalydetection& obj = pb_config.mutable_anomalydetections()->at(idx); + const std::string& host_name(obj.host_name()); + const std::string& service_description(obj.service_description()); + // Logging. + SPDLOG_LOGGER_DEBUG(config_logger, + "Removing anomalydetection '{}' of host '{}'.", + service_description, host_name); + + // Find anomalydetection. + auto it = + engine::service::services_by_id.find({obj.host_id(), obj.service_id()}); + if (it != engine::service::services_by_id.end()) { + std::shared_ptr ad( + std::static_pointer_cast(it->second)); + + // Remove anomalydetection comments. + comment::delete_service_comments(obj.host_id(), obj.service_id()); + + // Remove anomalydetection downtimes. 
+ downtime_manager::instance() + .delete_downtime_by_hostname_service_description_start_time_comment( + host_name, service_description, {false, (time_t)0}, ""); + + // Remove events related to this anomalydetection. + applier::scheduler::instance().remove_service(obj.host_id(), + obj.service_id()); + + // remove anomalydetection from servicegroup->members + for (auto& it_s : it->second->get_parent_groups()) + it_s->members.erase({host_name, service_description}); + + // Notify event broker. + broker_adaptive_service_data(NEBTYPE_SERVICE_DELETE, NEBFLAG_NONE, + NEBATTR_NONE, ad.get(), MODATTR_ALL); + + // Unregister anomalydetection. + engine::anomalydetection::services.erase({host_name, service_description}); + engine::anomalydetection::services_by_id.erase(it); + } + + // Remove anomalydetection from the global configuration set. + pb_config.mutable_anomalydetections()->DeleteSubrange(idx, 1); +} +#endif + +#if LEGACY_CONF /** * Resolve a anomalydetection. * @@ -492,7 +830,46 @@ void applier::anomalydetection::resolve_object( // Resolve anomalydetection. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * Resolve a anomalydetection. + * + * @param[in] obj Service object. + */ +void applier::anomalydetection::resolve_object( + const configuration::Anomalydetection& obj, + error_cnt& err) { + // Logging. + SPDLOG_LOGGER_DEBUG(config_logger, + "Resolving anomalydetection '{}' of host '{}'.", + obj.service_description(), obj.host_name()); + + // Find anomalydetection. + service_id_map::iterator it = engine::anomalydetection::services_by_id.find( + {obj.host_id(), obj.service_id()}); + if (engine::anomalydetection::services_by_id.end() == it) + throw engine_error() << "Cannot resolve non-existing anomalydetection '" + << obj.service_description() << "' of host '" + << obj.host_name() << "'"; + + // Remove anomalydetection group links. + it->second->get_parent_groups().clear(); + + // Find host and adjust its counters. 
+ host_id_map::iterator hst(engine::host::hosts_by_id.find(it->first.first)); + if (hst != engine::host::hosts_by_id.end()) { + hst->second->set_total_services(hst->second->get_total_services() + 1); + hst->second->set_total_service_check_interval( + hst->second->get_total_service_check_interval() + + static_cast(it->second->check_interval())); + } + + // Resolve anomalydetection. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif +#ifdef LEGACY_CONF /** * Expand anomalydetection instance memberships. * @@ -528,7 +905,9 @@ void applier::anomalydetection::_expand_service_memberships( s.servicegroups().insert(backup); } } +#endif +#ifdef LEGACY_CONF /** * @brief Inherits special variables from host. * @@ -569,3 +948,4 @@ void applier::anomalydetection::_inherits_special_vars( obj.timezone(it->timezone()); } } +#endif diff --git a/engine/src/configuration/applier/command.cc b/engine/src/configuration/applier/command.cc index da25e1189ae..8ba46fc3ef8 100644 --- a/engine/src/configuration/applier/command.cc +++ b/engine/src/configuration/applier/command.cc @@ -32,16 +32,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; -/** - * Default constructor. - */ -applier::command::command() {} - -/** - * Destructor. - */ -applier::command::~command() throw() {} - +#ifdef LEGACY_CONF /** * Add new command. * @@ -85,7 +76,52 @@ void applier::command::add_object(configuration::command const& obj) { } } } +#else +/** + * Add new command. + * + * @param[in] obj The new command to add into the monitoring engine. + */ +void applier::command::add_object(const configuration::Command& obj) { + // Logging. + config_logger->debug("Creating new command '{}'.", obj.command_name()); + + // Add command to the global configuration set. 
+ auto* cmd = pb_config.add_commands(); + cmd->CopyFrom(obj); + if (obj.connector().empty()) { + auto raw = std::make_shared( + obj.command_name(), obj.command_line(), &checks::checker::instance()); + commands::command::commands[raw->get_name()] = std::move(raw); + } else { + connector_map::iterator found_con{ + commands::connector::connectors.find(obj.connector())}; + if (found_con != commands::connector::connectors.end() && + found_con->second) { + std::shared_ptr forward{ + std::make_shared( + obj.command_name(), obj.command_line(), found_con->second)}; + commands::command::commands[forward->get_name()] = forward; + } else { + std::shared_ptr otel_cmd = + commands::otel_connector::get_otel_connector(obj.connector()); + if (otel_cmd) { + std::shared_ptr forward{ + std::make_shared(obj.command_name(), + obj.command_line(), otel_cmd)}; + commands::command::commands[forward->get_name()] = forward; + } else { + throw engine_error() << fmt::format( + "Could not register command '{}': unable to find '{}'", + obj.command_name(), obj.connector()); + } + } + } +} +#endif + +#ifdef LEGACY_CONF /** * @brief Expand command. * @@ -97,7 +133,20 @@ void applier::command::add_object(configuration::command const& obj) { void applier::command::expand_objects(configuration::state& s) { (void)s; } +#else +/** + * @brief Expand command. + * + * Command configuration objects do not need expansion. Therefore this + * method does nothing. + * + * @param[in] s Unused. + */ +void applier::command::expand_objects(configuration::State& s + [[maybe_unused]]) {} +#endif +#ifdef LEGACY_CONF /** * Modified command. * @@ -168,7 +217,73 @@ void applier::command::modify_object(configuration::command const& obj) { broker_command_data(NEBTYPE_COMMAND_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, c, &tv); } +#else +/** + * @brief Modify command. + * + * @param obj The new command protobuf configuration for the object to modify + * in the monitoring engine. 
+ */ +void applier::command::modify_object(configuration::Command* to_modify, + const configuration::Command& new_obj) { + // Logging. + config_logger->debug("Modifying command '{}'.", new_obj.command_name()); + + // Find command object. + command_map::iterator it_obj = + commands::command::commands.find(new_obj.command_name()); + if (it_obj == commands::command::commands.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing command object '{}'", + new_obj.command_name()); + + // Update the global configuration set. + to_modify->CopyFrom(new_obj); + + // Command will be temporarily removed from the command set but + // will be added back right after with _create_command. This does + // not create dangling pointers since commands::command object are + // not referenced anywhere, only ::command objects are. + // commands::command::commands.erase(obj.command_name()); + if (new_obj.connector().empty()) { + auto raw = std::make_shared(new_obj.command_name(), + new_obj.command_line(), + &checks::checker::instance()); + it_obj->second = raw; + } else { + connector_map::iterator found_con{ + commands::connector::connectors.find(new_obj.connector())}; + if (found_con != commands::connector::connectors.end() && + found_con->second) { + std::shared_ptr forward{ + std::make_shared(new_obj.command_name(), + new_obj.command_line(), + found_con->second)}; + it_obj->second = forward; + } else { + std::shared_ptr otel_cmd = + commands::otel_connector::get_otel_connector(new_obj.connector()); + if (otel_cmd) { + std::shared_ptr forward{ + std::make_shared( + new_obj.command_name(), new_obj.command_line(), otel_cmd)}; + it_obj->second = forward; + } else { + throw engine_error() << fmt::format( + "Could not register command '{}': unable to find '{}'", + new_obj.command_name(), new_obj.connector()); + } + } + } + // Notify event broker. 
+ timeval tv(get_broker_timestamp(NULL)); + commands::command* c = it_obj->second.get(); + broker_command_data(NEBTYPE_COMMAND_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, c, + &tv); +} +#endif +#ifdef LEGACY_CONF /** * Remove old command. * @@ -200,7 +315,40 @@ void applier::command::remove_object(configuration::command const& obj) { // Remove command from the global configuration set. config->commands().erase(obj); } +#else +/** + * @brief Remove a protobuf command configuration at index idx + * + * @param idx The position in configuration of the configuration to remove. + */ +void applier::command::remove_object(ssize_t idx) { + const configuration::Command& obj = pb_config.commands()[idx]; + // Logging. + config_logger->debug("Removing command '{}'.", obj.command_name()); + // Find command. + std::unordered_map >::iterator + it = commands::command::commands.find(obj.command_name()); + if (it != commands::command::commands.end()) { + commands::command* cmd(it->second.get()); + + // Notify event broker. + timeval tv(get_broker_timestamp(NULL)); + broker_command_data(NEBTYPE_COMMAND_DELETE, NEBFLAG_NONE, NEBATTR_NONE, cmd, + &tv); + + // Erase command (will effectively delete the object). + commands::command::commands.erase(it); + } else + throw engine_error() << fmt::format( + "Could not remove command '{}': it does not exist", obj.command_name()); + + // Remove command from the global configuration set. + pb_config.mutable_commands()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * @brief Resolve command. * @@ -220,3 +368,24 @@ void applier::command::resolve_object(configuration::command const& obj, } } } +#else +/** + * @brief Resolve command. + * + * This method will check for its connector's existence, if command is + * configured to use one. + * + * @param[in] obj Command object. 
+ */ +void applier::command::resolve_object(const configuration::Command& obj, + error_cnt& err [[maybe_unused]]) { + if (!obj.connector().empty()) { + connector_map::iterator found = + commands::connector::connectors.find(obj.connector()); + if (found == commands::connector::connectors.end() || !found->second) { + if (!commands::otel_connector::get_otel_connector(obj.connector())) + throw engine_error() << "unknow command " << obj.connector(); + } + } +} +#endif diff --git a/engine/src/configuration/applier/connector.cc b/engine/src/configuration/applier/connector.cc index f44a12df06c..0ff12f3efe4 100644 --- a/engine/src/configuration/applier/connector.cc +++ b/engine/src/configuration/applier/connector.cc @@ -32,6 +32,7 @@ using namespace com::centreon::engine::configuration; constexpr std::string_view _otel_fake_exe("opentelemetry"); +#ifdef LEGACY_CONF /** * Add new connector. * @@ -47,32 +48,72 @@ void applier::connector::add_object(configuration::connector const& obj) { nagios_macros* macros(get_global_macros()); std::string command_line; process_macros_r(macros, obj.connector_line(), command_line, 0); - std::string processed_cmd(command_line); // Add connector to the global configuration set. config->connectors().insert(obj); // Create connector. 
- boost::trim(processed_cmd); + boost::trim(command_line); // if executable connector path ends with opentelemetry, it's a fake // opentelemetry connector - size_t end_path = processed_cmd.find(' '); - size_t otel_pos = processed_cmd.find(_otel_fake_exe); + size_t end_path = command_line.find(' '); + size_t otel_pos = command_line.find(_otel_fake_exe); if (otel_pos < end_path) { commands::otel_connector::create( obj.connector_name(), boost::algorithm::trim_copy( - processed_cmd.substr(otel_pos + _otel_fake_exe.length())), + command_line.substr(otel_pos + _otel_fake_exe.length())), &checks::checker::instance()); } else { auto cmd = std::make_shared( - obj.connector_name(), processed_cmd, &checks::checker::instance()); + obj.connector_name(), command_line, &checks::checker::instance()); commands::connector::connectors[obj.connector_name()] = cmd; } } +#else +/** + * @brief Add new connector. + * + * @param obj The new connector to add into the monitoring engine. + */ +void applier::connector::add_object(const configuration::Connector& obj) { + // Logging. + config_logger->debug("Creating new connector '{}'.", obj.connector_name()); + + // Expand command line. + nagios_macros* macros = get_global_macros(); + std::string command_line; + process_macros_r(macros, obj.connector_line(), command_line, 0); + // Add connector to the global configuration set. + auto* cfg_cnn = pb_config.add_connectors(); + cfg_cnn->CopyFrom(obj); + + // Create connector. + boost::trim(command_line); + + // If executable connector path ends with opentelemetry, it's a fake + // opentelemetry connector. 
+ size_t end_path = command_line.find(' '); + size_t otel_pos = command_line.find(_otel_fake_exe); + + if (otel_pos < end_path) { + commands::otel_connector::create( + obj.connector_name(), + boost::algorithm::trim_copy( + command_line.substr(otel_pos + _otel_fake_exe.length())), + &checks::checker::instance()); + } else { + auto cmd = std::make_shared( + obj.connector_name(), command_line, &checks::checker::instance()); + commands::connector::connectors[obj.connector_name()] = cmd; + } +} +#endif + +#ifdef LEGACY_CONF /** * @brief Expand connector. * @@ -84,7 +125,20 @@ void applier::connector::add_object(configuration::connector const& obj) { void applier::connector::expand_objects(configuration::state& s) { (void)s; } +#else +/** + * @brief Expand connector. + * + * Connector configuration objects do not need expansion. Therefore + * this method only copy obj to expanded. + * + * @param[in] s Unused. + */ +void applier::connector::expand_objects(configuration::State& s + [[maybe_unused]]) {} +#endif +#ifdef LEGACY_CONF /** * Modify connector. 
* @@ -106,27 +160,23 @@ void applier::connector::modify_object(configuration::connector const& obj) { nagios_macros* macros(get_global_macros()); std::string command_line; process_macros_r(macros, obj.connector_line(), command_line, 0); - std::string processed_cmd(command_line); - boost::trim(processed_cmd); + boost::trim(command_line); // if executable connector path ends with opentelemetry, it's a fake // opentelemetry connector - size_t end_path = processed_cmd.find(' '); - size_t otel_pos = processed_cmd.find(_otel_fake_exe); + size_t end_path = command_line.find(' '); + size_t otel_pos = command_line.find(_otel_fake_exe); connector_map::iterator exist_connector( commands::connector::connectors.find(obj.key())); if (otel_pos < end_path) { - std::string otel_cmdline = boost::algorithm::trim_copy( - processed_cmd.substr(otel_pos + _otel_fake_exe.length())); - - if (!commands::otel_connector::update(obj.key(), processed_cmd)) { + if (!commands::otel_connector::update(obj.key(), command_line)) { // connector object become an otel fake connector if (exist_connector != commands::connector::connectors.end()) { commands::connector::connectors.erase(exist_connector); - commands::otel_connector::create(obj.key(), processed_cmd, + commands::otel_connector::create(obj.key(), command_line, &checks::checker::instance()); } else { throw com::centreon::exceptions::msg_fmt( @@ -136,12 +186,12 @@ void applier::connector::modify_object(configuration::connector const& obj) { } else { if (exist_connector != commands::connector::connectors.end()) { // Set the new command line. 
- exist_connector->second->set_command_line(processed_cmd); + exist_connector->second->set_command_line(command_line); } else { // old otel_connector => connector if (commands::otel_connector::remove(obj.key())) { auto cmd = std::make_shared( - obj.connector_name(), processed_cmd, &checks::checker::instance()); + obj.connector_name(), command_line, &checks::checker::instance()); commands::connector::connectors[obj.connector_name()] = cmd; } else { @@ -155,7 +205,73 @@ void applier::connector::modify_object(configuration::connector const& obj) { config->connectors().erase(it_cfg); config->connectors().insert(obj); } +#else +/** + * @brief Modify connector + * + * @param to_modify The current configuration connector + * @param new_obj The new one. + */ +void applier::connector::modify_object( + configuration::Connector* to_modify, + const configuration::Connector& new_obj) { + // Logging. + config_logger->debug("Modifying connector '{}'.", new_obj.connector_name()); + // Expand command line. 
+ nagios_macros* macros(get_global_macros()); + std::string command_line; + process_macros_r(macros, new_obj.connector_line(), command_line, 0); + + boost::trim(command_line); + + // if executable connector path ends with opentelemetry, it's a fake + // opentelemetry connector + size_t end_path = command_line.find(' '); + size_t otel_pos = command_line.find(_otel_fake_exe); + + connector_map::iterator current_connector( + commands::connector::connectors.find(new_obj.connector_name())); + + if (otel_pos < end_path) { + if (!commands::otel_connector::update(new_obj.connector_name(), + command_line)) { + // connector object becomes an otel fake connector + if (current_connector != commands::connector::connectors.end()) { + commands::connector::connectors.erase(current_connector); + commands::otel_connector::create(new_obj.connector_name(), command_line, + &checks::checker::instance()); + } else { + throw com::centreon::exceptions::msg_fmt( + "unknown open telemetry command to update: {}", + new_obj.connector_name()); + } + } + } else { + if (current_connector != commands::connector::connectors.end()) { + // Set the new command line. + current_connector->second->set_command_line(command_line); + } else { + // old otel_connector => connector + if (commands::otel_connector::remove(new_obj.connector_name())) { + auto cmd = std::make_shared( + new_obj.connector_name(), command_line, + &checks::checker::instance()); + commands::connector::connectors[new_obj.connector_name()] = cmd; + + } else { + throw com::centreon::exceptions::msg_fmt( + "unknown connector to update: {}", new_obj.connector_name()); + } + } + } + + // Update the global configuration set. + to_modify->CopyFrom(new_obj); +} +#endif + +#ifdef LEGACY_CONF /** * Remove old connector. * @@ -180,7 +296,28 @@ void applier::connector::remove_object(configuration::connector const& obj) { // Remove connector from the global configuration set. 
config->connectors().erase(obj); } +#else +void applier::connector::remove_object(ssize_t idx) { + // Logging. + const configuration::Connector& obj = pb_config.connectors()[idx]; + config_logger->debug("Removing connector '{}'.", obj.connector_name()); + + // Find connector. + connector_map::iterator it = + commands::connector::connectors.find(obj.connector_name()); + if (it != commands::connector::connectors.end()) { + // Remove connector object. + commands::connector::connectors.erase(it); + } + + commands::otel_connector::remove(obj.connector_name()); + + // Remove connector from the global configuration set. + pb_config.mutable_connectors()->DeleteSubrange(idx, 1); +} +#endif +#ifdef LEGACY_CONF /** * @brief Resolve a connector. * @@ -192,3 +329,15 @@ void applier::connector::remove_object(configuration::connector const& obj) { void applier::connector::resolve_object(configuration::connector const& obj [[maybe_unused]], error_cnt& err [[maybe_unused]]) {} +#else +/** + * @brief Resolve a connector. + * + * Connector objects do not need resolution. Therefore this method does + * nothing. + * + * @param[in] obj Unused. + */ +void applier::connector::resolve_object(const configuration::Connector&, + error_cnt& err [[maybe_unused]]) {} +#endif diff --git a/engine/src/configuration/applier/contact.cc b/engine/src/configuration/applier/contact.cc index b96e9a03880..4f00a4bec47 100644 --- a/engine/src/configuration/applier/contact.cc +++ b/engine/src/configuration/applier/contact.cc @@ -31,23 +31,7 @@ using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; -/** - * Check if the contact group name matches the configuration object. 
- */ -class contactgroup_name_comparator { - public: - contactgroup_name_comparator(std::string const& contactgroup_name) { - _contactgroup_name = contactgroup_name; - } - - bool operator()(std::shared_ptr cg) { - return _contactgroup_name == cg->contactgroup_name(); - } - - private: - std::string _contactgroup_name; -}; - +#ifdef LEGACY_CONF /** * Add new contact. * @@ -67,19 +51,15 @@ void applier::contact::add_object(configuration::contact const& obj) { config->contacts().insert(obj); // Create address list. - std::array addresses; - { - unsigned int i{0}; - for (tab_string::const_iterator it(obj.address().begin()), - end(obj.address().end()); - it != end; ++it, ++i) - addresses[i] = *it; - } + std::vector addresses; + std::copy(obj.address().begin(), obj.address().end(), + std::back_inserter(addresses)); // Create contact. std::shared_ptr c(add_contact( - obj.contact_name(), obj.alias(), obj.email(), obj.pager(), addresses, - obj.service_notification_period(), obj.host_notification_period(), + obj.contact_name(), obj.alias(), obj.email(), obj.pager(), + std::move(addresses), obj.service_notification_period(), + obj.host_notification_period(), static_cast(obj.service_notification_options() & service::ok), static_cast(obj.service_notification_options() & service::critical), static_cast(obj.service_notification_options() & service::warning), @@ -113,12 +93,81 @@ void applier::contact::add_object(configuration::contact const& obj) { if (it->second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_ADD, c.get(), - it->first.c_str(), it->second.value().c_str(), - &tv); + it->first, it->second.value(), &tv); } } } +#else +/** + * Add new contact. + * + * @param[in] obj The new contact to add into the monitoring engine. + */ +void applier::contact::add_object(const configuration::Contact& obj) { + // Make sure we have the data we need. 
+ if (obj.contact_name().empty()) + throw engine_error() << "Could not register contact with an empty name"; + // Logging. + config_logger->debug("Creating new contact '{}'.", obj.contact_name()); + + // Add contact to the global configuration set. + configuration::Contact* ct_cfg = pb_config.add_contacts(); + ct_cfg->CopyFrom(obj); + + // Create address list. + std::vector addresses; + std::copy(obj.address().begin(), obj.address().end(), + std::back_inserter(addresses)); + + // Create contact. + std::shared_ptr c(add_contact( + obj.contact_name(), obj.alias(), obj.email(), obj.pager(), + std::move(addresses), obj.service_notification_period(), + obj.host_notification_period(), + static_cast(obj.service_notification_options() & action_svc_ok), + static_cast(obj.service_notification_options() & + action_svc_critical), + static_cast(obj.service_notification_options() & + action_svc_warning), + static_cast(obj.service_notification_options() & + action_svc_unknown), + static_cast(obj.service_notification_options() & + action_svc_flapping), + static_cast(obj.service_notification_options() & + action_svc_downtime), + static_cast(obj.host_notification_options() & action_hst_up), + static_cast(obj.host_notification_options() & action_hst_down), + static_cast(obj.host_notification_options() & + action_hst_unreachable), + static_cast(obj.host_notification_options() & action_hst_flapping), + static_cast(obj.host_notification_options() & action_hst_downtime), + obj.host_notifications_enabled(), obj.service_notifications_enabled(), + obj.can_submit_commands(), obj.retain_status_information(), + obj.retain_nonstatus_information())); + if (!c) + throw engine_error() << "Could not register contact '" << obj.contact_name() + << "'"; + c->set_timezone(obj.timezone()); + + // Add new items to the configuration state. + engine::contact::contacts.insert({c->get_name(), c}); + + // Add all custom variables. 
+ for (const configuration::CustomVariable& cv : obj.customvariables()) { + c->get_custom_variables()[cv.name()] = + engine::customvariable(cv.value(), cv.is_sent()); + + if (cv.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_ADD, c.get(), + cv.name(), cv.value(), &tv); + } + } +} +#endif + +#ifdef LEGACY_CONF /** * Modified contact. * @@ -255,8 +304,7 @@ void applier::contact::modify_object(configuration::contact const& obj) { if (cus.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_DELETE, c, - cus.first.c_str(), cus.second.value().c_str(), - &tv); + cus.first, cus.second.value(), &tv); } } c->get_custom_variables().clear(); @@ -267,13 +315,207 @@ void applier::contact::modify_object(configuration::contact const& obj) { cv.set_sent(cus.second.is_sent()); if (cus.second.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_ADD, c, cus.first, + cus.second.value(), &tv); + } + } + } + + // Notify event broker. + timeval tv(get_broker_timestamp(NULL)); + broker_adaptive_contact_data(NEBTYPE_CONTACT_UPDATE, NEBFLAG_NONE, + NEBATTR_NONE, c, CMD_NONE, MODATTR_ALL, + MODATTR_ALL, MODATTR_ALL, MODATTR_ALL, + MODATTR_ALL, MODATTR_ALL, &tv); +} +#else +/** + * @brief Modify a contact from a protobuf contact configuration. + * + * @param obj The new contact to modify into the monitoring engine. + */ +void applier::contact::modify_object(configuration::Contact* to_modify, + const configuration::Contact& new_object) { + // Logging. + config_logger->debug("Modifying contact '{}'.", new_object.contact_name()); + + // Find contact object. 
+ contact_map::iterator it_obj( + engine::contact::contacts.find(to_modify->contact_name())); + if (it_obj == engine::contact::contacts.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing contact object '{}'", + to_modify->contact_name()); + engine::contact* c = it_obj->second.get(); + + // Update the global configuration set. + to_modify->CopyFrom(new_object); + + // Modify contact. + const std::string& tmp(new_object.alias().empty() ? new_object.contact_name() + : new_object.alias()); + if (c->get_alias() != tmp) + c->set_alias(tmp); + if (c->get_email() != new_object.email()) + c->set_email(new_object.email()); + if (c->get_pager() != new_object.pager()) + c->set_pager(new_object.pager()); + + std::vector addr; + addr.reserve(new_object.address().size()); + std::copy(new_object.address().begin(), new_object.address().end(), + std::back_inserter(addr)); + c->set_addresses(std::move(addr)); + + c->set_notify_on( + notifier::service_notification, + (new_object.service_notification_options() & action_svc_unknown + ? notifier::unknown + : notifier::none) | + (new_object.service_notification_options() & action_svc_warning + ? notifier::warning + : notifier::none) | + (new_object.service_notification_options() & action_svc_critical + ? notifier::critical + : notifier::none) | + (new_object.service_notification_options() & action_svc_ok + ? notifier::ok + : notifier::none) | + (new_object.service_notification_options() & action_svc_flapping + ? (notifier::flappingstart | notifier::flappingstop | + notifier::flappingdisabled) + : notifier::none) | + (new_object.service_notification_options() & action_svc_downtime + ? notifier::downtime + : notifier::none)); + c->set_notify_on( + notifier::host_notification, + (new_object.host_notification_options() & action_hst_down + ? notifier::down + : notifier::none) | + (new_object.host_notification_options() & action_hst_unreachable + ? 
notifier::unreachable + : notifier::none) | + (new_object.host_notification_options() & action_hst_up + ? notifier::up + : notifier::none) | + (new_object.host_notification_options() & action_hst_flapping + ? (notifier::flappingstart | notifier::flappingstop | + notifier::flappingdisabled) + : notifier::none) | + (new_object.host_notification_options() & action_hst_downtime + ? notifier::downtime + : notifier::none)); + if (c->get_host_notification_period() != + new_object.host_notification_period()) + c->set_host_notification_period(new_object.host_notification_period()); + if (c->get_service_notification_period() != + new_object.service_notification_period()) + c->set_service_notification_period( + new_object.service_notification_period()); + if (c->get_host_notifications_enabled() != + new_object.host_notifications_enabled()) + c->set_host_notifications_enabled(new_object.host_notifications_enabled()); + if (c->get_service_notifications_enabled() != + new_object.service_notifications_enabled()) + c->set_service_notifications_enabled( + new_object.service_notifications_enabled()); + if (c->get_can_submit_commands() != new_object.can_submit_commands()) + c->set_can_submit_commands(new_object.can_submit_commands()); + if (c->get_retain_status_information() != + new_object.retain_status_information()) + c->set_retain_status_information(new_object.retain_status_information()); + if (c->get_retain_nonstatus_information() != + new_object.retain_nonstatus_information()) + c->set_retain_nonstatus_information( + new_object.retain_nonstatus_information()); + c->set_timezone(new_object.timezone()); + + // Host notification commands. 
+ if (!MessageDifferencer::Equals(new_object.host_notification_commands(), + to_modify->host_notification_commands())) { + c->get_host_notification_commands().clear(); + + for (auto& cfg_c : new_object.host_notification_commands().data()) { + command_map::const_iterator itt = commands::command::commands.find(cfg_c); + if (itt != commands::command::commands.end()) + c->get_host_notification_commands().push_back(itt->second); + else + throw engine_error() << fmt::format( + "Could not add host notification command '{}' to contact '{}': the " + "command does not exist", + cfg_c, new_object.contact_name()); + } + } + + // Service notification commands. + if (!MessageDifferencer::Equals(new_object.service_notification_commands(), + to_modify->service_notification_commands())) { + c->get_service_notification_commands().clear(); + + for (auto& cfg_c : new_object.service_notification_commands().data()) { + command_map::const_iterator itt = commands::command::commands.find(cfg_c); + if (itt != commands::command::commands.end()) + c->get_service_notification_commands().push_back(itt->second); + else + throw engine_error() << fmt::format( + "Could not add service notification command '{}' to contact '{}': " + "the command does not exist", + cfg_c, new_object.contact_name()); + } + } + + // Custom variables. 
+ absl::flat_hash_set keys; + for (auto& cfg_cv : new_object.customvariables()) { + keys.emplace(cfg_cv.name()); + auto found = c->get_custom_variables().find(cfg_cv.name()); + if (found != c->get_custom_variables().end()) { + if (found->second.value() != cfg_cv.value() || + found->second.is_sent() != cfg_cv.is_sent() || + found->second.has_been_modified()) { + found->second.set_value(cfg_cv.value()); + found->second.set_sent(cfg_cv.is_sent()); + if (found->second.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_DELETE, c, + found->first.c_str(), + found->second.value().c_str(), &tv); + broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_ADD, c, + found->first.c_str(), + found->second.value().c_str(), &tv); + } + } + } else { + c->get_custom_variables().emplace( + cfg_cv.name(), + engine::customvariable(cfg_cv.value(), cfg_cv.is_sent())); + if (cfg_cv.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_ADD, c, - cus.first.c_str(), cus.second.value().c_str(), + cfg_cv.name().c_str(), cfg_cv.value().c_str(), &tv); } } } + if (static_cast(new_object.customvariables().size()) != + c->get_custom_variables().size()) { + // There are custom variables to remove... + for (auto it = c->get_custom_variables().begin(); + it != c->get_custom_variables().end();) { + if (!keys.contains(it->first)) { + if (it->second.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_CONTACTCUSTOMVARIABLE_DELETE, c, + it->first.c_str(), it->second.value().c_str(), + &tv); + } + it = c->get_custom_variables().erase(it); + } else + ++it; + } + } // Notify event broker. timeval tv(get_broker_timestamp(NULL)); @@ -282,7 +524,9 @@ void applier::contact::modify_object(configuration::contact const& obj) { MODATTR_ALL, MODATTR_ALL, MODATTR_ALL, MODATTR_ALL, MODATTR_ALL, &tv); } +#endif +#ifdef LEGACY_CONF /** * Remove old contact. 
* @@ -316,7 +560,43 @@ void applier::contact::remove_object(configuration::contact const& obj) { // Remove contact from the global configuration set. config->contacts().erase(obj); } +#else +/** + * Remove old contact. + * + * @param[in] obj The new contact to remove from the monitoring engine. + */ +void applier::contact::remove_object(ssize_t idx) { + const configuration::Contact& obj = pb_config.contacts()[idx]; + // Logging. + config_logger->debug("Removing contact '{}'.", obj.contact_name()); + + // Find contact. + contact_map::iterator it{engine::contact::contacts.find(obj.contact_name())}; + if (it != engine::contact::contacts.end()) { + engine::contact* cntct(it->second.get()); + + for (auto& it_c : cntct->get_parent_groups()) + it_c.second->get_members().erase(obj.contact_name()); + + // Notify event broker. + timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_contact_data(NEBTYPE_CONTACT_DELETE, NEBFLAG_NONE, + NEBATTR_NONE, cntct, CMD_NONE, MODATTR_ALL, + MODATTR_ALL, MODATTR_ALL, MODATTR_ALL, + MODATTR_ALL, MODATTR_ALL, &tv); + + // Erase contact object (this will effectively delete the object). + engine::contact::contacts.erase(it); + } + + // Remove contact from the global configuration set. + pb_config.mutable_contacts()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * @brief Expand a contact. * @@ -365,7 +645,50 @@ void applier::contact::expand_objects(configuration::state& s) { } s.contacts() = std::move(new_contacts); } +#else +/** + * @brief Expand a contact. + * + * During expansion, the contact will be added to its contact groups. + * These will be modified in the state. + * + * @param[in,out] s Configuration state. + */ +void applier::contact::expand_objects(configuration::State& s) { + // Let's consider all the macros defined in s. + absl::flat_hash_set cvs; + for (auto& cv : s.macros_filter().data()) + cvs.emplace(cv); + // Browse all contacts. 
+ for (auto& c : *s.mutable_contacts()) { + // Should custom variables be sent to broker ? + for (auto& cv : *c.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + + // Browse current contact's groups. + for (auto& cg : *c.mutable_contactgroups()->mutable_data()) { + // Find contact group. + Contactgroup* found_cg = nullptr; + for (auto& cgg : *s.mutable_contactgroups()) + if (cgg.contactgroup_name() == cg) { + found_cg = &cgg; + break; + } + if (found_cg == nullptr) + throw engine_error() << fmt::format( + "Could not add contact '{}' to non-existing contact group '{}'", + c.contact_name(), cg); + + fill_string_group(found_cg->mutable_members(), c.contact_name()); + } + } +} +#endif + +#ifdef LEGACY_CONF /** * Resolve a contact. * @@ -426,3 +749,60 @@ void applier::contact::resolve_object(const configuration::contact& obj, // Resolve contact. ct_it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * Resolve a contact. + * + * @param[in,out] obj Object to resolve. + */ +void applier::contact::resolve_object(const configuration::Contact& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving contact '{}'.", obj.contact_name()); + + // Find contact. + contact_map::const_iterator ct_it{ + engine::contact::contacts.find(obj.contact_name())}; + if (ct_it == engine::contact::contacts.end() || !ct_it->second) + throw engine_error() << fmt::format( + "Cannot resolve non-existing contact '{}'", obj.contact_name()); + + ct_it->second->get_host_notification_commands().clear(); + + // Add all the host notification commands. 
+ for (auto& cmd : obj.host_notification_commands().data()) { + command_map::const_iterator itt(commands::command::commands.find(cmd)); + if (itt != commands::command::commands.end()) + ct_it->second->get_host_notification_commands().push_back(itt->second); + else { + ++err.config_errors; + throw engine_error() << fmt::format( + "Could not add host notification command '{}' to contact '{}': the " + "command does not exist", + cmd, obj.contact_name()); + } + } + + ct_it->second->get_service_notification_commands().clear(); + + // Add all the service notification commands. + for (auto& cmd : obj.service_notification_commands().data()) { + command_map::const_iterator itt(commands::command::commands.find(cmd)); + if (itt != commands::command::commands.end()) + ct_it->second->get_service_notification_commands().push_back(itt->second); + else { + ++err.config_errors; + throw engine_error() << fmt::format( + "Could not add service notification command '{}' to contact '{}': " + "the command does not exist", + cmd, obj.contact_name()); + } + } + + // Remove contact group links. + ct_it->second->get_parent_groups().clear(); + + // Resolve contact. + ct_it->second->resolve(err.config_warnings, err.config_errors); +} +#endif diff --git a/engine/src/configuration/applier/contactgroup.cc b/engine/src/configuration/applier/contactgroup.cc index 38c8522ccbd..bf872426fd6 100644 --- a/engine/src/configuration/applier/contactgroup.cc +++ b/engine/src/configuration/applier/contactgroup.cc @@ -29,6 +29,7 @@ using namespace com::centreon::engine::configuration; using namespace com::centreon::engine::logging; +#ifdef LEGACY_CONF /** * Add new contactgroup * @@ -74,7 +75,49 @@ void applier::contactgroup::add_object(configuration::contactgroup const& obj) { engine::contactgroup::contactgroups.insert({name, cg}); } +#else +/** + * Add new contactgroup + * + * @param[in] obj The new contactgroup to add into the monitoring engine. 
+ */ +void applier::contactgroup::add_object(const configuration::Contactgroup& obj) { + const std::string& name(obj.contactgroup_name()); + + // Logging. + config_logger->debug("Creating new contactgroup '{}'.", name); + + if (engine::contactgroup::contactgroups.find(name) != + engine::contactgroup::contactgroups.end()) + throw engine_error() << "Contactgroup '" << name + << "' has already been defined"; + + // Add contact group to the global configuration set. + configuration::Contactgroup* c_cg = pb_config.add_contactgroups(); + c_cg->CopyFrom(obj); + // Create contact group. + auto cg = std::make_shared(obj); + for (auto& member : obj.members().data()) { + auto ct_it{engine::contact::contacts.find(member)}; + if (ct_it == engine::contact::contacts.end()) { + config_logger->error( + "Error: Contact '{}' specified in contact group '{}' is not defined " + "anywhere!", + member, cg->get_name()); + throw engine_error() << "Error: Cannot resolve contact group " + << obj.contactgroup_name() << "'"; + } else { + cg->get_members().insert({ct_it->first, ct_it->second.get()}); + broker_group(NEBTYPE_CONTACTGROUP_ADD, cg.get()); + } + } + + engine::contactgroup::contactgroups.insert({name, cg}); +} +#endif + +#ifdef LEGACY_CONF /** * Expand all contactgroups. * @@ -94,7 +137,21 @@ void applier::contactgroup::expand_objects(configuration::state& s) { it != end; ++it) s.contactgroups().insert(it->second); } +#else +/** + * @brief Expand all contactgroups. + * + * @param s State being applied. + */ +void applier::contactgroup::expand_objects(configuration::State& s) { + absl::flat_hash_set resolved; + + for (auto& cg : *s.mutable_contactgroups()) + _resolve_members(s, cg, resolved); +} +#endif +#ifdef LEGACY_CONF /** * Modified contactgroup. * @@ -161,7 +218,63 @@ void applier::contactgroup::modify_object( // Notify event broker. broker_group(NEBTYPE_CONTACTGROUP_UPDATE, it_obj->second.get()); } +#else +/** + * @brief Modify a contactgroup configuration. 
+ * + * @param to_modify A pointer to the configuration to modify. + * @param new_object A const reference to the configuration to apply. + */ +void applier::contactgroup::modify_object( + configuration::Contactgroup* to_modify, + const configuration::Contactgroup& new_object) { + // Logging. + config_logger->debug("Modifying contactgroup '{}'", to_modify->contactgroup_name()); + + // Find contact group object. + contactgroup_map::iterator it_obj = + engine::contactgroup::contactgroups.find(new_object.contactgroup_name()); + if (it_obj == engine::contactgroup::contactgroups.end()) + throw engine_error() << fmt::format( + "Error: Could not modify non-existing contact group object '{}", + new_object.contactgroup_name()); + + // Modify properties. + if (it_obj->second->get_alias() != new_object.alias()) { + it_obj->second->set_alias(new_object.alias()); + to_modify->set_alias(new_object.alias()); + } + + if (!MessageDifferencer::Equals(new_object.members(), to_modify->members())) { + // delete all old contact group members + to_modify->mutable_members()->CopyFrom(new_object.members()); + it_obj->second->clear_members(); + + for (auto& contact : new_object.members().data()) { + contact_map::const_iterator ct_it{ + engine::contact::contacts.find(contact)}; + if (ct_it == engine::contact::contacts.end()) { + config_logger->error( + "Error: Contact '{}' specified in contact group '{}' is not " + "defined anywhere!", + contact, it_obj->second->get_name()); + throw engine_error() + << fmt::format("Error: Cannot resolve contact group '{}'", + new_object.contactgroup_name()); + } else { + it_obj->second->get_members().insert( + {ct_it->first, ct_it->second.get()}); + broker_group(NEBTYPE_CONTACTGROUP_ADD, it_obj->second.get()); + } + } + } + + // Notify event broker. + broker_group(NEBTYPE_CONTACTGROUP_UPDATE, it_obj->second.get()); +} +#endif +#ifdef LEGACY_CONF /** * Remove old contactgroup. 
* @@ -192,7 +305,38 @@ void applier::contactgroup::remove_object( // Remove contact group from the global configuration set. config->contactgroups().erase(obj); } +#else +/** + * @brief Remove an old contactgroup by index. + * + * @param idx The index of the contactgroup configuration to remove. + */ +void applier::contactgroup::remove_object(ssize_t idx) { + const configuration::Contactgroup& obj = pb_config.contactgroups()[idx]; + + // Logging. + config_logger->debug("Removing contactgroup '{}'", obj.contactgroup_name()); + + // Find contact group. + contactgroup_map::iterator it = + engine::contactgroup::contactgroups.find(obj.contactgroup_name()); + if (it != engine::contactgroup::contactgroups.end()) { + // Remove contact group from its list. + // unregister_object(&contactgroup_list, grp); + // Notify event broker. + broker_group(NEBTYPE_CONTACTGROUP_DELETE, it->second.get()); + + // Remove contact group (this will effectively delete the object). + engine::contactgroup::contactgroups.erase(it); + } + + // Remove contact group from the global configuration set. + pb_config.mutable_contactgroups()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * Resolve a contact group. * @@ -216,7 +360,31 @@ void applier::contactgroup::resolve_object( // Resolve contact group. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * @brief Resolve a contact group. + * + * @param obj Contact group configuration to resolve. + */ +void applier::contactgroup::resolve_object( + const configuration::Contactgroup& obj, error_cnt& err) { + // Logging. + config_logger->debug("Resolving contact group '{}'", obj.contactgroup_name()); + + // Find contact group. 
+ contactgroup_map::iterator it = + engine::contactgroup::contactgroups.find(obj.contactgroup_name()); + if (it == engine::contactgroup::contactgroups.end() || !it->second) + throw engine_error() << fmt::format( + "Error: Cannot resolve non-existing contact group '{}'", + obj.contactgroup_name()); + // Resolve contact group. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif + +#ifdef LEGACY_CONF /** * Resolve members of a contact group. * @@ -263,3 +431,49 @@ void applier::contactgroup::_resolve_members( } } } +#else +/** + * @brief Resolve members of a contact group. A contact group can be defined + * from others contactgroups. But we only want for engine, contactgroups defined + * with contacts. So if it contains contactgroups, we have to copy their + * contacts into this contactgroup and then empty the contactgroups members. + * + * @param s Configuration being applied. + * @param obj Object that should be processed. + * @param resolved a reference to keep a trace of already expanded + * contactgroups. + */ +void applier::contactgroup::_resolve_members( + configuration::State& s, + configuration::Contactgroup& obj, + absl::flat_hash_set& resolved) { + if (resolved.contains(obj.contactgroup_name())) + return; + + resolved.emplace(obj.contactgroup_name()); + if (!obj.contactgroup_members().data().empty()) { + // Logging. 
+ config_logger->debug("Resolving members of contact group '{}'", + obj.contactgroup_name()); + for (auto& cg_name : obj.contactgroup_members().data()) { + auto it = std::find_if(s.mutable_contactgroups()->begin(), + s.mutable_contactgroups()->end(), + [&cg_name](const Contactgroup& cg) { + return cg.contactgroup_name() == cg_name; + }); + + if (it == s.mutable_contactgroups()->end()) + throw engine_error() << fmt::format( + "Error: Could not add non-existing contact group member '{}' to " + "contactgroup '{}'", + cg_name, obj.contactgroup_name()); + + Contactgroup& inner_cg = *it; + _resolve_members(s, inner_cg, resolved); + for (auto& c_name : inner_cg.members().data()) + fill_string_group(obj.mutable_members(), c_name); + } + obj.mutable_contactgroup_members()->clear_data(); + } +} +#endif diff --git a/engine/src/configuration/applier/globals.cc b/engine/src/configuration/applier/globals.cc index c85c97079c6..80eed5f5951 100644 --- a/engine/src/configuration/applier/globals.cc +++ b/engine/src/configuration/applier/globals.cc @@ -24,6 +24,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Apply new configuration. * @@ -67,6 +68,51 @@ void applier::globals::apply(state& config) { ::use_large_installation_tweaks = config.use_large_installation_tweaks(); ::instance_heartbeat_interval = config.instance_heartbeat_interval(); } +#else +/** + * Apply new configuration. + * + * @param[in] config The new configuration. 
+ */ +void applier::globals::apply(State& config) { + _set_global(::debug_file, config.debug_file()); + _set_global(::global_host_event_handler, config.global_host_event_handler()); + _set_global(::global_service_event_handler, + config.global_service_event_handler()); + _set_global(::illegal_object_chars, config.illegal_object_chars()); + _set_global(::illegal_output_chars, config.illegal_output_chars()); + _set_global(::log_file, config.log_file()); + _set_global(::ochp_command, config.ochp_command()); + _set_global(::ocsp_command, config.ocsp_command()); + _set_global(::use_timezone, config.use_timezone()); + + ::accept_passive_host_checks = config.accept_passive_host_checks(); + ::accept_passive_service_checks = config.accept_passive_service_checks(); + ::additional_freshness_latency = config.additional_freshness_latency(); + ::cached_host_check_horizon = config.cached_host_check_horizon(); + ::check_external_commands = config.check_external_commands(); + ::check_host_freshness = config.check_host_freshness(); + ::check_reaper_interval = config.check_reaper_interval(); + ::check_service_freshness = config.check_service_freshness(); + ::enable_event_handlers = config.enable_event_handlers(); + ::enable_flap_detection = config.enable_flap_detection(); + ::enable_notifications = config.enable_notifications(); + ::execute_host_checks = config.execute_host_checks(); + ::execute_service_checks = config.execute_service_checks(); + ::interval_length = config.interval_length(); + ::log_notifications = config.log_notifications(); + ::log_passive_checks = config.log_passive_checks(); + ::max_host_check_spread = config.max_host_check_spread(); + ::max_service_check_spread = config.max_service_check_spread(); + ::notification_timeout = config.notification_timeout(); + ::obsess_over_hosts = config.obsess_over_hosts(); + ::obsess_over_services = config.obsess_over_services(); + ::process_performance_data = config.process_performance_data(); + ::soft_state_dependencies = 
config.soft_state_dependencies(); + ::use_large_installation_tweaks = config.use_large_installation_tweaks(); + ::instance_heartbeat_interval = config.instance_heartbeat_interval(); +} +#endif /** * Get the singleton instance of globals applier. diff --git a/engine/src/configuration/applier/host.cc b/engine/src/configuration/applier/host.cc index bf0df59a090..ea2a7dc2f76 100644 --- a/engine/src/configuration/applier/host.cc +++ b/engine/src/configuration/applier/host.cc @@ -29,27 +29,24 @@ #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/severity.hh" +#ifdef LEGACY_CONF +#include "common/engine_legacy_conf/host.hh" +#else +#include "common/engine_conf/severity_helper.hh" +#include "common/engine_conf/state.pb.h" +#endif using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; -/** - * Default constructor. - */ -applier::host::host() {} - -/** - * Destructor. - */ -applier::host::~host() throw() {} - +#ifdef LEGACY_CONF /** * Add new host. * * @param[in] obj The new host to add into the monitoring engine. */ -void applier::host::add_object(configuration::host const& obj) { +void applier::host::add_object(const configuration::host& obj) { // Logging. 
engine_logger(logging::dbg_config, logging::more) << "Creating new host '" << obj.host_name() << "'."; @@ -128,9 +125,8 @@ void applier::host::add_object(configuration::host const& obj) { if (it->second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); - broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_ADD, h.get(), - it->first.c_str(), it->second.value().c_str(), - &tv); + broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_ADD, h.get(), it->first, + it->second.value(), &tv); } } @@ -170,7 +166,118 @@ void applier::host::add_object(configuration::host const& obj) { broker_adaptive_host_data(NEBTYPE_HOST_ADD, NEBFLAG_NONE, NEBATTR_NONE, h.get(), MODATTR_ALL); } +#else +/** + * Add new host. + * + * @param[in] obj The new host to add into the monitoring engine. + */ +void applier::host::add_object(const configuration::Host& obj) { + // Logging. + config_logger->debug("Creating new host '{}'.", obj.host_name()); + + // Add host to the global configuration set. + auto* cfg_obj = pb_config.add_hosts(); + cfg_obj->CopyFrom(obj); + + // Create host. 
+ auto h = std::make_shared( + obj.host_id(), obj.host_name(), obj.display_name(), obj.alias(), + obj.address(), obj.check_period(), + static_cast(obj.initial_state()), + obj.check_interval(), obj.retry_interval(), obj.max_check_attempts(), + static_cast(obj.notification_options() & action_hst_up), + static_cast(obj.notification_options() & action_hst_down), + static_cast(obj.notification_options() & action_hst_unreachable), + static_cast(obj.notification_options() & action_hst_flapping), + static_cast(obj.notification_options() & action_hst_downtime), + obj.notification_interval(), obj.first_notification_delay(), + obj.recovery_notification_delay(), obj.notification_period(), + obj.notifications_enabled(), obj.check_command(), obj.checks_active(), + obj.checks_passive(), obj.event_handler(), obj.event_handler_enabled(), + obj.flap_detection_enabled(), obj.low_flap_threshold(), + obj.high_flap_threshold(), + static_cast(obj.flap_detection_options() & action_hst_up), + static_cast(obj.flap_detection_options() & action_hst_down), + static_cast(obj.flap_detection_options() & action_hst_unreachable), + static_cast(obj.stalking_options() & action_hst_up), + static_cast(obj.stalking_options() & action_hst_down), + static_cast(obj.stalking_options() & action_hst_unreachable), + obj.process_perf_data(), obj.check_freshness(), obj.freshness_threshold(), + obj.notes(), obj.notes_url(), obj.action_url(), obj.icon_image(), + obj.icon_image_alt(), obj.vrml_image(), obj.statusmap_image(), + obj.coords_2d().x(), obj.coords_2d().y(), obj.has_coords_2d(), + obj.coords_3d().x(), obj.coords_3d().y(), obj.coords_3d().z(), + obj.has_coords_3d(), + true, // should_be_drawn, enabled by Nagios + obj.retain_status_information(), obj.retain_nonstatus_information(), + obj.obsess_over_host(), obj.timezone(), obj.icon_id()); + + engine::host::hosts.insert({h->name(), h}); + engine::host::hosts_by_id.insert({obj.host_id(), h}); + + h->set_initial_notif_time(0); + 
h->set_should_reschedule_current_check(false); + h->set_host_id(obj.host_id()); + h->set_acknowledgement_timeout(obj.acknowledgement_timeout() * + pb_config.interval_length()); + h->set_last_acknowledgement(0); + + // Contacts + for (auto& c : obj.contacts().data()) + h->mut_contacts().insert({c, nullptr}); + + // Contact groups. + for (auto& cg : obj.contactgroups().data()) + h->get_contactgroups().insert({cg, nullptr}); + + // Custom variables. + for (auto& cv : obj.customvariables()) { + h->custom_variables[cv.name()] = + engine::customvariable(cv.value(), cv.is_sent()); + + if (cv.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_ADD, h.get(), cv.name(), + cv.value(), &tv); + } + } + + // add tags + for (auto& t : obj.tags()) { + auto p = std::make_pair(t.first(), t.second()); + tag_map::iterator it_tag{engine::tag::tags.find(p)}; + if (it_tag == engine::tag::tags.end()) + throw engine_error() << "Could not find tag '" << t.first() + << "' on which to apply host (" << obj.host_id() + << ")"; + else + h->mut_tags().emplace_front(it_tag->second); + } + // Parents. + for (auto& p : obj.parents().data()) + h->add_parent_host(p); + + // Add severity. + if (obj.severity_id()) { + configuration::severity_helper::key_type k = {obj.severity_id(), + SeverityType::host}; + auto sv = engine::severity::severities.find(k); + if (sv == engine::severity::severities.end()) + throw engine_error() << "Could not add the severity (" << k.first << ", " + << k.second << ") to the host '" << obj.host_name() + << "'"; + h->set_severity(sv->second); + } + + // Notify event broker. + broker_adaptive_host_data(NEBTYPE_HOST_ADD, NEBFLAG_NONE, NEBATTR_NONE, + h.get(), MODATTR_ALL); +} +#endif + +#ifdef LEGACY_CONF /** * Modified host. 
* @@ -335,8 +442,8 @@ void applier::host::modify_object(configuration::host const& obj) { if (c.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_DELETE, - it_obj->second.get(), c.first.c_str(), - c.second.value().c_str(), &tv); + it_obj->second.get(), c.first, c.second.value(), + &tv); } } it_obj->second->custom_variables.clear(); @@ -348,8 +455,8 @@ void applier::host::modify_object(configuration::host const& obj) { if (c.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_ADD, - it_obj->second.get(), c.first.c_str(), - c.second.value().c_str(), &tv); + it_obj->second.get(), c.first, c.second.value(), + &tv); } } } @@ -407,7 +514,250 @@ void applier::host::modify_object(configuration::host const& obj) { broker_adaptive_host_data(NEBTYPE_HOST_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, it_obj->second.get(), MODATTR_ALL); } +#else +/** + * Modified host. + * + * @param[in] obj The new host to modify into the monitoring engine. + */ +void applier::host::modify_object(configuration::Host* old_obj, + const configuration::Host& new_obj) { + // Logging. + config_logger->debug("Modifying host '{}' (id {}).", new_obj.host_name(), + new_obj.host_id()); + + // Find host object. + host_id_map::iterator it_obj = + engine::host::hosts_by_id.find(new_obj.host_id()); + if (it_obj == engine::host::hosts_by_id.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing host object '{}' (id {})", + new_obj.host_name(), new_obj.host_id()); + std::shared_ptr h = it_obj->second; + + // Modify properties. 
+ if (h->name() != new_obj.host_name()) { + engine::host::hosts.erase(h->name()); + engine::host::hosts.insert({new_obj.host_name(), h}); + } + + h->set_name(new_obj.host_name()); + h->set_display_name(new_obj.display_name()); + if (!new_obj.alias().empty()) + h->set_alias(new_obj.alias()); + else + h->set_alias(new_obj.host_name()); + h->set_address(new_obj.address()); + if (new_obj.check_period().empty()) + h->set_check_period(new_obj.check_period()); + h->set_initial_state( + static_cast(new_obj.initial_state())); + h->set_check_interval(static_cast(new_obj.check_interval())); + h->set_retry_interval(static_cast(new_obj.retry_interval())); + h->set_max_attempts(static_cast(new_obj.max_check_attempts())); + h->set_notify_on( + (new_obj.notification_options() & action_hst_up ? notifier::up + : notifier::none) | + (new_obj.notification_options() & action_hst_down ? notifier::down + : notifier::none) | + (new_obj.notification_options() & action_hst_unreachable + ? notifier::unreachable + : notifier::none) | + (new_obj.notification_options() & action_hst_flapping + ? (notifier::flappingstart | notifier::flappingstop | + notifier::flappingdisabled) + : notifier::none) | + (new_obj.notification_options() & action_hst_downtime ? 
notifier::downtime + : notifier::none)); + h->set_notification_interval( + static_cast(new_obj.notification_interval())); + h->set_first_notification_delay( + static_cast(new_obj.first_notification_delay())); + h->set_notification_period(new_obj.notification_period()); + h->set_notifications_enabled( + static_cast(new_obj.notifications_enabled())); + h->set_check_command(new_obj.check_command()); + h->set_checks_enabled(static_cast(new_obj.checks_active())); + h->set_accept_passive_checks(static_cast(new_obj.checks_passive())); + h->set_event_handler(new_obj.event_handler()); + h->set_event_handler_enabled( + static_cast(new_obj.event_handler_enabled())); + h->set_flap_detection_enabled(new_obj.flap_detection_enabled()); + h->set_low_flap_threshold(new_obj.low_flap_threshold()); + h->set_high_flap_threshold(new_obj.high_flap_threshold()); + h->set_flap_detection_on(notifier::none); + h->add_flap_detection_on(new_obj.flap_detection_options() & action_hst_up + ? notifier::up + : notifier::none); + h->add_flap_detection_on(new_obj.flap_detection_options() & action_hst_down + ? notifier::down + : notifier::none); + h->add_flap_detection_on(new_obj.flap_detection_options() & + action_hst_unreachable + ? notifier::unreachable + : notifier::none); + h->add_stalk_on(new_obj.stalking_options() & action_hst_up ? notifier::up + : notifier::none); + h->add_stalk_on(new_obj.stalking_options() & action_hst_down + ? notifier::down + : notifier::none); + h->add_stalk_on(new_obj.stalking_options() & action_hst_unreachable + ? 
notifier::unreachable + : notifier::none); + h->set_process_performance_data( + static_cast(new_obj.process_perf_data())); + h->set_check_freshness(static_cast(new_obj.check_freshness())); + h->set_freshness_threshold(static_cast(new_obj.freshness_threshold())); + h->set_notes(new_obj.notes()); + h->set_notes_url(new_obj.notes_url()); + h->set_action_url(new_obj.action_url()); + h->set_icon_image(new_obj.icon_image()); + h->set_icon_image_alt(new_obj.icon_image_alt()); + h->set_vrml_image(new_obj.vrml_image()); + h->set_statusmap_image(new_obj.statusmap_image()); + h->set_x_2d(new_obj.coords_2d().x()); + h->set_y_2d(new_obj.coords_2d().y()); + h->set_have_2d_coords(static_cast(new_obj.has_coords_2d())); + h->set_x_3d(new_obj.coords_3d().x()); + h->set_y_3d(new_obj.coords_3d().y()); + h->set_z_3d(new_obj.coords_3d().z()); + h->set_have_3d_coords(static_cast(new_obj.has_coords_3d())); + h->set_retain_status_information( + static_cast(new_obj.retain_status_information())); + h->set_retain_nonstatus_information( + static_cast(new_obj.retain_nonstatus_information())); + h->set_obsess_over(new_obj.obsess_over_host()); + h->set_timezone(new_obj.timezone()); + h->set_host_id(new_obj.host_id()); + h->set_acknowledgement_timeout(new_obj.acknowledgement_timeout() * + pb_config.interval_length()); + h->set_recovery_notification_delay(new_obj.recovery_notification_delay()); + + // Contacts. + if (!MessageDifferencer::Equals(new_obj.contacts(), old_obj->contacts())) { + // Delete old contacts. + h->mut_contacts().clear(); + + // Add contacts to host. + for (auto& c : new_obj.contacts().data()) + h->mut_contacts().insert({c, nullptr}); + } + + // Contact groups. + if (!MessageDifferencer::Equals(new_obj.contactgroups(), + old_obj->contactgroups())) { + // Delete old contact groups. + h->get_contactgroups().clear(); + + // Add contact groups to host. + for (auto& cg : new_obj.contactgroups().data()) + h->get_contactgroups().insert({cg, nullptr}); + } + + // Custom variables. 
+ if (!std::equal( + new_obj.customvariables().begin(), new_obj.customvariables().end(), + old_obj->customvariables().begin(), old_obj->customvariables().end(), + MessageDifferencer::Equals)) { + for (auto& cv : h->custom_variables) { + if (cv.second.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_DELETE, h.get(), + cv.first, cv.second.value(), &tv); + } + } + h->custom_variables.clear(); + + for (auto& c : new_obj.customvariables()) { + h->custom_variables[c.name()] = c.value(); + + if (c.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_HOSTCUSTOMVARIABLE_ADD, h.get(), + c.name(), c.value(), &tv); + } + } + } + + // add tags + bool tags_changed = false; + if (old_obj->tags().size() == new_obj.tags().size()) { + for (auto new_it = new_obj.tags().begin(), old_it = old_obj->tags().begin(); + old_it != old_obj->tags().end() && new_it != new_obj.tags().end(); + ++old_it, ++new_it) { + if (new_it->first() != old_it->first() || + new_it->second() != old_it->second()) { + tags_changed = true; + break; + } + } + } else + tags_changed = true; + + if (tags_changed) { + h->mut_tags().clear(); + old_obj->mutable_tags()->CopyFrom(new_obj.tags()); + for (auto& t : new_obj.tags()) { + tag_map::iterator it_tag = + engine::tag::tags.find({t.first(), t.second()}); + if (it_tag == engine::tag::tags.end()) + throw engine_error() + << fmt::format("Could not find tag '{}' on which to apply host {}", + t.first(), new_obj.host_id()); + else + h->mut_tags().emplace_front(it_tag->second); + } + } + + // Parents. 
+ bool parents_changed = false; + if (old_obj->parents().data().size() == new_obj.parents().data().size()) { + for (auto new_it = new_obj.parents().data().begin(), + old_it = old_obj->parents().data().begin(); + old_it != old_obj->parents().data().end() && + new_it != new_obj.parents().data().end(); + ++old_it, ++new_it) { + if (*new_it != *old_it) { + parents_changed = true; + break; + } + } + } else + parents_changed = true; + + if (parents_changed) { + // Delete old parents. + for (auto it = h->parent_hosts.begin(), end = h->parent_hosts.end(); + it != end; it++) + broker_relation_data(NEBTYPE_PARENT_DELETE, it->second, nullptr, h.get(), + nullptr); + h->parent_hosts.clear(); + + // Create parents. + for (auto& parent_name : new_obj.parents().data()) + h->add_parent_host(parent_name); + } + + // Severity. + if (new_obj.severity_id()) { + configuration::severity_helper::key_type k = {new_obj.severity_id(), + SeverityType::host}; + auto sv = engine::severity::severities.find(k); + if (sv == engine::severity::severities.end()) + throw engine_error() << "Could not update the severity (" << k.first + << ", " << k.second << ") to the host '" + << new_obj.host_name() << "'"; + h->set_severity(sv->second); + } else + h->set_severity(nullptr); + + // Notify event broker. + broker_adaptive_host_data(NEBTYPE_HOST_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, + it_obj->second.get(), MODATTR_ALL); +} +#endif +#ifdef LEGACY_CONF /** * Remove old host. * @@ -454,7 +804,55 @@ void applier::host::remove_object(configuration::host const& obj) { // Remove host from the global configuration set. config->hosts().erase(obj); } +#else +/** + * Remove old host. + * + * @param[in] obj The new host to remove from the monitoring engine. + */ +void applier::host::remove_object(ssize_t idx) { + const Host& obj = pb_config.hosts()[idx]; + // Logging. + config_logger->debug("Removing host '{}'.", obj.host_name()); + + // Find host. 
+ host_id_map::iterator it(engine::host::hosts_by_id.find(obj.host_id())); + if (it != engine::host::hosts_by_id.end()) { + // Remove host comments. + comment::delete_host_comments(obj.host_id()); + + // Remove host downtimes. + downtimes::downtime_manager::instance() + .delete_downtime_by_hostname_service_description_start_time_comment( + obj.host_name(), "", {false, (time_t)0}, ""); + + // Remove events related to this host. + applier::scheduler::instance().remove_host(obj.host_id()); + + // remove host from hostgroup->members + for (auto& it_h : it->second->get_parent_groups()) + it_h->members.erase(it->second->name()); + + // Notify event broker. + for (auto it_s = it->second->services.begin(); + it_s != it->second->services.end(); ++it_s) + broker_adaptive_service_data(NEBTYPE_SERVICE_DELETE, NEBFLAG_NONE, + NEBATTR_NONE, it_s->second, MODATTR_ALL); + broker_adaptive_host_data(NEBTYPE_HOST_DELETE, NEBFLAG_NONE, NEBATTR_NONE, + it->second.get(), MODATTR_ALL); + + // Erase host object (will effectively delete the object). + engine::host::hosts.erase(it->second->name()); + engine::host::hosts_by_id.erase(it); + } + + // Remove host from the global configuration set. + pb_config.mutable_hosts()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * Resolve a host. * @@ -497,7 +895,50 @@ void applier::host::resolve_object(const configuration::host& obj, // Resolve host. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * @brief Resolve a host. + * + * @param obj Host protobuf configuration object. + */ +void applier::host::resolve_object(const configuration::Host& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving host '{}'.", obj.host_name()); + // If it is the very first host to be resolved, + // remove all the child backlinks of all the hosts. + // It is necessary to do it only once to prevent the removal + // of valid child backlinks. 
+ if (&obj == &(*pb_config.hosts().begin())) { + for (host_map::iterator it(engine::host::hosts.begin()), + end(engine::host::hosts.end()); + it != end; ++it) + it->second->child_hosts.clear(); + } + + // Find host. + host_id_map::iterator it = engine::host::hosts_by_id.find(obj.host_id()); + if (engine::host::hosts_by_id.end() == it) + throw engine_error() << fmt::format("Cannot resolve non-existing host '{}'", + obj.host_name()); + + // Remove service backlinks. + it->second->services.clear(); + + // Remove host group links. + it->second->get_parent_groups().clear(); + + // Reset host counters. + it->second->set_total_services(0); + it->second->set_total_service_check_interval(0); + + // Resolve host. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif + +#ifdef LEGACY_CONF /** * @brief Expand a host. * @@ -546,3 +987,42 @@ void applier::host::expand_objects(configuration::state& s) { } s.hosts() = std::move(new_hosts); } +#else +/** + * @brief Expand a host. + * + * During expansion, the host will be added to its host groups. These + * will be modified in the state. + * + * @param[int,out] s Configuration state. + */ +void applier::host::expand_objects(configuration::State& s) { + // Let's consider all the macros defined in s. + absl::flat_hash_set cvs; + for (auto& cv : s.macros_filter().data()) + cvs.emplace(cv); + + absl::flat_hash_map hgs; + for (auto& hg : *s.mutable_hostgroups()) + hgs.emplace(hg.hostgroup_name(), &hg); + + // Browse all hosts. + for (auto& host_cfg : *s.mutable_hosts()) { + // Should custom variables be sent to broker ? 
+ for (auto& cv : *host_cfg.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + + for (auto& grp : host_cfg.hostgroups().data()) { + auto it = hgs.find(grp); + if (it != hgs.end()) { + fill_string_group(it->second->mutable_members(), host_cfg.host_name()); + } else + throw engine_error() << fmt::format( + "Could not add host '{}' to non-existing host group '{}'", + host_cfg.host_name(), grp); + } + } +} +#endif diff --git a/engine/src/configuration/applier/hostdependency.cc b/engine/src/configuration/applier/hostdependency.cc index 64cd39262d5..ba0d4b75f1f 100644 --- a/engine/src/configuration/applier/hostdependency.cc +++ b/engine/src/configuration/applier/hostdependency.cc @@ -27,6 +27,7 @@ using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Add new hostdependency. * @@ -103,7 +104,74 @@ void applier::hostdependency::add_object( broker_adaptive_dependency_data(NEBTYPE_HOSTDEPENDENCY_ADD, hd.get()); } +#else +/** + * Add new hostdependency. + * + * @param[in] obj The new host dependency to add into the monitoring + * engine. + */ +void applier::hostdependency::add_object( + const configuration::Hostdependency& obj) { + // Check host dependency. + if (obj.hosts().data().size() != 1 || !obj.hostgroups().data().empty() || + obj.dependent_hosts().data().size() != 1 || + !obj.dependent_hostgroups().data().empty()) + throw engine_error() << "Could not create host dependency " + "with multiple (dependent) host / host groups"; + if (obj.dependency_type() != DependencyKind::execution_dependency && + obj.dependency_type() != DependencyKind::notification_dependency) + throw engine_error() << fmt::format( + "Could not create unexpanded host dependency of '{}' on '{}'", + obj.dependent_hosts().data(0), obj.hosts().data(0)); + + // Logging. 
+ config_logger->debug( + "Creating new host dependency of host '{}' on host '{}'.", + obj.dependent_hosts().data(0), obj.hosts().data(0)); + + // Add dependency to the global configuration set. + auto* new_obj = pb_config.add_hostdependencies(); + new_obj->CopyFrom(obj); + + std::shared_ptr hd; + + if (obj.dependency_type() == DependencyKind::execution_dependency) + // Create executon dependency. + hd = std::make_shared( + configuration::hostdependency_key(obj), obj.dependent_hosts().data(0), + obj.hosts().data(0), + static_cast(obj.dependency_type()), + obj.inherits_parent(), + static_cast(obj.execution_failure_options() & action_hd_up), + static_cast(obj.execution_failure_options() & action_hd_down), + static_cast(obj.execution_failure_options() & + action_hd_unreachable), + static_cast(obj.execution_failure_options() & action_hd_pending), + obj.dependency_period()); + else + // Create notification dependency. + hd = std::make_shared( + hostdependency_key(obj), obj.dependent_hosts().data(0), + obj.hosts().data(0), + static_cast(obj.dependency_type()), + obj.inherits_parent(), + static_cast(obj.notification_failure_options() & action_hd_up), + static_cast(obj.notification_failure_options() & action_hd_down), + static_cast(obj.notification_failure_options() & + action_hd_unreachable), + static_cast(obj.notification_failure_options() & + action_hd_pending), + obj.dependency_period()); + + engine::hostdependency::hostdependencies.insert( + {obj.dependent_hosts().data(0), hd}); + + broker_adaptive_dependency_data(NEBTYPE_HOSTDEPENDENCY_ADD, hd.get()); +} +#endif +#ifdef LEGACY_CONF /** * Expand host dependencies. * @@ -169,7 +237,83 @@ void applier::hostdependency::expand_objects(configuration::state& s) { // Set expanded host dependencies in configuration state. s.hostdependencies().swap(expanded); } +#else +/** + * Expand host dependencies. + * + * @param[in,out] s Configuration being applied. 
+ */ +void applier::hostdependency::expand_objects(configuration::State& s) { + std::list > lst; + + config_logger->debug("Expanding host dependencies"); + for (int i = s.hostdependencies_size() - 1; i >= 0; --i) { + auto* hd_conf = s.mutable_hostdependencies(i); + if (hd_conf->hosts().data().size() > 1 || + !hd_conf->hostgroups().data().empty() || + hd_conf->dependent_hosts().data().size() > 1 || + !hd_conf->dependent_hostgroups().data().empty() || + hd_conf->dependency_type() == unknown) { + for (auto& hg_name : hd_conf->dependent_hostgroups().data()) { + auto found = + std::find_if(s.hostgroups().begin(), s.hostgroups().end(), + [&hg_name](const configuration::Hostgroup& hg) { + return hg.hostgroup_name() == hg_name; + }); + if (found != s.hostgroups().end()) { + auto& hg_conf = *found; + for (auto& h : hg_conf.members().data()) + fill_string_group(hd_conf->mutable_dependent_hosts(), h); + } + } + for (auto& hg_name : hd_conf->hostgroups().data()) { + auto found = + std::find_if(s.hostgroups().begin(), s.hostgroups().end(), + [&hg_name](const configuration::Hostgroup& hg) { + return hg.hostgroup_name() == hg_name; + }); + if (found != s.hostgroups().end()) { + auto& hg_conf = *found; + for (auto& h : hg_conf.members().data()) + fill_string_group(hd_conf->mutable_hosts(), h); + } + } + for (auto& h : hd_conf->hosts().data()) { + for (auto& h_dep : hd_conf->dependent_hosts().data()) { + for (int ii = 1; ii <= 2; ii++) { + if (hd_conf->dependency_type() == DependencyKind::unknown || + static_cast(hd_conf->dependency_type()) == ii) { + lst.emplace_back(std::make_unique()); + auto& new_hd = lst.back(); + new_hd->set_dependency_period(hd_conf->dependency_period()); + new_hd->set_inherits_parent(hd_conf->inherits_parent()); + fill_string_group(new_hd->mutable_hosts(), h); + fill_string_group(new_hd->mutable_dependent_hosts(), h_dep); + if (ii == 2) { + new_hd->set_dependency_type( + DependencyKind::execution_dependency); + new_hd->set_execution_failure_options( + 
hd_conf->execution_failure_options()); + } else { + new_hd->set_dependency_type( + DependencyKind::notification_dependency); + new_hd->set_notification_failure_options( + hd_conf->notification_failure_options()); + } + } + } + } + } + s.mutable_hostdependencies()->DeleteSubrange(i, 1); + } + } + for (auto& hd : lst) + s.mutable_hostdependencies()->AddAllocated(hd.release()); +} +#endif + +#ifdef LEGACY_CONF /** * @brief Modify host dependency. * @@ -187,7 +331,27 @@ void applier::hostdependency::modify_object( << "this is likely a software bug that you should report to " << "Centreon Engine developers"; } +#else +/** + * @brief Modify host dependency. + * + * Host dependencies cannot be defined with anything else than their + * full content. Therefore no modification can occur. + * + * @param[in] obj Unused. + */ +void applier::hostdependency::modify_object( + configuration::Hostdependency* old_obj [[maybe_unused]], + const configuration::Hostdependency& new_obj [[maybe_unused]]) { + throw engine_error() + << "Could not modify a host dependency: Host dependency objects can " + "only " + "be added or removed, this is likely a software bug that you should " + "report to Centreon Engine developers"; +} +#endif +#ifdef LEGACY_CONF /** * Remove old host dependency. * @@ -217,7 +381,40 @@ void applier::hostdependency::remove_object( // Remove dependency from the global configuration set. config->hostdependencies().erase(obj); } +#else +/** + * Remove old host dependency. + * + * @param[in] idx The index of the host dependency configuration to remove + * from engine. + */ +void applier::hostdependency::remove_object(ssize_t idx) { + // Logging. + config_logger->debug("Removing a host dependency."); + + // Find host dependency. 
+ auto& obj = pb_config.hostdependencies(0); + size_t key = hostdependency_key(obj); + + hostdependency_mmap::iterator it = + engine::hostdependency::hostdependencies_find( + {obj.dependent_hosts().data(0), key}); + if (it != engine::hostdependency::hostdependencies.end()) { + com::centreon::engine::hostdependency* dependency(it->second.get()); + + // Notify event broker. + broker_adaptive_dependency_data(NEBTYPE_HOSTDEPENDENCY_DELETE, dependency); + + // Remove host dependency from its list. + engine::hostdependency::hostdependencies.erase(it); + } + + // Remove dependency from the global configuration set. + pb_config.mutable_hostdependencies()->DeleteSubrange(idx, 1); +} +#endif +#ifdef LEGACY_CONF /** * Resolve a hostdependency. * @@ -241,7 +438,33 @@ void applier::hostdependency::resolve_object( // Resolve host dependency. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * Resolve a hostdependency. + * + * @param[in] obj Hostdependency object. + */ +void applier::hostdependency::resolve_object( + const configuration::Hostdependency& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving a host dependency."); + + // Find host escalation + auto k = hostdependency_key(obj); + + auto it = engine::hostdependency::hostdependencies_find( + {obj.dependent_hosts().data(0), k}); + + if (engine::hostdependency::hostdependencies.end() == it) + throw engine_error() << "Cannot resolve non-existing host escalation"; + + // Resolve host dependency. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif +#ifdef LEGACY_CONF /** * Expand hosts. 
* @@ -271,3 +494,4 @@ void applier::hostdependency::_expand_hosts( expanded.insert(it_group->members().begin(), it_group->members().end()); } } +#endif diff --git a/engine/src/configuration/applier/hostescalation.cc b/engine/src/configuration/applier/hostescalation.cc index 39a09b2e2ba..616d52d1113 100644 --- a/engine/src/configuration/applier/hostescalation.cc +++ b/engine/src/configuration/applier/hostescalation.cc @@ -27,6 +27,7 @@ using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Add new host escalation. * @@ -81,7 +82,60 @@ void applier::hostescalation::add_object( it != end; ++it) he->get_contactgroups().insert({*it, nullptr}); } +#else +/** + * Add new host escalation. + * + * @param[in] obj The new host escalation to add into the monitoring + * engine. + */ +void applier::hostescalation::add_object( + const configuration::Hostescalation& obj) { + // Check host escalation. + if (obj.hosts().data().size() != 1 || !obj.hostgroups().data().empty()) + throw engine_error() + << "Could not create host escalation with multiple hosts / host groups"; + + // Logging. + config_logger->debug("Creating new escalation for host '{}'.", obj.hosts().data(0)); + + // Add escalation to the global configuration set. + auto* new_obj = pb_config.add_hostescalations(); + new_obj->CopyFrom(obj); + + size_t key = hostescalation_key(obj); + + // Create host escalation. + auto he = std::make_shared( + obj.hosts().data(0), obj.first_notification(), obj.last_notification(), + obj.notification_interval(), obj.escalation_period(), + ((obj.escalation_options() & action_he_down) + ? notifier::down + : notifier::none) | + ((obj.escalation_options() & + action_he_unreachable) + ? notifier::unreachable + : notifier::none) | + ((obj.escalation_options() & action_he_recovery) + ? notifier::up + : notifier::none), + key); + + // Add new items to the configuration state. 
+ engine::hostescalation::hostescalations.insert({he->get_hostname(), he}); + + // Notify event broker. + timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_escalation_data(NEBTYPE_HOSTESCALATION_ADD, NEBFLAG_NONE, + NEBATTR_NONE, he.get(), &tv); + + // Add contact groups to host escalation. + for (auto& g : obj.contactgroups().data()) + he->get_contactgroups().insert({g, nullptr}); +} +#endif +#ifdef LEGACY_CONF /** * Expand a host escalation. * @@ -116,7 +170,49 @@ void applier::hostescalation::expand_objects(configuration::state& s) { // Set expanded host escalations in configuration state. s.hostescalations().swap(expanded); } +#else +/** + * Expand a host escalation. + * + * @param[in,out] s Configuration being applied. + */ +void applier::hostescalation::expand_objects(configuration::State& s) { + std::list > resolved; + for (auto& he : *s.mutable_hostescalations()) { + if (he.hostgroups().data().size() > 0) { + absl::flat_hash_set host_names; + for (auto& hname : he.hosts().data()) + host_names.emplace(hname); + for (auto& hg_name : he.hostgroups().data()) { + auto found_hg = + std::find_if(s.hostgroups().begin(), s.hostgroups().end(), + [&hg_name](const Hostgroup& hg) { + return hg.hostgroup_name() == hg_name; + }); + if (found_hg != s.hostgroups().end()) { + for (auto& h : found_hg->members().data()) + host_names.emplace(h); + } else + throw engine_error() << fmt::format( + "Could not expand non-existing host group '{}'", hg_name); + } + he.mutable_hostgroups()->clear_data(); + he.mutable_hosts()->clear_data(); + for (auto& n : host_names) { + resolved.emplace_back(std::make_unique()); + auto& e = resolved.back(); + e->CopyFrom(he); + fill_string_group(e->mutable_hosts(), n); + } + } + } + s.clear_hostescalations(); + for (auto& e : resolved) + s.mutable_hostescalations()->AddAllocated(e.release()); +} +#endif +#ifdef LEGACY_CONF /** * @brief Modify host escalation. 
* @@ -134,7 +230,26 @@ void applier::hostescalation::modify_object( << "this is likely a software bug that you should report to " << "Centreon Engine developers"; } +#else +/** + * @brief Modify host escalation. + * + * Host escalations cannot be defined with anything else than their + * full content. Therefore no modification can occur. + * + * @param[in] obj Unused. + */ +void applier::hostescalation::modify_object( + configuration::Hostescalation* old_obj [[maybe_unused]], + const configuration::Hostescalation& new_obj [[maybe_unused]]) { + throw engine_error() + << "Could not modify a host escalation: host escalation objects can only " + "be added or removed, this is likely a software bug that you should " + "report to Centreon Engine developers"; +} +#endif +#ifdef LEGACY_CONF /** * Remove old hostescalation. * @@ -217,7 +332,85 @@ void applier::hostescalation::remove_object( /* And we clear the configuration */ config->hostescalations().erase(obj); } +#else +/** + * Remove old hostescalation. + * + * @param[in] obj The new hostescalation to remove from the monitoring + * engine. + */ +void applier::hostescalation::remove_object(ssize_t idx) { + configuration::Hostescalation obj = pb_config.hostescalations(idx); + // Logging. + config_logger->debug("Removing a host escalation."); + // Find host escalation. + const std::string& host_name{obj.hosts().data(0)}; + std::pair range{ + engine::hostescalation::hostescalations.equal_range(host_name)}; + bool host_exists; + + /* Let's get the host... */ + host_map::iterator hit{engine::host::hosts.find(host_name)}; + /* ... 
and its escalations */ + if (hit == engine::host::hosts.end()) { + config_logger->debug("Cannot find host '{}' - already removed.", host_name); + host_exists = false; + } else + host_exists = true; + + for (hostescalation_mmap::iterator it{range.first}, end{range.second}; + it != end; ++it) { + /* It's a pity but for now we don't have any possibility or key to verify + * if the hostescalation is the good one. */ + if (it->second->get_first_notification() == obj.first_notification() && + it->second->get_last_notification() == obj.last_notification() && + it->second->get_notification_interval() == + obj.notification_interval() && + it->second->get_escalation_period() == obj.escalation_period() && + it->second->get_escalate_on(notifier::down) == + static_cast(obj.escalation_options() & + action_he_down) && + it->second->get_escalate_on(notifier::unreachable) == + static_cast(obj.escalation_options() & + action_he_unreachable) && + it->second->get_escalate_on(notifier::up) == + static_cast(obj.escalation_options() & + action_he_recovery)) { + // We have the hostescalation to remove. + + // Notify event broker. + timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_escalation_data(NEBTYPE_HOSTESCALATION_DELETE, + NEBFLAG_NONE, NEBATTR_NONE, + it->second.get(), &tv); + + if (host_exists) { + config_logger->debug("Host '{}' found - removing escalation from it.", + host_name); + std::list& escalations(hit->second->get_escalations()); + /* We need also to remove the escalation from the host */ + for (std::list::iterator heit{escalations.begin()}, + heend{escalations.end()}; + heit != heend; ++heit) { + if (*heit == it->second.get()) { + escalations.erase(heit); + break; + } + } + } + // Remove host escalation from the global configuration set. 
+ engine::hostescalation::hostescalations.erase(it); + break; + } + } + + /* And we clear the configuration */ + pb_config.mutable_hostescalations()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * Resolve a hostescalation. * @@ -254,7 +447,43 @@ void applier::hostescalation::resolve_object( if (!found) throw engine_error() << "Cannot resolve non-existing host escalation"; } +#else +/** + * Resolve a hostescalation. + * + * @param[in] obj Hostescalation object. + */ +void applier::hostescalation::resolve_object( + const configuration::Hostescalation& obj, error_cnt& err) { + // Logging. + config_logger->debug("Resolving a host escalation."); + + // Find host escalation + bool found = false; + const std::string& hostname{obj.hosts().data(0)}; + auto p = engine::hostescalation::hostescalations.equal_range(hostname); + + if (p.first == p.second) + throw engine_error() << "Cannot find host escalations concerning host '" + << hostname << "'"; + + size_t key = hostescalation_key(obj); + for (hostescalation_mmap::iterator it{p.first}; it != p.second; ++it) { + /* It's a pity but for now we don't have any idea or key to verify if + * the hostescalation is the good one. */ + if (it->second->internal_key() == key) { + found = true; + // Resolve host escalation. + it->second->resolve(err.config_warnings, err.config_errors); + break; + } + } + if (!found) + throw engine_error() << "Cannot resolve non-existing host escalation"; +} +#endif +#ifdef LEGACY_CONF /** * Expand hosts. * @@ -284,7 +513,9 @@ void applier::hostescalation::_expand_hosts( expanded.insert(it_group->members().begin(), it_group->members().end()); } } +#endif +#ifdef LEGACY_CONF /** * Inherits special variables from the host. 
* @@ -313,3 +544,4 @@ void applier::hostescalation::_inherits_special_vars( obj.escalation_period(it->notification_period()); } } +#endif diff --git a/engine/src/configuration/applier/hostgroup.cc b/engine/src/configuration/applier/hostgroup.cc index e304c6b350e..c4c6b8bba37 100644 --- a/engine/src/configuration/applier/hostgroup.cc +++ b/engine/src/configuration/applier/hostgroup.cc @@ -29,25 +29,7 @@ using namespace com::centreon::engine::configuration; -/** - * Default constructor. - */ -applier::hostgroup::hostgroup() {} - -/** - * Copy constructor. - * - * @param[in] right Object to copy. - */ -applier::hostgroup::hostgroup(applier::hostgroup const& right) { - (void)right; -} - -/** - * Destructor. - */ -applier::hostgroup::~hostgroup() throw() {} - +#ifdef LEGACY_CONF /** * Add new hostgroup. * @@ -79,7 +61,38 @@ void applier::hostgroup::add_object(configuration::hostgroup const& obj) { // Notify event broker. broker_group(NEBTYPE_HOSTGROUP_ADD, hg.get()); } +#else +/** + * Add new hostgroup. + * + * @param[in] obj The new hostgroup to add into the monitoring engine. + */ +void applier::hostgroup::add_object(const configuration::Hostgroup& obj) { + // Logging. + config_logger->debug("Creating new hostgroup '{}'.", obj.hostgroup_name()); + // Add host group to the global configuration state. + auto* new_obj = pb_config.add_hostgroups(); + new_obj->CopyFrom(obj); + + // Create host group. + auto hg = std::make_shared( + obj.hostgroup_id(), obj.hostgroup_name(), obj.alias(), obj.notes(), + obj.notes_url(), obj.action_url()); + + // Add new items to the configuration state. + engine::hostgroup::hostgroups.insert({hg->get_group_name(), hg}); + + // Notify event broker. + broker_group(NEBTYPE_HOSTGROUP_ADD, hg.get()); + + // Apply resolved hosts on hostgroup. + for (auto& h : obj.members().data()) + hg->members.insert({h, nullptr}); +} +#endif + +#ifdef LEGACY_CONF /** * Expand all host groups. 
* @@ -99,7 +112,17 @@ void applier::hostgroup::expand_objects(configuration::state& s) { it != end; ++it) s.hostgroups().insert(it->second); } +#else +/** + * Expand all host groups. + * + * @param[in,out] s State being applied. + */ +void applier::hostgroup::expand_objects(configuration::State& s + [[maybe_unused]]) {} +#endif +#ifdef LEGACY_CONF /** * Modified hostgroup. * @@ -156,7 +179,57 @@ void applier::hostgroup::modify_object(configuration::hostgroup const& obj) { // Notify event broker. broker_group(NEBTYPE_HOSTGROUP_UPDATE, it_obj->second.get()); } +#else +/** + * Modified hostgroup. + * + * @param[in] obj The new hostgroup to modify into the monitoring + * engine. + */ +void applier::hostgroup::modify_object( + configuration::Hostgroup* old_obj, + const configuration::Hostgroup& new_obj) { + // Logging. + config_logger->debug("Modifying hostgroup '{}'", old_obj->hostgroup_name()); + // Find host group object. + hostgroup_map::iterator it_obj = + engine::hostgroup::hostgroups.find(old_obj->hostgroup_name()); + if (it_obj == engine::hostgroup::hostgroups.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing host group object '{}'", + old_obj->hostgroup_name()); + + it_obj->second->set_action_url(new_obj.action_url()); + it_obj->second->set_alias(new_obj.alias()); + it_obj->second->set_notes(new_obj.notes()); + it_obj->second->set_notes_url(new_obj.notes_url()); + it_obj->second->set_id(new_obj.hostgroup_id()); + + // Were members modified ? + if (!MessageDifferencer::Equals(new_obj.members(), old_obj->members())) { + // Delete all old host group members. 
+ for (host_map_unsafe::iterator it(it_obj->second->members.begin()), + end(it_obj->second->members.end()); + it != end; ++it) { + broker_group_member(NEBTYPE_HOSTGROUPMEMBER_DELETE, it->second, + it_obj->second.get()); + } + it_obj->second->members.clear(); + + for (auto it = new_obj.members().data().begin(), + end = new_obj.members().data().end(); + it != end; ++it) + it_obj->second->members.insert({*it, nullptr}); + } + + old_obj->CopyFrom(new_obj); + // Notify event broker. + broker_group(NEBTYPE_HOSTGROUP_UPDATE, it_obj->second.get()); +} +#endif + +#ifdef LEGACY_CONF /** * Remove old hostgroup. * @@ -184,7 +257,37 @@ void applier::hostgroup::remove_object(configuration::hostgroup const& obj) { // Remove host group from the global configuration set. config->hostgroups().erase(obj); } +#else +/** + * Remove old hostgroup. + * + * @param[in] obj The new hostgroup to remove from the monitoring + * engine. + */ +void applier::hostgroup::remove_object(ssize_t idx) { + const Hostgroup& obj = pb_config.hostgroups(idx); + // Logging. + config_logger->debug("Removing host group '{}'", obj.hostgroup_name()); + + // Find host group. + hostgroup_map::iterator it = + engine::hostgroup::hostgroups.find(obj.hostgroup_name()); + if (it != engine::hostgroup::hostgroups.end()) { + engine::hostgroup* grp(it->second.get()); + + // Notify event broker. + broker_group(NEBTYPE_HOSTGROUP_DELETE, grp); + + // Erase host group object (will effectively delete the object). + engine::hostgroup::hostgroups.erase(it); + } + + // Remove host group from the global configuration set. + pb_config.mutable_hostgroups()->DeleteSubrange(idx, 1); +} +#endif +#ifdef LEGACY_CONF /** * Resolve a host group. * @@ -206,7 +309,30 @@ void applier::hostgroup::resolve_object(configuration::hostgroup const& obj, // Resolve host group. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * Resolve a host group. + * + * @param[in] obj Object to resolved. 
+ */ +void applier::hostgroup::resolve_object(const configuration::Hostgroup& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving host group '{}'", obj.hostgroup_name()); + + // Find host group. + hostgroup_map::iterator it = + engine::hostgroup::hostgroups.find(obj.hostgroup_name()); + if (it == engine::hostgroup::hostgroups.end()) + throw engine_error() << fmt::format( + "Cannot resolve non-existing host group '{}'", obj.hostgroup_name()); + + // Resolve host group. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif +#ifdef LEGACY_CONF /** * Resolve members of a host group. * @@ -231,3 +357,4 @@ void applier::hostgroup::_resolve_members(configuration::state& s resolved_obj = obj; } } +#endif diff --git a/engine/src/configuration/applier/logging.cc b/engine/src/configuration/applier/logging.cc index 40a77191f82..6644a06e530 100644 --- a/engine/src/configuration/applier/logging.cc +++ b/engine/src/configuration/applier/logging.cc @@ -28,6 +28,7 @@ using namespace com::centreon; using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Apply new configuration. * @@ -74,6 +75,54 @@ void applier::logging::apply(state& config) { _del_syslog(); } } +#else +/** + * Apply new configuration. + * + * @param[in] config The new configuration. + */ +void applier::logging::apply(State& config) { + if (verify_config || test_scheduling) + return; + + if (config.log_legacy_enabled()) { + // Syslog. + if (config.use_syslog() && !_syslog) + _add_syslog(); + else if (!config.use_syslog() && _syslog) + _del_syslog(); + + // Standard log file. + + if (config.log_file() == "") + _del_log_file(); + else if (!_log || config.log_file() != _log->filename()) { + _add_log_file(config); + _del_stdout(); + _del_stderr(); + } + + // Debug file. 
+ if ((config.debug_file() == "") || !config.debug_level() || + !config.debug_verbosity()) { + _del_debug(); + _debug_level = config.debug_level(); + _debug_verbosity = config.debug_verbosity(); + _debug_max_size = config.max_debug_file_size(); + } else if (!_debug || config.debug_file() != _debug->filename() || + config.debug_level() != _debug_level || + config.debug_verbosity() != _debug_verbosity || + config.max_debug_file_size() != _debug_max_size) + _add_debug(config); + } else { + _del_stdout(); + _del_stderr(); + _del_debug(); + _del_log_file(); + _del_syslog(); + } +} +#endif /** * Get the singleton instance of logging applier. @@ -120,6 +169,7 @@ applier::logging::logging() _add_stderr(); } +#ifdef LEGACY_CONF /** * Construct and apply configuration. * @@ -138,11 +188,31 @@ applier::logging::logging(state& config) _add_stderr(); apply(config); } +#else +/** + * Construct and apply configuration. + * + * @param[in] config The initial confiuration. + */ +applier::logging::logging(State& config) + : _debug(nullptr), + _debug_level(0), + _debug_max_size(0), + _debug_verbosity(0), + _log(nullptr), + _stderr(nullptr), + _stdout(nullptr), + _syslog(nullptr) { + _add_stdout(); + _add_stderr(); + apply(config); +} +#endif /** * Default destructor. */ -applier::logging::~logging() throw() { +applier::logging::~logging() noexcept { _del_stdout(); _del_stderr(); _del_syslog(); @@ -200,6 +270,7 @@ void applier::logging::_add_syslog() { } } +#ifdef LEGACY_CONF /** * Add file object logging. */ @@ -210,7 +281,20 @@ void applier::logging::_add_log_file(state const& config) { com::centreon::logging::engine::instance().add(_log, engine::logging::log_all, engine::logging::most); } +#else +/** + * Add file object logging. 
+ */ +void applier::logging::_add_log_file(const State& config) { + _del_log_file(); + _log = new com::centreon::logging::file(config.log_file(), true, + config.log_pid()); + com::centreon::logging::engine::instance().add(_log, engine::logging::log_all, + engine::logging::most); +} +#endif +#ifdef LEGACY_CONF /** * Add debug object logging. */ @@ -224,6 +308,21 @@ void applier::logging::_add_debug(state const& config) { com::centreon::logging::engine::instance().add(_debug, _debug_level, _debug_verbosity); } +#else +/** + * Add debug object logging. + */ +void applier::logging::_add_debug(const State& config) { + _del_debug(); + _debug_level = (config.debug_level() << 32) | engine::logging::log_all; + _debug_verbosity = config.debug_verbosity(); + _debug_max_size = config.max_debug_file_size(); + _debug = new com::centreon::engine::logging::debug_file(config.debug_file(), + _debug_max_size); + com::centreon::logging::engine::instance().add(_debug, _debug_level, + _debug_verbosity); +} +#endif /** * Remove syslog object logging. diff --git a/engine/src/configuration/applier/macros.cc b/engine/src/configuration/applier/macros.cc index 57f2358f801..fd499c133b4 100644 --- a/engine/src/configuration/applier/macros.cc +++ b/engine/src/configuration/applier/macros.cc @@ -50,6 +50,7 @@ static bool is_old_style_user_macro(std::string const& key, unsigned int& val) { return (true); } +#ifdef LEGACY_CONF /** * Apply new configuration. * @@ -80,6 +81,39 @@ void applier::macros::apply(configuration::state& config) { _set_macros_user(val - 1, it->second); } } +#else +/** + * Apply new configuration. + * + * @param[in] config The new configuration. 
+ */ +void applier::macros::apply(configuration::State& pb_config) { + _set_macro(MACRO_ADMINEMAIL, pb_config.admin_email()); + _set_macro(MACRO_ADMINPAGER, pb_config.admin_pager()); + _set_macro(MACRO_COMMANDFILE, pb_config.command_file()); + _set_macro(MACRO_LOGFILE, pb_config.log_file()); + _set_macro(MACRO_MAINCONFIGFILE, pb_config.cfg_main()); + if (pb_config.resource_file().size() > 0) + _set_macro(MACRO_RESOURCEFILE, pb_config.resource_file(0)); + _set_macro(MACRO_STATUSDATAFILE, pb_config.status_file()); + _set_macro(MACRO_RETENTIONDATAFILE, pb_config.state_retention_file()); + _set_macro(MACRO_POLLERNAME, pb_config.poller_name()); + _set_macro(MACRO_POLLERID, std::to_string(pb_config.poller_id())); + + auto& users = applier::state::instance().user_macros(); + users.clear(); + + for (auto& p : pb_config.users()) + users[p.first] = p.second; + + // Save old style user macros into old style structures. + for (auto& p : users) { + unsigned int val = 1; + if (is_old_style_user_macro(p.first, val)) + _set_macros_user(val - 1, p.second); + } +} +#endif /** * Get the singleton instance of macros applier. diff --git a/engine/src/configuration/applier/scheduler.cc b/engine/src/configuration/applier/scheduler.cc index aea739f7aef..a4ae815bdc7 100644 --- a/engine/src/configuration/applier/scheduler.cc +++ b/engine/src/configuration/applier/scheduler.cc @@ -33,6 +33,7 @@ using namespace com::centreon::engine::configuration; using namespace com::centreon::engine::logging; using namespace com::centreon::logging; +#ifdef LEGACY_CONF /** * Apply new configuration. * @@ -199,6 +200,191 @@ void applier::scheduler::apply( } } } +#else +/** + * Apply new configuration. + * + * @param[in] config The new configuration. + * @param[in] diff_hosts The difference between old and the + * new host configuration. + * @param[in] diff_services The difference between old and the + * new service configuration. 
+ * @param[in] diff_anomalydetections The difference between old and the + * new cwanomalydetectionservice configuration. + */ +void applier::scheduler::apply( + configuration::State& config, + const pb_difference& diff_hosts, + const pb_difference >& + diff_services, + const pb_difference >& + diff_anomalydetections) { + // Internal pointer will be used in private methods. + _pb_config = &config; + + // Remove and create misc event. + _apply_misc_event(); + + // Objects set. + std::vector hst_to_unschedule; + for (auto& d : diff_hosts.deleted()) + hst_to_unschedule.emplace_back(d.second); + + std::vector > svc_to_unschedule; + for (auto& d : diff_services.deleted()) + svc_to_unschedule.emplace_back(d.second); + + std::vector > ad_to_unschedule; + for (auto& d : diff_anomalydetections.deleted()) + ad_to_unschedule.emplace_back(d.second); + + std::vector hst_to_schedule; + for (auto& a : diff_hosts.added()) + hst_to_schedule.emplace_back(a->host_id()); + + std::vector > svc_to_schedule; + for (auto& a : diff_services.added()) + svc_to_schedule.emplace_back(a->host_id(), a->service_id()); + + std::vector > ad_to_schedule; + for (auto& a : diff_anomalydetections.added()) + ad_to_schedule.emplace_back(a->host_id(), a->service_id()); + + for (auto& m : diff_hosts.modified()) { + auto it_hst = engine::host::hosts.find(m.second->host_name()); + if (it_hst != engine::host::hosts.end()) { + bool has_event(events::loop::instance().find_event( + events::loop::low, timed_event::EVENT_HOST_CHECK, + it_hst->second.get()) != + events::loop::instance().list_end(events::loop::low)); + bool should_schedule(m.second->checks_active() && + m.second->check_interval() > 0); + if (has_event && should_schedule) { + hst_to_unschedule.emplace_back(m.second->host_id()); + hst_to_schedule.emplace_back(m.second->host_id()); + } else if (!has_event && should_schedule) + hst_to_schedule.emplace_back(m.second->host_id()); + else if (has_event && !should_schedule) + 
hst_to_unschedule.emplace_back(m.second->host_id()); + // Else it has no event and should not be scheduled, so do nothing. + } + } + + for (auto& m : diff_services.modified()) { + auto it_svc = engine::service::services_by_id.find( + {m.second->host_id(), m.second->service_id()}); + if (it_svc != engine::service::services_by_id.end()) { + bool has_event(events::loop::instance().find_event( + events::loop::low, timed_event::EVENT_SERVICE_CHECK, + it_svc->second.get()) != + events::loop::instance().list_end(events::loop::low)); + bool should_schedule(m.second->checks_active() && + (m.second->check_interval() > 0)); + if (has_event && should_schedule) { + svc_to_unschedule.emplace_back(m.second->host_id(), + m.second->service_id()); + svc_to_schedule.emplace_back(m.second->host_id(), + m.second->service_id()); + } else if (!has_event && should_schedule) + svc_to_schedule.emplace_back(m.second->host_id(), + m.second->service_id()); + else if (has_event && !should_schedule) + svc_to_unschedule.emplace_back(m.second->host_id(), + m.second->service_id()); + // Else it has no event and should not be scheduled, so do nothing. 
+ } + } + + for (auto& m : diff_anomalydetections.modified()) { + auto it_svc = engine::service::services_by_id.find( + {m.second->host_id(), m.second->service_id()}); + if (it_svc != engine::service::services_by_id.end()) { + bool has_event(events::loop::instance().find_event( + events::loop::low, timed_event::EVENT_SERVICE_CHECK, + it_svc->second.get()) != + events::loop::instance().list_end(events::loop::low)); + bool should_schedule = + m.second->checks_active() && m.second->check_interval() > 0; + if (has_event && should_schedule) { + ad_to_unschedule.emplace_back(m.second->host_id(), + m.second->service_id()); + ad_to_schedule.emplace_back(m.second->host_id(), + m.second->service_id()); + } else if (!has_event && should_schedule) + ad_to_schedule.emplace_back(m.second->host_id(), + m.second->service_id()); + else if (has_event && !should_schedule) + ad_to_unschedule.emplace_back(m.second->host_id(), + m.second->service_id()); + // Else it has no event and should not be scheduled, so do nothing. + } + } + + // Remove deleted host check from the scheduler. + { + std::vector old_hosts = + _get_hosts(hst_to_unschedule, false); + _unschedule_host_events(old_hosts); + } + + // Remove deleted service check from the scheduler. + { + std::vector old_services = + _get_services(svc_to_unschedule, false); + _unschedule_service_events(old_services); + } + + // Remove deleted anomalydetection check from the scheduler. + { + std::vector old_anomalydetections = + _get_anomalydetections(ad_to_unschedule, false); + _unschedule_service_events(old_anomalydetections); + } + // Check if we need to add or modify objects into the scheduler. 
+ if (!hst_to_schedule.empty() || !svc_to_schedule.empty() || + !ad_to_schedule.empty()) { + memset(&scheduling_info, 0, sizeof(scheduling_info)); + + if (config.service_interleave_factor_method().type() == + configuration::InterleaveFactor::ilf_user) + scheduling_info.service_interleave_factor = + config.service_interleave_factor_method().user_value(); + if (config.service_inter_check_delay_method().type() == + configuration::InterCheckDelay::user) + scheduling_info.service_inter_check_delay = + config.service_inter_check_delay_method().user_value(); + if (config.host_inter_check_delay_method().type() == + configuration::InterCheckDelay::user) + scheduling_info.host_inter_check_delay = + config.host_inter_check_delay_method().user_value(); + + // Calculate scheduling parameters. + _calculate_host_scheduling_params(); + _calculate_service_scheduling_params(); + + // Get and schedule new hosts. + { + std::vector new_hosts = + _get_hosts(hst_to_schedule, true); + _schedule_host_events(new_hosts); + } + + // Get and schedule new services and anomalydetections. + { + std::vector new_services = + _get_services(svc_to_schedule, true); + std::vector new_anomalydetections = + _get_anomalydetections(ad_to_schedule, true); + new_services.insert( + new_services.end(), + std::make_move_iterator(new_anomalydetections.begin()), + std::make_move_iterator(new_anomalydetections.end())); + _schedule_service_events(new_services); + } + } +} +#endif /** * Get the singleton instance of scheduler applier. 
@@ -210,6 +396,7 @@ applier::scheduler& applier::scheduler::instance() { return instance; } +#ifdef LEGACY_CONF void applier::scheduler::clear() { _config = nullptr; _evt_check_reaper = nullptr; @@ -224,16 +411,34 @@ void applier::scheduler::clear() { _old_check_reaper_interval = 0; _old_command_check_interval = 0; _old_host_freshness_check_interval = 0; - _old_host_perfdata_file_processing_interval = 0; _old_retention_update_interval = 0; _old_service_freshness_check_interval = 0; - _old_service_perfdata_file_processing_interval = 0; _old_status_update_interval = 0; - _old_host_perfdata_file_processing_command.clear(); - _old_service_perfdata_file_processing_command.clear(); memset(&scheduling_info, 0, sizeof(scheduling_info)); } +#else +void applier::scheduler::clear() { + _pb_config = nullptr; + _evt_check_reaper = nullptr; + _evt_command_check = nullptr; + _evt_hfreshness_check = nullptr; + _evt_orphan_check = nullptr; + _evt_reschedule_checks = nullptr; + _evt_retention_save = nullptr; + _evt_sfreshness_check = nullptr; + _evt_status_save = nullptr; + _old_auto_rescheduling_interval = 0; + _old_check_reaper_interval = 0; + _old_command_check_interval = 0; + _old_host_freshness_check_interval = 0; + _old_retention_update_interval = 0; + _old_service_freshness_check_interval = 0; + _old_status_update_interval = 0; + + memset(&scheduling_info, 0, sizeof(scheduling_info)); +} +#endif /** * Remove some host from scheduling. @@ -269,7 +474,12 @@ void applier::scheduler::remove_service(uint64_t host_id, uint64_t service_id) { * Default constructor. 
*/ applier::scheduler::scheduler() - : _config(nullptr), + : +#ifdef LEGACY_CONF + _config(nullptr), +#else + _pb_config(nullptr), +#endif _evt_check_reaper(nullptr), _evt_command_check(nullptr), _evt_hfreshness_check(nullptr), @@ -282,17 +492,17 @@ applier::scheduler::scheduler() _old_check_reaper_interval(0), _old_command_check_interval(0), _old_host_freshness_check_interval(0), - _old_host_perfdata_file_processing_interval(0), _old_retention_update_interval(0), _old_service_freshness_check_interval(0), - _old_service_perfdata_file_processing_interval(0), - _old_status_update_interval(0) {} + _old_status_update_interval(0) { +} /** * Default destructor. */ applier::scheduler::~scheduler() noexcept {} +#ifdef LEGACY_CONF /** * Remove and create misc event if necessary. */ @@ -406,7 +616,126 @@ void applier::scheduler::_apply_misc_event() { _old_status_update_interval = _config->status_update_interval(); } } +#else +/** + * Remove and create misc event if necessary. + */ +void applier::scheduler::_apply_misc_event() { + // Get current time. + time_t const now = time(nullptr); + + // Remove and add check result reaper event. + if (!_evt_check_reaper || + _old_check_reaper_interval != _pb_config->check_reaper_interval()) { + _remove_misc_event(_evt_check_reaper); + _evt_check_reaper = + _create_misc_event(timed_event::EVENT_CHECK_REAPER, + now + _pb_config->check_reaper_interval(), + _pb_config->check_reaper_interval()); + _old_check_reaper_interval = _pb_config->check_reaper_interval(); + } + + // Remove and add an external command check event. 
+ if ((!_evt_command_check && _pb_config->check_external_commands()) || + (_evt_command_check && !_pb_config->check_external_commands()) || + (_old_command_check_interval != _pb_config->command_check_interval())) { + _remove_misc_event(_evt_command_check); + if (_pb_config->check_external_commands()) { + unsigned long interval(5); + if (_pb_config->command_check_interval() != -1) + interval = (unsigned long)_pb_config->command_check_interval(); + _evt_command_check = _create_misc_event(timed_event::EVENT_COMMAND_CHECK, + now + interval, interval); + } + _old_command_check_interval = _pb_config->command_check_interval(); + } + + // Remove and add a host result "freshness" check event. + if ((!_evt_hfreshness_check && _pb_config->check_host_freshness()) || + (_evt_hfreshness_check && !_pb_config->check_host_freshness()) || + (_old_host_freshness_check_interval != + _pb_config->host_freshness_check_interval())) { + _remove_misc_event(_evt_hfreshness_check); + if (_pb_config->check_host_freshness()) + _evt_hfreshness_check = + _create_misc_event(timed_event::EVENT_HFRESHNESS_CHECK, + now + _pb_config->host_freshness_check_interval(), + _pb_config->host_freshness_check_interval()); + _old_host_freshness_check_interval = + _pb_config->host_freshness_check_interval(); + } + + // Remove and add an orphaned check event. + if ((!_evt_orphan_check && _pb_config->check_orphaned_services()) || + (!_evt_orphan_check && _pb_config->check_orphaned_hosts()) || + (_evt_orphan_check && !_pb_config->check_orphaned_services() && + !_pb_config->check_orphaned_hosts())) { + _remove_misc_event(_evt_orphan_check); + if (_pb_config->check_orphaned_services() || + _pb_config->check_orphaned_hosts()) + _evt_orphan_check = _create_misc_event( + timed_event::EVENT_ORPHAN_CHECK, now + DEFAULT_ORPHAN_CHECK_INTERVAL, + DEFAULT_ORPHAN_CHECK_INTERVAL); + } + + // Remove and add a host and service check rescheduling event. 
+ if ((!_evt_reschedule_checks && _pb_config->auto_reschedule_checks()) || + (_evt_reschedule_checks && !_pb_config->auto_reschedule_checks()) || + (_old_auto_rescheduling_interval != + _pb_config->auto_rescheduling_interval())) { + _remove_misc_event(_evt_reschedule_checks); + if (_pb_config->auto_reschedule_checks()) + _evt_reschedule_checks = + _create_misc_event(timed_event::EVENT_RESCHEDULE_CHECKS, + now + _pb_config->auto_rescheduling_interval(), + _pb_config->auto_rescheduling_interval()); + _old_auto_rescheduling_interval = _pb_config->auto_rescheduling_interval(); + } + // Remove and add a retention data save event if needed. + if ((!_evt_retention_save && _pb_config->retain_state_information()) || + (_evt_retention_save && !_pb_config->retain_state_information()) || + (_old_retention_update_interval != + _pb_config->retention_update_interval())) { + _remove_misc_event(_evt_retention_save); + if (_pb_config->retain_state_information() && + _pb_config->retention_update_interval() > 0) { + unsigned long interval(_pb_config->retention_update_interval() * 60); + _evt_retention_save = _create_misc_event( + timed_event::EVENT_RETENTION_SAVE, now + interval, interval); + } + _old_retention_update_interval = _pb_config->retention_update_interval(); + } + + // Remove add a service result "freshness" check event. + if ((!_evt_sfreshness_check && _pb_config->check_service_freshness()) || + (!_evt_sfreshness_check && !_pb_config->check_service_freshness()) || + _old_service_freshness_check_interval != + _pb_config->service_freshness_check_interval()) { + _remove_misc_event(_evt_sfreshness_check); + if (_pb_config->check_service_freshness()) + _evt_sfreshness_check = _create_misc_event( + timed_event::EVENT_SFRESHNESS_CHECK, + now + _pb_config->service_freshness_check_interval(), + _pb_config->service_freshness_check_interval()); + _old_service_freshness_check_interval = + _pb_config->service_freshness_check_interval(); + } + + // Remove and add a status save event. 
+ if (!_evt_status_save || + (_old_status_update_interval != _pb_config->status_update_interval())) { + _remove_misc_event(_evt_status_save); + _evt_status_save = + _create_misc_event(timed_event::EVENT_STATUS_SAVE, + now + _pb_config->status_update_interval(), + _pb_config->status_update_interval()); + _old_status_update_interval = _pb_config->status_update_interval(); + } +} +#endif + +#ifdef LEGACY_CONF /** * How should we determine the host inter-check delay to use. * @@ -479,7 +808,71 @@ void applier::scheduler::_calculate_host_inter_check_delay( scheduling_info.host_inter_check_delay); } } +#else +/** + * How should we determine the host inter-check delay to use. + * + * @param[in] method The method to use to calculate inter check delay. + */ +void applier::scheduler::_calculate_host_inter_check_delay( + const configuration::InterCheckDelay& method) { + switch (method.type()) { + case configuration::InterCheckDelay::none: + scheduling_info.host_inter_check_delay = 0.0; + break; + case configuration::InterCheckDelay::dumb: + scheduling_info.host_inter_check_delay = 1.0; + break; + + case configuration::InterCheckDelay::user: + // the user specified a delay, so don't try to calculate one. + break; + + case configuration::InterCheckDelay::smart: + default: + // be smart and calculate the best delay to use + // to minimize local load... + if (scheduling_info.total_scheduled_hosts > 0 && + scheduling_info.host_check_interval_total > 0) { + // calculate the average check interval for hosts. + scheduling_info.average_host_check_interval = + scheduling_info.host_check_interval_total / + (double)scheduling_info.total_scheduled_hosts; + + // calculate the average inter check delay (in seconds) + // needed to evenly space the host checks out. + scheduling_info.average_host_inter_check_delay = + scheduling_info.average_host_check_interval / + (double)scheduling_info.total_scheduled_hosts; + + // set the global inter check delay value. 
+ scheduling_info.host_inter_check_delay = + scheduling_info.average_host_inter_check_delay; + + // calculate max inter check delay and see if we should use that + // instead. + double const max_inter_check_delay( + (scheduling_info.max_host_check_spread * 60) / + (double)scheduling_info.total_scheduled_hosts); + if (scheduling_info.host_inter_check_delay > max_inter_check_delay) + scheduling_info.host_inter_check_delay = max_inter_check_delay; + } else + scheduling_info.host_inter_check_delay = 0.0; + + events_logger->debug("Total scheduled host checks: {}", + scheduling_info.total_scheduled_hosts); + events_logger->debug("Host check interval total: {}", + scheduling_info.host_check_interval_total); + events_logger->debug("Average host check interval: {:.2f} sec", + scheduling_info.average_host_check_interval); + events_logger->debug("Host inter-check delay: {:.2f} sec", + scheduling_info.host_inter_check_delay); + } +} +#endif + +#ifdef LEGACY_CONF /** * Calculate host scheduling params. */ @@ -535,7 +928,66 @@ void applier::scheduler::_calculate_host_scheduling_params() { _calculate_host_inter_check_delay(_config->host_inter_check_delay_method()); } +#else +/** + * Calculate host scheduling params. + */ +void applier::scheduler::_calculate_host_scheduling_params() { + engine_logger(dbg_events, most) + << "Determining host scheduling parameters..."; + events_logger->debug("Determining host scheduling parameters..."); + + // get current time. + time_t const now(time(nullptr)); + + // get total hosts and total scheduled hosts. 
+ for (host_map::const_iterator it(engine::host::hosts.begin()), + end(engine::host::hosts.end()); + it != end; ++it) { + com::centreon::engine::host& hst(*it->second); + + bool schedule_check(true); + if (!hst.check_interval() || !hst.active_checks_enabled()) + schedule_check = false; + else { + timezone_locker lock(hst.get_timezone()); + if (!check_time_against_period(now, hst.check_period_ptr)) { + time_t next_valid_time(0); + get_next_valid_time(now, &next_valid_time, hst.check_period_ptr); + if (now == next_valid_time) + schedule_check = false; + } + } + + if (schedule_check) { + hst.set_should_be_scheduled(true); + ++scheduling_info.total_scheduled_hosts; + scheduling_info.host_check_interval_total += + static_cast(hst.check_interval()); + } else { + hst.set_should_be_scheduled(false); + engine_logger(dbg_events, more) + << "Host " << hst.name() << " should not be scheduled."; + events_logger->debug("Host {} should not be scheduled.", hst.name()); + } + + ++scheduling_info.total_hosts; + } + + // Default max host check spread (in minutes). + scheduling_info.max_host_check_spread = _pb_config->max_host_check_spread(); + + // Adjust the check interval total to correspond to + // the interval length. + scheduling_info.host_check_interval_total = + scheduling_info.host_check_interval_total * _pb_config->interval_length(); + + _calculate_host_inter_check_delay( + _pb_config->host_inter_check_delay_method()); +} +#endif +#ifdef LEGACY_CONF /** * How should we determine the service inter-check delay * to use (in seconds). @@ -600,7 +1052,65 @@ void applier::scheduler::_calculate_service_inter_check_delay( scheduling_info.service_inter_check_delay); } } +#else +/** + * How should we determine the service inter-check delay + * to use (in seconds). + * + * @param[in] method The method to use to calculate inter check delay. 
+ */ +void applier::scheduler::_calculate_service_inter_check_delay( + const configuration::InterCheckDelay& method) { + switch (method.type()) { + case configuration::InterCheckDelay::none: + scheduling_info.service_inter_check_delay = 0.0; + break; + + case configuration::InterCheckDelay::dumb: + scheduling_info.service_inter_check_delay = 1.0; + break; + + case configuration::InterCheckDelay::user: + // the user specified a delay, so don't try to calculate one. + break; + + case configuration::InterCheckDelay::smart: + default: + // be smart and calculate the best delay to use to + // minimize local load... + if (scheduling_info.total_scheduled_services > 0 && + scheduling_info.service_check_interval_total > 0) { + // calculate the average inter check delay (in seconds) needed + // to evenly space the service checks out. + scheduling_info.average_service_inter_check_delay = + scheduling_info.average_service_check_interval / + (double)scheduling_info.total_scheduled_services; + // set the global inter check delay value. + scheduling_info.service_inter_check_delay = + scheduling_info.average_service_inter_check_delay; + + // calculate max inter check delay and see if we should use that + // instead. 
+ double const max_inter_check_delay( + (scheduling_info.max_service_check_spread * 60) / + (double)scheduling_info.total_scheduled_services); + if (scheduling_info.service_inter_check_delay > max_inter_check_delay) + scheduling_info.service_inter_check_delay = max_inter_check_delay; + } else + scheduling_info.service_inter_check_delay = 0.0; + + events_logger->debug("Total scheduled service checks: {}", + scheduling_info.total_scheduled_services); + events_logger->debug("Average service check interval: {:.2f} sec", + scheduling_info.average_service_check_interval); + events_logger->debug("Service inter-check delay: {:.2f} sec", + scheduling_info.service_inter_check_delay); + } +} +#endif + +#ifdef LEGACY_CONF /** * How should we determine the service interleave factor. * @@ -634,7 +1144,35 @@ void applier::scheduler::_calculate_service_interleave_factor( scheduling_info.service_interleave_factor); } } +#else +/** + * How should we determine the service interleave factor. + * + * @param[in] method The method to use to calculate interleave factor. + */ +void applier::scheduler::_calculate_service_interleave_factor( + const configuration::InterleaveFactor& method) { + switch (method.type()) { + case configuration::InterleaveFactor::ilf_user: + // the user supplied a value, so don't do any calculation. + break; + + case configuration::InterleaveFactor::ilf_smart: + default: + scheduling_info.service_interleave_factor = + (int)(ceil(scheduling_info.average_scheduled_services_per_host)); + + events_logger->debug("Total scheduled service checks: {}", + scheduling_info.total_scheduled_services); + events_logger->debug("Total hosts: {}", + scheduling_info.total_hosts); + events_logger->debug("Service Interleave factor: {}", + scheduling_info.service_interleave_factor); + } +} +#endif +#ifdef LEGACY_CONF /** * Calculate service scheduling params. 
*/ @@ -711,6 +1249,80 @@ void applier::scheduler::_calculate_service_scheduling_params() { _calculate_service_interleave_factor( _config->service_interleave_factor_method()); } +#else +/** + * Calculate service scheduling params. + */ +void applier::scheduler::_calculate_service_scheduling_params() { + events_logger->debug("Determining service scheduling parameters..."); + + // get current time. + time_t const now(time(nullptr)); + + // get total services and total scheduled services. + for (service_id_map::const_iterator + it(engine::service::services_by_id.begin()), + end(engine::service::services_by_id.end()); + it != end; ++it) { + engine::service& svc(*it->second); + + bool schedule_check(true); + if (!svc.check_interval() || !svc.active_checks_enabled()) + schedule_check = false; + + { + timezone_locker lock(svc.get_timezone()); + if (!check_time_against_period(now, svc.check_period_ptr)) { + time_t next_valid_time(0); + get_next_valid_time(now, &next_valid_time, svc.check_period_ptr); + if (now == next_valid_time) + schedule_check = false; + } + } + + if (schedule_check) { + svc.set_should_be_scheduled(true); + ++scheduling_info.total_scheduled_services; + scheduling_info.service_check_interval_total += + static_cast(svc.check_interval()); + } else { + svc.set_should_be_scheduled(false); + events_logger->debug("Service {} on host {} should not be scheduled.", + svc.description(), svc.get_hostname()); + } + ++scheduling_info.total_services; + } + + // default max service check spread (in minutes). + scheduling_info.max_service_check_spread = + _pb_config->max_service_check_spread(); + + // used later in inter-check delay calculations. 
+ scheduling_info.service_check_interval_total = + scheduling_info.service_check_interval_total * + _pb_config->interval_length(); + + if (scheduling_info.total_hosts) { + scheduling_info.average_services_per_host = + scheduling_info.total_services / (double)scheduling_info.total_hosts; + scheduling_info.average_scheduled_services_per_host = + scheduling_info.total_scheduled_services / + (double)scheduling_info.total_hosts; + } + + // calculate rolling average execution time (available + // from retained state information). + if (scheduling_info.total_scheduled_services) + scheduling_info.average_service_check_interval = + scheduling_info.service_check_interval_total / + (double)scheduling_info.total_scheduled_services; + + _calculate_service_inter_check_delay( + _pb_config->service_inter_check_delay_method()); + _calculate_service_interleave_factor( + _pb_config->service_interleave_factor_method()); +} +#endif /** * Create and register new misc event. @@ -733,6 +1345,7 @@ timed_event* applier::scheduler::_create_misc_event(int type, return retval; } +#ifdef LEGACY_CONF /** * Get engine hosts struct with configuration hosts objects. * @@ -761,7 +1374,32 @@ std::vector applier::scheduler::_get_hosts( } return retval; } +#else +/** + * Get engine hosts struct with configuration hosts objects. + * + * @param[in] hst_ids The list of host IDs to get. + * @param[in] throw_if_not_found Flag to throw if an host is not + * found. 
+ */ +std::vector applier::scheduler::_get_hosts( + const std::vector& hst_ids, + bool throw_if_not_found) { + std::vector retval; + for (auto host_id : hst_ids) { + auto it_hst = engine::host::hosts_by_id.find(host_id); + if (it_hst == engine::host::hosts_by_id.end()) { + if (throw_if_not_found) + throw engine_error() + << "Could not schedule non-existing host with ID " << host_id; + } else + retval.push_back(it_hst->second.get()); + } + return retval; +} +#endif +#ifdef LEGACY_CONF /** * Get engine services struct with configuration services objects. * @@ -791,7 +1429,35 @@ std::vector applier::scheduler::_get_services( } return retval; } +#else +/** + * Get engine services struct with configuration services objects. + * + * @param[in] svc_ids The list of configuration service IDs + * objects. + * @param[in] throw_if_not_found Flag to throw if an host is not + * found. + * @return a vector of services. + */ +std::vector applier::scheduler::_get_services( + const std::vector >& svc_ids, + bool throw_if_not_found) { + std::vector retval; + for (auto& p : svc_ids) { + service_id_map::const_iterator it_svc = + engine::service::services_by_id.find({p.first, p.second}); + if (it_svc == engine::service::services_by_id.end()) { + if (throw_if_not_found) + throw engine_error() << fmt::format( + "Cannot schedule non-existing service ({},{})", p.first, p.second); + } else + retval.push_back(it_svc->second.get()); + } + return retval; +} +#endif +#ifdef LEGACY_CONF /** * Get engine services struct with configuration services objects. * @@ -821,6 +1487,34 @@ applier::scheduler::_get_anomalydetections(set_anomalydetection const& ad_cfg, } return retval; } +#else +/** + * Get engine services struct with configuration services objects. + * + * @param[in] svc_cfg The list of configuration services objects. + * @param[in] throw_if_not_found Flag to throw if an host is not + * found. + * @return a vector of services. 
+ */ +std::vector +applier::scheduler::_get_anomalydetections( + const std::vector >& ad_ids, + bool throw_if_not_found) { + std::vector retval; + for (auto& p : ad_ids) { + service_id_map::const_iterator it_svc = + engine::service::services_by_id.find({p.first, p.second}); + if (it_svc == engine::service::services_by_id.end()) { + if (throw_if_not_found) + throw engine_error() << fmt::format( + "Cannot schedule non-existing anomalydetection ({},{})", p.first, + p.second); + } else + retval.push_back(it_svc->second.get()); + } + return retval; +} +#endif /** * Remove misc event. @@ -909,7 +1603,8 @@ void applier::scheduler::_schedule_host_events( // add scheduled host checks to event queue. for (engine::host* h : hosts) { // update status of all hosts (scheduled or not). - h->update_status(); + // FIXME DBO: Is this really needed? + // h->update_status(); // skip most hosts that shouldn't be scheduled. if (!h->get_should_be_scheduled()) { @@ -1017,7 +1712,8 @@ void applier::scheduler::_schedule_service_events( // add scheduled service checks to event queue. for (engine::service* s : services) { // update status of all services (scheduled or not). - s->update_status(); + // FIXME DBO: Is this really needed? + // s->update_status(); // skip most services that shouldn't be scheduled. 
if (!s->get_should_be_scheduled()) { diff --git a/engine/src/configuration/applier/service.cc b/engine/src/configuration/applier/service.cc index 7cdeb69ea28..43800266ab2 100644 --- a/engine/src/configuration/applier/service.cc +++ b/engine/src/configuration/applier/service.cc @@ -22,66 +22,22 @@ #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/config.hh" #include "com/centreon/engine/configuration/applier/scheduler.hh" -#include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/severity.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/severity_helper.hh" +#include "common/engine_conf/state.pb.h" +#endif using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::downtimes; using namespace com::centreon::engine::configuration; -/** - * Check if the service group name matches the configuration object. - */ -class servicegroup_name_comparator { - public: - servicegroup_name_comparator(std::string const& servicegroup_name) { - _servicegroup_name = servicegroup_name; - } - - bool operator()(std::shared_ptr sg) { - return _servicegroup_name == sg->servicegroup_name(); - } - - private: - std::string _servicegroup_name; -}; - -/** - * Default constructor. - */ -applier::service::service() {} - -/** - * Copy constructor. - * - * @param[in] right Object to copy. - */ -applier::service::service(applier::service const& right) { - (void)right; -} - -/** - * Destructor. - */ -applier::service::~service() {} - -/** - * Assignment operator. - * - * @param[in] right Object to copy. - * - * @return This object. - */ -applier::service& applier::service::operator=(applier::service const& right) { - (void)right; - return *this; -} - +#ifdef LEGACY_CONF /** * Add new service. 
* @@ -183,9 +139,8 @@ void applier::service::add_object(configuration::service const& obj) { if (it->second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); - broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, svc, - it->first.c_str(), it->second.value().c_str(), - &tv); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, svc, it->first, + it->second.value(), &tv); } } @@ -220,7 +175,129 @@ void applier::service::add_object(configuration::service const& obj) { broker_adaptive_service_data(NEBTYPE_SERVICE_ADD, NEBFLAG_NONE, NEBATTR_NONE, svc, MODATTR_ALL); } +#else +/** + * @brief Add a new service. + * + * @param obj The new service protobuf configuration to add into the monitoring + * engine. + */ +void applier::service::add_object(const configuration::Service& obj) { + // Check service. + if (obj.host_name().empty()) + throw engine_error() << fmt::format( + "Could not create service '{}' with no host defined", + obj.service_description()); + else if (obj.host_id() == 0) + throw engine_error() << fmt::format( + "No host_id available for the host '{}' - unable to create service " + "'{}'", + obj.host_name(), obj.service_description()); + + // Logging. + config_logger->debug("Creating new service '{}' of host '{}'.", + obj.service_description(), obj.host_name()); + + // Add service to the global configuration set. + auto* cfg_svc = pb_config.add_services(); + cfg_svc->CopyFrom(obj); + // Create service. 
+ engine::service* svc{add_service( + obj.host_id(), obj.service_id(), obj.host_name(), + obj.service_description(), obj.display_name(), obj.check_period(), + static_cast(obj.initial_state()), + obj.max_check_attempts(), obj.check_interval(), obj.retry_interval(), + obj.notification_interval(), obj.first_notification_delay(), + obj.recovery_notification_delay(), obj.notification_period(), + static_cast(obj.notification_options() & action_svc_ok), + static_cast(obj.notification_options() & action_svc_unknown), + static_cast(obj.notification_options() & action_svc_warning), + static_cast(obj.notification_options() & action_svc_critical), + static_cast(obj.notification_options() & action_svc_flapping), + static_cast(obj.notification_options() & action_svc_downtime), + obj.notifications_enabled(), obj.is_volatile(), obj.event_handler(), + obj.event_handler_enabled(), obj.check_command(), obj.checks_active(), + obj.checks_passive(), obj.flap_detection_enabled(), + obj.low_flap_threshold(), obj.high_flap_threshold(), + static_cast(obj.flap_detection_options() & action_svc_ok), + static_cast(obj.flap_detection_options() & action_svc_warning), + static_cast(obj.flap_detection_options() & action_svc_unknown), + static_cast(obj.flap_detection_options() & action_svc_critical), + static_cast(obj.stalking_options() & action_svc_ok), + static_cast(obj.stalking_options() & action_svc_warning), + static_cast(obj.stalking_options() & action_svc_unknown), + static_cast(obj.stalking_options() & action_svc_critical), + obj.process_perf_data(), obj.check_freshness(), obj.freshness_threshold(), + obj.notes(), obj.notes_url(), obj.action_url(), obj.icon_image(), + obj.icon_image_alt(), obj.retain_status_information(), + obj.retain_nonstatus_information(), obj.obsess_over_service(), + obj.timezone(), obj.icon_id())}; + if (!svc) + throw engine_error() << fmt::format( + "Could not register service '{}' of host '{}'", + obj.service_description(), obj.host_name()); + 
svc->set_initial_notif_time(0); + engine::service::services[{obj.host_name(), obj.service_description()}] + ->set_host_id(obj.host_id()); + engine::service::services[{obj.host_name(), obj.service_description()}] + ->set_service_id(obj.service_id()); + svc->set_acknowledgement_timeout(obj.acknowledgement_timeout() * + pb_config.interval_length()); + svc->set_last_acknowledgement(0); + + // Add contacts. + for (auto& c : obj.contacts().data()) + svc->mut_contacts().insert({c, nullptr}); + + // Add contactgroups. + for (auto& cg : obj.contactgroups().data()) + svc->get_contactgroups().insert({cg, nullptr}); + + // Add custom variables. + for (auto& cv : obj.customvariables()) { + svc->custom_variables.emplace( + cv.name(), engine::customvariable(cv.value(), cv.is_sent())); + + if (cv.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, svc, cv.name(), + cv.value(), &tv); + } + } + + // Add severity. + if (obj.severity_id()) { + configuration::severity_helper::key_type k = {obj.severity_id(), + SeverityType::service}; + auto sv = engine::severity::severities.find(k); + if (sv == engine::severity::severities.end()) + throw engine_error() << fmt::format( + "Could not add the severity ({}, {}) to the service '{}' of host " + "'{}'", + k.first, k.second, obj.service_description(), obj.host_name()); + svc->set_severity(sv->second); + } + + // add tags + for (auto& t : obj.tags()) { + auto k = std::make_pair(t.first(), t.second()); + tag_map::iterator it_tag{engine::tag::tags.find(k)}; + if (it_tag == engine::tag::tags.end()) + throw engine_error() << fmt::format( + "Could not find tag ({}, {}) on which to apply service ({}, {})", + k.first, k.second, obj.host_id(), obj.service_id()); + else + svc->mut_tags().emplace_front(it_tag->second); + } + + // Notify event broker. 
+ broker_adaptive_service_data(NEBTYPE_SERVICE_ADD, NEBFLAG_NONE, NEBATTR_NONE, + svc, MODATTR_ALL); +} +#endif + +#ifdef LEGACY_CONF /** * Expand a service object. * @@ -253,7 +330,41 @@ void applier::service::expand_objects(configuration::state& s) { // Set expanded services in configuration state. s.mut_services() = std::move(expanded); } +#else +/** + * Expand a service object. + * + * @param[in,out] s State being applied. + */ +void applier::service::expand_objects(configuration::State& s) { + std::list> expanded; + // Let's consider all the macros defined in s. + absl::flat_hash_set cvs; + for (auto& cv : s.macros_filter().data()) + cvs.emplace(cv); + + absl::flat_hash_map hgs; + for (auto& hg : *s.mutable_hostgroups()) + hgs.emplace(hg.hostgroup_name(), &hg); + + // Browse all services. + for (auto& service_cfg : *s.mutable_services()) { + // Should custom variables be sent to broker ? + for (auto& cv : *service_cfg.mutable_customvariables()) { + if (!s.enable_macros_filter() || cvs.contains(cv.name())) + cv.set_is_sent(true); + } + // Expand membershipts. + _expand_service_memberships(service_cfg, s); + + // Inherits special vars. + _inherits_special_vars(service_cfg, s); + } +} +#endif + +#ifdef LEGACY_CONF /** * Modified service. 
* @@ -432,7 +543,7 @@ void applier::service::modify_object(configuration::service const& obj) { if (c.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_DELETE, s.get(), - c.first.c_str(), c.second.value().c_str(), &tv); + c.first, c.second.value(), &tv); } } s->custom_variables.clear(); @@ -444,7 +555,7 @@ void applier::service::modify_object(configuration::service const& obj) { if (c.second.is_sent()) { timeval tv(get_broker_timestamp(nullptr)); broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, s.get(), - c.first.c_str(), c.second.value().c_str(), &tv); + c.first, c.second.value(), &tv); } } } @@ -480,7 +591,229 @@ void applier::service::modify_object(configuration::service const& obj) { broker_adaptive_service_data(NEBTYPE_SERVICE_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, s.get(), MODATTR_ALL); } +#else +/** + * @brief Modify a service configuration and the real time associated service. + * + * @param[in] old_obj The service to modify into the monitoring + * engine. + * @param[in] new_obj The new service to apply. + */ +void applier::service::modify_object(configuration::Service* old_obj, + const configuration::Service& new_obj) { + const std::string& host_name(old_obj->host_name()); + const std::string& service_description(old_obj->service_description()); + + // Logging. + config_logger->debug("Modifying service '{}' of host '{}'.", + service_description, host_name); + + // Find service object. + service_id_map::iterator it_obj = engine::service::services_by_id.find( + {old_obj->host_id(), old_obj->service_id()}); + if (it_obj == engine::service::services_by_id.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing service object '{}' of host '{}'", + service_description, host_name); + std::shared_ptr s = it_obj->second; + + // Modify properties. 
+ if (it_obj->second->get_hostname() != new_obj.host_name() || + it_obj->second->description() != new_obj.service_description()) { + engine::service::services.erase( + {it_obj->second->get_hostname(), it_obj->second->description()}); + engine::service::services.insert( + {{new_obj.host_name(), new_obj.service_description()}, it_obj->second}); + } + + s->set_hostname(new_obj.host_name()); + s->set_description(new_obj.service_description()); + s->set_display_name(new_obj.display_name()); + s->set_check_command(new_obj.check_command()); + s->set_event_handler(new_obj.event_handler()); + s->set_event_handler_enabled(new_obj.event_handler_enabled()); + s->set_initial_state( + static_cast(new_obj.initial_state())); + s->set_check_interval(new_obj.check_interval()); + s->set_retry_interval(new_obj.retry_interval()); + s->set_max_attempts(new_obj.max_check_attempts()); + + s->set_notify_on( + (new_obj.notification_options() & action_svc_unknown ? notifier::unknown + : notifier::none) | + (new_obj.notification_options() & action_svc_warning ? notifier::warning + : notifier::none) | + (new_obj.notification_options() & action_svc_critical ? notifier::critical + : notifier::none) | + (new_obj.notification_options() & action_svc_ok ? notifier::ok + : notifier::none) | + (new_obj.notification_options() & action_svc_flapping + ? (notifier::flappingstart | notifier::flappingstop | + notifier::flappingdisabled) + : notifier::none) | + (new_obj.notification_options() & action_svc_downtime ? notifier::downtime + : notifier::none)); + + s->set_notification_interval( + static_cast(new_obj.notification_interval())); + s->set_first_notification_delay( + static_cast(new_obj.first_notification_delay())); + + s->add_stalk_on(new_obj.stalking_options() & action_svc_ok ? notifier::ok + : notifier::none); + s->add_stalk_on(new_obj.stalking_options() & action_svc_warning + ? notifier::warning + : notifier::none); + s->add_stalk_on(new_obj.stalking_options() & action_svc_unknown + ? 
notifier::unknown + : notifier::none); + s->add_stalk_on(new_obj.stalking_options() & action_svc_critical + ? notifier::critical + : notifier::none); + s->set_notification_period(new_obj.notification_period()); + s->set_check_period(new_obj.check_period()); + s->set_flap_detection_enabled(new_obj.flap_detection_enabled()); + s->set_low_flap_threshold(new_obj.low_flap_threshold()); + s->set_high_flap_threshold(new_obj.high_flap_threshold()); + + s->set_flap_detection_on(notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & action_svc_ok + ? notifier::ok + : notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & action_svc_warning + ? notifier::warning + : notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & action_svc_unknown + ? notifier::unknown + : notifier::none); + s->add_flap_detection_on(new_obj.flap_detection_options() & + action_svc_critical + ? notifier::critical + : notifier::none); + + s->set_process_performance_data( + static_cast(new_obj.process_perf_data())); + s->set_check_freshness(new_obj.check_freshness()); + s->set_freshness_threshold(new_obj.freshness_threshold()); + s->set_accept_passive_checks(new_obj.checks_passive()); + s->set_event_handler(new_obj.event_handler()); + s->set_checks_enabled(new_obj.checks_active()); + s->set_retain_status_information( + static_cast(new_obj.retain_status_information())); + s->set_retain_nonstatus_information( + static_cast(new_obj.retain_nonstatus_information())); + s->set_notifications_enabled(new_obj.notifications_enabled()); + s->set_obsess_over(new_obj.obsess_over_service()); + s->set_notes(new_obj.notes()); + s->set_notes_url(new_obj.notes_url()); + s->set_action_url(new_obj.action_url()); + s->set_icon_image(new_obj.icon_image()); + s->set_icon_image_alt(new_obj.icon_image_alt()); + s->set_is_volatile(new_obj.is_volatile()); + s->set_timezone(new_obj.timezone()); + s->set_host_id(new_obj.host_id()); + 
s->set_service_id(new_obj.service_id()); + s->set_acknowledgement_timeout(new_obj.acknowledgement_timeout() * + pb_config.interval_length()); + s->set_recovery_notification_delay(new_obj.recovery_notification_delay()); + + // Contacts. + if (!MessageDifferencer::Equals(new_obj.contacts(), old_obj->contacts())) { + // Delete old contacts. + s->mut_contacts().clear(); + + // Add contacts to host. + for (auto& contact_name : new_obj.contacts().data()) + s->mut_contacts().insert({contact_name, nullptr}); + } + + // Contact groups. + if (!MessageDifferencer::Equals(new_obj.contactgroups(), + old_obj->contactgroups())) { + // Delete old contact groups. + s->get_contactgroups().clear(); + + // Add contact groups to host. + for (auto& cg_name : new_obj.contactgroups().data()) + s->get_contactgroups().insert({cg_name, nullptr}); + } + + // Custom variables. + if (!std::equal( + new_obj.customvariables().begin(), new_obj.customvariables().end(), + old_obj->customvariables().begin(), old_obj->customvariables().end(), + MessageDifferencer::Equals)) { + for (auto& c : s->custom_variables) { + if (c.second.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_DELETE, s.get(), + c.first.c_str(), c.second.value().c_str(), &tv); + } + } + s->custom_variables.clear(); + + for (auto& c : new_obj.customvariables()) { + s->custom_variables[c.name()] = c.value(); + + if (c.is_sent()) { + timeval tv(get_broker_timestamp(nullptr)); + broker_custom_variable(NEBTYPE_SERVICECUSTOMVARIABLE_ADD, s.get(), + c.name(), c.value(), &tv); + } + } + } + + // Severity. 
+ if (new_obj.severity_id()) { + configuration::severity_helper::key_type k = {new_obj.severity_id(), + SeverityType::service}; + auto sv = engine::severity::severities.find(k); + if (sv == engine::severity::severities.end()) + throw engine_error() << "Could not update the severity (" << k.first + << ", " << k.second << ") to the service '" + << new_obj.service_description() << "' of host '" + << new_obj.host_name() << "'"; + s->set_severity(sv->second); + } else + s->set_severity(nullptr); + + // add tags + bool tags_changed = false; + if (old_obj->tags().size() == new_obj.tags().size()) { + for (auto new_it = new_obj.tags().begin(), old_it = old_obj->tags().begin(); + old_it != old_obj->tags().end() && new_it != new_obj.tags().end(); + ++old_it, ++new_it) { + if (new_it->first() != old_it->first() || + new_it->second() != old_it->second()) { + tags_changed = true; + break; + } + } + } else + tags_changed = true; + + if (tags_changed) { + s->mut_tags().clear(); + old_obj->mutable_tags()->CopyFrom(new_obj.tags()); + for (auto& t : new_obj.tags()) { + tag_map::iterator it_tag = + engine::tag::tags.find({t.first(), t.second()}); + if (it_tag == engine::tag::tags.end()) + throw engine_error() << fmt::format( + "Could not find tag '{}' on which to apply service ({}, {})", + t.first(), new_obj.host_id(), new_obj.service_id()); + else + s->mut_tags().emplace_front(it_tag->second); + } + } + // Notify event broker. + broker_adaptive_service_data(NEBTYPE_SERVICE_UPDATE, NEBFLAG_NONE, + NEBATTR_NONE, s.get(), MODATTR_ALL); +} +#endif + +#ifdef LEGACY_CONF /** * Remove old service. * @@ -542,7 +875,70 @@ void applier::service::remove_object(configuration::service const& obj) { // Remove service from the global configuration set. config->mut_services().erase(obj); } +#else +/** + * Remove old service. + * + * @param[in] obj The new service to remove from the monitoring + * engine. 
+ */ +void applier::service::remove_object(ssize_t idx) { + Service& obj = pb_config.mutable_services()->at(idx); + const std::string& host_name = obj.host_name(); + const std::string& service_description = obj.service_description(); + + // Logging. + config_logger->debug("Removing service '{}' of host '{}'.", + service_description, host_name); + + // Find anomaly detections depending on this service + for (auto cad : pb_config.anomalydetections()) { + if (cad.host_id() == obj.host_id() && + cad.dependent_service_id() == obj.service_id()) { + auto ad = engine::service::services_by_id.find( + {cad.host_id(), cad.service_id()}); + if (ad != engine::service::services_by_id.end()) + std::static_pointer_cast(ad->second) + ->set_dependent_service(nullptr); + } + } + // Find service. + auto it = + engine::service::services_by_id.find({obj.host_id(), obj.service_id()}); + if (it != engine::service::services_by_id.end()) { + auto svc = it->second; + + // Remove service comments. + comment::delete_service_comments(obj.host_id(), obj.service_id()); + + // Remove service downtimes. + downtime_manager::instance() + .delete_downtime_by_hostname_service_description_start_time_comment( + host_name, service_description, {false, (time_t)0}, ""); + + // Remove events related to this service. + applier::scheduler::instance().remove_service(obj.host_id(), + obj.service_id()); + + // remove service from servicegroup->members + for (auto& it_s : svc->get_parent_groups()) + it_s->members.erase({host_name, service_description}); + + // Notify event broker. + broker_adaptive_service_data(NEBTYPE_SERVICE_DELETE, NEBFLAG_NONE, + NEBATTR_NONE, svc.get(), MODATTR_ALL); + + // Unregister service. + engine::service::services.erase({host_name, service_description}); + engine::service::services_by_id.erase(it); + } + + // Remove service from the global configuration set. + pb_config.mutable_services()->DeleteSubrange(idx, 1); +} +#endif +#ifdef LEGACY_CONF /** * Resolve a service. 
* @@ -579,7 +975,44 @@ void applier::service::resolve_object(configuration::service const& obj, // Resolve service. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * Resolve a service. + * + * @param[in] obj Service object. + */ +void applier::service::resolve_object(const configuration::Service& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving service '{}' of host '{}'.", + obj.service_description(), obj.host_name()); + + // Find service. + service_id_map::iterator it = + engine::service::services_by_id.find({obj.host_id(), obj.service_id()}); + if (engine::service::services_by_id.end() == it) + throw engine_error() << "Cannot resolve non-existing service '" + << obj.service_description() << "' of host '" + << obj.host_name() << "'"; + + // Remove service group links. + it->second->get_parent_groups().clear(); + + // Find host and adjust its counters. + host_id_map::iterator hst(engine::host::hosts_by_id.find(it->first.first)); + if (hst != engine::host::hosts_by_id.end()) { + hst->second->set_total_services(hst->second->get_total_services() + 1); + hst->second->set_total_service_check_interval( + hst->second->get_total_service_check_interval() + + static_cast(it->second->check_interval())); + } + + // Resolve service. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif +#ifdef LEGACY_CONF /** * Expand service instance memberships. * @@ -614,7 +1047,37 @@ void applier::service::_expand_service_memberships(configuration::service& obj, s.servicegroups().insert(backup); } } +#else +/** + * Expand service instance memberships. + * + * @param[in] obj Target service. + * @param[out] s Configuration state. + */ +void applier::service::_expand_service_memberships(configuration::Service& obj, + configuration::State& s) { + absl::flat_hash_map sgs; + for (auto& sg : *s.mutable_servicegroups()) + sgs[sg.servicegroup_name()] = &sg; + + // Browse service groups. 
+ for (auto& sg_name : obj.servicegroups().data()) { + // Find service group. + auto found = sgs.find(sg_name); + if (found == sgs.end()) + throw engine_error() << fmt::format( + "Could not add service '{}' of host '{}' to non-existing service " + "group '{}'", + obj.service_description(), obj.host_name(), sg_name); + + // Add service to service members + fill_pair_string_group(found->second->mutable_members(), obj.host_name(), + obj.service_description()); + } +} +#endif +#ifdef LEGACY_CONF /** * @brief Inherits special variables from host. * @@ -655,3 +1118,47 @@ void applier::service::_inherits_special_vars(configuration::service& obj, obj.timezone(it->timezone()); } } +#else +/** + * @brief Inherits special variables from host. + * + * These special variables, if not defined are inherited from host. + * They are contact_groups, notification_interval and + * notification_period. + * + * @param[in,out] obj Target service. + * @param[in] s Configuration state. + */ +void applier::service::_inherits_special_vars(configuration::Service& obj, + const configuration::State& s) { + // Detect if any special variable has not been defined. + if (!obj.host_id() || obj.contacts().data().empty() || + obj.contactgroups().data().empty() || obj.notification_interval() == 0 || + obj.notification_period().empty() || obj.timezone().empty()) { + // Find host. + auto it = std::find_if(s.hosts().begin(), s.hosts().end(), + [name = obj.host_name()](const Host& h) { + return h.host_name() == name; + }); + if (it == s.hosts().end()) + throw engine_error() << fmt::format( + "Could not inherit special variables for service '{}': host '{}' " + "does not exist", + obj.service_description(), obj.host_name()); + + // Inherits variables. 
+ if (!obj.host_id()) + obj.set_host_id(it->host_id()); + if (obj.contacts().data().empty() && obj.contactgroups().data().empty()) { + obj.mutable_contacts()->CopyFrom(it->contacts()); + obj.mutable_contactgroups()->CopyFrom(it->contactgroups()); + } + if (obj.notification_interval() == 0) + obj.set_notification_interval(it->notification_interval()); + if (obj.notification_period().empty()) + obj.set_notification_period(it->notification_period()); + if (obj.timezone().empty()) + obj.set_timezone(it->timezone()); + } +} +#endif diff --git a/engine/src/configuration/applier/servicedependency.cc b/engine/src/configuration/applier/servicedependency.cc index c756237b3bf..fc78b8dc120 100644 --- a/engine/src/configuration/applier/servicedependency.cc +++ b/engine/src/configuration/applier/servicedependency.cc @@ -24,11 +24,16 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging/logger.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/object.hh" #include "common/engine_legacy_conf/servicedependency.hh" +#else +#include "common/engine_conf/state.pb.h" +#endif using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Add new service dependency. * @@ -127,7 +132,94 @@ void applier::servicedependency::add_object( // Notify event broker. broker_adaptive_dependency_data(NEBTYPE_SERVICEDEPENDENCY_ADD, sd.get()); } +#else +/** + * Add new service dependency. + * + * @param[in] obj The new servicedependency to add into the monitoring + * engine. + */ +void applier::servicedependency::add_object( + const configuration::Servicedependency& obj) { + // Check service dependency. 
+ if (obj.hosts().data().size() != 1 || !obj.hostgroups().data().empty() || + obj.service_description().data().size() != 1 || + !obj.servicegroups().data().empty() || + obj.dependent_hosts().data().size() != 1 || + !obj.dependent_hostgroups().data().empty() || + obj.dependent_service_description().data().size() != 1 || + !obj.dependent_servicegroups().data().empty()) + throw engine_error() + << "Could not create service " + << "dependency with multiple (dependent) hosts / host groups " + << "/ services / service groups"; + + if (obj.dependency_type() != execution_dependency && + obj.dependency_type() != notification_dependency) + throw engine_error() + << "Could not create unexpanded dependency of service '" + << obj.dependent_service_description().data()[0] << "' of host '" + << obj.dependent_hosts().data()[0] << "' on service '" + << obj.service_description().data()[0] << "' of host '" + << obj.hosts().data()[0] << "'"; + // Logging. + config_logger->debug( + "Creating new service dependency of service '{}' of host '{}' on service " + "'{}' of host '{}'.", + obj.dependent_service_description().data()[0], + obj.dependent_hosts().data()[0], obj.service_description().data()[0], + obj.hosts().data()[0]); + + // Add dependency to the global configuration set. + auto* new_obj = pb_config.add_servicedependencies(); + new_obj->CopyFrom(obj); + + std::shared_ptr sd; + + if (obj.dependency_type() == execution_dependency) + // Create execution dependency. 
+ sd = std::make_shared( + configuration::servicedependency_key(obj), + obj.dependent_hosts().data()[0], + obj.dependent_service_description().data()[0], obj.hosts().data()[0], + obj.service_description().data()[0], dependency::execution, + obj.inherits_parent(), + static_cast(obj.execution_failure_options() & action_sd_ok), + static_cast(obj.execution_failure_options() & action_sd_warning), + static_cast(obj.execution_failure_options() & action_sd_unknown), + static_cast(obj.execution_failure_options() & action_sd_critical), + static_cast(obj.execution_failure_options() & action_sd_pending), + obj.dependency_period()); + else + // Create notification dependency. + sd = std::make_shared( + servicedependency_key(obj), obj.dependent_hosts().data()[0], + obj.dependent_service_description().data()[0], obj.hosts().data()[0], + obj.service_description().data()[0], dependency::notification, + obj.inherits_parent(), + static_cast(obj.notification_failure_options() & action_sd_ok), + static_cast(obj.notification_failure_options() & + action_sd_warning), + static_cast(obj.notification_failure_options() & + action_sd_unknown), + static_cast(obj.notification_failure_options() & + action_sd_critical), + static_cast(obj.notification_failure_options() & + action_sd_pending), + obj.dependency_period()); + + // Add new items to the global list. + engine::servicedependency::servicedependencies.insert( + {{sd->get_dependent_hostname(), sd->get_dependent_service_description()}, + sd}); + + // Notify event broker. + broker_adaptive_dependency_data(NEBTYPE_SERVICEDEPENDENCY_ADD, sd.get()); +} +#endif + +#ifdef LEGACY_CONF /** * Expand service dependencies. * @@ -210,7 +302,85 @@ void applier::servicedependency::expand_objects(configuration::state& s) { // Set expanded service dependencies in configuration state. s.servicedependencies().swap(expanded); } +#else +/** + * Expand service dependencies. + * + * @param[in,out] s Configuration being applied. 
+ */ +void applier::servicedependency::expand_objects(configuration::State& s) { + // Browse all dependencies. + std::list> expanded; + for (auto& dep : s.servicedependencies()) { + // Expand service dependency instances. + if (dep.hosts().data().size() != 1 || !dep.hostgroups().data().empty() || + dep.service_description().data().size() != 1 || + !dep.servicegroups().data().empty() || + dep.dependent_hosts().data().size() != 1 || + !dep.dependent_hostgroups().data().empty() || + dep.dependent_service_description().data().size() != 1 || + !dep.dependent_servicegroups().data().empty() || + dep.dependency_type() == DependencyKind::unknown) { + // Expand depended services. + absl::flat_hash_set> + depended_services; + _expand_services(dep.hosts().data(), dep.hostgroups().data(), + dep.service_description().data(), + dep.servicegroups().data(), s, depended_services); + + // Expand dependent services. + absl::flat_hash_set> + dependent_services; + _expand_services( + dep.dependent_hosts().data(), dep.dependent_hostgroups().data(), + dep.dependent_service_description().data(), + dep.dependent_servicegroups().data(), s, dependent_services); + + // Browse all depended and dependent services. + for (auto& p1 : depended_services) + for (auto& p2 : dependent_services) { + // Create service dependency instance. 
+ for (int32_t i = 1; i <= 2; i++) { + if (dep.dependency_type() == DependencyKind::unknown || + static_cast(dep.dependency_type()) == i) { + auto sdep = std::make_unique(); + sdep->CopyFrom(dep); + sdep->clear_hostgroups(); + sdep->clear_hosts(); + sdep->mutable_hosts()->add_data(p1.first); + sdep->clear_servicegroups(); + sdep->clear_service_description(); + sdep->mutable_service_description()->add_data(p1.second); + sdep->clear_dependent_hostgroups(); + sdep->clear_dependent_hosts(); + sdep->mutable_dependent_hosts()->add_data(p2.first); + sdep->clear_dependent_servicegroups(); + sdep->clear_dependent_service_description(); + sdep->mutable_dependent_service_description()->add_data( + p2.second); + if (i == 2) { + sdep->set_dependency_type(DependencyKind::execution_dependency); + sdep->set_notification_failure_options(0); + } else { + sdep->set_dependency_type( + DependencyKind::notification_dependency); + sdep->set_execution_failure_options(0); + } + expanded.push_back(std::move(sdep)); + } + } + } + } + } + + // Set expanded service dependencies in configuration state. + s.clear_servicedependencies(); + for (auto& e : expanded) + s.mutable_servicedependencies()->AddAllocated(e.release()); +} +#endif +#ifdef LEGACY_CONF /** * @brief Modify service dependency. * @@ -228,7 +398,26 @@ void applier::servicedependency::modify_object( << "or removed, this is likely a software bug that you should " << "report to Centreon Engine developers"; } +#else +/** + * @brief Modify service dependency. + * + * Service dependencies cannot be defined with anything else than their + * full content. Therefore no modification can occur. + * + * @param[in] obj Unused. 
+ */ +void applier::servicedependency::modify_object( + configuration::Servicedependency* old_obj [[maybe_unused]], + const configuration::Servicedependency& new_obj [[maybe_unused]]) { + throw engine_error() + << "Could not modify a service dependency: service dependency objects " + "can only be added or removed, this is likely a software bug that " + "you should report to Centreon Engine developers"; +} +#endif +#ifdef LEGACY_CONF /** * Remove old service dependency. * @@ -257,7 +446,41 @@ void applier::servicedependency::remove_object( // Remove dependency from the global configuration set. config->servicedependencies().erase(obj); } +#else +/** + * Remove old service dependency. + * + * @param[in] obj The service dependency to remove from the monitoring + * engine. + */ +void applier::servicedependency::remove_object(ssize_t idx) { + // Logging. + config_logger->debug("Removing a service dependency."); + // Find service dependency. + auto& obj = pb_config.servicedependencies(idx); + size_t key = servicedependency_key(obj); + + servicedependency_mmap::iterator it = + engine::servicedependency::servicedependencies_find( + std::make_tuple(obj.dependent_hosts().data(0), + obj.dependent_service_description().data(0), key)); + if (it != engine::servicedependency::servicedependencies.end()) { + // Notify event broker. + timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_dependency_data(NEBTYPE_SERVICEDEPENDENCY_DELETE, + it->second.get()); + + // Remove service dependency from its list. + engine::servicedependency::servicedependencies.erase(it); + } + + // Remove dependency from the global configuration set. + pb_config.mutable_servicedependencies()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * Resolve a servicedependency. * @@ -280,7 +503,33 @@ void applier::servicedependency::resolve_object( // Resolve service dependency. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * Resolve a servicedependency. 
+ * + * @param[in] obj Servicedependency object. + */ +void applier::servicedependency::resolve_object( + const configuration::Servicedependency& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving a service dependency."); + // Find service dependency. + size_t key = configuration::servicedependency_key(obj); + servicedependency_mmap::iterator it = + engine::servicedependency::servicedependencies_find( + {obj.dependent_hosts().data(0), + obj.dependent_service_description().data(0), key}); + if (engine::servicedependency::servicedependencies.end() == it) + throw engine_error() << "Cannot resolve non-existing service dependency"; + + // Resolve service dependency. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif + +#ifdef LEGACY_CONF /** * Expand services. * @@ -342,3 +591,64 @@ void applier::servicedependency::_expand_services( expanded.insert(*it_member); } } +#else +/** + * Expand services. + * + * @param[in] hst Hosts. + * @param[in] hg Host groups. + * @param[in] svc Service descriptions. + * @param[in] sg Service groups. + * @param[in,out] s Configuration state. + * @param[out] expanded Expanded services. + */ +void applier::servicedependency::_expand_services( + const ::google::protobuf::RepeatedPtrField& hst, + const ::google::protobuf::RepeatedPtrField& hg, + const ::google::protobuf::RepeatedPtrField& svc, + const ::google::protobuf::RepeatedPtrField& sg, + configuration::State& s, + absl::flat_hash_set>& expanded) { + // Expanded hosts. + absl::flat_hash_set all_hosts; + + // Base hosts. + all_hosts.insert(hst.begin(), hst.end()); + + // Host groups. + for (auto& hgn : hg) { + // Find host group + auto found = std::find_if( + s.hostgroups().begin(), s.hostgroups().end(), + [&hgn](const Hostgroup& hgg) { return hgg.hostgroup_name() == hgn; }); + if (found == s.hostgroups().end()) + throw engine_error() << fmt::format("Could not resolve host group '{}'", + hgn); + // Add host group members. 
+    all_hosts.insert(found->members().data().begin(),
+                     found->members().data().end());
+  }
+
+  // Hosts * services.
+  for (auto& h : all_hosts)
+    for (auto& s : svc)
+      expanded.insert({h, s});
+
+  // Service groups.
+  for (auto& sgn : sg) {
+    // Find service group.
+    auto found =
+        std::find_if(s.servicegroups().begin(), s.servicegroups().end(),
+                     [&sgn](const Servicegroup& sgg) {
+                       return sgg.servicegroup_name() == sgn;
+                     });
+    if (found == s.servicegroups().end())
+      throw engine_error() << fmt::format(
+          "Could not resolve service group '{}'", sgn);
+
+    // Add service group members.
+    for (auto& m : found->members().data())
+      expanded.insert({m.first(), m.second()});
+  }
+}
+#endif
diff --git a/engine/src/configuration/applier/serviceescalation.cc b/engine/src/configuration/applier/serviceescalation.cc
index 92aa9062a5f..8321eddd772 100644
--- a/engine/src/configuration/applier/serviceescalation.cc
+++ b/engine/src/configuration/applier/serviceescalation.cc
@@ -23,10 +23,13 @@
 #include "com/centreon/engine/exceptions/error.hh"
 #include "com/centreon/engine/globals.hh"
 #include "com/centreon/engine/logging/logger.hh"
+#ifdef LEGACY_CONF
 #include "common/engine_legacy_conf/serviceescalation.hh"
+#endif
 
 using namespace com::centreon::engine::configuration;
 
+#ifdef LEGACY_CONF
 /**
  *  Add new service escalation.
  *
@@ -91,7 +94,67 @@ void applier::serviceescalation::add_object(
        it != end; ++it)
     se->get_contactgroups().insert({*it, nullptr});
 }
+#else
+/**
+ * Add new service escalation.
+ *
+ * @param[in] obj The new service escalation to add into the
+ * monitoring engine.
+ */
+void applier::serviceescalation::add_object(
+    const configuration::Serviceescalation& obj) {
+  // Check service escalation.
+ if (obj.hosts().data().size() != 1 || !obj.hostgroups().data().empty() || + obj.service_description().data().size() != 1 || + !obj.servicegroups().data().empty()) { + throw engine_error() << "Could not create service escalation with multiple " + "hosts / host groups / services / service groups: " + << obj.DebugString(); + } + + // Logging. + config_logger->debug("Creating new escalation for service '{}' of host '{}'", + obj.service_description().data()[0], + obj.hosts().data()[0]); + + // Add escalation to the global configuration set. + auto* se_cfg = pb_config.add_serviceescalations(); + se_cfg->CopyFrom(obj); + + size_t key = configuration::serviceescalation_key(obj); + + // Create service escalation. + auto se = std::make_shared( + obj.hosts().data()[0], obj.service_description().data()[0], + obj.first_notification(), obj.last_notification(), + obj.notification_interval(), obj.escalation_period(), + ((obj.escalation_options() & action_se_warning) ? notifier::warning + : notifier::none) | + ((obj.escalation_options() & action_se_unknown) ? notifier::unknown + : notifier::none) | + ((obj.escalation_options() & action_se_critical) ? notifier::critical + : notifier::none) | + ((obj.escalation_options() & action_se_recovery) ? notifier::ok + : notifier::none), + key); + + // Add new items to the global list. + engine::serviceescalation::serviceescalations.insert( + {{se->get_hostname(), se->get_description()}, se}); + + // Notify event broker. + timeval tv{get_broker_timestamp(nullptr)}; + broker_adaptive_escalation_data(NEBTYPE_SERVICEESCALATION_ADD, NEBFLAG_NONE, + NEBATTR_NONE, se.get(), &tv); + + // Add contact groups to service escalation. + for (auto& cg : obj.contactgroups().data()) { + se->get_contactgroups().insert({cg, nullptr}); + } +} +#endif +#ifdef LEGACY_CONF /** * Expand all service escalations. 
* @@ -136,7 +199,78 @@ void applier::serviceescalation::expand_objects(configuration::state& s) { // Set expanded service escalations in configuration state. s.serviceescalations().swap(expanded); } +#else +/** + * Expand all service escalations. + * + * @param[in,out] s Configuration being applied. + */ +void applier::serviceescalation::expand_objects(configuration::State& s) { + std::list> resolved; + // Browse all escalations. + config_logger->debug("Expanding service escalations"); + + for (auto& se : *s.mutable_serviceescalations()) { + /* A set of all the hosts related to this escalation */ + absl::flat_hash_set host_names; + for (auto& hname : se.hosts().data()) + host_names.insert(hname); + if (se.hostgroups().data().size() > 0) { + for (auto& hg_name : se.hostgroups().data()) { + auto found_hg = + std::find_if(s.hostgroups().begin(), s.hostgroups().end(), + [&hg_name](const Hostgroup& hg) { + return hg.hostgroup_name() == hg_name; + }); + if (found_hg != s.hostgroups().end()) { + for (auto& h : found_hg->members().data()) + host_names.emplace(h); + } else + throw engine_error() << fmt::format( + "Could not expand non-existing host group '{}'", hg_name); + } + } + + /* A set of all the pairs (hostname, service-description) impacted by this + * escalation. 
*/ + absl::flat_hash_set> expanded; + for (auto& hn : host_names) { + for (auto& sn : se.service_description().data()) + expanded.emplace(hn, sn); + } + + for (auto& sg_name : se.servicegroups().data()) { + auto found = + std::find_if(s.servicegroups().begin(), s.servicegroups().end(), + [&sg_name](const Servicegroup& sg) { + return sg.servicegroup_name() == sg_name; + }); + if (found == s.servicegroups().end()) + throw engine_error() + << fmt::format("Could not resolve service group '{}'", sg_name); + + for (auto& m : found->members().data()) + expanded.emplace(m.first(), m.second()); + } + se.mutable_hostgroups()->clear_data(); + se.mutable_hosts()->clear_data(); + se.mutable_servicegroups()->clear_data(); + se.mutable_service_description()->clear_data(); + for (auto& p : expanded) { + resolved.emplace_back(std::make_unique()); + auto& e = resolved.back(); + e->CopyFrom(se); + fill_string_group(e->mutable_hosts(), p.first); + fill_string_group(e->mutable_service_description(), p.second); + } + } + s.clear_serviceescalations(); + for (auto& e : resolved) + s.mutable_serviceescalations()->AddAllocated(e.release()); +} +#endif +#ifdef LEGACY_CONF /** * @brief Modify service escalation. * @@ -154,7 +288,26 @@ void applier::serviceescalation::modify_object( << "or removed, this is likely a software bug that you should " << "report to Centreon Engine developers"; } +#else +/** + * @brief Modify service escalation. + * + * Service escalations cannot be defined with anything else than their + * full content. Therefore no modification can occur. + * + * @param[in] obj Unused. 
+ */ +void applier::serviceescalation::modify_object( + configuration::Serviceescalation* old_obj [[maybe_unused]], + const configuration::Serviceescalation& new_obj [[maybe_unused]]) { + throw engine_error() + << "Could not modify a service escalation: service escalation objects " + "can only be added or removed, this is likely a software bug that you " + "should report to Centreon Engine developers"; +} +#endif +#ifdef LEGACY_CONF /** * Remove old service escalation. * @@ -232,7 +385,75 @@ void applier::serviceescalation::remove_object( /* And we clear the configuration */ config->serviceescalations().erase(obj); } +#else +/** + * Remove old service escalation. + * + * @param[in] obj The service escalation to remove from the monitoring + * engine. + */ +void applier::serviceescalation::remove_object(ssize_t idx) { + // Logging. + config_logger->debug("Removing a service escalation."); + + configuration::Serviceescalation& obj = + pb_config.mutable_serviceescalations()->at(idx); + // Find service escalation. + const std::string& host_name{obj.hosts().data()[0]}; + const std::string& description{obj.service_description().data()[0]}; + /* Let's get a range of escalations for the concerned service */ + auto range{engine::serviceescalation::serviceescalations.equal_range( + {host_name, description})}; + bool service_exists; + + /* Let's get the service... */ + service_map::iterator sit{ + engine::service::services.find({host_name, description})}; + /* ... and its escalations */ + if (sit == engine::service::services.end()) { + config_logger->debug("Cannot find service '{}/{}' - already removed.", + host_name, description); + service_exists = false; + } else + service_exists = true; + + size_t key = serviceescalation_key(obj); + for (serviceescalation_mmap::iterator it = range.first, end = range.second; + it != end; ++it) { + if (it->second->internal_key() == key) { + // We have the serviceescalation to remove. + + // Notify event broker. 
+ timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_escalation_data(NEBTYPE_SERVICEESCALATION_DELETE, + NEBFLAG_NONE, NEBATTR_NONE, + it->second.get(), &tv); + + if (service_exists) { + config_logger->debug( + "Service '{}/{}' found - removing escalation from it.", host_name, + description); + std::list& srv_escalations = + sit->second->get_escalations(); + /* We need also to remove the escalation from the service */ + srv_escalations.remove_if( + [my_escal = it->second.get()](const escalation* e) { + return e == my_escal; + }); + } + // Remove escalation from the global configuration set. + engine::serviceescalation::serviceescalations.erase(it); + break; + } + } + + /* And we clear the configuration */ + pb_config.mutable_serviceescalations()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * Resolve a serviceescalation. * @@ -268,7 +489,42 @@ void applier::serviceescalation::resolve_object( if (!found) throw engine_error() << "Cannot resolve non-existing service escalation"; } +#else +/** + * Resolve a serviceescalation. + * + * @param[in] obj Serviceescalation object. + */ +void applier::serviceescalation::resolve_object( + const configuration::Serviceescalation& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving a service escalation."); + + // Find service escalation + bool found = false; + const std::string& hostname{obj.hosts().data(0)}; + const std::string& desc{obj.service_description().data(0)}; + auto p = engine::serviceescalation::serviceescalations.equal_range( + {hostname, desc}); + if (p.first == p.second) + throw engine_error() << "Cannot find service escalations concerning host '" + << hostname << "' and service '" << desc << "'"; + size_t key = configuration::serviceescalation_key(obj); + for (serviceescalation_mmap::iterator it = p.first; it != p.second; ++it) { + if (it->second->internal_key() == key) { + found = true; + // Resolve service escalation. 
+ it->second->resolve(err.config_warnings, err.config_errors); + break; + } + } + if (!found) + throw engine_error() << "Cannot resolve non-existing service escalation"; +} +#endif +#ifdef LEGACY_CONF /** * Expand services. * @@ -329,7 +585,9 @@ void applier::serviceescalation::_expand_services( expanded.insert(*it_member); } } +#endif +#ifdef LEGACY_CONF /** * Inherits special variables from the service. * @@ -361,3 +619,4 @@ void applier::serviceescalation::_inherits_special_vars( obj.escalation_period(it->notification_period()); } } +#endif diff --git a/engine/src/configuration/applier/servicegroup.cc b/engine/src/configuration/applier/servicegroup.cc index 8f8e42bf855..53075d9375c 100644 --- a/engine/src/configuration/applier/servicegroup.cc +++ b/engine/src/configuration/applier/servicegroup.cc @@ -59,6 +59,7 @@ applier::servicegroup& applier::servicegroup::operator=( return (*this); } +#ifdef LEGACY_CONF /** * Add new servicegroup. * @@ -95,7 +96,42 @@ void applier::servicegroup::add_object(configuration::servicegroup const& obj) { // Notify event broker. broker_group(NEBTYPE_SERVICEGROUP_ADD, sg.get()); } +#else +/** + * @brief Add a new Service group given as a Protobuf object. + * + * @param obj The new service group to add into the monitoring engine. + */ +void applier::servicegroup::add_object(const configuration::Servicegroup& obj) { + // Logging. + config_logger->debug("Creating new servicegroup '{}'", + obj.servicegroup_name()); + + // Add service group to the global configuration set. + auto* new_obj = pb_config.add_servicegroups(); + new_obj->CopyFrom(obj); + + // Create servicegroup. + auto sg = std::make_shared( + obj.servicegroup_id(), obj.servicegroup_name(), obj.alias(), obj.notes(), + obj.notes_url(), obj.action_url()); + + // Add new items to the list. + engine::servicegroup::servicegroups.insert({sg->get_group_name(), sg}); + + // Add servicegroup id to the other props. + sg->set_id(obj.servicegroup_id()); + + // Notify event broker. 
+ broker_group(NEBTYPE_SERVICEGROUP_ADD, sg.get()); + + // Apply resolved services on servicegroup. + for (auto& m : obj.members().data()) + sg->members[{m.first(), m.second()}] = nullptr; +} +#endif +#ifdef LEGACY_CONF /** * Expand all service groups. * @@ -116,7 +152,37 @@ void applier::servicegroup::expand_objects(configuration::state& s) { it != end; ++it) s.servicegroups().insert(it->second); } +#else +/** + * Expand all service groups. + * + * @param[in,out] s State being applied. + */ +void applier::servicegroup::expand_objects(configuration::State& s) { + // This set stores resolved service groups. + absl::flat_hash_set resolved; + + // Here, we store each Servicegroup pointer by its name. + absl::flat_hash_map + sg_by_name; + for (auto& sg_conf : *s.mutable_servicegroups()) + sg_by_name[sg_conf.servicegroup_name()] = &sg_conf; + + // Each servicegroup can contain servicegroups, that is to mean the services + // in the sub servicegroups are also in our servicegroup. + // So, we iterate through all the servicegroups defined in the configuration, + // and for each one if it has servicegroup members, we fill its service + // members with theirs and then we clear the servicegroup members. At that + // step, a servicegroup is considered as resolved. + for (auto& sg_conf : *s.mutable_servicegroups()) { + if (!resolved.contains(sg_conf.servicegroup_name())) { + _resolve_members(s, &sg_conf, resolved, sg_by_name); + } + } +} +#endif +#ifdef LEGACY_CONF /** * Modify servicegroup. * @@ -179,7 +245,62 @@ void applier::servicegroup::modify_object( // Notify event broker. broker_group(NEBTYPE_SERVICEGROUP_UPDATE, sg); } +#else +/** + * Modify servicegroup. + * + * @param[in] obj The new servicegroup to modify into the monitoring + * engine. + */ +void applier::servicegroup::modify_object( + configuration::Servicegroup* to_modify, + const configuration::Servicegroup& new_object) { + // Logging. 
+ config_logger->debug("Modifying servicegroup '{}'", + to_modify->servicegroup_name()); + + // Find service group object. + servicegroup_map::iterator it_obj = + engine::servicegroup::servicegroups.find(to_modify->servicegroup_name()); + + if (it_obj == engine::servicegroup::servicegroups.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing service group object '{}'", + to_modify->servicegroup_name()); + engine::servicegroup* sg = it_obj->second.get(); + + // Modify properties. + sg->set_id(new_object.servicegroup_id()); + sg->set_action_url(new_object.action_url()); + sg->set_alias(new_object.alias().empty() ? new_object.servicegroup_name() + : new_object.alias()); + sg->set_notes(new_object.notes()); + sg->set_notes_url(new_object.notes_url()); + // Were members modified ? + if (!MessageDifferencer::Equals(new_object.members(), to_modify->members())) { + // Delete all old service group members. + for (service_map_unsafe::iterator it = it_obj->second->members.begin(), + end = it_obj->second->members.end(); + it != end; ++it) { + broker_group_member(NEBTYPE_SERVICEGROUPMEMBER_DELETE, it->second, sg); + } + it_obj->second->members.clear(); + + // Create new service group members. + for (auto& m : new_object.members().data()) + sg->members[{m.first(), m.second()}] = nullptr; + } + + // Update the global configuration set. + to_modify->CopyFrom(new_object); + + // Notify event broker. + broker_group(NEBTYPE_SERVICEGROUP_UPDATE, sg); +} +#endif + +#ifdef LEGACY_CONF /** * Remove old servicegroup. * @@ -207,7 +328,34 @@ void applier::servicegroup::remove_object( // Remove service group from the global configuration state. config->servicegroups().erase(obj); } +#else +/** + * Remove old servicegroup. + * + * @param[in] idw Index of the servicegroup to remove in the configuration. + */ +void applier::servicegroup::remove_object(ssize_t idx) { + // Logging. 
+  auto obj = pb_config.servicegroups(idx);
+  config_logger->debug("Removing servicegroup '{}'", obj.servicegroup_name());
+  // Find service group.
+  servicegroup_map::iterator it =
+      engine::servicegroup::servicegroups.find(obj.servicegroup_name());
+  if (it != engine::servicegroup::servicegroups.end()) {
+    // Notify event broker.
+    broker_group(NEBTYPE_SERVICEGROUP_DELETE, it->second.get());
+
+    // Remove service group from its list.
+    engine::servicegroup::servicegroups.erase(it);
+  }
+
+  // Remove service group from the global configuration state.
+  pb_config.mutable_servicegroups()->DeleteSubrange(idx, 1);
+}
+#endif
+
+#ifdef LEGACY_CONF
 /**
  *  Resolve a servicegroup.
  *
@@ -231,7 +379,27 @@ void applier::servicegroup::resolve_object(
   // Resolve service group.
   it->second->resolve(err.config_warnings, err.config_errors);
 }
+#else
+void applier::servicegroup::resolve_object(
+    const configuration::Servicegroup& obj,
+    error_cnt& err) {
+  // Logging.
+  config_logger->debug("Resolving service group '{}'", obj.servicegroup_name());
+
+  // Find service group.
+  servicegroup_map::const_iterator it =
+      engine::servicegroup::servicegroups.find(obj.servicegroup_name());
+  if (it == engine::servicegroup::servicegroups.end())
+    throw engine_error() << fmt::format(
+        "Cannot resolve non-existing service group '{}'",
+        obj.servicegroup_name());
+
+  // Resolve service group.
+  it->second->resolve(err.config_warnings, err.config_errors);
+}
+#endif
+#ifdef LEGACY_CONF
 /**
  *  Resolve members of a service group.
  *
@@ -279,3 +447,41 @@ void applier::servicegroup::_resolve_members(
     }
   }
 }
+#else
+/**
+ * @brief Resolve the servicegroup sg_conf, so for each of its servicegroup
+ * member, we get all its service members and copy them into sg_conf.
+ * Once this done, resolved is completed with the name of sg_conf.
+ *
+ * @param s The full configuration for this poller.
+ * @param sg_conf The servicegroup configuration to resolve.
+ * @param resolved The set of servicegroup configurations already resolved. + * @param sg_by_name A const table of servicegroup configurations indexed by + * their name. + */ +void applier::servicegroup::_resolve_members( + configuration::State& s, + configuration::Servicegroup* sg_conf, + absl::flat_hash_set& resolved, + const absl::flat_hash_map& + sg_by_name) { + for (auto& sgm : sg_conf->servicegroup_members().data()) { + configuration::Servicegroup* sgm_conf = + sg_by_name.at(std::string_view(sgm)); + if (sgm_conf == nullptr) + throw engine_error() << fmt::format( + "Could not add non-existing service group member '{}' to service " + "group '{}'", + sgm, sg_conf->servicegroup_name()); + if (!resolved.contains(sgm_conf->servicegroup_name())) + _resolve_members(s, sgm_conf, resolved, sg_by_name); + + for (auto& sm : sgm_conf->members().data()) { + fill_pair_string_group(sg_conf->mutable_members(), sm.first(), + sm.second()); + } + } + sg_conf->clear_servicegroup_members(); + resolved.emplace(sg_conf->servicegroup_name()); +} +#endif diff --git a/engine/src/configuration/applier/severity.cc b/engine/src/configuration/applier/severity.cc index fd3018fc8c2..20afd0ac748 100644 --- a/engine/src/configuration/applier/severity.cc +++ b/engine/src/configuration/applier/severity.cc @@ -18,18 +18,25 @@ */ #include "com/centreon/engine/configuration/applier/severity.hh" - #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/config.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/severity.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/severity.hh" +#else +#include +#endif using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; +#ifndef LEGACY_CONF +using MessageDifferencer = ::google::protobuf::util::MessageDifferencer; +#endif +#ifdef LEGACY_CONF /** * Add new severity. 
* @@ -55,7 +62,36 @@ void applier::severity::add_object(const configuration::severity& obj) { broker_adaptive_severity_data(NEBTYPE_SEVERITY_ADD, sv.get()); } +#else +/** + * @brief Add new severity. + * + * @param obj The new severity to add into the monitoring engine. + */ +void applier::severity::add_object(const configuration::Severity& obj) { + // Logging. + config_logger->debug("Creating new severity ({}, {}).", obj.key().id(), + obj.key().type()); + + // Add severity to the global configuration set. + auto* new_sv = pb_config.add_severities(); + new_sv->CopyFrom(obj); + auto sv{std::make_shared(obj.key().id(), obj.level(), + obj.icon_id(), obj.severity_name(), + obj.key().type())}; + if (!sv) + throw engine_error() << fmt::format("Could not register severity ({},{})", + obj.key().id(), obj.key().type()); + + // Add new items to the configuration state. + engine::severity::severities.insert({{obj.key().id(), obj.key().type()}, sv}); + + broker_adaptive_severity_data(NEBTYPE_SEVERITY_ADD, sv.get()); +} +#endif + +#ifdef LEGACY_CONF /** * @brief Expand a contact. * @@ -65,7 +101,9 @@ void applier::severity::add_object(const configuration::severity& obj) { * @param[in,out] s Configuration state. */ void applier::severity::expand_objects(configuration::state&) {} +#endif +#ifdef LEGACY_CONF /** * Modify severity. * @@ -106,7 +144,53 @@ void applier::severity::modify_object(const configuration::severity& obj) { config_logger->debug("Severity ({}, {}) did not change", obj.key().first, obj.key().second); } +#else +/** + * @brief Modify severity. + * + * @param to_modify A pointer to the current configuration of a severity. + * @param new_object The new configuration to apply. + */ +void applier::severity::modify_object( + configuration::Severity* to_modify, + const configuration::Severity& new_object) { + // Logging. + config_logger->debug("Modifying severity ({}, {}).", new_object.key().id(), + new_object.key().type()); + + // Find severity object. 
+ severity_map::iterator it_obj = engine::severity::severities.find( + {new_object.key().id(), new_object.key().type()}); + if (it_obj == engine::severity::severities.end()) + throw engine_error() << fmt::format( + "Could not modify non-existing severity object ({}, {})", + new_object.key().id(), new_object.key().type()); + engine::severity* s = it_obj->second.get(); + + // Update the global configuration set. + if (!MessageDifferencer::Equals(*to_modify, new_object)) { + if (to_modify->severity_name() != new_object.severity_name()) { + s->set_name(new_object.severity_name()); + to_modify->set_severity_name(new_object.severity_name()); + } + if (to_modify->level() != new_object.level()) { + s->set_level(new_object.level()); + to_modify->set_level(new_object.level()); + } + if (to_modify->icon_id() != new_object.icon_id()) { + s->set_icon_id(new_object.icon_id()); + to_modify->set_icon_id(new_object.icon_id()); + } + + // Notify event broker. + broker_adaptive_severity_data(NEBTYPE_SEVERITY_UPDATE, s); + } else + config_logger->debug("Severity ({}, {}) did not change", + new_object.key().id(), new_object.key().type()); +} +#endif +#ifdef LEGACY_CONF /** * Remove old severity. * @@ -132,7 +216,40 @@ void applier::severity::remove_object(const configuration::severity& obj) { // Remove severity from the global configuration set. config->mut_severities().erase(obj); } +#else +/** + * @brief Remove old severity at index idx. + * + * @param idx The index of the object to remove. + */ +void applier::severity::remove_object(ssize_t idx) { + const configuration::Severity& obj = pb_config.severities()[idx]; + + // Logging. + + config_logger->debug("Removing severity ({}, {}).", obj.key().id(), + obj.key().type()); + + // Find severity. + severity_map::iterator it = + engine::severity::severities.find({obj.key().id(), obj.key().type()}); + + if (it != engine::severity::severities.end()) { + engine::severity* sv = it->second.get(); + + // Notify event broker. 
+ broker_adaptive_severity_data(NEBTYPE_SEVERITY_DELETE, sv); + + // Erase severity object (this will effectively delete the object). + engine::severity::severities.erase(it); + } + + // Remove severity from the global configuration set. + pb_config.mutable_severities()->DeleteSubrange(idx, 1); +} +#endif +#ifdef LEGACY_CONF /** * Resolve a severity. * @@ -146,3 +263,4 @@ void applier::severity::resolve_object(const configuration::severity& obj) { << fmt::format("({}, {})", obj.key().first, obj.key().second); } +#endif diff --git a/engine/src/configuration/applier/state.cc b/engine/src/configuration/applier/state.cc index 15453d1912e..8cfd81d0e0f 100644 --- a/engine/src/configuration/applier/state.cc +++ b/engine/src/configuration/applier/state.cc @@ -50,7 +50,9 @@ #include "com/centreon/engine/retention/applier/state.hh" #include "com/centreon/engine/version.hh" #include "com/centreon/engine/xsddefault.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/command.hh" +#endif #include "common/log_v2/log_v2.hh" using namespace com::centreon; @@ -62,6 +64,7 @@ using com::centreon::engine::logging::broker_sink_mt; static bool has_already_been_loaded(false); +#ifdef LEGACY_CONF /** * Apply new configuration. * @@ -96,6 +99,38 @@ void applier::state::apply(configuration::state& new_cfg, } } } +#else +/** + * Apply new protobuf configuration. + * + * @param[in] new_cfg The new protobuf configuration. + * @param[in] state The retention to use. + */ +void applier::state::apply(configuration::State& new_cfg, + error_cnt& err, + retention::state* state) { + configuration::State save; + save.CopyFrom(pb_config); + try { + _processing_state = state_ready; + _processing(new_cfg, err, state); + } catch (const std::exception& e) { + // If is the first time to load configuration, we don't + // have a valid configuration to restore. + if (!has_already_been_loaded) + throw; + + // If is not the first time, we can restore the old one. 
+ config_logger->error("Cannot apply new configuration: {}", e.what()); + + // Check if we need to restore old configuration. + if (_processing_state == state_error) { + config_logger->debug("configuration: try to restore old configuration"); + _processing(save, err, state); + } + } +} +#endif /** * Get the singleton instance of state applier. @@ -133,13 +168,21 @@ void applier::state::clear() { applier::logging::instance().clear(); _processing_state = state_ready; +#ifdef LEGACY_CONF _config = nullptr; +#endif } /** * Default constructor. */ -applier::state::state() : _config(nullptr), _processing_state(state_ready) {} +applier::state::state() + : +#ifdef LEGACY_CONF + _config(nullptr), +#endif + _processing_state(state_ready) { +} /** * Destructor. @@ -202,6 +245,7 @@ void applier::state::unlock() { _apply_lock.unlock(); } +#ifdef LEGACY_CONF /* * Update all new globals. * @@ -378,6 +422,7 @@ void applier::state::_apply(configuration::state const& new_cfg, config->log_level_comments(new_cfg.log_level_comments()); config->log_level_macros(new_cfg.log_level_macros()); config->log_level_otl(new_cfg.log_level_otl()); + config->log_level_runtime(new_cfg.log_level_runtime()); config->use_true_regexp_matching(new_cfg.use_true_regexp_matching()); config->use_send_recovery_notifications_anyways( new_cfg.use_send_recovery_notifications_anyways()); @@ -487,7 +532,295 @@ void applier::state::_apply(configuration::state const& new_cfg, ochp_command_ptr = found->second.get(); } } +#else +/* + * Update all new globals. + * + * @param[in] new_cfg The new configuration state. + */ +void applier::state::_apply(const configuration::State& new_cfg, + error_cnt& err) { + // Check variables should not be change after the first execution. 
+ if (has_already_been_loaded) { + if (!std::equal( + pb_config.broker_module().begin(), pb_config.broker_module().end(), + new_cfg.broker_module().begin(), new_cfg.broker_module().end())) { + config_logger->warn( + "Warning: Broker modules cannot be changed nor reloaded"); + ++err.config_warnings; + } + if (pb_config.broker_module_directory() != + new_cfg.broker_module_directory()) { + config_logger->warn("Warning: Broker module directory cannot be changed"); + ++err.config_warnings; + } + if (pb_config.command_file() != new_cfg.command_file()) { + config_logger->warn("Warning: Command file cannot be changed"); + ++err.config_warnings; + } + if (pb_config.external_command_buffer_slots() != + new_cfg.external_command_buffer_slots()) { + config_logger->warn( + "Warning: External command buffer slots cannot be changed"); + ++err.config_warnings; + } + if (pb_config.use_timezone() != new_cfg.use_timezone()) { + config_logger->warn("Warning: Timezone can not be changed"); + ++err.config_warnings; + } + } + + // Initialize status file. + bool modify_status(false); + if (!has_already_been_loaded || + pb_config.status_file() != new_cfg.status_file()) + modify_status = true; + + // Cleanup. + // if (modify_perfdata) + // xpddefault_cleanup_performance_data(); + if (modify_status) + xsddefault_cleanup_status_data(true); + + // Set new values. 
+ pb_config.set_accept_passive_host_checks( + new_cfg.accept_passive_host_checks()); + pb_config.set_accept_passive_service_checks( + new_cfg.accept_passive_service_checks()); + pb_config.set_additional_freshness_latency( + new_cfg.additional_freshness_latency()); + pb_config.set_admin_email(new_cfg.admin_email()); + pb_config.set_admin_pager(new_cfg.admin_pager()); + pb_config.set_allow_empty_hostgroup_assignment( + new_cfg.allow_empty_hostgroup_assignment()); + pb_config.set_auto_reschedule_checks(new_cfg.auto_reschedule_checks()); + pb_config.set_auto_rescheduling_interval( + new_cfg.auto_rescheduling_interval()); + pb_config.set_auto_rescheduling_window(new_cfg.auto_rescheduling_window()); + pb_config.set_cached_host_check_horizon(new_cfg.cached_host_check_horizon()); + pb_config.set_cached_service_check_horizon( + new_cfg.cached_service_check_horizon()); + pb_config.set_cfg_main(new_cfg.cfg_main()); + pb_config.set_check_external_commands(new_cfg.check_external_commands()); + pb_config.set_check_host_freshness(new_cfg.check_host_freshness()); + pb_config.set_check_orphaned_hosts(new_cfg.check_orphaned_hosts()); + pb_config.set_check_orphaned_services(new_cfg.check_orphaned_services()); + pb_config.set_check_reaper_interval(new_cfg.check_reaper_interval()); + pb_config.set_check_service_freshness(new_cfg.check_service_freshness()); + pb_config.set_command_check_interval(new_cfg.command_check_interval()); + pb_config.set_command_check_interval_is_seconds( + new_cfg.command_check_interval_is_seconds()); + pb_config.set_date_format(new_cfg.date_format()); + pb_config.set_debug_file(new_cfg.debug_file()); + pb_config.set_debug_level(new_cfg.debug_level()); + pb_config.set_debug_verbosity(new_cfg.debug_verbosity()); + pb_config.set_enable_environment_macros(new_cfg.enable_environment_macros()); + pb_config.set_enable_event_handlers(new_cfg.enable_event_handlers()); + pb_config.set_enable_flap_detection(new_cfg.enable_flap_detection()); + 
pb_config.set_enable_notifications(new_cfg.enable_notifications()); + pb_config.set_enable_predictive_host_dependency_checks( + new_cfg.enable_predictive_host_dependency_checks()); + pb_config.set_enable_predictive_service_dependency_checks( + new_cfg.enable_predictive_service_dependency_checks()); + pb_config.set_event_broker_options(new_cfg.event_broker_options()); + pb_config.set_event_handler_timeout(new_cfg.event_handler_timeout()); + pb_config.set_execute_host_checks(new_cfg.execute_host_checks()); + pb_config.set_execute_service_checks(new_cfg.execute_service_checks()); + pb_config.set_global_host_event_handler(new_cfg.global_host_event_handler()); + pb_config.set_global_service_event_handler( + new_cfg.global_service_event_handler()); + pb_config.set_high_host_flap_threshold(new_cfg.high_host_flap_threshold()); + pb_config.set_high_service_flap_threshold( + new_cfg.high_service_flap_threshold()); + pb_config.set_host_check_timeout(new_cfg.host_check_timeout()); + pb_config.set_host_freshness_check_interval( + new_cfg.host_freshness_check_interval()); + pb_config.mutable_host_inter_check_delay_method()->CopyFrom( + new_cfg.host_inter_check_delay_method()); + pb_config.set_illegal_object_chars(new_cfg.illegal_object_chars()); + pb_config.set_illegal_output_chars(new_cfg.illegal_output_chars()); + pb_config.set_interval_length(new_cfg.interval_length()); + pb_config.set_log_event_handlers(new_cfg.log_event_handlers()); + pb_config.set_log_external_commands(new_cfg.log_external_commands()); + pb_config.set_log_file(new_cfg.log_file()); + pb_config.set_log_host_retries(new_cfg.log_host_retries()); + pb_config.set_log_notifications(new_cfg.log_notifications()); + pb_config.set_log_passive_checks(new_cfg.log_passive_checks()); + pb_config.set_log_service_retries(new_cfg.log_service_retries()); + pb_config.set_low_host_flap_threshold(new_cfg.low_host_flap_threshold()); + pb_config.set_low_service_flap_threshold( + new_cfg.low_service_flap_threshold()); + 
pb_config.set_max_debug_file_size(new_cfg.max_debug_file_size()); + pb_config.set_max_host_check_spread(new_cfg.max_host_check_spread()); + pb_config.set_max_log_file_size(new_cfg.max_log_file_size()); + pb_config.set_max_parallel_service_checks( + new_cfg.max_parallel_service_checks()); + pb_config.set_max_service_check_spread(new_cfg.max_service_check_spread()); + pb_config.set_notification_timeout(new_cfg.notification_timeout()); + pb_config.set_obsess_over_hosts(new_cfg.obsess_over_hosts()); + pb_config.set_obsess_over_services(new_cfg.obsess_over_services()); + pb_config.set_ochp_command(new_cfg.ochp_command()); + pb_config.set_ochp_timeout(new_cfg.ochp_timeout()); + pb_config.set_ocsp_command(new_cfg.ocsp_command()); + pb_config.set_ocsp_timeout(new_cfg.ocsp_timeout()); + pb_config.set_perfdata_timeout(new_cfg.perfdata_timeout()); + pb_config.set_process_performance_data(new_cfg.process_performance_data()); + pb_config.mutable_resource_file()->CopyFrom(new_cfg.resource_file()); + pb_config.set_retain_state_information(new_cfg.retain_state_information()); + pb_config.set_retained_contact_host_attribute_mask( + new_cfg.retained_contact_host_attribute_mask()); + pb_config.set_retained_contact_service_attribute_mask( + new_cfg.retained_contact_service_attribute_mask()); + pb_config.set_retained_host_attribute_mask( + new_cfg.retained_host_attribute_mask()); + pb_config.set_retained_process_host_attribute_mask( + new_cfg.retained_process_host_attribute_mask()); + pb_config.set_retention_scheduling_horizon( + new_cfg.retention_scheduling_horizon()); + pb_config.set_retention_update_interval(new_cfg.retention_update_interval()); + pb_config.set_service_check_timeout(new_cfg.service_check_timeout()); + pb_config.set_service_freshness_check_interval( + new_cfg.service_freshness_check_interval()); + pb_config.mutable_service_inter_check_delay_method()->CopyFrom( + new_cfg.service_inter_check_delay_method()); + 
pb_config.mutable_service_interleave_factor_method()->CopyFrom( + new_cfg.service_interleave_factor_method()); + pb_config.set_sleep_time(new_cfg.sleep_time()); + pb_config.set_soft_state_dependencies(new_cfg.soft_state_dependencies()); + pb_config.set_state_retention_file(new_cfg.state_retention_file()); + pb_config.set_status_file(new_cfg.status_file()); + pb_config.set_status_update_interval(new_cfg.status_update_interval()); + pb_config.set_time_change_threshold(new_cfg.time_change_threshold()); + pb_config.set_use_large_installation_tweaks( + new_cfg.use_large_installation_tweaks()); + pb_config.set_instance_heartbeat_interval( + new_cfg.instance_heartbeat_interval()); + pb_config.set_use_regexp_matches(new_cfg.use_regexp_matches()); + pb_config.set_use_retained_program_state( + new_cfg.use_retained_program_state()); + pb_config.set_use_retained_scheduling_info( + new_cfg.use_retained_scheduling_info()); + pb_config.set_use_setpgid(new_cfg.use_setpgid()); + pb_config.set_use_syslog(new_cfg.use_syslog()); + pb_config.set_log_v2_enabled(new_cfg.log_v2_enabled()); + pb_config.set_log_legacy_enabled(new_cfg.log_legacy_enabled()); + pb_config.set_log_v2_logger(new_cfg.log_v2_logger()); + pb_config.set_log_level_functions(new_cfg.log_level_functions()); + pb_config.set_log_level_config(new_cfg.log_level_config()); + pb_config.set_log_level_events(new_cfg.log_level_events()); + pb_config.set_log_level_checks(new_cfg.log_level_checks()); + pb_config.set_log_level_notifications(new_cfg.log_level_notifications()); + pb_config.set_log_level_eventbroker(new_cfg.log_level_eventbroker()); + pb_config.set_log_level_external_command( + new_cfg.log_level_external_command()); + pb_config.set_log_level_commands(new_cfg.log_level_commands()); + pb_config.set_log_level_downtimes(new_cfg.log_level_downtimes()); + pb_config.set_log_level_comments(new_cfg.log_level_comments()); + pb_config.set_log_level_macros(new_cfg.log_level_macros()); + 
pb_config.set_log_level_otl(new_cfg.log_level_otl()); + pb_config.set_log_level_runtime(new_cfg.log_level_runtime()); + pb_config.set_use_true_regexp_matching(new_cfg.use_true_regexp_matching()); + pb_config.set_send_recovery_notifications_anyways( + new_cfg.send_recovery_notifications_anyways()); + pb_config.set_host_down_disable_service_checks( + new_cfg.host_down_disable_service_checks()); + pb_config.clear_user(); + for (auto& p : new_cfg.user()) + pb_config.mutable_user()->at(p.first) = p.second; + + // Set this variable just the first time. + if (!has_already_been_loaded) { + pb_config.mutable_broker_module()->CopyFrom(new_cfg.broker_module()); + pb_config.set_broker_module_directory(new_cfg.broker_module_directory()); + pb_config.set_command_file(new_cfg.command_file()); + pb_config.set_external_command_buffer_slots( + new_cfg.external_command_buffer_slots()); + pb_config.set_use_timezone(new_cfg.use_timezone()); + } + + // Initialize. + if (modify_status) + xsddefault_initialize_status_data(); + + // Check global event handler commands... + if (verify_config) { + events_logger->info("Checking global event handlers..."); + } + if (!pb_config.global_host_event_handler().empty()) { + // Check the event handler command. + std::string temp_command_name(pb_config.global_host_event_handler().substr( + 0, pb_config.global_host_event_handler().find_first_of('!'))); + command_map::iterator found{ + commands::command::commands.find(temp_command_name)}; + if (found == commands::command::commands.end() || !found->second) { + config_logger->error( + "Error: Global host event handler command '{}' is not defined " + "anywhere!", + temp_command_name); + ++err.config_errors; + global_host_event_handler_ptr = nullptr; + } else + global_host_event_handler_ptr = found->second.get(); + } + if (!pb_config.global_service_event_handler().empty()) { + // Check the event handler command. 
+ std::string temp_command_name( + pb_config.global_service_event_handler().substr( + 0, pb_config.global_service_event_handler().find_first_of('!'))); + command_map::iterator found{ + commands::command::commands.find(temp_command_name)}; + if (found == commands::command::commands.end() || !found->second) { + config_logger->error( + "Error: Global service event handler command '{}' is not defined " + "anywhere!", + temp_command_name); + ++err.config_errors; + global_service_event_handler_ptr = nullptr; + } else + global_service_event_handler_ptr = found->second.get(); + } + + // Check obsessive processor commands... + if (verify_config) { + events_logger->info("Checking obsessive compulsive processor commands..."); + } + if (!pb_config.ocsp_command().empty()) { + std::string temp_command_name(pb_config.ocsp_command().substr( + 0, pb_config.ocsp_command().find_first_of('!'))); + command_map::iterator found{ + commands::command::commands.find(temp_command_name)}; + if (found == commands::command::commands.end() || !found->second) { + engine_logger(log_verification_error, basic) + << "Error: Obsessive compulsive service processor command '" + << temp_command_name << "' is not defined anywhere!"; + config_logger->error( + "Error: Obsessive compulsive service processor command '{}' is not " + "defined anywhere!", + temp_command_name); + ++err.config_errors; + ocsp_command_ptr = nullptr; + } else + ocsp_command_ptr = found->second.get(); + } + if (!pb_config.ochp_command().empty()) { + std::string temp_command_name(pb_config.ochp_command().substr( + 0, pb_config.ochp_command().find_first_of('!'))); + command_map::iterator found{ + commands::command::commands.find(temp_command_name)}; + if (found == commands::command::commands.end() || !found->second) { + config_logger->error( + "Error: Obsessive compulsive host processor command '{}' is not " + "defined anywhere!", + temp_command_name); + ++err.config_errors; + ochp_command_ptr = nullptr; + } else + ochp_command_ptr = 
found->second.get(); + } +} +#endif +#ifdef LEGACY_CONF /** * @brief Apply configuration of a specific object type. * @@ -500,9 +833,8 @@ void applier::state::_apply(configuration::state const& new_cfg, * @param[in] new_cfg New configuration set. */ template -void applier::state::_apply( - difference > const& diff, - error_cnt& err) { +void applier::state::_apply(difference> const& diff, + error_cnt& err) { // Type alias. typedef std::set cfg_set; @@ -564,6 +896,71 @@ void applier::state::_apply( } } } +#else +/** + * @brief Apply protobuf configuration of a specific object type. + * + * This method will perform a diff on cur_cfg and new_cfg to create the + * three element sets : added, modified and removed. The type applier + * will then be called to: + * * 1) modify existing objects (the modification must be done in first since + * remove and create changes indices). + * * 2) remove old objects + * * 3) create new objects + * + * @param[in] cur_cfg Current configuration set. + * @param[in] new_cfg New configuration set. + */ +template +void applier::state::_apply(const pb_difference& diff, + error_cnt& err) { + // Applier. + ApplierType aplyr; + + // Modify objects. + for (auto& p : diff.modified()) { + if (!verify_config) + aplyr.modify_object(p.first, *p.second); + else { + try { + aplyr.modify_object(p.first, *p.second); + } catch (const std::exception& e) { + ++err.config_errors; + events_logger->info(e.what()); + } + } + } + + // Erase objects. + for (auto it = diff.deleted().rbegin(); it != diff.deleted().rend(); ++it) { + ssize_t idx = it->first; + if (!verify_config) + aplyr.remove_object(idx); + else { + try { + aplyr.remove_object(idx); + } catch (const std::exception& e) { + ++err.config_errors; + events_logger->info(e.what()); + } + } + } + + // Add objects. 
+ for (auto& obj : diff.added()) { + if (!verify_config) + aplyr.add_object(*obj); + else { + try { + aplyr.add_object(*obj); + } catch (const std::exception& e) { + ++err.config_errors; + events_logger->info(e.what()); + } + } + } +} +#endif #ifdef DEBUG_CONFIG /** @@ -1057,6 +1454,7 @@ void applier::state::_check_hosts() const { #endif +#ifdef LEGACY_CONF void applier::state::apply_log_config(configuration::state& new_cfg) { /* During the verification, loggers write to stdout. After this step, they * will log as it is written in their configurations. So if we check the @@ -1102,7 +1500,83 @@ void applier::state::apply_log_config(configuration::state& new_cfg) { log_cfg.set_level("macros", new_cfg.log_level_macros()); log_cfg.set_level("process", new_cfg.log_level_process()); log_cfg.set_level("runtime", new_cfg.log_level_runtime()); - log_cfg.set_level("otel", new_cfg.log_level_otl()); + log_cfg.set_level("otl", new_cfg.log_level_otl()); + if (has_already_been_loaded) + log_cfg.allow_only_atomic_changes(true); + log_v2::instance().apply(log_cfg); + } else { + if (!new_cfg.log_file().empty()) + log_type = log_v2_config::logger_type::LOGGER_FILE; + else + log_type = log_v2_config::logger_type::LOGGER_STDOUT; + log_v2_config log_cfg("centengine", log_type, new_cfg.log_flush_period(), + new_cfg.log_pid(), new_cfg.log_file_line()); + if (!new_cfg.log_file().empty()) { + log_cfg.set_log_path(new_cfg.log_file()); + log_cfg.set_max_size(new_cfg.max_log_file_size()); + } + log_v2::instance().apply(log_cfg); + log_v2::instance().disable( + {log_v2::CORE, log_v2::CONFIG, log_v2::PROCESS, log_v2::FUNCTIONS, + log_v2::EVENTS, log_v2::CHECKS, log_v2::NOTIFICATIONS, + log_v2::EVENTBROKER, log_v2::EXTERNAL_COMMAND, log_v2::COMMANDS, + log_v2::DOWNTIMES, log_v2::COMMENTS, log_v2::MACROS, log_v2::RUNTIME, + log_v2::OTL}); + } + init_loggers(); +} +#else +void applier::state::apply_log_config(configuration::State& new_cfg) { + /* During the verification, loggers write to stdout. 
After this step, they + * will log as it is written in their configurations. So if we check the + * configuration, we don't want to change them. */ + if (verify_config || test_scheduling) + return; + + using log_v2_config = com::centreon::common::log_v2::config; + log_v2_config::logger_type log_type; + if (new_cfg.log_v2_enabled()) { + if (new_cfg.log_v2_logger() == "file") { + if (!new_cfg.log_file().empty()) + log_type = log_v2_config::logger_type::LOGGER_FILE; + else + log_type = log_v2_config::logger_type::LOGGER_STDOUT; + } else + log_type = log_v2_config::logger_type::LOGGER_SYSLOG; + + log_v2_config log_cfg("centengine", log_type, new_cfg.log_flush_period(), + new_cfg.log_pid(), new_cfg.log_file_line()); + if (log_type == log_v2_config::logger_type::LOGGER_FILE) { + log_cfg.set_log_path(new_cfg.log_file()); + log_cfg.set_max_size(new_cfg.max_log_file_size()); + } + auto broker_sink = std::make_shared(); + broker_sink->set_level(spdlog::level::info); + log_cfg.add_custom_sink(broker_sink); + + log_cfg.apply_custom_sinks( + {"functions", "config", "events", "checks", "notifications", + "eventbroker", "external_command", "commands", "downtimes", "comments", + "macros", "otl", "process", "runtime"}); + log_cfg.set_level("functions", + LogLevel_Name(new_cfg.log_level_functions())); + log_cfg.set_level("config", LogLevel_Name(new_cfg.log_level_config())); + log_cfg.set_level("events", LogLevel_Name(new_cfg.log_level_events())); + log_cfg.set_level("checks", LogLevel_Name(new_cfg.log_level_checks())); + log_cfg.set_level("notifications", + LogLevel_Name(new_cfg.log_level_notifications())); + log_cfg.set_level("eventbroker", + LogLevel_Name(new_cfg.log_level_eventbroker())); + log_cfg.set_level("external_command", + LogLevel_Name(new_cfg.log_level_external_command())); + log_cfg.set_level("commands", LogLevel_Name(new_cfg.log_level_commands())); + log_cfg.set_level("downtimes", + LogLevel_Name(new_cfg.log_level_downtimes())); + log_cfg.set_level("comments", 
LogLevel_Name(new_cfg.log_level_comments())); + log_cfg.set_level("macros", LogLevel_Name(new_cfg.log_level_macros())); + log_cfg.set_level("otl", LogLevel_Name(new_cfg.log_level_otl())); + log_cfg.set_level("process", LogLevel_Name(new_cfg.log_level_process())); + log_cfg.set_level("runtime", LogLevel_Name(new_cfg.log_level_runtime())); if (has_already_been_loaded) log_cfg.allow_only_atomic_changes(true); log_v2::instance().apply(log_cfg); @@ -1122,11 +1596,14 @@ void applier::state::apply_log_config(configuration::state& new_cfg) { {log_v2::CORE, log_v2::CONFIG, log_v2::PROCESS, log_v2::FUNCTIONS, log_v2::EVENTS, log_v2::CHECKS, log_v2::NOTIFICATIONS, log_v2::EVENTBROKER, log_v2::EXTERNAL_COMMAND, log_v2::COMMANDS, - log_v2::DOWNTIMES, log_v2::COMMENTS, log_v2::MACROS, log_v2::RUNTIME}); + log_v2::DOWNTIMES, log_v2::COMMENTS, log_v2::MACROS, log_v2::RUNTIME, + log_v2::OTL}); } init_loggers(); } +#endif +#ifdef LEGACY_CONF /** * Apply retention. * @@ -1148,9 +1625,33 @@ void applier::state::_apply(configuration::state& new_cfg, } } } - +#else /** - * Expand objects. + * Apply retention. + * + * @param[in] new_cfg New configuration set. + * @param[in] state The retention state to use. + */ +void applier::state::_apply(configuration::State& new_cfg, + retention::state& state, + error_cnt& err) { + retention::applier::state app_state; + if (!verify_config) + app_state.apply(new_cfg, state); + else { + try { + app_state.apply(new_cfg, state); + } catch (std::exception const& e) { + ++err.config_errors; + std::cout << e.what(); + } + } +} +#endif + +#ifdef LEGACY_CONF +/** + * Expand objects. * * @param[in,out] new_state New configuration state. * @param[in,out] cfg Configuration objects. @@ -1168,7 +1669,29 @@ void applier::state::_expand(configuration::state& new_state, error_cnt& err) { throw; } } +#else +/** + * Expand objects. + * + * @param[in,out] new_state New configuration state. + * @param[in,out] cfg Configuration objects. 
+ */ +template +void applier::state::_expand(configuration::State& new_state, error_cnt& err) { + ApplierType aplyr; + try { + aplyr.expand_objects(new_state); + } catch (std::exception const& e) { + if (verify_config) { + ++err.config_errors; + std::cout << e.what(); + } else + throw; + } +} +#endif +#ifdef LEGACY_CONF /** * Process new configuration and apply it. * @@ -1550,7 +2073,429 @@ void applier::state::_processing(configuration::state& new_cfg, has_already_been_loaded = true; _processing_state = state_ready; } +#else +/** + * Process new configuration and apply it. + * + * @param[in] new_cfg The new configuration. + * @param[in] state The retention to use. + */ +void applier::state::_processing(configuration::State& new_cfg, + error_cnt& err, + retention::state* state) { + // Timing. + struct timeval tv[5]; + + // Call prelauch broker event the first time to run applier state. + if (!has_already_been_loaded) + broker_program_state(NEBTYPE_PROCESS_PRELAUNCH, NEBFLAG_NONE); + + // + // Expand all objects. + // + gettimeofday(tv, nullptr); + // Expand timeperiods. + _expand(new_cfg, err); + + // Expand connectors. + _expand(new_cfg, err); + + // Expand commands. + _expand(new_cfg, err); + + // Expand contacts. + _expand(new_cfg, err); + + // Expand contactgroups. + _expand(new_cfg, err); + + // Expand hosts. + _expand(new_cfg, err); + + // Expand hostgroups. + _expand(new_cfg, err); + + // Expand services. + _expand(new_cfg, err); + + // Expand anomalydetections. + _expand(new_cfg, + err); + + // Expand servicegroups. + _expand(new_cfg, err); + + // Expand hostdependencies. + _expand(new_cfg, err); + + // Expand servicedependencies. + _expand(new_cfg, + err); + + // Expand hostescalations. + _expand(new_cfg, err); + + // Expand serviceescalations. + _expand(new_cfg, + err); + + // + // Build difference for all objects. + // + + // Build difference for timeperiods. 
+ pb_difference diff_timeperiods; + google::protobuf::RepeatedPtrField< + ::com::centreon::engine::configuration::Timeperiod> + old = *pb_config.mutable_timeperiods(); + const google::protobuf::RepeatedPtrField< + ::com::centreon::engine::configuration::Timeperiod> + new_conf = new_cfg.timeperiods(); + diff_timeperiods.parse(old, new_conf, + &configuration::Timeperiod::timeperiod_name); + + // Build difference for connectors. + pb_difference diff_connectors; + diff_connectors.parse(*pb_config.mutable_connectors(), new_cfg.connectors(), + &configuration::Connector::connector_name); + + // Build difference for commands. + pb_difference diff_commands; + diff_commands.parse(*pb_config.mutable_commands(), new_cfg.commands(), + &configuration::Command::command_name); + + // Build difference for severities. + pb_difference> + diff_severities; + diff_severities.parse( + *pb_config.mutable_severities(), new_cfg.severities(), + [](const configuration::Severity& sev) -> std::pair { + return std::make_pair(sev.key().id(), sev.key().type()); + }); + + // Build difference for tags. + pb_difference> diff_tags; + diff_tags.parse( + *pb_config.mutable_tags(), new_cfg.tags(), + [](const configuration::Tag& tg) -> std::pair { + return std::make_pair(tg.key().id(), tg.key().type()); + }); + + // Build difference for contacts. + pb_difference diff_contacts; + diff_contacts.parse(*pb_config.mutable_contacts(), new_cfg.contacts(), + &configuration::Contact::contact_name); + + // Build difference for contactgroups. + pb_difference diff_contactgroups; + diff_contactgroups.parse(*pb_config.mutable_contactgroups(), + new_cfg.contactgroups(), + &configuration::Contactgroup::contactgroup_name); + + // Build difference for hosts. + pb_difference diff_hosts; + diff_hosts.parse(*pb_config.mutable_hosts(), new_cfg.hosts(), + &configuration::Host::host_id); + + // Build difference for hostgroups. 
+ pb_difference diff_hostgroups; + diff_hostgroups.parse(*pb_config.mutable_hostgroups(), new_cfg.hostgroups(), + &configuration::Hostgroup::hostgroup_name); + + // Build difference for services. + pb_difference> + diff_services; + diff_services.parse(*pb_config.mutable_services(), new_cfg.services(), + [](const configuration::Service& s) { + return std::make_pair(s.host_id(), s.service_id()); + }); + + // Build difference for anomalydetections. + pb_difference> + diff_anomalydetections; + diff_anomalydetections.parse( + *pb_config.mutable_anomalydetections(), new_cfg.anomalydetections(), + [](const configuration::Anomalydetection& ad) { + return std::make_pair(ad.host_id(), ad.service_id()); + }); + + // Build difference for servicegroups. + pb_difference diff_servicegroups; + diff_servicegroups.parse(*pb_config.mutable_servicegroups(), + new_cfg.servicegroups(), + &configuration::Servicegroup::servicegroup_name); + + // Build difference for hostdependencies. + pb_difference diff_hostdependencies; + typedef size_t (*key_func)(const configuration::Hostdependency&); + diff_hostdependencies.parse(*pb_config.mutable_hostdependencies(), + new_cfg.hostdependencies(), + configuration::hostdependency_key); + + // Build difference for servicedependencies. + pb_difference + diff_servicedependencies; + typedef size_t (*key_func_sd)(const configuration::Servicedependency&); + diff_servicedependencies.parse( + *pb_config.mutable_servicedependencies(), new_cfg.servicedependencies(), + configuration::servicedependency_key); + + // Build difference for hostdependencies. + pb_difference diff_hostescalations; + typedef size_t (*key_func_he)(const configuration::Hostescalation&); + diff_hostescalations.parse(*pb_config.mutable_hostescalations(), + new_cfg.hostescalations(), + configuration::hostescalation_key); + + // Build difference for servicedependencies. 
+ pb_difference + diff_serviceescalations; + typedef size_t (*key_func_se)(const configuration::Serviceescalation&); + diff_serviceescalations.parse( + *pb_config.mutable_serviceescalations(), new_cfg.serviceescalations(), + configuration::serviceescalation_key); + + // Timing. + gettimeofday(tv + 1, nullptr); + + try { + std::lock_guard locker(_apply_lock); + + // Apply logging configurations. + + applier::logging::instance().apply(new_cfg); + + apply_log_config(new_cfg); + + // Apply globals configurations. + applier::globals::instance().apply(new_cfg); + + // Apply macros configurations. + applier::macros::instance().apply(new_cfg); + + // Timing. + gettimeofday(tv + 2, nullptr); + + if (!has_already_been_loaded && !verify_config && !test_scheduling) { + // This must be logged after we read config data, + // as user may have changed location of main log file. + process_logger->info( + "Centreon Engine {} starting ... (PID={}) (Protobuf configuration)", + CENTREON_ENGINE_VERSION_STRING, getpid()); + + // Log the local time - may be different than clock + // time due to timezone offset. + process_logger->info("Local time is {}", string::ctime(program_start)); + process_logger->info("LOG VERSION: {}", LOG_VERSION_2); + } + + // + // Apply and resolve all objects. + // + + // Apply timeperiods. + _apply( + diff_timeperiods, err); + _resolve( + pb_config.timeperiods(), err); + + // Apply connectors. + _apply( + diff_connectors, err); + _resolve( + pb_config.connectors(), err); + + // Apply commands. + _apply(diff_commands, + err); + _resolve(pb_config.commands(), + err); + + // Apply contacts and contactgroups. + _apply(diff_contacts, + err); + _apply( + diff_contactgroups, err); + _resolve( + pb_config.contactgroups(), err); + _resolve(pb_config.contacts(), + err); + + // Apply severities. + _apply, + applier::severity>(diff_severities, err); + + // Apply tags. + _apply, applier::tag>( + diff_tags, err); + + // Apply hosts and hostgroups. 
+ _apply(diff_hosts, err); + _apply( + diff_hostgroups, err); + + // Apply services. + _apply, + applier::service>(diff_services, err); + + // Apply anomalydetections. + _apply, + applier::anomalydetection>(diff_anomalydetections, err); + + // Apply servicegroups. + _apply( + diff_servicegroups, err); + + // Resolve hosts, services, host groups. + _resolve(pb_config.hosts(), err); + _resolve( + pb_config.hostgroups(), err); + + // Resolve services. + _resolve(pb_config.services(), + err); + + // Resolve anomalydetections. + _resolve( + pb_config.anomalydetections(), err); + + // Resolve service groups. + _resolve( + pb_config.servicegroups(), err); + + // Apply host dependencies. + _apply( + diff_hostdependencies, err); + _resolve( + pb_config.hostdependencies(), err); + + // Apply service dependencies. + _apply(diff_servicedependencies, err); + _resolve( + pb_config.servicedependencies(), err); + + // Apply host escalations. + _apply( + diff_hostescalations, err); + _resolve( + pb_config.hostescalations(), err); + + // Apply service escalations. + _apply(diff_serviceescalations, err); + _resolve( + pb_config.serviceescalations(), err); + +#ifdef DEBUG_CONFIG + std::cout << "WARNING!! You are using a version of Centreon Engine for " + "developers!!! This is not a production version."; + // Checks on configuration + _check_serviceescalations(); + _check_hostescalations(); + _check_contacts(); + _check_contactgroups(); + _check_services(); + _check_hosts(); +#endif + + // Load retention. + if (state) + _apply(new_cfg, *state, err); + + // Apply scheduler. + if (!verify_config) + applier::scheduler::instance().apply(new_cfg, diff_hosts, diff_services, + diff_anomalydetections); + + // Apply new global on the current state. + if (!verify_config) { + _apply(new_cfg, err); + whitelist::reload(); + } else { + try { + _apply(new_cfg, err); + } catch (std::exception const& e) { + ++err.config_errors; + events_logger->info(e.what()); + } + } + + // Timing. 
+ gettimeofday(tv + 3, nullptr); + + // Check for circular paths between hosts. + pre_flight_circular_check(&err.config_warnings, &err.config_errors); + + // Call start broker event the first time to run applier state. + if (!has_already_been_loaded) { + neb_load_all_modules(); + + broker_program_state(NEBTYPE_PROCESS_START, NEBFLAG_NONE); + } else + neb_reload_all_modules(); + + // Print initial states of new hosts and services. + if (!verify_config && !test_scheduling) { + for (auto a : diff_hosts.added()) { + auto it_hst = engine::host::hosts_by_id.find(a->host_id()); + if (it_hst != engine::host::hosts_by_id.end()) + log_host_state(INITIAL_STATES, it_hst->second.get()); + } + for (auto a : diff_services.added()) { + auto it_svc = engine::service::services_by_id.find( + {a->host_id(), a->service_id()}); + if (it_svc != engine::service::services_by_id.end()) + log_service_state(INITIAL_STATES, it_svc->second.get()); + } + } + + // Timing. + gettimeofday(tv + 4, nullptr); + if (test_scheduling) { + double runtimes[5]; + runtimes[4] = 0.0; + for (unsigned int i(0); i < (sizeof(runtimes) / sizeof(*runtimes) - 1); + ++i) { + runtimes[i] = tv[i + 1].tv_sec - tv[i].tv_sec + + (tv[i + 1].tv_usec - tv[i].tv_usec) / 1000000.0; + runtimes[4] += runtimes[i]; + } + std::cout + << "\nTiming information on configuration verification is listed " + "below.\n\n" + "CONFIG VERIFICATION TIMES (* = Potential for speedup " + "with -x option)\n" + "----------------------------------\n" + "Template Resolutions: " + << runtimes[0] + << " sec\n" + "Object Relationships: " + << runtimes[2] + << " sec\n" + "Circular Paths: " + << runtimes[3] + << " sec *\n" + "Misc: " + << runtimes[1] + << " sec\n" + " ============\n" + "TOTAL: " + << runtimes[4] << " sec * = " << runtimes[3] << " sec (" + << (runtimes[3] * 100.0 / runtimes[4]) << "%) estimated savings\n"; + } + } catch (...) 
{ + _processing_state = state_error; + throw; + } + + has_already_been_loaded = true; + _processing_state = state_ready; +} +#endif + +#ifdef LEGACY_CONF /** * Resolve objects. * @@ -1574,3 +2519,29 @@ void applier::state::_resolve(std::set& cfg, } } } +#else +/** + * @brief Resolve objects. + * + * @tparam ConfigurationType The protobuf object configuration. + * @tparam ApplierType The applier used to handle the configuration. + * @param cfg + */ +template +void applier::state::_resolve( + const ::google::protobuf::RepeatedPtrField& cfg, + error_cnt& err) { + ApplierType aplyr; + for (auto& obj : cfg) { + try { + aplyr.resolve_object(obj, err); + } catch (const std::exception& e) { + if (verify_config) { + ++err.config_errors; + std::cout << e.what() << std::endl; + } else + throw; + } + } +} +#endif diff --git a/engine/src/configuration/applier/tag.cc b/engine/src/configuration/applier/tag.cc index 3f82237998f..8bb50803e32 100644 --- a/engine/src/configuration/applier/tag.cc +++ b/engine/src/configuration/applier/tag.cc @@ -24,12 +24,16 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/tag.hh" +#include "gtest/gtest.h" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/tag.hh" +#endif using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Add new tag. * @@ -59,7 +63,43 @@ void applier::tag::add_object(const configuration::tag& obj) { broker_adaptive_tag_data(NEBTYPE_TAG_ADD, tg.get()); } +#else +/** + * @brief Add a new tag. + * + * @param obj The new protobuf configuration tag to add. + */ +void applier::tag::add_object(const configuration::Tag& obj) { + // Logging. + config_logger->debug("Creating new tag ({},{}).", obj.key().id(), + obj.key().type()); + + // Add tag to the global configuration set. 
+ configuration::Tag* new_tg = pb_config.add_tags(); + new_tg->CopyFrom(obj); + + auto tg = std::make_shared( + new_tg->key().id(), + static_cast(new_tg->key().type()), + new_tg->tag_name()); + if (!tg) + throw engine_error() << fmt::format("Could not register tag ({},{})", + new_tg->key().id(), + new_tg->key().type()); + + // Add new items to the configuration state. + auto res = engine::tag::tags.insert( + {{new_tg->key().id(), new_tg->key().type()}, tg}); + if (!res.second) + config_logger->error( + "Could not insert tag ({},{}) into cache because it already exists", + new_tg->key().id(), new_tg->key().type()); + + broker_adaptive_tag_data(NEBTYPE_TAG_ADD, tg.get()); +} +#endif +#ifdef LEGACY_CONF /** * @brief Expand a contact. * @@ -69,7 +109,9 @@ void applier::tag::add_object(const configuration::tag& obj) { * @param[in,out] s Configuration state. */ void applier::tag::expand_objects(configuration::state&) {} +#endif +#ifdef LEGACY_CONF /** * Modify tag. * @@ -109,7 +151,43 @@ void applier::tag::modify_object(const configuration::tag& obj) { config_logger->debug("Tag ({},{}) did not change", obj.key().first, obj.key().second); } +#else +/** + * @brief Modify tag. + * + * @param obj The new tag protobuf configuration. + */ +void applier::tag::modify_object(configuration::Tag* to_modify, + const configuration::Tag& new_object) { + // Logging. + config_logger->debug("Modifying tag ({},{}).", to_modify->key().id(), + to_modify->key().type()); + + // Find tag object. + tag_map::iterator it_obj = + engine::tag::tags.find({new_object.key().id(), new_object.key().type()}); + if (it_obj == engine::tag::tags.end()) { + throw engine_error() << fmt::format( + "Could not modify non-existing tag object ({},{})", + new_object.key().id(), new_object.key().type()); + } + engine::tag* t = it_obj->second.get(); + + // Update the global configuration set. 
+ if (to_modify->tag_name() != new_object.tag_name()) { + to_modify->set_tag_name(new_object.tag_name()); + t->set_name(new_object.tag_name()); + + // Notify event broker. + broker_adaptive_tag_data(NEBTYPE_TAG_UPDATE, t); + } else + config_logger->debug("Tag ({},{}) did not change", new_object.key().id(), + new_object.key().type()); +} +#endif + +#ifdef LEGACY_CONF /** * Remove old tag. * @@ -136,16 +214,65 @@ void applier::tag::remove_object(const configuration::tag& obj) { // Remove tag from the global configuration set. config->mut_tags().erase(obj); } +#else +/** + * @brief Remove old tag. + * + * @param idx The idx in the tags configuration objects to remove. + */ +void applier::tag::remove_object(ssize_t idx) { + const configuration::Tag& obj = pb_config.tags().at(idx); + + // Logging. + config_logger->debug("Removing tag ({},{}).", obj.key().id(), + obj.key().type()); + + // Find tag. + tag_map::iterator it = + engine::tag::tags.find({obj.key().id(), obj.key().type()}); + if (it != engine::tag::tags.end()) { + engine::tag* tg = it->second.get(); + + // Notify event broker. + broker_adaptive_tag_data(NEBTYPE_TAG_DELETE, tg); + + // Erase tag object (this will effectively delete the object). + engine::tag::tags.erase(it); + } + + // Remove tag from the global configuration set. + pb_config.mutable_tags()->DeleteSubrange(idx, 1); +} +#endif +#ifdef LEGACY_CONF /** * Resolve a tag. * * @param[in] obj Object to resolve. */ -void applier::tag::resolve_object(const configuration::tag& obj) { +void applier::tag::resolve_object(const configuration::tag& obj, + error_cnt& err) { tag_map::const_iterator tg_it{engine::tag::tags.find(obj.key())}; if (tg_it == engine::tag::tags.end() || !tg_it->second) { + err.config_errors++; throw engine_error() << "Cannot resolve non-existing tag (" << obj.key().first << "," << obj.key().second << ")"; } } +#else +/** + * Resolve a tag. + * + * @param[in] obj Object to resolve. 
+ */ +void applier::tag::resolve_object(const configuration::Tag& obj, + error_cnt& err [[maybe_unused]]) { + tag_map::const_iterator tg_it{ + engine::tag::tags.find({obj.key().id(), obj.key().type()})}; + if (tg_it == engine::tag::tags.end() || !tg_it->second) { + throw engine_error() << "Cannot resolve non-existing tag (" + << obj.key().id() << "," << obj.key().type() << ")"; + } +} +#endif diff --git a/engine/src/configuration/applier/timeperiod.cc b/engine/src/configuration/applier/timeperiod.cc index 0b4d1a298d7..5dc2a9c843a 100644 --- a/engine/src/configuration/applier/timeperiod.cc +++ b/engine/src/configuration/applier/timeperiod.cc @@ -31,6 +31,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; +#ifdef LEGACY_CONF /** * Add new time period. * @@ -73,7 +74,39 @@ void applier::timeperiod::add_object(configuration::timeperiod const& obj) { } _add_exclusions(obj.exclude(), tp.get()); } +#else +/** + * @brief Add new time period. + * + * @param[in] obj The new time period to add in the monitoring engine. + */ +void applier::timeperiod::add_object(const configuration::Timeperiod& obj) { + // Logging. + config_logger->debug("Creating new time period '{}'.", obj.timeperiod_name()); + + if (obj.timeperiod_name().empty() || obj.alias().empty()) { + throw engine_error() << fmt::format( + "Could not register time period '{}' (alias '{}'): timeperiod name and " + "alias must not be empty", + obj.timeperiod_name(), obj.alias()); + } + + // Add time period to the global configuration set. + configuration::Timeperiod* c_tp = pb_config.add_timeperiods(); + c_tp->CopyFrom(obj); + + // Create time period. + auto tp = std::make_shared(obj); + engine::timeperiod::timeperiods.insert({obj.timeperiod_name(), tp}); + + // Notify event broker. 
+ timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_timeperiod_data(NEBTYPE_TIMEPERIOD_ADD, NEBFLAG_NONE, + NEBATTR_NONE, tp.get(), CMD_NONE, &tv); +} +#endif +#ifdef LEGACY_CONF /** * @brief Expand time period. * @@ -85,7 +118,20 @@ void applier::timeperiod::add_object(configuration::timeperiod const& obj) { void applier::timeperiod::expand_objects(configuration::state& s) { (void)s; } +#else +/** + * @brief Expand time period. + * + * Time period objects do not need expansion. Therefore this method + * does nothing. + * + * @param[in] s Unused. + */ +void applier::timeperiod::expand_objects(configuration::State& s + [[maybe_unused]]) {} +#endif +#ifdef LEGACY_CONF /** * Modify time period. * @@ -159,7 +205,67 @@ void applier::timeperiod::modify_object(configuration::timeperiod const& obj) { broker_adaptive_timeperiod_data(NEBTYPE_TIMEPERIOD_UPDATE, NEBFLAG_NONE, NEBATTR_NONE, tp, CMD_NONE, &tv); } +#else +/** + * Modify time period. + * + * @param[in] obj The time period to modify in the monitoring engine. + */ +void applier::timeperiod::modify_object( + configuration::Timeperiod* to_modify, + const configuration::Timeperiod& new_obj) { + // Logging. + config_logger->debug("Modifying time period '{}'.", + to_modify->timeperiod_name()); + + // Find time period object. + timeperiod_map::iterator it_obj = + engine::timeperiod::timeperiods.find(to_modify->timeperiod_name()); + if (it_obj == engine::timeperiod::timeperiods.end() || !it_obj->second) + throw engine_error() << fmt::format( + "Could not modify non-existing time period object '{}'", + to_modify->timeperiod_name()); + engine::timeperiod* tp(it_obj->second.get()); + + // Modify properties. + if (to_modify->alias() != new_obj.alias()) { + tp->set_alias(new_obj.alias().empty() ? new_obj.timeperiod_name() + : new_obj.alias()); + to_modify->set_alias(new_obj.alias()); + } + + // Time ranges modified ? 
+ if (!MessageDifferencer::Equals(to_modify->timeranges(), + new_obj.timeranges())) { + tp->set_days(new_obj.timeranges()); + to_modify->mutable_timeranges()->CopyFrom(new_obj.timeranges()); + } + + // Exceptions modified ? + if (!MessageDifferencer::Equals(to_modify->exceptions(), + new_obj.exceptions())) { + tp->set_exceptions(new_obj.exceptions()); + to_modify->mutable_exceptions()->CopyFrom(new_obj.exceptions()); + } + + // Exclusions modified ? + if (!MessageDifferencer::Equals(to_modify->exclude(), new_obj.exclude())) { + // Delete old exclusions. + tp->get_exclusions().clear(); + // Create new exclusions. + tp->set_exclusions(new_obj.exclude()); + to_modify->mutable_exclude()->CopyFrom(new_obj.exclude()); + } + + // Notify event broker. + timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_timeperiod_data(NEBTYPE_TIMEPERIOD_UPDATE, NEBFLAG_NONE, + NEBATTR_NONE, tp, CMD_NONE, &tv); +} +#endif + +#ifdef LEGACY_CONF /** * Remove old time period. * @@ -187,7 +293,32 @@ void applier::timeperiod::remove_object(configuration::timeperiod const& obj) { // Remove time period from the global configuration set. config->timeperiods().erase(obj); } +#else +void applier::timeperiod::remove_object(ssize_t idx) { + /* obj is the object to remove */ + auto& obj = pb_config.timeperiods()[idx]; + config_logger->debug("Removing time period '{}'.", obj.timeperiod_name()); + + // Find time period. + timeperiod_map::iterator it = + engine::timeperiod::timeperiods.find(obj.timeperiod_name()); + if (it != engine::timeperiod::timeperiods.end() && it->second) { + // Notify event broker. + timeval tv(get_broker_timestamp(nullptr)); + broker_adaptive_timeperiod_data(NEBTYPE_TIMEPERIOD_DELETE, NEBFLAG_NONE, + NEBATTR_NONE, it->second.get(), CMD_NONE, + &tv); + + // Erase time period (will effectively delete the object). + engine::timeperiod::timeperiods.erase(it); + } + // Remove time period from the global configuration set. 
+ pb_config.mutable_timeperiods()->DeleteSubrange(idx, 1); +} +#endif + +#ifdef LEGACY_CONF /** * @brief Resolve a time period object. * @@ -212,7 +343,33 @@ void applier::timeperiod::resolve_object(configuration::timeperiod const& obj, // Resolve time period. it->second->resolve(err.config_warnings, err.config_errors); } +#else +/** + * @brief Resolve a time period object. + * + * This method does nothing because a time period object does not rely + * on any external object. + * + * @param[in] obj Unused. + */ +void applier::timeperiod::resolve_object(const configuration::Timeperiod& obj, + error_cnt& err) { + // Logging. + config_logger->debug("Resolving time period '{}'.", obj.timeperiod_name()); + + // Find time period. + timeperiod_map::iterator it = + engine::timeperiod::timeperiods.find(obj.timeperiod_name()); + if (engine::timeperiod::timeperiods.end() == it || !it->second) + throw engine_error() << "Cannot resolve non-existing " + << "time period '" << obj.timeperiod_name() << "'"; + + // Resolve time period. + it->second->resolve(err.config_warnings, err.config_errors); +} +#endif +#ifdef LEGACY_CONF /** * Add exclusions to a time period. 
* @@ -226,3 +383,4 @@ void applier::timeperiod::_add_exclusions( it != end; ++it) tp->get_exclusions().insert({*it, nullptr}); } +#endif diff --git a/engine/src/configuration/extended_conf.cc b/engine/src/configuration/extended_conf.cc index 8042d50cde9..1f11c3741a4 100644 --- a/engine/src/configuration/extended_conf.cc +++ b/engine/src/configuration/extended_conf.cc @@ -17,8 +17,13 @@ */ #include "com/centreon/engine/configuration/extended_conf.hh" +#include #include "com/centreon/exceptions/msg_fmt.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/state_helper.hh" +#endif #include "common/log_v2/log_v2.hh" using namespace com::centreon::engine::configuration; @@ -77,6 +82,7 @@ void extended_conf::reload() { } } +#ifdef LEGACY_CONF /** * @brief reload all optional configuration files if needed * Then these configuration content are applied to dest @@ -89,3 +95,36 @@ void extended_conf::update_state(state& dest) { dest.apply_extended_conf(conf_file->_path, conf_file->_content); } } +#else +/** + * @brief reload all optional configuration files if needed + * Then these configuration content are applied to dest + * + * @param dest + */ +void extended_conf::update_state(State* pb_config) { + for (auto& conf_file : _confs) { + conf_file->reload(); + std::ifstream f(conf_file->_path, std::ios::in); + std::string content; + if (f) { + f.seekg(0, std::ios::end); + content.resize(f.tellg()); + f.seekg(0, std::ios::beg); + f.read(&content[0], content.size()); + f.close(); + State new_conf; + google::protobuf::util::JsonParseOptions options; + options.ignore_unknown_fields = false; + options.case_insensitive_enum_parsing = true; + google::protobuf::util::JsonStringToMessage(content, &new_conf); + pb_config->MergeFrom(new_conf); + } else { + SPDLOG_LOGGER_ERROR( + conf_file->_logger, + "extended_conf::extended_conf : fail to read json content '{}': {}", + conf_file->_path, strerror(errno)); + } + } +} +#endif diff --git 
a/engine/src/configuration/whitelist.cc b/engine/src/configuration/whitelist.cc index 41e4bb6d0f2..cc7a0298019 100644 --- a/engine/src/configuration/whitelist.cc +++ b/engine/src/configuration/whitelist.cc @@ -1,4 +1,4 @@ -/* +/** * Copyright 2023 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,6 @@ * For more information : contact@centreon.com * */ - #define C4_NO_DEBUG_BREAK 1 #include "com/centreon/engine/configuration/whitelist.hh" diff --git a/engine/src/contact.cc b/engine/src/contact.cc index 3873f6854e5..e05503d3883 100644 --- a/engine/src/contact.cc +++ b/engine/src/contact.cc @@ -73,6 +73,16 @@ std::vector const& contact::get_addresses() const { return _addresses; } +/** + * Set addresses. + * + * @param[in] addresses New addresses. + */ +void contact::set_addresses(std::vector&& addresses) { + _addresses = std::move(addresses); +} + +#ifdef LEGACY_CONF /** * Set addresses. * @@ -81,6 +91,7 @@ std::vector const& contact::get_addresses() const { void contact::set_addresses(std::vector const& addresses) { _addresses = addresses; } +#endif /** * Return the contact alias @@ -510,7 +521,7 @@ std::shared_ptr add_contact( std::string const& alias, std::string const& email, std::string const& pager, - std::array const& addresses, + std::vector&& addresses, std::string const& svc_notification_period, std::string const& host_notification_period, int notify_service_ok, @@ -537,8 +548,7 @@ std::shared_ptr add_contact( } // Check if the contact already exist. 
- std::string const& id(name); - if (contact::contacts.count(id)) { + if (contact::contacts.count(name)) { engine_logger(log_config_error, basic) << "Error: Contact '" << name << "' has already been defined"; config_logger->error("Error: Contact '{}' has already been defined", name); @@ -556,12 +566,8 @@ std::shared_ptr add_contact( obj->set_host_notification_period(host_notification_period); obj->set_pager(pager); obj->set_service_notification_period(svc_notification_period); - std::vector addr; - addr.resize(MAX_CONTACT_ADDRESSES); - for (unsigned int x(0); x < MAX_CONTACT_ADDRESSES; ++x) - addr[x] = addresses[x]; - obj->set_addresses(addr); + obj->set_addresses(std::move(addresses)); // Set remaining contact properties. obj->set_can_submit_commands(can_submit_commands > 0); diff --git a/engine/src/contactgroup.cc b/engine/src/contactgroup.cc index 1118b04e731..70827c4ec1a 100644 --- a/engine/src/contactgroup.cc +++ b/engine/src/contactgroup.cc @@ -16,7 +16,9 @@ * For more information : contact@centreon.com * */ +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/contactgroup.hh" +#endif #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/contact.hh" @@ -32,11 +34,7 @@ using namespace com::centreon::engine::logging; contactgroup_map contactgroup::contactgroups; -/** - * Constructor. - */ -contactgroup::contactgroup() {} - +#ifdef LEGACY_CONF /** * Constructor from a configuration contactgroup * @@ -52,6 +50,20 @@ contactgroup::contactgroup(configuration::contactgroup const& obj) // Notify event broker. broker_group(NEBTYPE_CONTACTGROUP_ADD, this); } +#else +/** + * Constructor from a protobuf configuration contactgroup + * + * @param obj Configuration contactgroup + */ +contactgroup::contactgroup(const configuration::Contactgroup& obj) + : _alias{obj.alias().empty() ? 
obj.contactgroup_name() : obj.alias()}, + _name{obj.contactgroup_name()} { + assert(!_name.empty()); + // Notify event broker. + broker_group(NEBTYPE_CONTACTGROUP_ADD, this); +} +#endif /** * Assignment operator. diff --git a/engine/src/daterange.cc b/engine/src/daterange.cc index 1afa0e99a95..566f51f2a4a 100644 --- a/engine/src/daterange.cc +++ b/engine/src/daterange.cc @@ -27,6 +27,7 @@ using namespace com::centreon::engine; +#ifdef LEGACY_CONF /** * Create a new exception to a timeperiod. * @@ -73,6 +74,56 @@ daterange::daterange(type_range type, for (auto& tr : timeranges) add_timerange({tr.range_start(), tr.range_end()}); } +#else +/** + * Create a new exception to a timeperiod. + * + * @param[in] period Base period. + * @param[in] type + * @param[in] syear + * @param[in] smon + * @param[in] smday + * @param[in] swday + * @param[in] swday_offset + * @param[in] eyear + * @param[in] emon + * @param[in] emday + * @param[in] ewday + * @param[in] ewday_offset + * @param[in] skip_interval + * @param[in] a list of timeranges. 
+ */ +daterange::daterange( + type_range type, + int syear, + int smon, + int smday, + int swday, + int swday_offset, + int eyear, + int emon, + int emday, + int ewday, + int ewday_offset, + int skip_interval, + const google::protobuf::RepeatedPtrField& + timeranges) + : _type{type}, + _syear{syear}, + _smon{smon}, + _smday{smday}, + _swday{swday}, + _swday_offset{swday_offset}, + _eyear{eyear}, + _emon{emon}, + _emday{emday}, + _ewday{ewday}, + _ewday_offset{ewday_offset}, + _skip_interval{skip_interval} { + for (auto& tr : timeranges) + add_timerange({tr.range_start(), tr.range_end()}); +} +#endif daterange::daterange(type_range type) : _type(type), diff --git a/engine/src/diagnostic.cc b/engine/src/diagnostic.cc index b204e6d57b0..70749c6257d 100644 --- a/engine/src/diagnostic.cc +++ b/engine/src/diagnostic.cc @@ -24,8 +24,13 @@ #include "com/centreon/engine/version.hh" #include "com/centreon/io/file_stream.hh" #include "com/centreon/process.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/parser.hh" #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state_helper.hh" +#endif using namespace com::centreon; using namespace com::centreon::engine; @@ -154,6 +159,7 @@ void diagnostic::generate(std::string const& cfg_file, // Parse configuration file. 
std::cout << "Diagnostic: Parsing configuration file '" << cfg_file << "'" << std::endl; +#ifdef LEGACY_CONF configuration::state conf; try { configuration::error_cnt err; @@ -163,6 +169,17 @@ void diagnostic::generate(std::string const& cfg_file, std::cerr << "Diagnostic: configuration file '" << cfg_file << "' parsing failed: " << e.what() << std::endl; } +#else + configuration::State conf; + try { + configuration::error_cnt err; + configuration::parser parsr; + parsr.parse(cfg_file, &conf, err); + } catch (std::exception const& e) { + std::cerr << "Diagnostic: configuration file '" << cfg_file + << "' parsing failed: " << e.what() << std::endl; + } +#endif // Create temporary configuration directory. std::string tmp_cfg_dir(tmp_dir + "/cfg/"); @@ -181,8 +198,7 @@ void diagnostic::generate(std::string const& cfg_file, } // Copy other configuration files. - for (std::list::const_iterator it(conf.cfg_file().begin()), - end(conf.cfg_file().end()); + for (auto it = conf.cfg_file().begin(), end = conf.cfg_file().end(); it != end; ++it) { std::string target_path(_build_target_path(tmp_cfg_dir, *it)); to_remove.push_back(target_path); diff --git a/engine/src/events/loop.cc b/engine/src/events/loop.cc index 9d65742fe7c..20500f7a155 100644 --- a/engine/src/events/loop.cc +++ b/engine/src/events/loop.cc @@ -32,7 +32,11 @@ #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/statusdata.hh" #include "com/centreon/logging/engine.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/parser.hh" +#else +#include "common/engine_conf/parser.hh" +#endif using namespace com::centreon::engine; using namespace com::centreon::engine::events; @@ -91,6 +95,7 @@ void loop::run() { */ loop::loop() : _need_reload(0), _reload_running(false) {} +#ifdef LEGACY_CONF static void apply_conf(std::atomic* reloading) { configuration::error_cnt err; engine_logger(log_info_message, more) << "Starting to reload configuration."; @@ -115,6 +120,28 @@ static void 
apply_conf(std::atomic* reloading) { engine_logger(log_info_message, more) << "Reload configuration finished."; process_logger->info("Reload configuration finished."); } +#else +static void apply_conf(std::atomic* reloading) { + configuration::error_cnt err; + process_logger->info("Starting to reload configuration."); + try { + configuration::State config; + configuration::state_helper config_hlp(&config); + { + configuration::parser p; + std::string path(::pb_config.cfg_main()); + p.parse(path, &config, err); + } + configuration::extended_conf::update_state(&config); + configuration::applier::state::instance().apply(config, err); + process_logger->info("Configuration reloaded, main loop continuing."); + } catch (std::exception const& e) { + config_logger->error("Error: {}", e.what()); + } + *reloading = false; + process_logger->info("Reload configuration finished."); +} +#endif /** * Slot to dispatch Centreon Engine events. @@ -164,6 +191,26 @@ void loop::_dispatching() { configuration::applier::state::instance().lock(); +#ifdef LEGACY_CONF + time_t time_change_threshold = config->time_change_threshold(); + uint32_t max_parallel_service_checks = + config->max_parallel_service_checks(); + bool execute_service_checks = config->execute_service_checks(); + bool execute_host_checks = config->execute_host_checks(); + uint32_t interval_length = config->interval_length(); + double sleep_time = config->sleep_time(); + int32_t command_check_interval = config->command_check_interval(); +#else + time_t time_change_threshold = pb_config.time_change_threshold(); + uint32_t max_parallel_service_checks = + pb_config.max_parallel_service_checks(); + bool execute_service_checks = pb_config.execute_service_checks(); + bool execute_host_checks = pb_config.execute_host_checks(); + uint32_t interval_length = pb_config.interval_length(); + double sleep_time = pb_config.sleep_time(); + int32_t command_check_interval = pb_config.command_check_interval(); +#endif + // Hey, wait a second... 
we traveled back in time! if (current_time < _last_time) compensate_for_system_time_change( @@ -172,7 +219,7 @@ void loop::_dispatching() { // Else if the time advanced over the specified threshold, // try and compensate... else if ((current_time - _last_time) >= - static_cast(config->time_change_threshold())) + static_cast(time_change_threshold)) compensate_for_system_time_change( static_cast(_last_time), static_cast(current_time)); @@ -207,10 +254,10 @@ void loop::_dispatching() { } engine_logger(dbg_events, more) << "Current/Max Service Checks: " << currently_running_service_checks - << '/' << config->max_parallel_service_checks(); + << '/' << max_parallel_service_checks; events_logger->debug("Current/Max Service Checks: {}/{}", currently_running_service_checks, - config->max_parallel_service_checks()); + max_parallel_service_checks); // Update status information occassionally - NagVis watches the // NDOUtils DB to see if Engine is alive. @@ -251,46 +298,43 @@ void loop::_dispatching() { // Don't run a service check if we're already maxed out on the // number of parallel service checks... - if (config->max_parallel_service_checks() != 0 && - currently_running_service_checks >= - config->max_parallel_service_checks()) { + if (max_parallel_service_checks != 0 && + currently_running_service_checks >= max_parallel_service_checks) { // Move it at least 5 seconds (to overcome the current peak), // with a random 10 seconds (to spread the load). nudge_seconds = 5 + (rand() % 10); engine_logger(dbg_events | dbg_checks, basic) << "**WARNING** Max concurrent service checks (" << currently_running_service_checks << "/" - << config->max_parallel_service_checks() - << ") has been reached! Nudging " << temp_service->get_hostname() - << ":" << temp_service->description() << " by " << nudge_seconds + << max_parallel_service_checks << ") has been reached! 
Nudging " + << temp_service->get_hostname() << ":" + << temp_service->description() << " by " << nudge_seconds << " seconds..."; events_logger->trace( "**WARNING** Max concurrent service checks ({}/{}) has been " "reached! Nudging {}:{} by {} seconds...", - currently_running_service_checks, - config->max_parallel_service_checks(), + currently_running_service_checks, max_parallel_service_checks, temp_service->get_hostname(), temp_service->description(), nudge_seconds); engine_logger(log_runtime_warning, basic) << "\tMax concurrent service checks (" << currently_running_service_checks << "/" - << config->max_parallel_service_checks() - << ") has been reached. Nudging " << temp_service->get_hostname() - << ":" << temp_service->description() << " by " << nudge_seconds + << max_parallel_service_checks << ") has been reached. Nudging " + << temp_service->get_hostname() << ":" + << temp_service->description() << " by " << nudge_seconds << " seconds..."; runtime_logger->warn( "\tMax concurrent service checks ({}/{}) has been reached. " "Nudging {}:{} by {} seconds...", - currently_running_service_checks, - config->max_parallel_service_checks(), + currently_running_service_checks, max_parallel_service_checks, temp_service->get_hostname(), temp_service->description(), nudge_seconds); run_event = false; } // Don't run a service check if active checks are disabled. 
- if (!config->execute_service_checks()) { + if (!execute_service_checks) { engine_logger(dbg_events | dbg_checks, more) << "We're not executing service checks right now, " << "so we'll skip this event."; @@ -325,13 +369,11 @@ void loop::_dispatching() { temp_service->get_current_state() != service::state_ok) temp_service->set_next_check( (time_t)(temp_service->get_next_check() + - temp_service->retry_interval() * - config->interval_length())); + temp_service->retry_interval() * interval_length)); else temp_service->set_next_check( (time_t)(temp_service->get_next_check() + - (temp_service->check_interval() * - config->interval_length()))); + (temp_service->check_interval() * interval_length))); } temp_event->run_time = temp_service->get_next_check(); reschedule_event(std::move(temp_event), events::loop::low); @@ -348,7 +390,7 @@ void loop::_dispatching() { static_cast(_event_list_low.front()->event_data)); // Don't run a host check if active checks are disabled. - if (!config->execute_host_checks()) { + if (!execute_host_checks) { engine_logger(dbg_events | dbg_checks, more) << "We're not executing host checks right now, " << "so we'll skip this event."; @@ -374,13 +416,13 @@ void loop::_dispatching() { // Reschedule. 
if ((notifier::soft == temp_host->get_state_type()) && (temp_host->get_current_state() != host::state_up)) - temp_host->set_next_check((time_t)(temp_host->get_next_check() + - temp_host->retry_interval() * - config->interval_length())); + temp_host->set_next_check( + (time_t)(temp_host->get_next_check() + + temp_host->retry_interval() * interval_length)); else - temp_host->set_next_check((time_t)(temp_host->get_next_check() + - temp_host->check_interval() * - config->interval_length())); + temp_host->set_next_check( + (time_t)(temp_host->get_next_check() + + temp_host->check_interval() * interval_length)); temp_event->run_time = temp_host->get_next_check(); reschedule_event(std::move(temp_event), events::loop::low); temp_host->update_status(); @@ -410,7 +452,7 @@ void loop::_dispatching() { << "Did not execute scheduled event. Idling for a bit..."; events_logger->debug( "Did not execute scheduled event. Idling for a bit..."); - uint64_t d = static_cast(config->sleep_time() * 1000000000); + uint64_t d = static_cast(sleep_time * 1000000000); std::this_thread::sleep_for(std::chrono::nanoseconds(d)); } } @@ -426,7 +468,7 @@ void loop::_dispatching() { // Check for external commands if we're supposed to check as // often as possible. - if (config->command_check_interval() == -1) { + if (command_check_interval == -1) { // Send data to event broker. broker_external_command(NEBTYPE_EXTERNALCOMMAND_CHECK, CMD_NONE, nullptr, nullptr); @@ -434,19 +476,18 @@ void loop::_dispatching() { auto t1 = std::chrono::system_clock::now(); auto delay = std::chrono::nanoseconds( - static_cast(1000000000 * config->sleep_time())); + static_cast(1000000000 * sleep_time)); command_manager::instance().execute(); // Set time to sleep so we don't hog the CPU... 
- timespec sleep_time; - sleep_time.tv_sec = (time_t)config->sleep_time(); - sleep_time.tv_nsec = - (long)((config->sleep_time() - (double)sleep_time.tv_sec) * - 1000000000ull); + timespec stime; + stime.tv_sec = (time_t)sleep_time; + stime.tv_nsec = + (long)((sleep_time - (double)stime.tv_sec) * 1000000000ull); // Populate fake "sleep" event. _sleep_event.run_time = current_time; - _sleep_event.event_data = (void*)&sleep_time; + _sleep_event.event_data = (void*)&stime; // Send event data to broker. broker_timed_event(NEBTYPE_TIMEDEVENT_SLEEP, NEBFLAG_NONE, NEBATTR_NONE, @@ -492,8 +533,13 @@ void loop::adjust_check_scheduling() { // determine our adjustment window. time_t current_time(time(nullptr)); time_t first_window_time(current_time); +#ifdef LEGACY_CONF time_t last_window_time(first_window_time + config->auto_rescheduling_window()); +#else + time_t last_window_time(first_window_time + + pb_config.auto_rescheduling_window()); +#endif // get current scheduling data. for (timed_event_list::iterator it{_event_list_low.begin()}, @@ -553,6 +599,7 @@ void loop::adjust_check_scheduling() { if (total_checks == 0 || adjust_scheduling == false) return; +#ifdef LEGACY_CONF if ((unsigned long)total_check_exec_time > config->auto_rescheduling_window()) { inter_check_delay = 0.0; @@ -564,6 +611,20 @@ void loop::adjust_check_scheduling() { (double)(total_checks * 1.0)); exec_time_factor = 1.0; } +#else + if ((unsigned long)total_check_exec_time > + pb_config.auto_rescheduling_window()) { + inter_check_delay = 0.0; + exec_time_factor = (double)((double)pb_config.auto_rescheduling_window() / + total_check_exec_time); + } else { + inter_check_delay = + (double)((((double)pb_config.auto_rescheduling_window()) - + total_check_exec_time) / + (double)(total_checks * 1.0)); + exec_time_factor = 1.0; + } +#endif auto compute_new_run_time = [](double current_exec_time_offset, double current_icd_offset, diff --git a/engine/src/events/sched_info.cc b/engine/src/events/sched_info.cc 
index 69701f6ca24..d92eebbd884 100644 --- a/engine/src/events/sched_info.cc +++ b/engine/src/events/sched_info.cc @@ -25,6 +25,9 @@ #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/statusdata.hh" #include "com/centreon/engine/string.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/state.pb.h" +#endif using namespace com::centreon::engine; using namespace com::centreon::engine::logging; @@ -32,6 +35,7 @@ using namespace com::centreon::engine::logging; /** * Displays service check scheduling information. */ +#ifdef LEGACY_CONF void display_scheduling_info() { // Notice. std::cout << "\nProjected scheduling information for host and service " @@ -191,6 +195,167 @@ void display_scheduling_info() { std::cout << "I have no suggestions - things look okay.\n"; } } +#else +void display_scheduling_info() { + // Notice. + std::cout << "\nProjected scheduling information for host and service " + "checks\n is listed below. This information assumes that you " + "are going\n to start running Centreon Engine with your current " + "config files.\n\n"; + + // Host scheduling information. 
+ std::cout << "HOST SCHEDULING INFORMATION\n" + "---------------------------\n" + "Total hosts: " + << scheduling_info.total_hosts + << "\n" + "Total scheduled hosts: " + << scheduling_info.total_scheduled_hosts << "\n"; + if (pb_config.host_inter_check_delay_method().type() == + configuration::InterCheckDelay_IcdType_none) { + std::cout << "Host inter-check delay method: NONE\n"; + } else if (pb_config.host_inter_check_delay_method().type() == + configuration::InterCheckDelay_IcdType_dumb) { + std::cout << "Host inter-check delay method: DUMB\n"; + } else if (pb_config.host_inter_check_delay_method().type() == + configuration::InterCheckDelay_IcdType_smart) { + std::cout << "Host inter-check delay method: SMART\n" + "Average host check interval: " + << scheduling_info.average_host_check_interval << " sec\n"; + } else { + std::cout << "Host inter-check delay method: USER-SUPPLIED VALUE\n"; + } + std::cout << "Host inter-check delay: " + << scheduling_info.host_inter_check_delay + << " sec\n Max host check spread: " + << scheduling_info.max_host_check_spread + << " min\n First scheduled check: " + << (scheduling_info.total_scheduled_hosts == 0 + ? "N/A\n" + : ctime(&scheduling_info.first_host_check)) + << "Last scheduled check: " + << (scheduling_info.total_scheduled_hosts == 0 + ? "N/A\n" + : ctime(&scheduling_info.last_host_check)) + << "\n"; + // Service scheduling information. 
+ std::cout << "SERVICE SCHEDULING INFORMATION\n" + "-------------------------------\n" + "Total services: " + << scheduling_info.total_services + << "\n" + "Total scheduled services: " + << scheduling_info.total_scheduled_services << "\n"; + if (pb_config.service_inter_check_delay_method().type() == + configuration::InterCheckDelay_IcdType_none) { + std::cout << "Service inter-check delay method: NONE\n"; + } else if (pb_config.service_inter_check_delay_method().type() == + configuration::InterCheckDelay_IcdType_dumb) { + std::cout << "Service inter-check delay method: DUMB\n"; + } else if (pb_config.service_inter_check_delay_method().type() == + configuration::InterCheckDelay_IcdType_smart) { + std::cout << "Service inter-check delay method: SMART\n" + "Average service check interval: " + << scheduling_info.average_service_check_interval << " sec\n"; + } else { + std::cout << "Service inter-check delay method: USER-SUPPLIED VALUE\n"; + } + std::cout << "Inter-check delay: " + << scheduling_info.service_inter_check_delay + << " sec\n Interleave factor method: " + << (pb_config.service_interleave_factor_method().type() == + configuration::InterleaveFactor::ilf_user + ? "USER-SUPPLIED VALUE\n" + : "SMART\n"); + if (pb_config.service_interleave_factor_method().type() == + configuration::InterleaveFactor::ilf_smart) { + std::cout << "Average services per host: " + << scheduling_info.average_services_per_host << "\n"; + } + std::cout << "Service interleave factor: " + << scheduling_info.service_interleave_factor + << "\n" + "Max service check spread: " + << scheduling_info.max_service_check_spread + << " min\n" + "First scheduled check: " + << ctime(&scheduling_info.first_service_check) + << "Last scheduled check: " + << ctime(&scheduling_info.last_service_check) << "\n"; + // Check processing information. 
+ std::cout << "CHECK PROCESSING INFORMATION\n" + "----------------------------\n" + "Check result reaper interval: " + << pb_config.check_reaper_interval() << " sec\n"; + if (pb_config.max_parallel_service_checks() == 0) { + std::cout << "Max concurrent service checks: Unlimited\n"; + } else { + std::cout << "Max concurrent service checks: " + << pb_config.max_parallel_service_checks() << "\n"; + } + std::cout << "\n"; + // Performance suggestions. + std::cout << "PERFORMANCE SUGGESTIONS\n" + "-----------------------\n"; + int suggestions(0); + + // MAX REAPER INTERVAL RECOMMENDATION. + // Assume a 100% (2x) check burst for check reaper. + // Assume we want a max of 2k files in the result queue + // at any given time. + float max_reaper_interval(0.0); + max_reaper_interval = floor(2000 * scheduling_info.service_inter_check_delay); + if (max_reaper_interval < 2.0) + max_reaper_interval = 2.0; + if (max_reaper_interval > 30.0) + max_reaper_interval = 30.0; + if (max_reaper_interval < pb_config.check_reaper_interval()) { + std::cout << "* Value for 'check_result_reaper_frequency' should be <= " + << static_cast(max_reaper_interval) << " seconds\n"; + ++suggestions; + } + if (pb_config.check_reaper_interval() < 2) { + std::cout << "* Value for 'check_result_reaper_frequency' should be >= 2 " + "seconds\n"; + ++suggestions; + } + + // MINIMUM CONCURRENT CHECKS RECOMMENDATION. + // First method (old) - assume a 100% (2x) service check + // burst for max concurrent checks. + float minimum_concurrent_checks(0.0); + float minimum_concurrent_checks1(0.0); + float minimum_concurrent_checks2(0.0); + if (scheduling_info.service_inter_check_delay == 0.0) + minimum_concurrent_checks1 = ceil(pb_config.check_reaper_interval() * 2.0); + else + minimum_concurrent_checks1 = + ceil((pb_config.check_reaper_interval() * 2.0) / + scheduling_info.service_inter_check_delay); + // Second method (new) - assume a 25% (1.25x) service check + // burst for max concurrent checks. 
+ minimum_concurrent_checks2 = + ceil((((double)scheduling_info.total_scheduled_services) / + scheduling_info.average_service_check_interval) * + 1.25 * pb_config.check_reaper_interval() * + scheduling_info.average_service_execution_time); + // Use max of computed values. + if (minimum_concurrent_checks1 > minimum_concurrent_checks2) + minimum_concurrent_checks = minimum_concurrent_checks1; + else + minimum_concurrent_checks = minimum_concurrent_checks2; + // Compare with configured value. + if ((minimum_concurrent_checks > pb_config.max_parallel_service_checks()) && + pb_config.max_parallel_service_checks() != 0) { + std::cout << "* Value for 'max_concurrent_checks' option should be >= " + << static_cast(minimum_concurrent_checks) << "\n"; + ++suggestions; + } + if (suggestions == 0) { + std::cout << "I have no suggestions - things look okay.\n"; + } +} +#endif /** * Equal operator. @@ -320,5 +485,5 @@ std::ostream& operator<<(std::ostream& os, sched_info const& obj) { << string::ctime(obj.last_host_check) << "\n" "}\n"; - return (os); + return os; } diff --git a/engine/src/events/timed_event.cc b/engine/src/events/timed_event.cc index e30946218a6..8bfff4087aa 100644 --- a/engine/src/events/timed_event.cc +++ b/engine/src/events/timed_event.cc @@ -195,6 +195,7 @@ void timed_event::_exec_event_check_reaper() { } } +#ifdef LEGACY_CONF /** * Execute orphan check. * @@ -210,7 +211,25 @@ void timed_event::_exec_event_orphan_check() { if (config->check_orphaned_services()) service::check_for_orphaned(); } +#else +/** + * Execute orphan check. + * + */ +void timed_event::_exec_event_orphan_check() { + engine_logger(dbg_events, basic) + << "** Orphaned Host and Service Check Event"; + events_logger->trace("** Orphaned Host and Service Check Event"); + // check for orphaned hosts and services. 
+ if (pb_config.check_orphaned_hosts()) + host::check_for_orphaned(); + if (pb_config.check_orphaned_services()) + service::check_for_orphaned(); +} +#endif + +#ifdef LEGACY_CONF /** * Execute retention save. * @@ -222,6 +241,19 @@ void timed_event::_exec_event_retention_save() { // save state retention data. retention::dump::save(config->state_retention_file()); } +#else +/** + * Execute retention save. + * + */ +void timed_event::_exec_event_retention_save() { + engine_logger(dbg_events, basic) << "** Retention Data Save Event"; + events_logger->trace("** Retention Data Save Event"); + + // save state retention data. + retention::dump::save(pb_config.state_retention_file()); +} +#endif /** * Execute status save. diff --git a/engine/src/flapping.cc b/engine/src/flapping.cc index 680752b89db..ae9d00ca90a 100644 --- a/engine/src/flapping.cc +++ b/engine/src/flapping.cc @@ -40,15 +40,24 @@ void enable_flap_detection_routines() { functions_logger->trace("enable_flap_detection_routines()"); /* bail out if we're already set */ +#ifdef LEGACY_CONF if (config->enable_flap_detection()) return; +#else + if (pb_config.enable_flap_detection()) + return; +#endif /* set the attribute modified flag */ modified_host_process_attributes |= attr; modified_service_process_attributes |= attr; /* set flap detection flag */ +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, @@ -78,15 +87,24 @@ void disable_flap_detection_routines() { functions_logger->trace("disable_flap_detection_routines()"); /* bail out if we're already set */ +#ifdef LEGACY_CONF if (!config->enable_flap_detection()) return; +#else + if (!pb_config.enable_flap_detection()) + return; +#endif /* set the attribute modified flag */ modified_host_process_attributes |= attr; modified_service_process_attributes |= attr; /* set flap detection flag */ 
+#ifdef LEGACY_CONF config->enable_flap_detection(false); +#else + pb_config.set_enable_flap_detection(false); +#endif /* send data to event broker */ broker_adaptive_program_data(NEBTYPE_ADAPTIVEPROGRAM_UPDATE, NEBFLAG_NONE, diff --git a/engine/src/globals.cc b/engine/src/globals.cc index 99346e158e1..f2517d0bc23 100644 --- a/engine/src/globals.cc +++ b/engine/src/globals.cc @@ -29,7 +29,11 @@ using namespace com::centreon::engine; using com::centreon::common::log_v2::log_v2; +#ifdef LEGACY_CONF configuration::state* config = nullptr; +#else +configuration::State pb_config; +#endif char const* sigs[] = {"EXIT", "HUP", "INT", "QUIT", "ILL", "TRAP", "ABRT", "BUS", "FPE", "KILL", "USR1", "SEGV", @@ -52,8 +56,9 @@ std::shared_ptr macros_logger; std::shared_ptr notifications_logger; std::shared_ptr process_logger; std::shared_ptr runtime_logger; +std::shared_ptr otl_logger; -char* config_file(NULL); +std::string config_file; char* debug_file(NULL); char* global_host_event_handler(NULL); char* global_service_event_handler(NULL); @@ -143,4 +148,5 @@ void init_loggers() { notifications_logger = log_v2::instance().get(log_v2::NOTIFICATIONS); process_logger = log_v2::instance().get(log_v2::PROCESS); runtime_logger = log_v2::instance().get(log_v2::RUNTIME); + otl_logger = log_v2::instance().get(log_v2::OTL); } diff --git a/engine/src/host.cc b/engine/src/host.cc index 2abfd5b67bc..f8a374ca624 100644 --- a/engine/src/host.cc +++ b/engine/src/host.cc @@ -1246,6 +1246,15 @@ int host::handle_async_check_result_3x( /* get the current time */ time_t current_time = std::time(nullptr); + bool accept_passive_host_checks; + uint32_t cached_host_check_horizon; +#ifdef LEGACY_CONF + accept_passive_host_checks = config->accept_passive_host_checks(); + cached_host_check_horizon = config->cached_host_check_horizon(); +#else + accept_passive_host_checks = pb_config.accept_passive_host_checks(); + cached_host_check_horizon = pb_config.cached_host_check_horizon(); +#endif double 
execution_time = static_cast(queued_check_result.get_finish_time().tv_sec - @@ -1311,7 +1320,7 @@ int host::handle_async_check_result_3x( * skip this host check results if its passive and we aren't accepting passive * check results */ if (queued_check_result.get_check_type() == check_passive) { - if (!config->accept_passive_host_checks()) { + if (!accept_passive_host_checks) { engine_logger(dbg_checks, basic) << "Discarding passive host check result because passive host " "checks are disabled globally."; @@ -1558,8 +1567,7 @@ int host::handle_async_check_result_3x( /* process the host check result */ process_check_result_3x(hst_res, old_plugin_output, CHECK_OPTION_NONE, - reschedule_check, true, - config->cached_host_check_horizon()); + reschedule_check, true, cached_host_check_horizon); engine_logger(dbg_checks, more) << "** Async check result for host '" << name() @@ -1589,6 +1597,13 @@ int host::run_scheduled_check(int check_options, double latency) { time_t next_valid_time = 0L; bool time_is_valid = true; + uint32_t interval_length; +#ifdef LEGACY_CONF + interval_length = config->interval_length(); +#else + interval_length = pb_config.interval_length(); +#endif + engine_logger(dbg_functions, basic) << "run_scheduled_host_check_3x()"; SPDLOG_LOGGER_TRACE(functions_logger, "run_scheduled_host_check_3x()"); @@ -1625,8 +1640,7 @@ int host::run_scheduled_check(int check_options, double latency) { current_time + static_cast(check_interval() <= 0 ? 300 - : check_interval() * - config->interval_length()); + : check_interval() * interval_length); // Make sure we rescheduled the next host check at a valid time. 
{ @@ -1724,6 +1738,13 @@ int host::run_async_check(int check_options, if (!verify_check_viability(check_options, time_is_valid, preferred_time)) return ERROR; + int32_t host_check_timeout; +#ifdef LEGACY_CONF + host_check_timeout = config->host_check_timeout(); +#else + host_check_timeout = pb_config.host_check_timeout(); +#endif + // If this check is a rescheduled check, propagate the rescheduled check // flag to the host. This solves the problem when a new host check is bound // to be rescheduled but would be discarded because a host check is already @@ -1855,8 +1876,7 @@ int host::run_async_check(int check_options, retry = false; try { // Run command. - get_check_command_ptr()->run(processed_cmd, *macros, - config->host_check_timeout(), + get_check_command_ptr()->run(processed_cmd, *macros, host_check_timeout, check_result_info); } catch (com::centreon::exceptions::interruption const& e) { retry = true; @@ -2055,6 +2075,23 @@ void host::check_for_flapping(bool update, double low_curve_value = 0.75; double high_curve_value = 1.25; + uint32_t interval_length; + float low_host_flap_threshold; + float high_host_flap_threshold; + bool enable_flap_detection; + +#ifdef LEGACY_CONF + interval_length = config->interval_length(); + low_host_flap_threshold = config->low_host_flap_threshold(); + high_host_flap_threshold = config->high_host_flap_threshold(); + enable_flap_detection = config->enable_flap_detection(); +#else + interval_length = pb_config.interval_length(); + low_host_flap_threshold = pb_config.low_host_flap_threshold(); + high_host_flap_threshold = pb_config.high_host_flap_threshold(); + enable_flap_detection = pb_config.enable_flap_detection(); +#endif + engine_logger(dbg_functions, basic) << "host::check_for_flapping()"; SPDLOG_LOGGER_TRACE(functions_logger, "host::check_for_flapping()"); @@ -2069,11 +2106,11 @@ void host::check_for_flapping(bool update, */ if (get_total_services() == 0) wait_threshold = static_cast(get_notification_interval() * - 
config->interval_length()); + interval_length); else - wait_threshold = static_cast( - (get_total_service_check_interval() * config->interval_length()) / - get_total_services()); + wait_threshold = + static_cast(get_total_service_check_interval() * + interval_length / get_total_services()); update_history = update; @@ -2095,11 +2132,10 @@ void host::check_for_flapping(bool update, } /* what thresholds should we use (global or host-specific)? */ - low_threshold = (get_low_flap_threshold() <= 0.0) - ? config->low_host_flap_threshold() - : get_low_flap_threshold(); + low_threshold = (get_low_flap_threshold() <= 0.0) ? low_host_flap_threshold + : get_low_flap_threshold(); high_threshold = (get_high_flap_threshold() <= 0.0) - ? config->high_host_flap_threshold() + ? high_host_flap_threshold : get_high_flap_threshold(); /* record current host state */ @@ -2156,7 +2192,7 @@ void host::check_for_flapping(bool update, /* don't do anything if we don't have flap detection enabled on a program-wide * basis */ - if (!config->enable_flap_detection()) + if (!enable_flap_detection) return; /* don't do anything if we don't have flap detection enabled for this host */ @@ -2316,6 +2352,17 @@ void host::check_for_expired_acknowledgement() { int host::handle_state() { bool state_change = false; time_t current_time; + bool log_host_retries; + +#ifdef LEGACY_CONF + log_host_retries = config->log_host_retries(); + bool use_host_down_disable_service_checks = + config->use_host_down_disable_service_checks(); +#else + log_host_retries = pb_config.log_host_retries(); + bool use_host_down_disable_service_checks = + pb_config.host_down_disable_service_checks(); +#endif engine_logger(dbg_functions, basic) << "handle_host_state()"; SPDLOG_LOGGER_TRACE(functions_logger, "handle_host_state()"); @@ -2340,15 +2387,13 @@ int host::handle_state() { case host::state_down: set_last_time_down(current_time); have_to_change_service_state = - config->use_host_down_disable_service_checks() && - 
get_state_type() == hard; + use_host_down_disable_service_checks && get_state_type() == hard; break; case host::state_unreachable: set_last_time_unreachable(current_time); have_to_change_service_state = - config->use_host_down_disable_service_checks() && - get_state_type() == hard; + use_host_down_disable_service_checks && get_state_type() == hard; break; default: @@ -2413,7 +2458,7 @@ int host::handle_state() { /* write the host state change to the main log file */ if (get_state_type() == hard || - (get_state_type() == soft && config->log_host_retries() == true)) + (get_state_type() == soft && log_host_retries)) log_event(); /* check for start of flexible (non-fixed) scheduled downtime */ @@ -2448,7 +2493,7 @@ int host::handle_state() { notify(reason_recovery, "", "", notifier::notification_option_none); /* if we're in a soft state and we should log host retries, do so now... */ - if (get_state_type() == soft && config->log_host_retries()) + if (get_state_type() == soft && log_host_retries) log_event(); } @@ -2458,7 +2503,13 @@ int host::handle_state() { /* updates host performance data */ void host::update_performance_data() { /* should we be processing performance data for anything? */ - if (!config->process_performance_data()) + +#ifdef LEGACY_CONF + bool process_performance_data = config->process_performance_data(); +#else + bool process_performance_data = pb_config.process_performance_data(); +#endif + if (!process_performance_data) return; /* should we process performance data for this host? 
*/ @@ -2493,14 +2544,18 @@ bool host::verify_check_viability(int check_options, engine_logger(dbg_functions, basic) << "check_host_check_viability_3x()"; SPDLOG_LOGGER_TRACE(functions_logger, "check_host_check_viability_3x()"); + uint32_t interval_length; +#ifdef LEGACY_CONF + interval_length = config->interval_length(); +#else + interval_length = pb_config.interval_length(); +#endif /* get the check interval to use if we need to reschedule the check */ if (this->get_state_type() == soft && this->get_current_state() != host::state_up) - check_interval = - static_cast(this->retry_interval() * config->interval_length()); + check_interval = static_cast(this->retry_interval() * interval_length); else - check_interval = - static_cast(this->check_interval() * config->interval_length()); + check_interval = static_cast(this->check_interval() * interval_length); /* make sure check interval is positive - otherwise use 5 minutes out for next * check */ @@ -2575,6 +2630,16 @@ int host::notify_contact(nagios_macros* mac, << "** Notifying contact '" << cntct->get_name() << "'"; notifications_logger->debug("** Notifying contact '{}'", cntct->get_name()); + bool log_notifications; + uint32_t notification_timeout; +#ifdef LEGACY_CONF + log_notifications = config->log_notifications(); + notification_timeout = config->notification_timeout(); +#else + log_notifications = pb_config.log_notifications(); + notification_timeout = pb_config.notification_timeout(); +#endif + /* get start time */ gettimeofday(&start_time, nullptr); @@ -2632,7 +2697,7 @@ int host::notify_contact(nagios_macros* mac, processed_command); /* log the notification to program log file */ - if (config->log_notifications()) { + if (log_notifications) { char const* host_state_str("UP"); if ((unsigned int)_current_state < tab_host_states.size()) // sizeof(tab_host_state_str) / sizeof(*tab_host_state_str)) @@ -2671,7 +2736,7 @@ int host::notify_contact(nagios_macros* mac, if 
(command_is_allowed_by_whitelist(processed_command, NOTIF_TYPE)) { try { std::string out; - my_system_r(mac, processed_command, config->notification_timeout(), + my_system_r(mac, processed_command, notification_timeout, &early_timeout, &exectime, out, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -2693,12 +2758,11 @@ int host::notify_contact(nagios_macros* mac, engine_logger(log_host_notification | log_runtime_warning, basic) << "Warning: Contact '" << cntct->get_name() << "' host notification command '" << processed_command - << "' timed out after " << config->notification_timeout() - << " seconds"; + << "' timed out after " << notification_timeout << " seconds"; notifications_logger->info( "Warning: Contact '{}' host notification command '{}' timed out " "after {} seconds", - cntct->get_name(), processed_command, config->notification_timeout()); + cntct->get_name(), processed_command, notification_timeout); } /* get end time */ @@ -2874,6 +2938,19 @@ bool host::is_result_fresh(time_t current_time, int log_this) { int tminutes = 0; int tseconds = 0; + uint32_t interval_length; + int32_t additional_freshness_latency; + uint32_t max_host_check_spread; +#ifdef LEGACY_CONF + interval_length = config->interval_length(); + additional_freshness_latency = config->additional_freshness_latency(); + max_host_check_spread = config->max_host_check_spread(); +#else + interval_length = pb_config.interval_length(); + additional_freshness_latency = pb_config.additional_freshness_latency(); + max_host_check_spread = pb_config.max_host_check_spread(); +#endif + engine_logger(dbg_checks, most) << "Checking freshness of host '" << name() << "'..."; SPDLOG_LOGGER_DEBUG(checks_logger, "Checking freshness of host '{}'...", @@ -2887,9 +2964,9 @@ bool host::is_result_fresh(time_t current_time, int log_this) { interval = check_interval(); else interval = retry_interval(); - freshness_threshold = static_cast( - (interval * config->interval_length()) + 
get_latency() + - config->additional_freshness_latency()); + freshness_threshold = + static_cast(interval * interval_length + get_latency() + + additional_freshness_latency); } else freshness_threshold = get_freshness_threshold(); @@ -2913,9 +2990,8 @@ bool host::is_result_fresh(time_t current_time, int log_this) { * suggested by Altinity */ else if (active_checks_enabled() && event_start > get_last_check() && get_freshness_threshold() == 0) - expiration_time = - (time_t)(event_start + freshness_threshold + - (config->max_host_check_spread() * config->interval_length())); + expiration_time = (time_t)(event_start + freshness_threshold + + max_host_check_spread * interval_length); else expiration_time = (time_t)(get_last_check() + freshness_threshold); @@ -3087,8 +3163,23 @@ int host::process_check_result_3x(enum host::host_state new_state, std::list check_hostlist; host::host_state parent_state = host::state_up; time_t current_time = 0L; - time_t next_check{get_last_check() + - check_interval() * config->interval_length()}; + + uint32_t interval_length; + bool log_passive_checks; + bool enable_predictive_host_dependency_checks; +#ifdef LEGACY_CONF + interval_length = config->interval_length(); + log_passive_checks = config->log_passive_checks(); + enable_predictive_host_dependency_checks = + config->enable_predictive_host_dependency_checks(); +#else + interval_length = pb_config.interval_length(); + log_passive_checks = pb_config.log_passive_checks(); + enable_predictive_host_dependency_checks = + pb_config.enable_predictive_host_dependency_checks(); +#endif + + time_t next_check{get_last_check() + check_interval() * interval_length}; time_t preferred_time = 0L; time_t next_valid_time = 0L; int run_async_check = true; @@ -3121,7 +3212,7 @@ int host::process_check_result_3x(enum host::host_state new_state, /* log passive checks - we need to do this here, as some my bypass external * commands by getting dropped in checkresults dir */ if (get_check_type() == 
check_passive) { - if (config->log_passive_checks()) + if (log_passive_checks) engine_logger(log_passive_check, basic) << "PASSIVE HOST CHECK: " << name() << ";" << new_state << ";" << get_plugin_output(); @@ -3231,8 +3322,7 @@ int host::process_check_result_3x(enum host::host_state new_state, /* schedule a re-check of the host at the retry interval because we * can't determine its final state yet... */ if (get_state_type() == soft) - next_check = - get_last_check() + retry_interval() * config->interval_length(); + next_check = get_last_check() + retry_interval() * interval_length; } } } @@ -3387,8 +3477,7 @@ int host::process_check_result_3x(enum host::host_state new_state, /* schedule a re-check of the host at the retry interval because we * can't determine its final state yet... */ - next_check = - get_last_check() + retry_interval() * config->interval_length(); + next_check = get_last_check() + retry_interval() * interval_length; /* propagate checks to immediate parents if they are UP */ /* we do this because a parent host (or grandparent) may have gone down @@ -3440,7 +3529,7 @@ int host::process_check_result_3x(enum host::host_state new_state, } /* check dependencies on second to last host check */ - if (config->enable_predictive_host_dependency_checks() && + if (enable_predictive_host_dependency_checks && get_current_attempt() == max_check_attempts() - 1) { /* propagate checks to hosts that THIS ONE depends on for * notifications AND execution */ @@ -3454,9 +3543,8 @@ int host::process_check_result_3x(enum host::host_state new_state, "Propagating predictive dependency checks to hosts this " "one depends on..."); - for (hostdependency_mmap::const_iterator - it{hostdependency::hostdependencies.find(name())}, - end{hostdependency::hostdependencies.end()}; + for (auto it = hostdependency::hostdependencies.find(name()), + end = hostdependency::hostdependencies.end(); it != end && it->first == name(); ++it) { hostdependency* temp_dependency(it->second.get()); if 
(temp_dependency->dependent_host_ptr == this && @@ -3706,6 +3794,12 @@ bool host::authorized_by_dependencies(dependency::types dependency_type) const { engine_logger(dbg_functions, basic) << "host::authorized_by_dependencies()"; SPDLOG_LOGGER_TRACE(functions_logger, "host::authorized_by_dependencies()"); +#ifdef LEGACY_CONF + bool soft_state_dependencies = config->soft_state_dependencies(); +#else + bool soft_state_dependencies = pb_config.soft_state_dependencies(); +#endif + auto p(hostdependency::hostdependencies.equal_range(name())); for (hostdependency_mmap::const_iterator it{p.first}, end{p.second}; it != end; ++it) { @@ -3719,7 +3813,7 @@ bool host::authorized_by_dependencies(dependency::types dependency_type) const { if (!dep->master_host_ptr) continue; - /* Skip this dependency if it has a timepriod and the current time is + /* Skip this dependency if it has a timeperiod and the current time is * not valid */ time_t current_time{std::time(nullptr)}; if (!dep->get_dependency_period().empty() && @@ -3730,7 +3824,7 @@ bool host::authorized_by_dependencies(dependency::types dependency_type) const { * state) */ host_state state = (dep->master_host_ptr->get_state_type() == notifier::soft && - !config->soft_state_dependencies()) + !soft_state_dependencies) ? 
dep->master_host_ptr->get_last_hard_state() : dep->master_host_ptr->get_current_state(); @@ -3756,6 +3850,13 @@ bool host::authorized_by_dependencies(dependency::types dependency_type) const { void host::check_result_freshness() { time_t current_time = 0L; + bool check_host_freshness; +#ifdef LEGACY_CONF + check_host_freshness = config->check_host_freshness(); +#else + check_host_freshness = pb_config.check_host_freshness(); +#endif + engine_logger(dbg_functions, basic) << "check_host_result_freshness()"; SPDLOG_LOGGER_TRACE(functions_logger, "check_host_result_freshness()"); engine_logger(dbg_checks, most) @@ -3765,7 +3866,7 @@ void host::check_result_freshness() { "Attempting to check the freshness of host check results..."); /* bail out if we're not supposed to be checking freshness */ - if (!config->check_host_freshness()) { + if (!check_host_freshness) { engine_logger(dbg_checks, most) << "Host freshness checking is disabled."; SPDLOG_LOGGER_DEBUG(checks_logger, "Host freshness checking is disabled."); return; @@ -3867,6 +3968,16 @@ void host::check_for_orphaned() { engine_logger(dbg_functions, basic) << "check_for_orphaned_hosts()"; SPDLOG_LOGGER_TRACE(functions_logger, "check_for_orphaned_hosts()"); + int32_t host_check_timeout; + uint32_t check_reaper_interval; +#ifdef LEGACY_CONF + host_check_timeout = config->host_check_timeout(); + check_reaper_interval = config->check_reaper_interval(); +#else + host_check_timeout = pb_config.host_check_timeout(); + check_reaper_interval = pb_config.check_reaper_interval(); +#endif + /* get the current time */ time(¤t_time); @@ -3886,8 +3997,7 @@ void host::check_for_orphaned() { * 10 minutes slack time) */ expected_time = (time_t)(it->second->get_next_check() + it->second->get_latency() + - config->host_check_timeout() + - config->check_reaper_interval() + 600); + host_check_timeout + check_reaper_interval + 600); /* this host was supposed to have executed a while ago, but for some reason * the results haven't come 
back in... */ diff --git a/engine/src/hostdependency.cc b/engine/src/hostdependency.cc index 497ac2695ab..04327efefe9 100644 --- a/engine/src/hostdependency.cc +++ b/engine/src/hostdependency.cc @@ -301,6 +301,7 @@ void hostdependency::resolve(uint32_t& w [[maybe_unused]], uint32_t& e) { } } +#ifdef LEGACY_CONF /** * Find a service dependency from its key. * @@ -323,3 +324,25 @@ hostdependency_mmap::iterator hostdependency::hostdependencies_find( } return p.first == p.second ? hostdependencies.end() : p.first; } +#else +/** + * Find a service dependency from its key. + * + * @param[in] k The service dependency configuration. + * + * @return Iterator to the element if found, + * servicedependencies().end() otherwise. + */ +hostdependency_mmap::iterator hostdependency::hostdependencies_find( + const std::pair& key) { + std::pair p; + + p = hostdependencies.equal_range(key.first); + while (p.first != p.second) { + if (p.first->second->internal_key() == key.second) + break; + ++p.first; + } + return p.first == p.second ? hostdependencies.end() : p.first; +} +#endif diff --git a/engine/src/hostescalation.cc b/engine/src/hostescalation.cc index d3bd6bf6064..e1ed434ddbb 100644 --- a/engine/src/hostescalation.cc +++ b/engine/src/hostescalation.cc @@ -23,6 +23,10 @@ #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/shared.hh" #include "com/centreon/engine/string.hh" +#ifdef LEGACY_CONF +#else +#include "common/engine_conf/state.pb.h" +#endif using namespace com::centreon; using namespace com::centreon::engine; @@ -126,6 +130,7 @@ void hostescalation::resolve(uint32_t& w [[maybe_unused]], uint32_t& e) { } } +#ifdef LEGACY_CONF /** * @brief Checks that this hostescalation corresponds to the Configuration * object obj. 
This function doesn't check contactgroups as it is usually used @@ -156,3 +161,35 @@ bool hostescalation::matches(const configuration::hostescalation& obj) const { return true; } +#else +/** + * @brief Checks that this hostescalation corresponds to the Configuration + * object obj. This function doesn't check contactgroups as it is usually used + * to modify them. + * + * @param obj A host escalation configuration object. + * + * @return A boolean that is True if they match. + */ +bool hostescalation::matches(const configuration::Hostescalation& obj) const { + uint32_t escalate_on = + ((obj.escalation_options() & configuration::action_he_down) + ? notifier::down + : notifier::none) | + ((obj.escalation_options() & configuration::action_he_unreachable) + ? notifier::unreachable + : notifier::none) | + ((obj.escalation_options() & configuration::action_he_recovery) + ? notifier::up + : notifier::none); + if (_hostname != *obj.hosts().data().begin() || + get_first_notification() != obj.first_notification() || + get_last_notification() != obj.last_notification() || + get_notification_interval() != obj.notification_interval() || + get_escalation_period() != obj.escalation_period() || + get_escalate_on() != escalate_on) + return false; + + return true; +} +#endif diff --git a/engine/src/macros.cc b/engine/src/macros.cc index 673b76b8856..943e56efe57 100644 --- a/engine/src/macros.cc +++ b/engine/src/macros.cc @@ -673,9 +673,14 @@ std::string clean_macro_chars(std::string const& macro, int options) { if (ch < 32 || ch == 127) continue; - /* illegal user-specified characters */ + /* illegal user-specified characters */ +#ifdef LEGACY_CONF if (config->illegal_output_chars().find(ch) == std::string::npos) retval[y++] = retval[x]; +#else + if (pb_config.illegal_output_chars().find(ch) == std::string::npos) + retval[y++] = retval[x]; +#endif } retval.resize(y); diff --git a/engine/src/main.cc b/engine/src/main.cc index 59d9ab6c217..9dee3f52c85 100644 --- a/engine/src/main.cc 
+++ b/engine/src/main.cc @@ -69,8 +69,13 @@ namespace asio = boost::asio; #include "com/centreon/engine/version.hh" #include "com/centreon/io/directory_entry.hh" #include "com/centreon/logging/engine.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/parser.hh" #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state_helper.hh" +#endif #include "common/log_v2/log_v2.hh" using namespace com::centreon::engine; @@ -118,16 +123,21 @@ int main(int argc, char* argv[]) { // Load singletons and global variable. log_v2::load("centengine"); - /* It's time to set the logger. Later, we will have acceses from multiple - * threads and we'll only be able to change atomic values. */ + /* It's time to set the logger. Later, we will have access from multiple + * threads and we'll only be able to change loggers atomic values. */ +#ifdef LEGACY_CONF config = new configuration::state; +#endif init_loggers(); configuration::applier::logging::instance(); com::centreon::common::pool::load(g_io_context, runtime_logger); +#ifdef LEGACY_CONF config_logger->info("Configuration mechanism used: legacy"); - config = new configuration::state; +#else + config_logger->info("Configuration mechanism used: protobuf"); +#endif logging::broker backend_broker_log; @@ -186,15 +196,15 @@ int main(int argc, char* argv[]) { error = true; else { // Config file is last argument specified. - config_file = string::dup(argv[optind]); + config_file = argv[optind]; // Make sure the config file uses an absolute path. if (config_file[0] != '/') { // Get absolute path of current working directory. 
- std::string buffer(com::centreon::io::directory_entry::current_path()); - buffer.append("/"); - buffer.append(config_file); - string::setstr(config_file, buffer); + std::string buffer{ + fmt::format("{}/{}", std::string{std::filesystem::current_path()}, + config_file)}; + config_file = std::move(buffer); } } @@ -274,6 +284,7 @@ int main(int argc, char* argv[]) { // We're just verifying the configuration. else if (verify_config) { try { +#ifdef LEGACY_CONF // Read in the configuration files (main config file, // resource and object config files). configuration::error_cnt err; @@ -284,7 +295,17 @@ int main(int argc, char* argv[]) { } configuration::applier::state::instance().apply(config, err); - +#else + // Read in the configuration files (main config file, + // resource and object config files). + configuration::error_cnt err; + configuration::State pb_config; + { + configuration::parser p; + p.parse(config_file, &pb_config, err); + } + configuration::applier::state::instance().apply(pb_config, err); +#endif std::cout << "\n Checked " << commands::command::commands.size() << " commands.\n Checked " << commands::connector::connectors.size() @@ -331,6 +352,7 @@ int main(int argc, char* argv[]) { // We're just testing scheduling. else if (test_scheduling) { try { +#ifdef LEGACY_CONF // Parse configuration. configuration::state config; configuration::error_cnt err; @@ -353,6 +375,30 @@ int main(int argc, char* argv[]) { // Apply configuration. configuration::applier::state::instance().apply(config, err, &state); +#else + // Parse configuration. + configuration::State pb_config; + configuration::error_cnt err; + { + configuration::parser p; + p.parse(config_file, &pb_config, err); + } + + // Parse retention. 
+ retention::state state; + if (!pb_config.state_retention_file().empty()) { + retention::parser p; + try { + p.parse(pb_config.state_retention_file(), state); + } catch (std::exception const& e) { + std::cout << "Error while parsing the retention: {}" << e.what() + << std::endl; + } + } + + // Apply configuration. + configuration::applier::state::instance().apply(pb_config, err, &state); +#endif display_scheduling_info(); retval = EXIT_SUCCESS; @@ -367,7 +413,19 @@ int main(int argc, char* argv[]) { } // Else start to monitor things. else { + auto generate_port = [] { + std::random_device rd; // Will be used to obtain a seed for the + // random number engine + std::mt19937 gen( + rd()); // Standard mersenne_twister_engine seeded with rd() + std::uniform_int_distribution dis(50000, 50999); + + uint16_t port = dis(gen); + return port; + }; + try { +#ifdef LEGACY_CONF // Parse configuration. configuration::error_cnt err; configuration::state config; @@ -382,15 +440,8 @@ int main(int argc, char* argv[]) { configuration::extended_conf::update_state(config); uint16_t port = config.rpc_port(); - if (!port) { - std::random_device rd; // Will be used to obtain a seed for the - // random number engine - std::mt19937 gen( - rd()); // Standard mersenne_twister_engine seeded with rd() - std::uniform_int_distribution dis(50000, 50999); - - port = dis(gen); - } + if (!port) + port = generate_port(); const std::string& listen_address = config.rpc_listen_address(); @@ -420,14 +471,11 @@ int main(int argc, char* argv[]) { mac->x[MACRO_PROCESSSTARTTIME] = std::to_string(program_start); // Load broker modules. 
- for (std::list::const_iterator - it(config.broker_module().begin()), - end(config.broker_module().end()); - it != end; ++it) { + for (auto& m : config.broker_module()) { std::string filename; std::string args; - if (!string::split(*it, filename, args, ' ')) - filename = *it; + if (!string::split(m, filename, args, ' ')) + filename = m; broker::loader::instance().add_module(filename, args); } neb_init_callback_list(); @@ -438,6 +486,66 @@ int main(int argc, char* argv[]) { // Apply configuration. configuration::applier::state::instance().apply(config, err, &state); +#else + // Parse configuration. + configuration::error_cnt err; + configuration::State pb_config; + { + configuration::parser p; + p.parse(config_file, &pb_config, err); + } + + configuration::extended_conf::load_all(extended_conf_file.begin(), + extended_conf_file.end()); + + configuration::extended_conf::update_state(&pb_config); + uint16_t port = pb_config.grpc_port(); + + if (!port) + port = generate_port(); + + const std::string& listen_address = pb_config.rpc_listen_address(); + + std::unique_ptr > rpc( + new enginerpc(listen_address, port), [](enginerpc* rpc) { + rpc->shutdown(); + delete rpc; + }); + + // Parse retention. + retention::state state; + { + retention::parser p; + try { + p.parse(pb_config.state_retention_file(), state); + } catch (const std::exception& e) { + config_logger->error("{}", e.what()); + engine_logger(logging::log_config_error, logging::basic) + << e.what(); + } + } + + // Get program (re)start time and save as macro. Needs to be + // done after we read config files, as user may have overridden + // timezone offset. + program_start = std::time(nullptr); + mac->x[MACRO_PROCESSSTARTTIME] = std::to_string(program_start); + + // Load broker modules. + for (auto& m : pb_config.broker_module()) { + std::pair p = + absl::StrSplit(m, absl::MaxSplits(' ', 1)); + broker::loader::instance().add_module(p.first, p.second); + } + neb_init_callback_list(); + + // Add broker backend. 
+ com::centreon::logging::engine::instance().add( + &backend_broker_log, logging::log_all, logging::basic); + + // Apply configuration. + configuration::applier::state::instance().apply(pb_config, err, &state); +#endif // Handle signals (interrupts). setup_sighandler(); @@ -484,8 +592,12 @@ int main(int argc, char* argv[]) { broker_program_state(NEBTYPE_PROCESS_SHUTDOWN, NEBFLAG_USER_INITIATED); - // Save service and host state information. + // Save service and host state information. +#ifdef LEGACY_CONF retention::dump::save(::config->state_retention_file()); +#else + retention::dump::save(::pb_config.state_retention_file()); +#endif // Clean up the status data. cleanup_status_data(true); @@ -513,8 +625,6 @@ int main(int argc, char* argv[]) { // Memory cleanup. cleanup(); spdlog::shutdown(); - delete[] config_file; - config_file = NULL; } catch (std::exception const& e) { engine_logger(logging::log_runtime_error, logging::basic) << "Error: " << e.what(); @@ -522,8 +632,11 @@ int main(int argc, char* argv[]) { } // Unload singletons and global objects. +#ifdef LEGACY_CONF delete config; config = nullptr; +#endif + g_io_context->stop(); com::centreon::common::pool::unload(); diff --git a/engine/src/nebmods.cc b/engine/src/nebmods.cc index 528dbc0097b..43cf720fe5f 100644 --- a/engine/src/nebmods.cc +++ b/engine/src/nebmods.cc @@ -99,7 +99,11 @@ int neb_load_all_modules() { try { loader& ldr(loader::instance()); - const std::string& mod_dir(config->broker_module_directory()); +#ifdef LEGACY_CONF + const std::string& mod_dir = config->broker_module_directory(); +#else + const std::string& mod_dir = pb_config.broker_module_directory(); +#endif if (!mod_dir.empty()) ldr.load_directory(mod_dir); diff --git a/engine/src/notifier.cc b/engine/src/notifier.cc index 418229df236..50ba8b8b2f5 100644 --- a/engine/src/notifier.cc +++ b/engine/src/notifier.cc @@ -244,7 +244,12 @@ bool notifier::_is_notification_viable_normal(reason_type type } /* are notifications enabled? 
*/ - if (!config->enable_notifications()) { +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; @@ -355,19 +360,24 @@ bool notifier::_is_notification_viable_normal(reason_type type return false; } +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); +#else + uint32_t interval_length = pb_config.interval_length(); +#endif if (_first_notification_delay > 0 && !_notification[cat_normal] && get_last_hard_state_change() + - _first_notification_delay * config->interval_length() > + _first_notification_delay * interval_length > now) { engine_logger(dbg_notifications, more) << "This notifier is configured with a first notification delay, we " "won't send notification until timestamp " - << (_first_notification_delay * config->interval_length()); + << (_first_notification_delay * interval_length); SPDLOG_LOGGER_DEBUG( notifications_logger, "This notifier is configured with a first notification delay, we " "won't send notification until timestamp {}", - _first_notification_delay * config->interval_length()); + _first_notification_delay * interval_length); return false; } @@ -399,19 +409,17 @@ bool notifier::_is_notification_viable_normal(reason_type type _last_notification); return false; } else if (notification_interval > 0) { - if (_last_notification + - notification_interval * config->interval_length() > + if (_last_notification + notification_interval * interval_length > now) { engine_logger(dbg_notifications, more) << "This notifier problem has been sent at " << _last_notification << " so it won't be sent until " - << (notification_interval * config->interval_length()); + << (notification_interval * interval_length); SPDLOG_LOGGER_DEBUG( notifications_logger, "This notifier problem has been sent 
at {} so it won't be sent " "until {}", - _last_notification, - notification_interval * config->interval_length()); + _last_notification, notification_interval * interval_length); return false; } } @@ -431,8 +439,13 @@ bool notifier::_is_notification_viable_recovery(reason_type type bool retval{true}; bool send_later{false}; +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif /* are notifications enabled? */ - if (!config->enable_notifications()) { + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; @@ -456,10 +469,20 @@ bool notifier::_is_notification_viable_recovery(reason_type type std::time_t now; std::time(&now); +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); + bool use_send_recovery_notifications_anyways = + config->use_send_recovery_notifications_anyways(); +#else + uint32_t interval_length = pb_config.interval_length(); + bool use_send_recovery_notifications_anyways = + pb_config.send_recovery_notifications_anyways(); +#endif + // if use_send_recovery_notifications_anyways flag is set, we don't take // timeperiod into account for recovery if (!check_time_against_period_for_notif(now, tp)) { - if (config->use_send_recovery_notifications_anyways()) { + if (use_send_recovery_notifications_anyways) { SPDLOG_LOGGER_DEBUG(notifications_logger, "send_recovery_notifications_anyways flag enabled, " "recovery notification is viable even if we are " @@ -523,7 +546,7 @@ bool notifier::_is_notification_viable_recovery(reason_type type retval = false; send_later = false; } else if (get_last_hard_state_change() + - _recovery_notification_delay * config->interval_length() > + _recovery_notification_delay * interval_length > now) { engine_logger(dbg_notifications, more) << "This notifier is configured with a recovery notification delay. 
" @@ -591,8 +614,13 @@ bool notifier::_is_notification_viable_acknowledgement( return true; } +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif /* are notifications enabled? */ - if (!config->enable_notifications()) { + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; @@ -641,7 +669,12 @@ bool notifier::_is_notification_viable_flapping(reason_type type, } /* are notifications enabled? */ - if (!config->enable_notifications()) { +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; @@ -752,7 +785,12 @@ bool notifier::_is_notification_viable_downtime(reason_type type } /* are notifications enabled? */ - if (!config->enable_notifications()) { +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; @@ -773,7 +811,7 @@ bool notifier::_is_notification_viable_downtime(reason_type type return false; } - if (!config->enable_notifications()) { + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications won't be sent out."; SPDLOG_LOGGER_DEBUG( @@ -824,7 +862,12 @@ bool notifier::_is_notification_viable_custom(reason_type type } /* are notifications enabled? 
*/ - if (!config->enable_notifications()) { +#ifdef LEGACY_CONF + bool enable_notifications = config->enable_notifications(); +#else + bool enable_notifications = pb_config.enable_notifications(); +#endif + if (!enable_notifications) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; @@ -1646,9 +1689,13 @@ time_t notifier::get_next_notification_time(time_t offset) { interval_to_use); /* calculate next notification time */ +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); +#else + uint32_t interval_length = pb_config.interval_length(); +#endif time_t next_notification{ - offset + - static_cast(interval_to_use * config->interval_length())}; + offset + static_cast(interval_to_use * interval_length)}; return next_notification; } diff --git a/engine/src/retention/applier/anomalydetection.cc b/engine/src/retention/applier/anomalydetection.cc index 7c979042767..1ea6b27de86 100644 --- a/engine/src/retention/applier/anomalydetection.cc +++ b/engine/src/retention/applier/anomalydetection.cc @@ -1,5 +1,5 @@ /** -* Copyright 2022 Centreon +* Copyright 2022-2024 Centreon * * This file is part of Centreon Engine. * @@ -32,6 +32,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration::applier; using namespace com::centreon::engine::retention; +#ifdef LEGACY_CONF /** * Update service list. * @@ -55,7 +56,33 @@ void applier::anomalydetection::apply(configuration::state const& config, } } } +#else +/** + * Update service list. + * + * @param[in] config The global configuration. + * @param[in] lst The service list to update. + * @param[in] scheduling_info_is_ok True if the retention is not + * outdated. 
+ */ +void applier::anomalydetection::apply(const configuration::State & config, + const list_anomalydetection & lst, + bool scheduling_info_is_ok) { + for (auto& s : lst) { + try { + std::pair id{ + get_host_and_service_id(s->host_name(), s->service_description())}; + engine::service& svc(find_service(id.first, id.second)); + _update(config, *s, dynamic_cast(svc), + scheduling_info_is_ok); + } catch (...) { + // ignore exception for the retention. + } + } +} +#endif +#ifdef LEGACY_CONF /** * Update internal service base on service retention. * @@ -76,3 +103,25 @@ void applier::anomalydetection::_update( obj.set_sensitivity(state.sensitivity()); } } +#else +/** + * Update internal service base on service retention. + * + * @param[in] config The global configuration. + * @param[in] state The service retention state. + * @param[in, out] obj The anomalydetection to update. + * @param[in] scheduling_info_is_ok True if the retention is + * not outdated. + */ +void applier::anomalydetection::_update( + const configuration::State & config, + const retention::anomalydetection & state, + engine::anomalydetection& obj, + bool scheduling_info_is_ok) { + applier::service::update(config, state, static_cast(obj), + scheduling_info_is_ok); + if (state.sensitivity().is_set()) { + obj.set_sensitivity(state.sensitivity()); + } +} +#endif diff --git a/engine/src/retention/applier/comment.cc b/engine/src/retention/applier/comment.cc index 1ba24e5a3c8..b6a95d5ac88 100644 --- a/engine/src/retention/applier/comment.cc +++ b/engine/src/retention/applier/comment.cc @@ -1,5 +1,6 @@ /** * Copyright 2011-2013 Merethis +* Copyright 2014-2024 Centreon * * This file is part of Centreon Engine. * @@ -77,7 +78,7 @@ void applier::comment::_add_host_comment( } /** - * Add serivce comment. + * Add service comment. * * @param[in] obj The comment to add into the service. 
*/ diff --git a/engine/src/retention/applier/contact.cc b/engine/src/retention/applier/contact.cc index 5ffce899e14..e3ea10ff219 100644 --- a/engine/src/retention/applier/contact.cc +++ b/engine/src/retention/applier/contact.cc @@ -27,6 +27,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration::applier; using namespace com::centreon::engine::retention; +#ifdef LEGACY_CONF /** * Update contact list. * @@ -43,7 +44,20 @@ void applier::contact::apply(configuration::state const& config, _update(config, **it, ct_it->second.get()); } } +#else +void applier::contact::apply(const configuration::State& config, + list_contact const& lst) { + for (list_contact::const_iterator it{lst.begin()}, end{lst.end()}; it != end; + ++it) { + contact_map::const_iterator ct_it{ + engine::contact::contacts.find((*it)->contact_name())}; + if (ct_it != engine::contact::contacts.end()) + _update(config, **it, ct_it->second.get()); + } +} +#endif +#ifdef LEGACY_CONF /** * Update internal contact base on contact retention. * @@ -153,3 +167,114 @@ void applier::contact::_update(configuration::state const& config, // update contact status. obj->update_status_info(false); } +#else +/** + * Update internal contact base on contact retention. + * + * @param[in] config The global configuration. + * @param[in] state The contact retention state. + * @param[in, out] obj The contact to update. + */ +void applier::contact::_update(const configuration::State& config, + const retention::contact& state, + com::centreon::engine::contact* obj) { + if (state.modified_attributes().is_set()) { + obj->set_modified_attributes(*state.modified_attributes() & ~0L); + // mask out attributes we don't want to retain. + } + if (state.modified_host_attributes().is_set()) { + obj->set_modified_host_attributes( + *state.modified_host_attributes() & + ~config.retained_contact_host_attribute_mask()); + // mask out attributes we don't want to retain. 
+ } + if (state.modified_service_attributes().is_set()) { + obj->set_modified_service_attributes( + *state.modified_service_attributes() & + ~config.retained_contact_service_attribute_mask()); + // mask out attributes we don't want to retain. + } + if (obj->get_retain_status_information()) { + if (state.last_host_notification().is_set()) + obj->set_last_host_notification(*state.last_host_notification()); + if (state.last_service_notification().is_set()) + obj->set_last_service_notification(*state.last_service_notification()); + } + if (obj->get_retain_nonstatus_information()) { + if (state.host_notification_period().is_set()) { + if (obj->get_modified_host_attributes() & + MODATTR_NOTIFICATION_TIMEPERIOD) { + timeperiod* temp_timeperiod(nullptr); + timeperiod_map::const_iterator found( + timeperiod::timeperiods.find(state.host_notification_period())); + + if (found != timeperiod::timeperiods.end()) + temp_timeperiod = found->second.get(); + + if (!temp_timeperiod) + obj->set_modified_host_attributes( + obj->get_modified_host_attributes() - + MODATTR_NOTIFICATION_TIMEPERIOD); + else + obj->set_host_notification_period(*state.host_notification_period()); + } + } + if (state.service_notification_period().is_set()) { + if (obj->get_modified_service_attributes() & + MODATTR_NOTIFICATION_TIMEPERIOD) { + timeperiod* temp_timeperiod(nullptr); + timeperiod_map::const_iterator found( + timeperiod::timeperiods.find(state.host_notification_period())); + + if (found != timeperiod::timeperiods.end()) + temp_timeperiod = found->second.get(); + + if (!temp_timeperiod) + obj->set_modified_service_attributes( + obj->get_modified_service_attributes() - + MODATTR_NOTIFICATION_TIMEPERIOD); + else + obj->set_service_notification_period( + *state.service_notification_period()); + } + } + if (state.host_notifications_enabled().is_set()) { + if (obj->get_modified_host_attributes() & MODATTR_NOTIFICATIONS_ENABLED) + obj->set_host_notifications_enabled( + 
*state.host_notifications_enabled()); + } + if (state.service_notifications_enabled().is_set()) { + if (obj->get_modified_service_attributes() & + MODATTR_NOTIFICATIONS_ENABLED) + obj->set_service_notifications_enabled( + *state.service_notifications_enabled()); + } + + if (!state.customvariables().empty() && + (obj->get_modified_attributes() & MODATTR_CUSTOM_VARIABLE)) { + for (auto const& cv : state.customvariables()) { + obj->get_custom_variables()[cv.first].update(cv.second.value()); + } + } + } + // Adjust modified attributes if necessary. + else + obj->set_modified_attributes(MODATTR_NONE); + + // Adjust modified attributes if no custom variable has been changed. + if (obj->get_modified_attributes() & MODATTR_CUSTOM_VARIABLE) { + bool at_least_one_modified(false); + for (auto const& cv : obj->get_custom_variables()) + if (cv.second.has_been_modified()) { + at_least_one_modified = true; + break; + } + if (!at_least_one_modified) + obj->set_modified_attributes(obj->get_modified_attributes() - + MODATTR_CUSTOM_VARIABLE); + } + + // update contact status. + obj->update_status_info(false); +} +#endif diff --git a/engine/src/retention/applier/host.cc b/engine/src/retention/applier/host.cc index c1f6e94f6a9..30e8058297b 100644 --- a/engine/src/retention/applier/host.cc +++ b/engine/src/retention/applier/host.cc @@ -29,6 +29,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration::applier; using namespace com::centreon::engine::retention; +#ifdef LEGACY_CONF /** * Update host list. * @@ -51,7 +52,32 @@ void applier::host::apply(configuration::state const& config, } } } +#else +/** + * Update host list. + * + * @param[in] config The global configuration. + * @param[in] lst The host list to update. + * @param[in] scheduling_info_is_ok True if the retention is not + * outdated. 
+ */ +void applier::host::apply(const configuration::State& config, + list_host const& lst, + bool scheduling_info_is_ok) { + for (list_host::const_iterator it(lst.begin()), end(lst.end()); it != end; + ++it) { + try { + com::centreon::engine::host& hst( + find_host(get_host_id((*it)->host_name().c_str()))); + _update(config, **it, hst, scheduling_info_is_ok); + } catch (...) { + // ignore exception for the retention. + } + } +} +#endif +#ifdef LEGACY_CONF /** * Update internal host base on host retention. * @@ -313,3 +339,266 @@ void applier::host::_update(configuration::state const& config, obj.update_adaptive_data(); obj.update_status(); } +#else +/** + * Update internal host base on host retention. + * + * @param[in] config The global configuration. + * @param[in] state The host retention state. + * @param[in, out] obj The host to update. + * @param[in] scheduling_info_is_ok True if the retention is + * not outdated. + */ +void applier::host::_update(const configuration::State& config, + const retention::host& state, + com::centreon::engine::host& obj, + bool scheduling_info_is_ok) { + if (state.modified_attributes().is_set()) { + obj.set_modified_attributes(*state.modified_attributes()); + // mask out attributes we don't want to retain. 
+ obj.set_modified_attributes(obj.get_modified_attributes() & + ~config.retained_host_attribute_mask()); + } + + if (obj.get_retain_status_information()) { + if (state.has_been_checked().is_set()) + obj.set_has_been_checked(*state.has_been_checked()); + if (state.check_execution_time().is_set()) + obj.set_execution_time(*state.check_execution_time()); + if (state.check_latency().is_set()) + obj.set_latency(*state.check_latency()); + if (state.check_type().is_set()) + obj.set_check_type( + static_cast(*state.check_type())); + if (state.current_state().is_set()) + obj.set_current_state( + static_cast(*state.current_state())); + if (state.last_state().is_set()) + obj.set_last_state( + static_cast(*state.last_state())); + if (state.last_hard_state().is_set()) + obj.set_last_hard_state( + static_cast(*state.last_hard_state())); + if (state.plugin_output().is_set()) + obj.set_plugin_output(*state.plugin_output()); + if (state.long_plugin_output().is_set()) + obj.set_long_plugin_output(*state.long_plugin_output()); + if (state.performance_data().is_set()) + obj.set_perf_data(*state.performance_data()); + if (state.last_acknowledgement().is_set()) + obj.set_last_acknowledgement(*state.last_acknowledgement()); + if (state.last_check().is_set()) + obj.set_last_check(*state.last_check()); + if (state.next_check().is_set() && config.use_retained_scheduling_info() && + scheduling_info_is_ok) + obj.set_next_check(*state.next_check()); + if (state.check_options().is_set() && + config.use_retained_scheduling_info() && scheduling_info_is_ok) + obj.set_check_options(*state.check_options()); + if (state.current_attempt().is_set()) + obj.set_current_attempt(*state.current_attempt()); + if (state.current_event_id().is_set()) + obj.set_current_event_id(*state.current_event_id()); + if (state.last_event_id().is_set()) + obj.set_last_event_id(*state.last_event_id()); + if (state.current_problem_id().is_set()) + obj.set_current_problem_id(*state.current_problem_id()); + if 
(state.last_problem_id().is_set()) + obj.set_last_problem_id(*state.last_problem_id()); + if (state.state_type().is_set()) + obj.set_state_type( + static_cast(*state.state_type())); + if (state.last_state_change().is_set()) + obj.set_last_state_change(*state.last_state_change()); + if (state.last_hard_state_change().is_set()) + obj.set_last_hard_state_change(*state.last_hard_state_change()); + if (state.last_time_up().is_set()) + obj.set_last_time_up(*state.last_time_up()); + if (state.last_time_down().is_set()) + obj.set_last_time_down(*state.last_time_down()); + if (state.last_time_unreachable().is_set()) + obj.set_last_time_unreachable(*state.last_time_unreachable()); + obj.set_notified_on( + (state.notified_on_down().is_set() && *state.notified_on_down() + ? notifier::down + : notifier::none) | + (state.notified_on_unreachable().is_set() && + *state.notified_on_unreachable() + ? notifier::unreachable + : notifier::none)); + if (state.last_notification().is_set()) + obj.set_last_notification(*state.last_notification()); + if (state.current_notification_number().is_set()) + obj.set_notification_number(*state.current_notification_number()); + if (state.current_notification_id().is_set()) + obj.set_current_notification_id(*state.current_notification_id()); + if (state.has_notifications()) { + for (int i = 0; i < 6; i++) + obj.set_notification(i, state.notifications()[i]); + } + if (state.percent_state_change().is_set()) + obj.set_percent_state_change(*state.percent_state_change()); + if (state.state_history().is_set()) { + utils::set_state_history(*state.state_history(), obj.get_state_history()); + obj.set_state_history_index(0); + } + } + + if (obj.get_retain_nonstatus_information()) { + if (state.acknowledgement_type().is_set()) + obj.set_acknowledgement( + static_cast(*state.acknowledgement_type())); + else + obj.set_acknowledgement(engine::AckType::NONE); + + if (state.notifications_enabled().is_set() && + (obj.get_modified_attributes() & 
MODATTR_NOTIFICATIONS_ENABLED)) + obj.set_notifications_enabled(*state.notifications_enabled()); + + if (state.active_checks_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_ACTIVE_CHECKS_ENABLED)) + obj.set_checks_enabled(*state.active_checks_enabled()); + + if (state.passive_checks_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_PASSIVE_CHECKS_ENABLED)) + obj.set_accept_passive_checks(*state.passive_checks_enabled()); + + if (state.event_handler_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_EVENT_HANDLER_ENABLED)) + obj.set_event_handler_enabled(*state.event_handler_enabled()); + + if (state.flap_detection_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_FLAP_DETECTION_ENABLED)) + obj.set_flap_detection_enabled(*state.flap_detection_enabled()); + + if (state.process_performance_data().is_set() && + (obj.get_modified_attributes() & MODATTR_PERFORMANCE_DATA_ENABLED)) + obj.set_process_performance_data(*state.process_performance_data()); + + if (state.obsess_over_host().is_set() && + (obj.get_modified_attributes() & MODATTR_OBSESSIVE_HANDLER_ENABLED)) + obj.set_obsess_over(*state.obsess_over_host()); + + if (state.check_command().is_set() && + (obj.get_modified_attributes() & MODATTR_CHECK_COMMAND)) { + if (utils::is_command_exist(*state.check_command())) + obj.set_check_command(*state.check_command()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_CHECK_COMMAND); + } + + if (state.check_period().is_set() && + (obj.get_modified_attributes() & MODATTR_CHECK_TIMEPERIOD)) { + timeperiod_map::const_iterator it( + timeperiod::timeperiods.find(*state.check_period())); + if (it != timeperiod::timeperiods.end()) + obj.set_check_period(*state.check_period()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_CHECK_TIMEPERIOD); + } + + if (state.notification_period().is_set() && + (obj.get_modified_attributes() & MODATTR_NOTIFICATION_TIMEPERIOD)) { 
+ timeperiod_map::const_iterator it( + timeperiod::timeperiods.find(*state.notification_period())); + if (it != timeperiod::timeperiods.end()) + obj.set_notification_period(*state.notification_period()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_NOTIFICATION_TIMEPERIOD); + } + + if (state.event_handler().is_set() && + (obj.get_modified_attributes() & MODATTR_EVENT_HANDLER_COMMAND)) { + if (utils::is_command_exist(*state.event_handler())) + obj.set_event_handler(*state.event_handler()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_EVENT_HANDLER_COMMAND); + } + + if (state.normal_check_interval().is_set() && + (obj.get_modified_attributes() & MODATTR_NORMAL_CHECK_INTERVAL)) + obj.set_check_interval(*state.normal_check_interval()); + + if (state.retry_check_interval().is_set() && + (obj.get_modified_attributes() & MODATTR_RETRY_CHECK_INTERVAL)) + obj.set_retry_interval(*state.retry_check_interval()); + + if (state.max_attempts().is_set() && + (obj.get_modified_attributes() & MODATTR_MAX_CHECK_ATTEMPTS)) { + obj.set_max_attempts(*state.max_attempts()); + + // adjust current attempt number if in a hard state. + if (obj.get_state_type() == notifier::hard && + obj.get_current_state() != engine::host::state_up && + obj.get_current_attempt() > 1) + obj.set_current_attempt(obj.max_check_attempts()); + } + + if (!state.customvariables().empty() && + (obj.get_modified_attributes() & MODATTR_CUSTOM_VARIABLE)) { + for (map_customvar::const_iterator it(state.customvariables().begin()), + end(state.customvariables().end()); + it != end; ++it) + obj.custom_variables[it->first].update(it->second.value()); + } + } + // Adjust modified attributes if necessary. + else + obj.set_modified_attributes(MODATTR_NONE); + + bool allow_flapstart_notification(true); + + // Adjust modified attributes if no custom variable has been changed. 
+ if (obj.get_modified_attributes() & MODATTR_CUSTOM_VARIABLE) { + bool at_least_one_modified(false); + for (auto const& cv : obj.custom_variables) { + if (cv.second.has_been_modified()) { + at_least_one_modified = true; + break; + } + } + if (!at_least_one_modified) + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_CUSTOM_VARIABLE); + } + + // calculate next possible notification time. + if (obj.get_current_state() != engine::host::state_up && + obj.get_last_notification()) + obj.set_next_notification( + obj.get_next_notification_time(obj.get_last_notification())); + + // ADDED 01/23/2009 adjust current check attempts if host in hard + // problem state (max attempts may have changed in config + // since restart). + if (obj.get_current_state() != engine::host::state_up && + obj.get_state_type() == notifier::hard) + obj.set_current_attempt(obj.max_check_attempts()); + + // ADDED 02/20/08 assume same flapping state if large install + // tweaks enabled. + if (config.use_large_installation_tweaks()) + obj.set_is_flapping(state.is_flapping()); + // else use normal startup flap detection logic. + else { + // host was flapping before program started. + // 11/10/07 don't allow flapping notifications to go out. + allow_flapstart_notification = !state.is_flapping(); + + // check for flapping. + obj.check_for_flapping(false, false, allow_flapstart_notification); + } + + // handle new vars added in 2.x. + if (!obj.get_last_hard_state_change()) + obj.set_last_hard_state_change(obj.get_last_state_change()); + + // update host status. 
+ obj.update_adaptive_data(); + obj.update_status(); +} +#endif diff --git a/engine/src/retention/applier/program.cc b/engine/src/retention/applier/program.cc index add93b1b82d..2665a7d8910 100644 --- a/engine/src/retention/applier/program.cc +++ b/engine/src/retention/applier/program.cc @@ -27,6 +27,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::retention; +#ifdef LEGACY_CONF /** * Restore programe informations. * @@ -132,3 +133,110 @@ void applier::program::apply(configuration::state& config, modified_service_process_attributes = MODATTR_NONE; } } +#else +/** + * Restore programe informations. + * + * @param[in, out] config The global configuration to update. + * @param[in] obj The global informations. + */ +void applier::program::apply(configuration::State& config, + retention::program const& obj) { + // XXX: don't use globals, replace it by config! + + if (obj.modified_host_attributes().is_set()) { + modified_host_process_attributes = *obj.modified_host_attributes(); + // mask out attributes we don't want to retain. + modified_host_process_attributes &= + ~config.retained_process_host_attribute_mask(); + } + + if (obj.modified_service_attributes().is_set()) { + modified_service_process_attributes = *obj.modified_service_attributes(); + // mask out attributes we don't want to retain. 
+ modified_service_process_attributes &= + ~config.retained_process_host_attribute_mask(); + } + + if (config.use_retained_program_state()) { + if (obj.enable_notifications().is_set() && + (modified_host_process_attributes & MODATTR_NOTIFICATIONS_ENABLED)) + enable_notifications = *obj.enable_notifications(); + + if (obj.active_service_checks_enabled().is_set() && + (modified_service_process_attributes & MODATTR_ACTIVE_CHECKS_ENABLED)) + execute_service_checks = *obj.active_service_checks_enabled(); + + if (obj.passive_service_checks_enabled().is_set() && + (modified_service_process_attributes & MODATTR_PASSIVE_CHECKS_ENABLED)) + accept_passive_service_checks = *obj.passive_service_checks_enabled(); + + if (obj.active_host_checks_enabled().is_set() && + (modified_host_process_attributes & MODATTR_ACTIVE_CHECKS_ENABLED)) + execute_host_checks = *obj.active_host_checks_enabled(); + + if (obj.passive_host_checks_enabled().is_set() && + (modified_host_process_attributes & MODATTR_PASSIVE_CHECKS_ENABLED)) + accept_passive_host_checks = *obj.passive_host_checks_enabled(); + + if (obj.enable_event_handlers().is_set() && + (modified_host_process_attributes & MODATTR_EVENT_HANDLER_ENABLED)) + enable_event_handlers = *obj.enable_event_handlers(); + + if (obj.obsess_over_services().is_set() && + (modified_service_process_attributes & + MODATTR_OBSESSIVE_HANDLER_ENABLED)) + obsess_over_services = *obj.obsess_over_services(); + + if (obj.obsess_over_hosts().is_set() && + (modified_host_process_attributes & MODATTR_OBSESSIVE_HANDLER_ENABLED)) + obsess_over_hosts = *obj.obsess_over_hosts(); + + if (obj.check_service_freshness().is_set() && + (modified_service_process_attributes & + MODATTR_FRESHNESS_CHECKS_ENABLED)) + check_service_freshness = *obj.check_service_freshness(); + + if (obj.check_host_freshness().is_set() && + (modified_host_process_attributes & MODATTR_FRESHNESS_CHECKS_ENABLED)) + check_host_freshness = *obj.check_host_freshness(); + + if 
(obj.enable_flap_detection().is_set() && + (modified_host_process_attributes & MODATTR_FLAP_DETECTION_ENABLED)) + enable_flap_detection = *obj.enable_flap_detection(); + + if (obj.process_performance_data().is_set() && + (modified_host_process_attributes & MODATTR_PERFORMANCE_DATA_ENABLED)) + process_performance_data = *obj.process_performance_data(); + + if (obj.global_host_event_handler().is_set() && + (modified_host_process_attributes & MODATTR_EVENT_HANDLER_COMMAND) && + utils::is_command_exist(*obj.global_host_event_handler())) + string::setstr(global_host_event_handler, + *obj.global_host_event_handler()); + + if (obj.global_service_event_handler().is_set() && + (modified_service_process_attributes & MODATTR_EVENT_HANDLER_COMMAND) && + utils::is_command_exist(*obj.global_service_event_handler())) + string::setstr(global_service_event_handler, + *obj.global_service_event_handler()); + + if (obj.next_comment_id().is_set()) + comment::set_next_comment_id(*obj.next_comment_id()); + + if (obj.next_event_id().is_set()) + next_event_id = *obj.next_event_id(); + + if (obj.next_problem_id().is_set()) + next_problem_id = *obj.next_problem_id(); + + if (obj.next_notification_id().is_set()) + next_notification_id = *obj.next_notification_id(); + } + + if (!config.use_retained_program_state()) { + modified_host_process_attributes = MODATTR_NONE; + modified_service_process_attributes = MODATTR_NONE; + } +} +#endif diff --git a/engine/src/retention/applier/service.cc b/engine/src/retention/applier/service.cc index 13783948e2d..26288819bcb 100644 --- a/engine/src/retention/applier/service.cc +++ b/engine/src/retention/applier/service.cc @@ -30,6 +30,7 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration::applier; using namespace com::centreon::engine::retention; +#ifdef LEGACY_CONF /** * Update service list. 
* @@ -52,7 +53,310 @@ void applier::service::apply(configuration::state const& config, } } } +#else +/** + * Update service list. + * + * @param[in] config The global configuration. + * @param[in] lst The service list to update. + * @param[in] scheduling_info_is_ok True if the retention is not + * outdated. + */ +void applier::service::apply(const configuration::State& config, + const list_service& lst, + bool scheduling_info_is_ok) { + for (auto& s : lst) { + try { + std::pair id{ + get_host_and_service_id(s->host_name(), s->service_description())}; + engine::service& svc(find_service(id.first, id.second)); + update(config, *s, svc, scheduling_info_is_ok); + } catch (...) { + // ignore exception for the retention. + } + } +} +#endif + +#ifdef LEGACY_CONF +/** + * Update internal service base on service retention. + * + * @param[in] config The global configuration. + * @param[in] state The service retention state. + * @param[in, out] obj The service to update. + * @param[in] scheduling_info_is_ok True if the retention is + * not outdated. + */ +void applier::service::update(const configuration::state& config, + const retention::service& state, + engine::service& obj, + bool scheduling_info_is_ok) { + if (state.modified_attributes().is_set()) { + obj.set_modified_attributes(*state.modified_attributes()); + // mask out attributes we don't want to retain. 
+ obj.set_modified_attributes(obj.get_modified_attributes() & + ~config.retained_host_attribute_mask()); + } + + if (obj.get_retain_status_information()) { + if (state.has_been_checked().is_set()) + obj.set_has_been_checked(*state.has_been_checked()); + if (state.check_execution_time().is_set()) + obj.set_execution_time(*state.check_execution_time()); + if (state.check_latency().is_set()) + obj.set_latency(*state.check_latency()); + if (state.check_type().is_set()) + obj.set_check_type( + static_cast(*state.check_type())); + if (state.current_state().is_set()) + obj.set_current_state( + static_cast(*state.current_state())); + if (state.last_state().is_set()) + obj.set_last_state( + static_cast(*state.last_state())); + if (state.last_hard_state().is_set()) + obj.set_last_hard_state(static_cast( + *state.last_hard_state())); + if (state.current_attempt().is_set()) + obj.set_current_attempt(*state.current_attempt()); + if (state.current_event_id().is_set()) + obj.set_current_event_id(*state.current_event_id()); + if (state.last_event_id().is_set()) + obj.set_last_event_id(*state.last_event_id()); + if (state.current_problem_id().is_set()) + obj.set_current_problem_id(*state.current_problem_id()); + if (state.last_problem_id().is_set()) + obj.set_last_problem_id(*state.last_problem_id()); + if (state.state_type().is_set()) + obj.set_state_type( + static_cast(*state.state_type())); + if (state.last_state_change().is_set()) + obj.set_last_state_change(*state.last_state_change()); + if (state.last_hard_state_change().is_set()) + obj.set_last_hard_state_change(*state.last_hard_state_change()); + if (state.last_time_ok().is_set()) + obj.set_last_time_ok(*state.last_time_ok()); + if (state.last_time_warning().is_set()) + obj.set_last_time_warning(*state.last_time_warning()); + if (state.last_time_unknown().is_set()) + obj.set_last_time_unknown(*state.last_time_unknown()); + if (state.last_time_critical().is_set()) + obj.set_last_time_critical(*state.last_time_critical()); + 
if (state.plugin_output().is_set()) + obj.set_plugin_output(*state.plugin_output()); + if (state.long_plugin_output().is_set()) + obj.set_long_plugin_output(*state.long_plugin_output()); + if (state.performance_data().is_set()) + obj.set_perf_data(*state.performance_data()); + if (state.last_acknowledgement().is_set()) + obj.set_last_acknowledgement(*state.last_acknowledgement()); + if (state.last_check().is_set()) + obj.set_last_check(*state.last_check()); + if (state.next_check().is_set() && config.use_retained_scheduling_info() && + scheduling_info_is_ok) + obj.set_next_check(*state.next_check()); + if (state.check_options().is_set() && + config.use_retained_scheduling_info() && scheduling_info_is_ok) + obj.set_check_options(*state.check_options()); + obj.set_notified_on( + (state.notified_on_unknown().is_set() && *state.notified_on_unknown() + ? notifier::unknown + : notifier::none) | + (state.notified_on_warning().is_set() && *state.notified_on_warning() + ? notifier::warning + : notifier::none) | + (state.notified_on_critical().is_set() && *state.notified_on_critical() + ? 
notifier::critical + : notifier::none)); + + if (state.current_notification_number().is_set()) + obj.set_notification_number(*state.current_notification_number()); + if (state.current_notification_id().is_set()) + obj.set_current_notification_id(*state.current_notification_id()); + if (state.last_notification().is_set()) + obj.set_last_notification(*state.last_notification()); + if (state.percent_state_change().is_set()) + obj.set_percent_state_change(*state.percent_state_change()); + if (state.check_flapping_recovery_notification().is_set()) + obj.set_check_flapping_recovery_notification( + *state.check_flapping_recovery_notification()); + if (state.has_notifications()) { + for (int i = 0; i < 6; i++) + obj.set_notification(i, state.notifications()[i]); + } + if (state.state_history().is_set()) { + utils::set_state_history(*state.state_history(), obj.get_state_history()); + obj.set_state_history_index(0); + } + } + if (obj.get_retain_nonstatus_information()) { + if (state.acknowledgement_type().is_set()) + obj.set_acknowledgement( + static_cast(*state.acknowledgement_type())); + else + obj.set_acknowledgement(engine::AckType::NONE); + + if (state.notifications_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_NOTIFICATIONS_ENABLED)) + obj.set_notifications_enabled(*state.notifications_enabled()); + + if (state.active_checks_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_ACTIVE_CHECKS_ENABLED)) + obj.set_checks_enabled(*state.active_checks_enabled()); + + if (state.passive_checks_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_PASSIVE_CHECKS_ENABLED)) + obj.set_accept_passive_checks(*state.passive_checks_enabled()); + + if (state.event_handler_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_EVENT_HANDLER_ENABLED)) + obj.set_event_handler_enabled(*state.event_handler_enabled()); + + if (state.flap_detection_enabled().is_set() && + (obj.get_modified_attributes() & MODATTR_FLAP_DETECTION_ENABLED)) + 
obj.set_flap_detection_enabled(*state.flap_detection_enabled()); + + if (state.process_performance_data().is_set() && + (obj.get_modified_attributes() & MODATTR_PERFORMANCE_DATA_ENABLED)) + obj.set_process_performance_data(*state.process_performance_data()); + + if (state.obsess_over_service().is_set() && + (obj.get_modified_attributes() & MODATTR_OBSESSIVE_HANDLER_ENABLED)) + obj.set_obsess_over(*state.obsess_over_service()); + + if (state.check_command().is_set() && + (obj.get_modified_attributes() & MODATTR_CHECK_COMMAND)) { + if (utils::is_command_exist(*state.check_command())) + obj.set_check_command(*state.check_command()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_CHECK_COMMAND); + } + + if (state.check_period().is_set() && + (obj.get_modified_attributes() & MODATTR_CHECK_TIMEPERIOD)) { + timeperiod_map::const_iterator it( + timeperiod::timeperiods.find(*state.check_period())); + if (it != timeperiod::timeperiods.end()) + obj.set_check_period(*state.check_period()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_CHECK_TIMEPERIOD); + } + + if (state.notification_period().is_set() && + (obj.get_modified_attributes() & MODATTR_NOTIFICATION_TIMEPERIOD)) { + timeperiod_map::const_iterator it( + timeperiod::timeperiods.find(*state.notification_period())); + if (it != timeperiod::timeperiods.end()) + obj.set_notification_period(*state.notification_period()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_NOTIFICATION_TIMEPERIOD); + } + + if (state.event_handler().is_set() && + (obj.get_modified_attributes() & MODATTR_EVENT_HANDLER_COMMAND)) { + if (utils::is_command_exist(*state.event_handler())) + obj.set_event_handler(*state.event_handler()); + else + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_EVENT_HANDLER_COMMAND); + } + + if (state.normal_check_interval().is_set() && + (obj.get_modified_attributes() & MODATTR_NORMAL_CHECK_INTERVAL)) + 
obj.set_check_interval(*state.normal_check_interval()); + + if (state.retry_check_interval().is_set() && + (obj.get_modified_attributes() & MODATTR_RETRY_CHECK_INTERVAL)) + obj.set_retry_interval(*state.retry_check_interval()); + + if (state.max_attempts().is_set() && + (obj.get_modified_attributes() & MODATTR_MAX_CHECK_ATTEMPTS)) { + obj.set_max_attempts(*state.max_attempts()); + + // adjust current attempt number if in a hard state. + if (obj.get_state_type() == notifier::hard && + obj.get_current_state() != engine::service::state_ok && + obj.get_current_attempt() > 1) + obj.set_current_attempt(obj.max_check_attempts()); + } + + if (!state.customvariables().empty() && + (obj.get_modified_attributes() & MODATTR_CUSTOM_VARIABLE)) { + for (map_customvar::const_iterator it(state.customvariables().begin()), + end(state.customvariables().end()); + it != end; ++it) + obj.custom_variables[it->first].update(it->second.value()); + } + } + // Adjust modified attributes if necessary. + else + obj.set_modified_attributes(MODATTR_NONE); + + bool allow_flapstart_notification(true); + + // Adjust modified attributes if no custom variable has been changed. + if (obj.get_modified_attributes() & MODATTR_CUSTOM_VARIABLE) { + bool at_least_one_modified(false); + for (auto const& cv : obj.custom_variables) + if (cv.second.has_been_modified()) { + at_least_one_modified = true; + break; + } + if (!at_least_one_modified) + obj.set_modified_attributes(obj.get_modified_attributes() - + MODATTR_CUSTOM_VARIABLE); + } + + // calculate next possible notification time. + if (obj.get_current_state() != engine::service::state_ok && + obj.get_last_notification()) + obj.set_next_notification( + obj.get_next_notification_time(obj.get_last_notification())); + + // fix old vars. 
+ if (!obj.has_been_checked() && obj.get_state_type() == notifier::soft) + obj.set_state_type(notifier::hard); + + // ADDED 01/23/2009 adjust current check attempt if service is + // in hard problem state (max attempts may have changed in config + // since restart). + if (obj.get_current_state() != engine::service::state_ok && + obj.get_state_type() == notifier::hard) + obj.set_current_attempt(obj.max_check_attempts()); + + // ADDED 02/20/08 assume same flapping state if large + // install tweaks enabled. + if (config.use_large_installation_tweaks()) + obj.set_is_flapping(state.is_flapping()); + // else use normal startup flap detection logic. + else { + // service was flapping before program started. + // 11/10/07 don't allow flapping notifications to go out. + allow_flapstart_notification = !state.is_flapping(); + + // check for flapping. + obj.check_for_flapping(false, allow_flapstart_notification); + + // service was flapping before and isn't now, so clear + // recovery check variable if service isn't flapping now. + if (state.is_flapping() && !obj.get_is_flapping()) + obj.set_check_flapping_recovery_notification(false); + } + + // handle new vars added in 2.x. + if (!obj.get_last_hard_state_change()) + obj.set_last_hard_state_change(obj.get_last_state_change()); + + // update service status. + obj.update_adaptive_data(); + obj.update_status(); +} +#else /** * Update internal service base on service retention. * @@ -62,8 +366,8 @@ void applier::service::apply(configuration::state const& config, * @param[in] scheduling_info_is_ok True if the retention is * not outdated. 
*/ -void applier::service::update(configuration::state const& config, - retention::service const& state, +void applier::service::update(const configuration::State& config, + const retention::service& state, engine::service& obj, bool scheduling_info_is_ok) { if (state.modified_attributes().is_set()) { @@ -330,3 +634,4 @@ void applier::service::update(configuration::state const& config, obj.update_adaptive_data(); obj.update_status(); } +#endif diff --git a/engine/src/retention/applier/state.cc b/engine/src/retention/applier/state.cc index eaf8ef268f5..76a83d5783d 100644 --- a/engine/src/retention/applier/state.cc +++ b/engine/src/retention/applier/state.cc @@ -31,6 +31,7 @@ using namespace com::centreon::engine::retention; +#ifdef LEGACY_CONF /** * Restore retention state. * @@ -83,3 +84,57 @@ void applier::state::apply(configuration::state& config, broker_retention_data(NEBTYPE_RETENTIONDATA_ENDLOAD, NEBFLAG_NONE, NEBATTR_NONE, NULL); } +#else +/** + * Restore retention state. + * + * @param[in, out] config The global configuration to update. + * @param[in] state The retention informations. + */ +void applier::state::apply(configuration::State& config, + const retention::state& state) { + if (!config.retain_state_information()) + return; + + // send data to event broker. 
+ broker_retention_data(NEBTYPE_RETENTIONDATA_STARTLOAD, NEBFLAG_NONE, + NEBATTR_NONE, NULL); + + try { + time_t current_time(time(NULL)); + bool scheduling_info_is_ok(false); + if ((current_time - state.informations().created()) < + static_cast(config.retention_scheduling_horizon())) + scheduling_info_is_ok = true; + + applier::program app_program; + app_program.apply(config, state.globals()); + + applier::comment app_comments; + app_comments.apply(state.comments()); + + applier::downtime::apply(state.downtimes()); + + applier::contact app_contacts; + app_contacts.apply(config, state.contacts()); + + applier::host app_hosts; + app_hosts.apply(config, state.hosts(), scheduling_info_is_ok); + + applier::service::apply(config, state.services(), scheduling_info_is_ok); + + applier::anomalydetection::apply(config, state.anomalydetection(), + scheduling_info_is_ok); + + } catch (...) { + // send data to event broker. + broker_retention_data(NEBTYPE_RETENTIONDATA_ENDLOAD, NEBFLAG_NONE, + NEBATTR_NONE, NULL); + throw; + } + + // send data to event broker. 
+ broker_retention_data(NEBTYPE_RETENTIONDATA_ENDLOAD, NEBFLAG_NONE, + NEBATTR_NONE, NULL); +} +#endif diff --git a/engine/src/retention/dump.cc b/engine/src/retention/dump.cc index cc129497c71..0f4da4fade2 100644 --- a/engine/src/retention/dump.cc +++ b/engine/src/retention/dump.cc @@ -127,6 +127,18 @@ std::ostream& dump::comments(std::ostream& os) { */ std::ostream& dump::contact(std::ostream& os, com::centreon::engine::contact const& obj) { +#ifdef LEGACY_CONF + uint32_t retained_contact_host_attribute_mask = + config->retained_contact_host_attribute_mask(); + uint32_t retained_contact_service_attribute_mask = + config->retained_contact_service_attribute_mask(); +#else + uint32_t retained_contact_host_attribute_mask = + pb_config.retained_contact_host_attribute_mask(); + uint32_t retained_contact_service_attribute_mask = + pb_config.retained_contact_service_attribute_mask(); +#endif + os << "contact {\n" "contact_name=" << obj.get_name() @@ -148,11 +160,11 @@ std::ostream& dump::contact(std::ostream& os, << "\n" "modified_host_attributes=" << (obj.get_modified_host_attributes() & - ~config->retained_contact_host_attribute_mask()) + ~retained_contact_host_attribute_mask) << "\n" "modified_service_attributes=" << (obj.get_modified_service_attributes() & - ~config->retained_contact_service_attribute_mask()) + ~retained_contact_service_attribute_mask) << "\n" "service_notification_period=" << obj.get_service_notification_period() @@ -267,6 +279,13 @@ std::ostream& dump::header(std::ostream& os) { */ std::ostream& dump::host(std::ostream& os, com::centreon::engine::host const& obj) { +#ifdef LEGACY_CONF + uint32_t retained_host_attribute_mask = + config->retained_host_attribute_mask(); +#else + uint32_t retained_host_attribute_mask = + pb_config.retained_host_attribute_mask(); +#endif os << "host {\n" "host_name=" << obj.name() @@ -374,8 +393,7 @@ std::ostream& dump::host(std::ostream& os, << obj.max_check_attempts() << "\n" "modified_attributes=" - << 
(obj.get_modified_attributes() & - ~config->retained_host_attribute_mask()) + << (obj.get_modified_attributes() & ~retained_host_attribute_mask) << "\n" "next_check=" << static_cast(obj.get_next_check()) @@ -466,6 +484,7 @@ std::ostream& dump::info(std::ostream& os) { return os; } +#ifdef LEGACY_CONF /** * Dump retention of program. * @@ -540,6 +559,82 @@ std::ostream& dump::program(std::ostream& os) { "}\n"; return os; } +#else +/** + * Dump retention of program. + * + * @param[out] os The output stream. + * + * @return The output stream. + */ +std::ostream& dump::program(std::ostream& os) { + os << "program {\n" + "active_host_checks_enabled=" + << pb_config.execute_host_checks() + << "\n" + "active_service_checks_enabled=" + << pb_config.execute_service_checks() + << "\n" + "check_host_freshness=" + << pb_config.check_host_freshness() + << "\n" + "check_service_freshness=" + << pb_config.check_service_freshness() + << "\n" + "enable_event_handlers=" + << pb_config.enable_event_handlers() + << "\n" + "enable_flap_detection=" + << pb_config.enable_flap_detection() + << "\n" + "enable_notifications=" + << pb_config.enable_notifications() + << "\n" + "global_host_event_handler=" + << pb_config.global_host_event_handler().c_str() + << "\n" + "global_service_event_handler=" + << pb_config.global_service_event_handler().c_str() + << "\n" + "modified_host_attributes=" + << (modified_host_process_attributes & + ~pb_config.retained_process_host_attribute_mask()) + << "\n" + "modified_service_attributes=" + << (modified_service_process_attributes & + ~pb_config.retained_process_host_attribute_mask()) + << "\n" + "next_comment_id=" + << comment::get_next_comment_id() + << "\n" + "next_event_id=" + << next_event_id + << "\n" + "next_notification_id=" + << next_notification_id + << "\n" + "next_problem_id=" + << next_problem_id + << "\n" + "obsess_over_hosts=" + << pb_config.obsess_over_hosts() + << "\n" + "obsess_over_services=" + << pb_config.obsess_over_services() + << 
"\n" + "passive_host_checks_enabled=" + << pb_config.accept_passive_host_checks() + << "\n" + "passive_service_checks_enabled=" + << pb_config.accept_passive_service_checks() + << "\n" + "process_performance_data=" + << pb_config.process_performance_data() + << "\n" + "}\n"; + return os; +} +#endif /** * Save all data. @@ -549,8 +644,13 @@ std::ostream& dump::program(std::ostream& os) { * @return True on success, otherwise false. */ bool dump::save(std::string const& path) { +#ifdef LEGACY_CONF if (!config->retain_state_information()) return true; +#else + if (!pb_config.retain_state_information()) + return true; +#endif // send data to event broker broker_retention_data(NEBTYPE_RETENTIONDATA_STARTSAVE, NEBFLAG_NONE, @@ -560,8 +660,13 @@ bool dump::save(std::string const& path) { try { std::ofstream stream(path.c_str(), std::ios::binary | std::ios::trunc); if (!stream.is_open()) - throw(engine_error() << "Cannot open retention file '" - << config->state_retention_file() << "'"); +#ifdef LEGACY_CONF + throw engine_error() << "Cannot open retention file '" + << config->state_retention_file() << "'"; +#else + throw engine_error() << "Cannot open retention file '" + << pb_config.state_retention_file() << "'"; +#endif dump::header(stream); dump::info(stream); dump::program(stream); @@ -719,8 +824,13 @@ std::ostream& dump::service(std::ostream& os, << obj.max_check_attempts() << "\n" "modified_attributes=" +#ifdef LEGACY_CONF << (obj.get_modified_attributes() & ~config->retained_host_attribute_mask()) +#else + << (obj.get_modified_attributes() & + ~pb_config.retained_host_attribute_mask()) +#endif << "\n" "next_check=" << static_cast(obj.get_next_check()) diff --git a/engine/src/sehandlers.cc b/engine/src/sehandlers.cc index 18872b09f17..a3a9ea5a80c 100644 --- a/engine/src/sehandlers.cc +++ b/engine/src/sehandlers.cc @@ -50,6 +50,18 @@ int obsessive_compulsive_host_check_processor( int macro_options = STRIP_ILLEGAL_MACRO_CHARS | ESCAPE_MACRO_CHARS; nagios_macros* 
mac(get_global_macros()); + bool obsess_over_hosts; + uint32_t ochp_timeout; +#ifdef LEGACY_CONF + obsess_over_hosts = config->obsess_over_hosts(); + const std::string& ochp_command = config->ochp_command(); + ochp_timeout = config->ochp_timeout(); +#else + obsess_over_hosts = pb_config.obsess_over_hosts(); + const std::string& ochp_command = pb_config.ochp_command(); + ochp_timeout = pb_config.ochp_timeout(); +#endif + engine_logger(dbg_functions, basic) << "obsessive_compulsive_host_check_processor()"; functions_logger->trace("obsessive_compulsive_host_check_processor()"); @@ -58,20 +70,20 @@ int obsessive_compulsive_host_check_processor( return ERROR; /* bail out if we shouldn't be obsessing */ - if (!config->obsess_over_hosts()) + if (!obsess_over_hosts) return OK; if (!hst->obsess_over()) return OK; /* if there is no valid command, exit */ - if (config->ochp_command().empty()) + if (ochp_command.empty()) return ERROR; /* update macros */ grab_host_macros_r(mac, hst); /* get the raw command line */ - get_raw_command_line_r(mac, ochp_command_ptr, config->ochp_command().c_str(), + get_raw_command_line_r(mac, ochp_command_ptr, ochp_command.c_str(), raw_command, macro_options); if (raw_command.empty()) { clear_volatile_macros_r(mac); @@ -105,7 +117,7 @@ int obsessive_compulsive_host_check_processor( /* run the command */ try { std::string tmp; - my_system_r(mac, processed_command, config->ochp_timeout(), + my_system_r(mac, processed_command, ochp_timeout, &early_timeout, &exectime, tmp, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -128,11 +140,11 @@ int obsessive_compulsive_host_check_processor( if (early_timeout) engine_logger(log_runtime_warning, basic) << "Warning: OCHP command '" << processed_command << "' for host '" - << hst->name() << "' timed out after " << config->ochp_timeout() + << hst->name() << "' timed out after " << ochp_timeout << " seconds"; runtime_logger->warn( "Warning: OCHP command '{}' for host '{}' timed 
out after {} seconds", - processed_command, hst->name(), config->ochp_timeout()); + processed_command, hst->name(), ochp_timeout); return OK; } @@ -156,15 +168,30 @@ int run_global_service_event_handler(nagios_macros* mac, engine_logger(dbg_functions, basic) << "run_global_service_event_handler()"; functions_logger->trace("run_global_service_event_handler()"); + bool enable_event_handlers; + bool log_event_handlers; + uint32_t event_handler_timeout; +#ifdef LEGACY_CONF + enable_event_handlers = config->enable_event_handlers(); + const std::string& global_service_event_handler = config->global_service_event_handler(); + log_event_handlers = config->log_event_handlers(); + event_handler_timeout = config->event_handler_timeout(); +#else + enable_event_handlers = pb_config.enable_event_handlers(); + const std::string& global_service_event_handler = pb_config.global_service_event_handler(); + log_event_handlers = pb_config.log_event_handlers(); + event_handler_timeout = pb_config.event_handler_timeout(); +#endif + if (svc == nullptr) return ERROR; /* bail out if we shouldn't be running event handlers */ - if (config->enable_event_handlers() == false) + if (!enable_event_handlers) return OK; /* a global service event handler command has not been defined */ - if (config->global_service_event_handler().empty()) + if (global_service_event_handler.empty()) return ERROR; engine_logger(dbg_eventhandlers, more) @@ -179,11 +206,10 @@ int run_global_service_event_handler(nagios_macros* mac, /* get the raw command line */ get_raw_command_line_r(mac, global_service_event_handler_ptr, - config->global_service_event_handler().c_str(), + global_service_event_handler.c_str(), raw_command, macro_options); - if (raw_command.empty()) { + if (raw_command.empty()) return ERROR; - } engine_logger(dbg_eventhandlers, most) << "Raw global service event handler command line: " << raw_command; @@ -203,12 +229,12 @@ int run_global_service_event_handler(nagios_macros* mac, "Processed global service 
event handler command line: {}", processed_command); - if (config->log_event_handlers()) { + if (log_event_handlers) { std::ostringstream oss; oss << "GLOBAL SERVICE EVENT HANDLER: " << svc->get_hostname() << ';' << svc->description() << ";$SERVICESTATE$;$SERVICESTATETYPE$;$SERVICEATTEMPT$;" - << config->global_service_event_handler(); + << global_service_event_handler; process_macros_r(mac, oss.str(), processed_logentry, macro_options); engine_logger(log_event_handler, basic) << processed_logentry; events_logger->debug(processed_logentry); @@ -220,7 +246,7 @@ int run_global_service_event_handler(nagios_macros* mac, cached_cmd)) { /* run the command */ try { - my_system_r(mac, processed_command, config->event_handler_timeout(), + my_system_r(mac, processed_command, event_handler_timeout, &early_timeout, &exectime, command_output, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -244,11 +270,11 @@ int run_global_service_event_handler(nagios_macros* mac, engine_logger(log_event_handler | log_runtime_warning, basic) << "Warning: Global service event handler command '" << processed_command << "' timed out after " - << config->event_handler_timeout() << " seconds"; + << event_handler_timeout << " seconds"; events_logger->info( "Warning: Global service event handler command '{}' timed out after {} " "seconds", - processed_command, config->event_handler_timeout()); + processed_command, event_handler_timeout); } return OK; } @@ -265,6 +291,16 @@ int run_service_event_handler(nagios_macros* mac, struct timeval start_time; int macro_options = STRIP_ILLEGAL_MACRO_CHARS | ESCAPE_MACRO_CHARS; + bool log_event_handlers; + uint32_t event_handler_timeout; +#ifdef LEGACY_CONF + log_event_handlers = config->log_event_handlers(); + event_handler_timeout = config->event_handler_timeout(); +#else + log_event_handlers = pb_config.log_event_handlers(); + event_handler_timeout = pb_config.event_handler_timeout(); +#endif + engine_logger(dbg_functions, 
basic) << "run_service_event_handler()"; functions_logger->trace("run_service_event_handler()"); @@ -306,7 +342,7 @@ int run_service_event_handler(nagios_macros* mac, events_logger->debug("Processed service event handler command line: {}", processed_command); - if (config->log_event_handlers()) { + if (log_event_handlers) { std::ostringstream oss; oss << "SERVICE EVENT HANDLER: " << svc->get_hostname() << ';' << svc->description() @@ -321,7 +357,7 @@ int run_service_event_handler(nagios_macros* mac, checkable::EVH_TYPE)) { /* run the command */ try { - my_system_r(mac, processed_command, config->event_handler_timeout(), + my_system_r(mac, processed_command, event_handler_timeout, &early_timeout, &exectime, command_output, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -342,12 +378,12 @@ int run_service_event_handler(nagios_macros* mac, if (early_timeout) { engine_logger(log_event_handler | log_runtime_warning, basic) << "Warning: Service event handler command '" << processed_command - << "' timed out after " << config->event_handler_timeout() + << "' timed out after " << event_handler_timeout << " seconds"; events_logger->info( "Warning: Service event handler command '{}' timed out after {} " "seconds", - processed_command, config->event_handler_timeout()); + processed_command, event_handler_timeout); } return OK; } @@ -366,6 +402,16 @@ int handle_host_event(com::centreon::engine::host* hst) { if (hst == nullptr) return ERROR; + bool enable_event_handlers; + std::string_view global_host_event_handler; +#ifdef LEGACY_CONF + enable_event_handlers = config->enable_event_handlers(); + global_host_event_handler = config->global_host_event_handler(); +#else + enable_event_handlers = pb_config.enable_event_handlers(); + global_host_event_handler = pb_config.global_host_event_handler(); +#endif + /* send event data to broker */ broker_statechange_data( NEBTYPE_STATECHANGE_END, NEBFLAG_NONE, NEBATTR_NONE, HOST_STATECHANGE, @@ -373,7 
+419,7 @@ int handle_host_event(com::centreon::engine::host* hst) { hst->get_current_attempt(), hst->max_check_attempts(), nullptr); /* bail out if we shouldn't be running event handlers */ - if (!config->enable_event_handlers()) + if (!enable_event_handlers) return OK; if (!hst->event_handler_enabled()) return OK; @@ -410,15 +456,30 @@ int run_global_host_event_handler(nagios_macros* mac, engine_logger(dbg_functions, basic) << "run_global_host_event_handler()"; functions_logger->trace("run_global_host_event_handler()"); + bool enable_event_handlers; + bool log_event_handlers; + uint32_t event_handler_timeout; +#ifdef LEGACY_CONF + enable_event_handlers = config->enable_event_handlers(); + const std::string& global_host_event_handler = config->global_host_event_handler(); + log_event_handlers = config->log_event_handlers(); + event_handler_timeout = config->event_handler_timeout(); +#else + enable_event_handlers = pb_config.enable_event_handlers(); + const std::string& global_host_event_handler = pb_config.global_host_event_handler(); + log_event_handlers = pb_config.log_event_handlers(); + event_handler_timeout = pb_config.event_handler_timeout(); +#endif + if (hst == nullptr) return ERROR; /* bail out if we shouldn't be running event handlers */ - if (config->enable_event_handlers() == false) + if (!enable_event_handlers) return OK; /* no global host event handler command is defined */ - if (config->global_host_event_handler() == "") + if (global_host_event_handler.empty()) return ERROR; engine_logger(dbg_eventhandlers, more) @@ -431,7 +492,7 @@ int run_global_host_event_handler(nagios_macros* mac, /* get the raw command line */ get_raw_command_line_r(mac, global_host_event_handler_ptr, - config->global_host_event_handler().c_str(), + global_host_event_handler.c_str(), raw_command, macro_options); if (raw_command.empty()) return ERROR; @@ -453,11 +514,11 @@ int run_global_host_event_handler(nagios_macros* mac, events_logger->debug("Processed global host event 
handler command line: {}", processed_command); - if (config->log_event_handlers() == true) { + if (log_event_handlers) { std::ostringstream oss; oss << "GLOBAL HOST EVENT HANDLER: " << hst->name() << "$HOSTSTATE$;$HOSTSTATETYPE$;$HOSTATTEMPT$;" - << config->global_host_event_handler(); + << global_host_event_handler; process_macros_r(mac, oss.str(), processed_logentry, macro_options); engine_logger(log_event_handler, basic) << processed_logentry; events_logger->info(processed_logentry); @@ -468,7 +529,7 @@ int run_global_host_event_handler(nagios_macros* mac, if (host::command_is_allowed_by_whitelist(processed_command, cached_cmd)) { /* run the command */ try { - my_system_r(mac, processed_command, config->event_handler_timeout(), + my_system_r(mac, processed_command, event_handler_timeout, &early_timeout, &exectime, command_output, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -490,12 +551,12 @@ int run_global_host_event_handler(nagios_macros* mac, if (early_timeout) { engine_logger(log_event_handler | log_runtime_warning, basic) << "Warning: Global host event handler command '" << processed_command - << "' timed out after " << config->event_handler_timeout() + << "' timed out after " << event_handler_timeout << " seconds"; events_logger->info( "Warning: Global host event handler command '{}' timed out after {} " "seconds", - processed_command, config->event_handler_timeout()); + processed_command, event_handler_timeout); } return OK; @@ -513,6 +574,16 @@ int run_host_event_handler(nagios_macros* mac, struct timeval start_time; int macro_options = STRIP_ILLEGAL_MACRO_CHARS | ESCAPE_MACRO_CHARS; + bool log_event_handlers; + uint32_t event_handler_timeout; +#ifdef LEGACY_CONF + log_event_handlers = config->log_event_handlers(); + event_handler_timeout = config->event_handler_timeout(); +#else + log_event_handlers = pb_config.log_event_handlers(); + event_handler_timeout = pb_config.event_handler_timeout(); +#endif + 
engine_logger(dbg_functions, basic) << "run_host_event_handler()"; functions_logger->trace("run_host_event_handler()"); @@ -551,7 +622,7 @@ int run_host_event_handler(nagios_macros* mac, events_logger->debug("Processed host event handler command line: {}", processed_command); - if (config->log_event_handlers() == true) { + if (log_event_handlers) { std::ostringstream oss; oss << "HOST EVENT HANDLER: " << hst->name() << ";$HOSTSTATE$;$HOSTSTATETYPE$;$HOSTATTEMPT$;" @@ -565,7 +636,7 @@ int run_host_event_handler(nagios_macros* mac, checkable::EVH_TYPE)) { /* run the command */ try { - my_system_r(mac, processed_command, config->event_handler_timeout(), + my_system_r(mac, processed_command, event_handler_timeout, &early_timeout, &exectime, command_output, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -586,15 +657,11 @@ int run_host_event_handler(nagios_macros* mac, if (early_timeout) { engine_logger(log_event_handler | log_runtime_warning, basic) << "Warning: Host event handler command '" << processed_command - << "' timed out after " << config->event_handler_timeout() + << "' timed out after " << event_handler_timeout << " seconds"; events_logger->info( "Warning: Host event handler command '{}' timed out after {} seconds", - processed_command, config->event_handler_timeout()); + processed_command, event_handler_timeout); } return OK; } - -/******************************************************************/ -/****************** HOST STATE HANDLER FUNCTIONS ******************/ -/******************************************************************/ diff --git a/engine/src/service.cc b/engine/src/service.cc index abe5f65711a..ed514b431e2 100644 --- a/engine/src/service.cc +++ b/engine/src/service.cc @@ -1090,8 +1090,29 @@ int service::handle_async_check_result( com::centreon::engine::service* master_service = nullptr; int run_async_check = true; int flapping_check_done = false; +#ifdef LEGACY_CONF + uint32_t interval_length = 
config->interval_length(); + bool accept_passive_service_checks = config->accept_passive_service_checks(); + bool log_passive_checks = config->log_passive_checks(); + uint32_t cached_host_check_horizon = config->cached_host_check_horizon(); + bool obsess_over_services = config->obsess_over_services(); + bool enable_predictive_service_dependency_checks = + config->enable_predictive_service_dependency_checks(); + uint32_t cached_service_check_horizon = + config->cached_service_check_horizon(); +#else + uint32_t interval_length = pb_config.interval_length(); + bool accept_passive_service_checks = + pb_config.accept_passive_service_checks(); + bool log_passive_checks = pb_config.log_passive_checks(); + uint32_t cached_host_check_horizon = pb_config.cached_host_check_horizon(); + bool obsess_over_services = pb_config.obsess_over_services(); + bool enable_predictive_service_dependency_checks = + pb_config.enable_predictive_service_dependency_checks(); + uint32_t cached_service_check_horizon = + pb_config.cached_service_check_horizon(); +#endif - engine_logger(dbg_functions, basic) << "handle_async_service_check_result()"; SPDLOG_LOGGER_TRACE(functions_logger, "handle_async_service_check_result()"); /* get the current time */ @@ -1146,7 +1167,7 @@ int service::handle_async_check_result( * skip this service check results if its passive and we aren't accepting * passive check results */ if (queued_check_result.get_check_type() == check_passive) { - if (!config->accept_passive_service_checks()) { + if (!accept_passive_service_checks) { engine_logger(dbg_checks, basic) << "Discarding passive service check result because passive " "service checks are disabled globally."; @@ -1365,7 +1386,7 @@ int service::handle_async_check_result( * commands by getting dropped in checkresults dir */ if (get_check_type() == check_passive) { - if (config->log_passive_checks()) + if (log_passive_checks) engine_logger(log_passive_check, basic) << "PASSIVE SERVICE CHECK: " << _hostname << ";" << 
name() << ";" << _current_state << ";" << get_plugin_output(); @@ -1561,7 +1582,7 @@ int service::handle_async_check_result( if ((!state_change || state_changes_use_cached_state) && hst->has_been_checked() && (static_cast(current_time - hst->get_last_check()) <= - config->cached_host_check_horizon())) { + cached_host_check_horizon)) { engine_logger(dbg_checks, more) << "* Using cached host state: " << hst->get_current_state(); SPDLOG_LOGGER_DEBUG(checks_logger, "* Using cached host state: {}", @@ -1635,7 +1656,7 @@ int service::handle_async_check_result( notify(reason_recovery, "", "", notification_option_none); /* should we obsessive over service checks? */ - if (config->obsess_over_services()) + if (obsess_over_services) obsessive_compulsive_service_check_processor(); /* reset all service variables because its okay now... */ @@ -1649,8 +1670,7 @@ int service::handle_async_check_result( if (reschedule_check) next_service_check = - (time_t)(get_last_check() + - check_interval() * config->interval_length()); + (time_t)(get_last_check() + check_interval() * interval_length); } /*******************************************/ @@ -1676,8 +1696,8 @@ int service::handle_async_check_result( /* only use cached host state if no service state change has occurred */ if ((!state_change || state_changes_use_cached_state) && hst->has_been_checked() && - (static_cast(current_time - hst->get_last_check()) <= - config->cached_host_check_horizon())) { + static_cast(current_time - hst->get_last_check()) <= + cached_host_check_horizon) { /* use current host state as route result */ route_result = hst->get_current_state(); engine_logger(dbg_checks, more) @@ -1834,8 +1854,7 @@ int service::handle_async_check_result( * interval */ if (reschedule_check) next_service_check = - (time_t)(get_last_check() + - check_interval() * config->interval_length()); + (time_t)(get_last_check() + check_interval() * interval_length); /* log the problem as a hard state if the host just went down */ if 
(hard_state_change) { @@ -1866,12 +1885,11 @@ int service::handle_async_check_result( if (reschedule_check) next_service_check = - (time_t)(get_last_check() + - retry_interval() * config->interval_length()); + (time_t)(get_last_check() + retry_interval() * interval_length); } /* perform dependency checks on the second to last check of the service */ - if (config->enable_predictive_service_dependency_checks() && + if (enable_predictive_service_dependency_checks && get_current_attempt() == max_check_attempts() - 1) { engine_logger(dbg_checks, more) << "Looking for services to check for predictive " @@ -1967,12 +1985,11 @@ int service::handle_async_check_result( /* reschedule the next check at the regular interval */ if (reschedule_check) next_service_check = - (time_t)(get_last_check() + - check_interval() * config->interval_length()); + (time_t)(get_last_check() + check_interval() * interval_length); } /* should we obsessive over service checks? */ - if (config->obsess_over_services()) + if (obsess_over_services) obsessive_compulsive_service_check_processor(); } @@ -2071,7 +2088,7 @@ int service::handle_async_check_result( /* we can get by with a cached state, so don't check the service */ if (static_cast(current_time - svc->get_last_check()) <= - config->cached_service_check_horizon()) { + cached_service_check_horizon) { run_async_check = false; /* update check statistics */ @@ -2097,7 +2114,12 @@ int service::handle_async_check_result( * @return Return true on success. 
*/ int service::log_event() { - if (get_state_type() == soft && !config->log_service_retries()) +#ifdef LEGACY_CONF + bool log_service_retries = config->log_service_retries(); +#else + bool log_service_retries = pb_config.log_service_retries(); +#endif + if (get_state_type() == soft && !log_service_retries) return OK; uint32_t log_options{NSLOG_SERVICE_UNKNOWN}; @@ -2135,6 +2157,20 @@ void service::check_for_flapping(bool update, double low_curve_value = 0.75; double high_curve_value = 1.25; + float low_service_flap_threshold; + float high_service_flap_threshold; + bool enable_flap_detection; + +#ifdef LEGACY_CONF + low_service_flap_threshold = config->low_service_flap_threshold(); + high_service_flap_threshold = config->high_service_flap_threshold(); + enable_flap_detection = config->enable_flap_detection(); +#else + low_service_flap_threshold = pb_config.low_service_flap_threshold(); + high_service_flap_threshold = pb_config.high_service_flap_threshold(); + enable_flap_detection = pb_config.enable_flap_detection(); +#endif + /* large install tweaks skips all flap detection logic - including state * change calculation */ @@ -2149,11 +2185,10 @@ void service::check_for_flapping(bool update, name(), _hostname); /* what threshold values should we use (global or service-specific)? */ - low_threshold = (get_low_flap_threshold() <= 0.0) - ? config->low_service_flap_threshold() - : get_low_flap_threshold(); + low_threshold = (get_low_flap_threshold() <= 0.0) ? low_service_flap_threshold + : get_low_flap_threshold(); high_threshold = (get_high_flap_threshold() <= 0.0) - ? config->high_service_flap_threshold() + ? 
high_service_flap_threshold : get_high_flap_threshold(); update_history = update; @@ -2223,7 +2258,7 @@ void service::check_for_flapping(bool update, /* don't do anything if we don't have flap detection enabled on a program-wide * basis */ - if (!config->enable_flap_detection()) + if (!enable_flap_detection) return; /* don't do anything if we don't have flap detection enabled for this service @@ -2278,7 +2313,12 @@ int service::handle_service_event() { max_check_attempts(), nullptr); /* bail out if we shouldn't be running event handlers */ - if (!config->enable_event_handlers()) +#ifdef LEGACY_CONF + bool enable_event_handlers = config->enable_event_handlers(); +#else + bool enable_event_handlers = pb_config.enable_event_handlers(); +#endif + if (!enable_event_handlers) return OK; if (!event_handler_enabled()) return OK; @@ -2316,19 +2356,31 @@ int service::obsessive_compulsive_service_check_processor() { int macro_options = STRIP_ILLEGAL_MACRO_CHARS | ESCAPE_MACRO_CHARS; nagios_macros* mac(get_global_macros()); + bool obsess_over_services; + uint32_t ocsp_timeout; +#ifdef LEGACY_CONF + obsess_over_services = config->obsess_over_services(); + const std::string& ocsp_command = config->ocsp_command(); + ocsp_timeout = config->ocsp_timeout(); +#else + obsess_over_services = pb_config.obsess_over_services(); + const std::string& ocsp_command = pb_config.ocsp_command(); + ocsp_timeout = pb_config.ocsp_timeout(); +#endif + engine_logger(dbg_functions, basic) << "obsessive_compulsive_service_check_processor()"; SPDLOG_LOGGER_TRACE(functions_logger, "obsessive_compulsive_service_check_processor()"); /* bail out if we shouldn't be obsessing */ - if (config->obsess_over_services() == false) + if (!obsess_over_services) return OK; if (!obsess_over()) return OK; /* if there is no valid command, exit */ - if (config->ocsp_command().empty()) + if (ocsp_command.empty()) return ERROR; /* find the associated host */ @@ -2340,7 +2392,7 @@ int 
service::obsessive_compulsive_service_check_processor() { grab_service_macros_r(mac, this); /* get the raw command line */ - get_raw_command_line_r(mac, ocsp_command_ptr, config->ocsp_command().c_str(), + get_raw_command_line_r(mac, ocsp_command_ptr, ocsp_command.c_str(), raw_command, macro_options); if (raw_command.empty()) { clear_volatile_macros_r(mac); @@ -2375,8 +2427,8 @@ int service::obsessive_compulsive_service_check_processor() { /* run the command */ try { std::string tmp; - my_system_r(mac, processed_command, config->ocsp_timeout(), - &early_timeout, &exectime, tmp, 0); + my_system_r(mac, processed_command, ocsp_timeout, &early_timeout, + &exectime, tmp, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) << "Error: can't execute compulsive service processor command line '" @@ -2401,12 +2453,12 @@ int service::obsessive_compulsive_service_check_processor() { engine_logger(log_runtime_warning, basic) << "Warning: OCSP command '" << processed_command << "' for service '" << name() << "' on host '" << _hostname << "' timed out after " - << config->ocsp_timeout() << " seconds"; + << ocsp_timeout << " seconds"; SPDLOG_LOGGER_WARN( runtime_logger, "Warning: OCSP command '{}' for service '{}' on host '{}' timed out " "after {} seconds", - processed_command, name(), _hostname, config->ocsp_timeout()); + processed_command, name(), _hostname, ocsp_timeout); return OK; } @@ -2414,7 +2466,12 @@ int service::obsessive_compulsive_service_check_processor() { /* updates service performance data */ int service::update_service_performance_data() { /* should we be processing performance data for anything? */ - if (!config->process_performance_data()) +#ifdef LEGACY_CONF + bool process_pd = config->process_performance_data(); +#else + bool process_pd = pb_config.process_performance_data(); +#endif + if (!process_pd) return OK; /* should we process performance data for this service? 
*/ @@ -2465,13 +2522,18 @@ int service::run_scheduled_check(int check_options, double latency) { * if service has no check interval, schedule it again for 5 * minutes from now * */ - if (current_time >= preferred_time) + if (current_time >= preferred_time) { +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); +#else + uint32_t interval_length = pb_config.interval_length(); +#endif preferred_time = current_time + static_cast(check_interval() <= 0 ? 300 - : check_interval() * - config->interval_length()); + : check_interval() * interval_length); + } // Make sure we rescheduled the next service check at a valid time. { @@ -2613,9 +2675,15 @@ int service::run_async_check_local(int check_options, // Service check was cancelled by NEB module. reschedule check later. if (NEBERROR_CALLBACKCANCEL == res) { - if (preferred_time != nullptr) + if (preferred_time != nullptr) { +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); +#else + uint32_t interval_length = pb_config.interval_length(); +#endif *preferred_time += - static_cast(check_interval() * config->interval_length()); + static_cast(check_interval() * interval_length); + } engine_logger(log_runtime_error, basic) << "Error: Some broker module cancelled check of service '" << description() << "' on host '" << get_hostname(); @@ -2708,8 +2776,15 @@ int service::run_async_check_local(int check_options, checks::checker::instance().add_check_result_to_reap(check_result_info); }; +#ifdef LEGACY_CONF + bool use_host_down_disable_service_checks = + config->use_host_down_disable_service_checks(); +#else + bool use_host_down_disable_service_checks = + pb_config.host_down_disable_service_checks(); +#endif bool has_to_execute_check = true; - if (config->use_host_down_disable_service_checks()) { + if (use_host_down_disable_service_checks) { auto hst = host::hosts_by_id.find(_host_id); if (hst != host::hosts_by_id.end() && hst->second->get_current_state() != host::state_up) { @@ -2741,9 
+2816,14 @@ int service::run_async_check_local(int check_options, retry = false; try { // Run command. - uint64_t id = get_check_command_ptr()->run( - processed_cmd, *macros, config->service_check_timeout(), - check_result_info, this); +#ifdef LEGACY_CONF + uint32_t service_check_timeout = config->service_check_timeout(); +#else + uint32_t service_check_timeout = pb_config.service_check_timeout(); +#endif + uint64_t id = get_check_command_ptr()->run(processed_cmd, *macros, + service_check_timeout, + check_result_info, this); SPDLOG_LOGGER_DEBUG(checks_logger, "run id={} {} for service {} host {}", id, processed_cmd, _service_id, _hostname); @@ -3124,12 +3204,15 @@ bool service::verify_check_viability(int check_options, SPDLOG_LOGGER_TRACE(functions_logger, "check_service_check_viability()"); /* get the check interval to use if we need to reschedule the check */ +#ifdef LEGACY_CONF + uint32_t interval_length = config->interval_length(); +#else + uint32_t interval_length = pb_config.interval_length(); +#endif if (get_state_type() == soft && _current_state != service::state_ok) - check_interval = - static_cast(retry_interval() * config->interval_length()); + check_interval = static_cast(retry_interval() * interval_length); else - check_interval = - static_cast(this->check_interval() * config->interval_length()); + check_interval = static_cast(this->check_interval() * interval_length); /* get the current time */ time(¤t_time); @@ -3277,7 +3360,12 @@ int service::notify_contact(nagios_macros* mac, processed_command); /* log the notification to program log file */ - if (config->log_notifications()) { +#ifdef LEGACY_CONF + bool log_notifications = config->log_notifications(); +#else + bool log_notifications = pb_config.log_notifications(); +#endif + if (log_notifications) { char const* service_state_str("UNKNOWN"); if ((unsigned int)_current_state < tab_service_states.size()) service_state_str = tab_service_states[_current_state].second.c_str(); @@ -3314,9 +3402,14 @@ int 
service::notify_contact(nagios_macros* mac, /* run the notification command */ if (command_is_allowed_by_whitelist(processed_command, NOTIF_TYPE)) { +#ifdef LEGACY_CONF + uint32_t notification_timeout = config->notification_timeout(); +#else + uint32_t notification_timeout = pb_config.notification_timeout(); +#endif try { std::string tmp; - my_system_r(mac, processed_command, config->notification_timeout(), + my_system_r(mac, processed_command, notification_timeout, &early_timeout, &exectime, tmp, 0); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) @@ -3339,12 +3432,11 @@ int service::notify_contact(nagios_macros* mac, engine_logger(log_service_notification | log_runtime_warning, basic) << "Warning: Contact '" << cntct->get_name() << "' service notification command '" << processed_command - << "' timed out after " << config->notification_timeout() - << " seconds"; + << "' timed out after " << notification_timeout << " seconds"; notifications_logger->info( "Warning: Contact '{}' service notification command '{}' timed out " "after {} seconds", - cntct->get_name(), processed_command, config->notification_timeout()); + cntct->get_name(), processed_command, notification_timeout); } /* get end time */ @@ -3470,17 +3562,30 @@ bool service::is_result_fresh(time_t current_time, int log_this) { "Checking freshness of service '{}' on host '{}'...", this->description(), this->get_hostname()); + uint32_t interval_length; + int32_t additional_freshness_latency; + uint32_t max_service_check_spread; +#ifdef LEGACY_CONF + interval_length = config->interval_length(); + additional_freshness_latency = config->additional_freshness_latency(); + max_service_check_spread = config->max_service_check_spread(); +#else + interval_length = pb_config.interval_length(); + additional_freshness_latency = pb_config.additional_freshness_latency(); + max_service_check_spread = pb_config.max_service_check_spread(); +#endif + /* use user-supplied freshness threshold or 
auto-calculate a freshness * threshold to use? */ if (get_freshness_threshold() == 0) { if (get_state_type() == hard || this->_current_state == service::state_ok) - freshness_threshold = static_cast( - check_interval() * config->interval_length() + get_latency() + - config->additional_freshness_latency()); + freshness_threshold = + static_cast(check_interval() * interval_length + get_latency() + + additional_freshness_latency); else - freshness_threshold = static_cast( - this->retry_interval() * config->interval_length() + get_latency() + - config->additional_freshness_latency()); + freshness_threshold = + static_cast(this->retry_interval() * interval_length + + get_latency() + additional_freshness_latency); } else freshness_threshold = this->get_freshness_threshold(); @@ -3509,8 +3614,7 @@ bool service::is_result_fresh(time_t current_time, int log_this) { else if (this->active_checks_enabled() && event_start > get_last_check() && this->get_freshness_threshold() == 0) expiration_time = (time_t)(event_start + freshness_threshold + - (config->max_service_check_spread() * - config->interval_length())); + max_service_check_spread * interval_length); else expiration_time = (time_t)(get_last_check() + freshness_threshold); @@ -3667,11 +3771,16 @@ bool service::authorized_by_dependencies( !check_time_against_period(current_time, dep->dependency_period_ptr)) return true; - /* Get the status to use (use last hard state if it's currently in a soft - * state) */ + /* Get the status to use (use last hard state if it's currently in a soft + * state) */ +#ifdef LEGACY_CONF + bool soft_state_dependencies = config->soft_state_dependencies(); +#else + bool soft_state_dependencies = pb_config.soft_state_dependencies(); +#endif service_state state = (dep->master_service_ptr->get_state_type() == notifier::soft && - !config->soft_state_dependencies()) + !soft_state_dependencies) ? 
dep->master_service_ptr->get_last_hard_state() : dep->master_service_ptr->get_current_state(); @@ -3705,6 +3814,15 @@ void service::check_for_orphaned() { /* get the current time */ time(&current_time); + uint32_t service_check_timeout; + uint32_t check_reaper_interval; +#ifdef LEGACY_CONF + service_check_timeout = config->service_check_timeout(); + check_reaper_interval = config->check_reaper_interval(); +#else + service_check_timeout = pb_config.service_check_timeout(); + check_reaper_interval = pb_config.check_reaper_interval(); +#endif /* check all services... */ for (service_map::iterator it(service::services.begin()), end(service::services.end()); @@ -3717,8 +3835,7 @@ void service::check_for_orphaned() { * 10 minutes slack time) */ expected_time = (time_t)(it->second->get_next_check() + it->second->get_latency() + - config->service_check_timeout() + - config->check_reaper_interval() + 600); + service_check_timeout + check_reaper_interval + 600); /* this service was supposed to have executed a while ago, but for some * reason the results haven't come back in...
*/ @@ -3772,7 +3889,13 @@ void service::check_result_freshness() { "Checking the freshness of service check results..."); /* bail out if we're not supposed to be checking freshness */ - if (!config->check_service_freshness()) { + +#ifdef LEGACY_CONF + bool check_service_freshness = config->check_service_freshness(); +#else + bool check_service_freshness = pb_config.check_service_freshness(); +#endif + if (!check_service_freshness) { engine_logger(dbg_checks, more) << "Service freshness checking is disabled."; SPDLOG_LOGGER_DEBUG(checks_logger, @@ -3837,8 +3960,13 @@ const std::string& service::get_current_state_as_string() const { } bool service::get_notify_on_current_state() const { +#ifdef LEGACY_CONF + bool soft_state_dependencies = config->soft_state_dependencies(); +#else + bool soft_state_dependencies = pb_config.soft_state_dependencies(); +#endif if (_host_ptr->get_current_state() != host::state_up && - (_host_ptr->get_state_type() || config->soft_state_dependencies())) + (_host_ptr->get_state_type() || soft_state_dependencies)) return false; notification_flag type[]{ok, warning, critical, unknown}; return get_notify_on(type[get_current_state()]); diff --git a/engine/src/servicedependency.cc b/engine/src/servicedependency.cc index 07de74d3558..ad1e657b427 100644 --- a/engine/src/servicedependency.cc +++ b/engine/src/servicedependency.cc @@ -378,6 +378,7 @@ void servicedependency::resolve(uint32_t& w [[maybe_unused]], uint32_t& e) { } } +#ifdef LEGACY_CONF /** * Find a service dependency from its key. * @@ -400,3 +401,26 @@ servicedependency_mmap::iterator servicedependency::servicedependencies_find( } return p.first == p.second ? servicedependencies.end() : p.first; } +#else +/** + * @brief Find a service dependency from the given key. + * + * @param key A tuple containing a host name, a service description and a hash + * matching the service dependency. + * + * @return Iterator to the element if found, servicedependencies().end() + * otherwise. 
+ */ +servicedependency_mmap::iterator servicedependency::servicedependencies_find( + const std::tuple& key) { + size_t k = std::get<2>(key); + std::pair + p = servicedependencies.equal_range({std::get<0>(key), std::get<1>(key)}); + while (p.first != p.second) { + if (p.first->second->internal_key() == k) + break; + ++p.first; + } + return p.first == p.second ? servicedependencies.end() : p.first; +} +#endif diff --git a/engine/src/serviceescalation.cc b/engine/src/serviceescalation.cc index 27751dfc13f..e982d4eba2b 100644 --- a/engine/src/serviceescalation.cc +++ b/engine/src/serviceescalation.cc @@ -123,6 +123,7 @@ void serviceescalation::resolve(uint32_t& w [[maybe_unused]], uint32_t& e) { } } +#ifdef LEGACY_CONF /** * @brief Checks that this serviceescalation corresponds to the Configuration * object obj. This function doesn't check contactgroups as it is usually used @@ -158,3 +159,40 @@ bool serviceescalation::matches( return true; } +#else +/** + * @brief Checks that this serviceescalation corresponds to the Configuration + * object obj. This function doesn't check contactgroups as it is usually used + * to modify them. + * + * @param obj A service escalation configuration object. + * + * @return A boolean that is True if they match. + */ +bool serviceescalation::matches( + const configuration::Serviceescalation& obj) const { + uint32_t escalate_on = + ((obj.escalation_options() & configuration::action_se_warning) + ? notifier::warning + : notifier::none) | + ((obj.escalation_options() & configuration::action_se_unknown) + ? notifier::unknown + : notifier::none) | + ((obj.escalation_options() & configuration::action_se_critical) + ? notifier::critical + : notifier::none) | + ((obj.escalation_options() & configuration::action_se_recovery) + ? 
notifier::ok + : notifier::none); + if (_hostname != obj.hosts().data(0) || + _description != obj.service_description().data(0) || + get_first_notification() != obj.first_notification() || + get_last_notification() != obj.last_notification() || + get_notification_interval() != obj.notification_interval() || + get_escalation_period() != obj.escalation_period() || + get_escalate_on() != escalate_on) + return false; + + return true; +} +#endif diff --git a/engine/src/shared.cc b/engine/src/shared.cc index 76afe843019..71a4c1ece27 100644 --- a/engine/src/shared.cc +++ b/engine/src/shared.cc @@ -1,6 +1,7 @@ /** * Copyright 1999-2011 Ethan Galstad * Copyright 2011-2013 Merethis + * Copyright 2023-2024 Centreon * * This file is part of Centreon Engine. * @@ -141,6 +142,13 @@ void get_datetime_string(time_t const* raw_time, char const* tzone(tm_s.tm_isdst ? tzname[1] : tzname[0]); #endif /* HAVE_TM_ZONE || HAVE_TZNAME */ + int32_t date_format; +#ifdef LEGACY_CONF + date_format = config->date_format(); +#else + date_format = pb_config.date_format(); +#endif + /* ctime() style date/time */ if (type == LONG_DATE_TIME) snprintf(buffer, buffer_length, "%s %s %d %02d:%02d:%02d %s %d", @@ -149,16 +157,15 @@ void get_datetime_string(time_t const* raw_time, /* short date/time */ else if (type == SHORT_DATE_TIME) { - if (config->date_format() == DATE_FORMAT_EURO) + if (date_format == DATE_FORMAT_EURO) snprintf(buffer, buffer_length, "%02d-%02d-%04d %02d:%02d:%02d", day, month, year, hour, minute, second); - else if (config->date_format() == DATE_FORMAT_ISO8601 || - config->date_format() == DATE_FORMAT_STRICT_ISO8601) - snprintf( - buffer, buffer_length, "%04d-%02d-%02d%c%02d:%02d:%02d", year, month, - day, - (config->date_format() == DATE_FORMAT_STRICT_ISO8601) ? 
'T' : ' ', - hour, minute, second); + else if (date_format == DATE_FORMAT_ISO8601 || + date_format == DATE_FORMAT_STRICT_ISO8601) + snprintf(buffer, buffer_length, "%04d-%02d-%02d%c%02d:%02d:%02d", year, + month, day, + (date_format == DATE_FORMAT_STRICT_ISO8601) ? 'T' : ' ', hour, + minute, second); else snprintf(buffer, buffer_length, "%02d-%02d-%04d %02d:%02d:%02d", month, day, year, hour, minute, second); @@ -166,10 +173,10 @@ void get_datetime_string(time_t const* raw_time, /* short date */ else if (type == SHORT_DATE) { - if (config->date_format() == DATE_FORMAT_EURO) + if (date_format == DATE_FORMAT_EURO) snprintf(buffer, buffer_length, "%02d-%02d-%04d", day, month, year); - else if (config->date_format() == DATE_FORMAT_ISO8601 || - config->date_format() == DATE_FORMAT_STRICT_ISO8601) + else if (date_format == DATE_FORMAT_ISO8601 || + date_format == DATE_FORMAT_STRICT_ISO8601) snprintf(buffer, buffer_length, "%04d-%02d-%02d", year, month, day); else snprintf(buffer, buffer_length, "%02d-%02d-%04d", month, day, year); diff --git a/engine/src/statistics.cc b/engine/src/statistics.cc index 37959b07f90..443c2bd0ec8 100644 --- a/engine/src/statistics.cc +++ b/engine/src/statistics.cc @@ -54,6 +54,7 @@ pid_t statistics::get_pid() const noexcept { * * @return A boolean telling if the struct has been filled. 
*/ +#ifdef LEGACY_CONF bool statistics::get_external_command_buffer_stats( buffer_stats& retval) const noexcept { if (config->check_external_commands()) { @@ -64,3 +65,15 @@ bool statistics::get_external_command_buffer_stats( } else return false; } +#else +bool statistics::get_external_command_buffer_stats( + buffer_stats& retval) const noexcept { + if (pb_config.check_external_commands()) { + retval.used = external_command_buffer.size(); + retval.high = external_command_buffer.high(); + retval.total = pb_config.external_command_buffer_slots(); + return true; + } else + return false; +} +#endif diff --git a/engine/src/string.cc b/engine/src/string.cc index 595ecb98b3a..b4cc09d9773 100644 --- a/engine/src/string.cc +++ b/engine/src/string.cc @@ -449,3 +449,16 @@ void string::unescape(char* buffer) { *buffer = 0; } } + +/** + * @brief Unescape the string buffer. Works with \t, \n, \r and \\. + * The buffer is directly changed. No copy is made. + * + * @param str in out modified string + */ +void string::unescape(std::string& str) { + boost::replace_all(str, "\\n", "\n"); + boost::replace_all(str, "\\r", "\r"); + boost::replace_all(str, "\\t", "\t"); + boost::replace_all(str, "\\\\", "\\"); +} diff --git a/engine/src/timeperiod.cc b/engine/src/timeperiod.cc index 8f40a90edba..f2d116a0f1f 100644 --- a/engine/src/timeperiod.cc +++ b/engine/src/timeperiod.cc @@ -44,7 +44,7 @@ timeperiod_map timeperiod::timeperiods; * @param[in] alias Time period alias. * */ - +#ifdef LEGACY_CONF timeperiod::timeperiod(std::string const& name, std::string const& alias) : _name{name}, _alias{alias} { if (name.empty() || alias.empty()) { @@ -64,6 +64,94 @@ timeperiod::timeperiod(std::string const& name, std::string const& alias) throw engine_error() << "Could not register time period '" << name << "'"; } } +#else +/** + * @brief Constructor of a timeperiod from its configuration protobuf object. + * + * @param obj The configuration protobuf object.
+ */ +timeperiod::timeperiod(const configuration::Timeperiod& obj) + : _name{obj.timeperiod_name()}, _alias{obj.alias()} { + if (_name.empty() || _alias.empty()) { + engine_logger(log_config_error, basic) + << "Error: Name or alias for timeperiod is NULL"; + config_logger->error("Error: Name or alias for timeperiod is NULL"); + throw engine_error() << "Could not register time period '" << _name << "'"; + } + + // Check if the timeperiod already exist. + timeperiod_map::const_iterator it{timeperiod::timeperiods.find(_name)}; + if (it != timeperiod::timeperiods.end()) { + config_logger->error("Error: Timeperiod '{}' has already been defined", + _name); + throw engine_error() << "Could not register time period '" << _name << "'"; + } + + // Fill time period structure. + for (auto& r : obj.timeranges().sunday()) + days[0].emplace_back(r.range_start(), r.range_end()); + for (auto& r : obj.timeranges().monday()) + days[1].emplace_back(r.range_start(), r.range_end()); + for (auto& r : obj.timeranges().tuesday()) + days[2].emplace_back(r.range_start(), r.range_end()); + for (auto& r : obj.timeranges().wednesday()) + days[3].emplace_back(r.range_start(), r.range_end()); + for (auto& r : obj.timeranges().thursday()) + days[4].emplace_back(r.range_start(), r.range_end()); + for (auto& r : obj.timeranges().friday()) + days[5].emplace_back(r.range_start(), r.range_end()); + for (auto& r : obj.timeranges().saturday()) + days[6].emplace_back(r.range_start(), r.range_end()); + + auto fill_exceptions = [this](const auto& obj_daterange, int idx) { + for (auto& r : obj_daterange) { + exceptions[idx].emplace_back(static_cast(r.type()), + r.syear(), r.smon(), r.smday(), r.swday(), + r.swday_offset(), r.eyear(), r.emon(), + r.emday(), r.ewday(), r.ewday_offset(), + r.skip_interval(), r.timerange()); + } + }; + + fill_exceptions(obj.exceptions().calendar_date(), 0); + fill_exceptions(obj.exceptions().month_date(), 1); + fill_exceptions(obj.exceptions().month_day(), 2); + 
fill_exceptions(obj.exceptions().month_week_day(), 3); + fill_exceptions(obj.exceptions().week_day(), 4); + + set_exclusions(obj.exclude()); +} + +void timeperiod::set_exclusions(const configuration::StringSet& exclusions) { + _exclusions.clear(); + for (auto& s : exclusions.data()) + _exclusions.emplace(s, nullptr); +} + +void timeperiod::set_exceptions(const configuration::ExceptionArray& array) { + for (auto& e : exceptions) + e.clear(); + + auto fill_exceptions = [this](const auto& obj_daterange, int idx) { + for (auto& r : obj_daterange) { + // std::list tr; + // for (auto& t : r.timerange()) + // tr.emplace_back(t.range_start(), t.range_end()); + exceptions[idx].emplace_back(static_cast(r.type()), + r.syear(), r.smon(), r.smday(), r.swday(), + r.swday_offset(), r.eyear(), r.emon(), + r.emday(), r.ewday(), r.ewday_offset(), + r.skip_interval(), r.timerange()); + } + }; + + fill_exceptions(array.calendar_date(), 0); + fill_exceptions(array.month_date(), 1); + fill_exceptions(array.month_day(), 2); + fill_exceptions(array.month_week_day(), 3); + fill_exceptions(array.week_day(), 4); +} +#endif void timeperiod::set_name(std::string const& name) { _name = name; @@ -85,7 +173,7 @@ bool timeperiod::operator==(timeperiod const& obj) noexcept { if (_name == obj._name && _alias == obj._alias && (_exclusions.size() == obj._exclusions.size() && std::equal(_exclusions.begin(), _exclusions.end(), - obj._exclusions.begin()))) { + obj._exclusions.begin(), obj._exclusions.end()))) { for (uint32_t i{0}; i < exceptions.size(); ++i) if (exceptions[i] != obj.exceptions[i]) return false; @@ -1158,3 +1246,25 @@ void timeperiod::resolve(uint32_t& w __attribute__((unused)), uint32_t& e) { throw engine_error() << "Cannot resolve time period '" << _name << "'"; } } + +#ifndef LEGACY_CONF +void timeperiod::set_days(const configuration::DaysArray& array) { + for (auto& d : days) + d.clear(); + + for (auto& r : array.sunday()) + days[0].emplace_back(r.range_start(), r.range_end()); + for 
(auto& r : array.monday()) + days[1].emplace_back(r.range_start(), r.range_end()); + for (auto& r : array.tuesday()) + days[2].emplace_back(r.range_start(), r.range_end()); + for (auto& r : array.wednesday()) + days[3].emplace_back(r.range_start(), r.range_end()); + for (auto& r : array.thursday()) + days[4].emplace_back(r.range_start(), r.range_end()); + for (auto& r : array.friday()) + days[5].emplace_back(r.range_start(), r.range_end()); + for (auto& r : array.saturday()) + days[6].emplace_back(r.range_start(), r.range_end()); +} +#endif diff --git a/engine/src/timerange.cc b/engine/src/timerange.cc index a0e069420af..e4ead3a2669 100644 --- a/engine/src/timerange.cc +++ b/engine/src/timerange.cc @@ -32,14 +32,14 @@ timerange::timerange(uint64_t start, uint64_t end) { config_logger->error("Error: Start time {} is not valid for timeperiod", start); throw engine_error() << "Could not create timerange " - << "start'" << start << "' end '" << end << "'"; + << "start '" << start << "' end '" << end << "'"; } if (end > 86400) { engine_logger(log_config_error, basic) - << "Error: End time " << end << " is not value for timeperiod"; - config_logger->error("Error: End time {} is not value for timeperiod", end); + << "Error: End time " << end << " is not valid for timeperiod"; + config_logger->error("Error: End time {} is not valid for timeperiod", end); throw engine_error() << "Could not create timerange " - << "start'" << start << "' end '" << end << "'"; + << "start '" << start << "' end '" << end << "'"; } _range_start = start; diff --git a/engine/src/xsddefault.cc b/engine/src/xsddefault.cc index 93a219b608d..6c5ec4336d4 100644 --- a/engine/src/xsddefault.cc +++ b/engine/src/xsddefault.cc @@ -49,21 +49,26 @@ static int xsddefault_status_log_fd(-1); /* initialize status data */ int xsddefault_initialize_status_data() { - if (verify_config || config->status_file().empty()) +#ifdef LEGACY_CONF + const std::string& status_file = config->status_file(); +#else + const 
std::string& status_file = pb_config.status_file(); +#endif + if (verify_config || status_file.empty()) return OK; if (xsddefault_status_log_fd == -1) { // delete the old status log (it might not exist). - unlink(config->status_file().c_str()); + unlink(status_file.c_str()); if ((xsddefault_status_log_fd = - open(config->status_file().c_str(), O_WRONLY | O_CREAT, + open(status_file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP)) == -1) { engine_logger(engine::logging::log_runtime_error, engine::logging::basic) - << "Error: Unable to open status data file '" << config->status_file() + << "Error: Unable to open status data file '" << status_file << "': " << strerror(errno); runtime_logger->error("Error: Unable to open status data file '{}': {}", - config->status_file(), strerror(errno)); + status_file, strerror(errno)); return ERROR; } set_cloexec(xsddefault_status_log_fd); @@ -76,9 +81,14 @@ int xsddefault_cleanup_status_data(int delete_status_data) { if (verify_config) return OK; +#ifdef LEGACY_CONF + const std::string& status_file = config->status_file(); +#else + const std::string& status_file = pb_config.status_file(); +#endif // delete the status log. 
- if (delete_status_data && !config->status_file().empty()) { - if (unlink(config->status_file().c_str())) + if (delete_status_data && !status_file.empty()) { + if (unlink(status_file.c_str())) return ERROR; } @@ -105,8 +115,44 @@ int xsddefault_save_status_data() { << "save_status_data()"; functions_logger->trace("save_status_data()"); +#ifdef LEGACY_CONF + bool check_external_commands = config->check_external_commands(); + bool enable_notifications = config->enable_notifications(); + bool execute_service_checks = config->execute_service_checks(); + bool accept_passive_service_checks = config->accept_passive_service_checks(); + bool execute_host_checks = config->execute_host_checks(); + bool accept_passive_host_checks = config->accept_passive_host_checks(); + bool enable_event_handlers = config->enable_event_handlers(); + bool obsess_over_services = config->obsess_over_services(); + bool obsess_over_hosts = config->obsess_over_hosts(); + bool check_service_freshness = config->check_service_freshness(); + bool check_host_freshness = config->check_host_freshness(); + bool enable_flap_detection = config->enable_flap_detection(); + bool process_performance_data = config->process_performance_data(); + const std::string& global_host_event_handler = config->global_host_event_handler(); + const std::string& global_service_event_handler = config->global_service_event_handler(); + uint32_t external_command_buffer_slots = config->external_command_buffer_slots(); +#else + bool check_external_commands = pb_config.check_external_commands(); + bool enable_notifications = pb_config.enable_notifications(); + bool execute_service_checks = pb_config.execute_service_checks(); + bool accept_passive_service_checks = pb_config.accept_passive_service_checks(); + bool execute_host_checks = pb_config.execute_host_checks(); + bool accept_passive_host_checks = pb_config.accept_passive_host_checks(); + bool enable_event_handlers = pb_config.enable_event_handlers(); + bool obsess_over_services 
= pb_config.obsess_over_services(); + bool obsess_over_hosts = pb_config.obsess_over_hosts(); + bool check_service_freshness = pb_config.check_service_freshness(); + bool check_host_freshness = pb_config.check_host_freshness(); + bool enable_flap_detection = pb_config.enable_flap_detection(); + bool process_performance_data = pb_config.process_performance_data(); + const std::string& global_host_event_handler = pb_config.global_host_event_handler(); + const std::string& global_service_event_handler = pb_config.global_service_event_handler(); + uint32_t external_command_buffer_slots = pb_config.external_command_buffer_slots(); +#endif + // get number of items in the command buffer - if (config->check_external_commands()) { + if (check_external_commands) { used_external_command_buffer_slots = external_command_buffer.size(); high_external_command_buffer_slots = external_command_buffer.high(); } @@ -155,46 +201,46 @@ int xsddefault_save_status_data() { << static_cast(last_log_rotation) << "\n" "\tenable_notifications=" - << config->enable_notifications() + << enable_notifications << "\n" "\tactive_service_checks_enabled=" - << config->execute_service_checks() + << execute_service_checks << "\n" "\tpassive_service_checks_enabled=" - << config->accept_passive_service_checks() + << accept_passive_service_checks << "\n" "\tactive_host_checks_enabled=" - << config->execute_host_checks() + << execute_host_checks << "\n" "\tpassive_host_checks_enabled=" - << config->accept_passive_host_checks() + << accept_passive_host_checks << "\n" "\tenable_event_handlers=" - << config->enable_event_handlers() + << enable_event_handlers << "\n" "\tobsess_over_services=" - << config->obsess_over_services() + << obsess_over_services << "\n" "\tobsess_over_hosts=" - << config->obsess_over_hosts() + << obsess_over_hosts << "\n" "\tcheck_service_freshness=" - << config->check_service_freshness() + << check_service_freshness << "\n" "\tcheck_host_freshness=" - << config->check_host_freshness() + 
<< check_host_freshness << "\n" "\tenable_flap_detection=" - << config->enable_flap_detection() + << enable_flap_detection << "\n" "\tprocess_performance_data=" - << config->process_performance_data() + << process_performance_data << "\n" "\tglobal_host_event_handler=" - << config->global_host_event_handler().c_str() + << global_host_event_handler << "\n" "\tglobal_service_event_handler=" - << config->global_service_event_handler().c_str() + << global_service_event_handler << "\n" "\tnext_comment_id=" << comment::get_next_comment_id() @@ -209,7 +255,7 @@ int xsddefault_save_status_data() { << next_notification_id << "\n" "\ttotal_external_command_buffer_slots=" - << config->external_command_buffer_slots() + << external_command_buffer_slots << "\n" "\tused_external_command_buffer_slots=" << used_external_command_buffer_slots @@ -723,16 +769,22 @@ int xsddefault_save_status_data() { // Write data in buffer. stream.flush(); +#ifdef LEGACY_CONF + const std::string& status_file = config->status_file(); +#else + const std::string& status_file = pb_config.status_file(); +#endif + // Prepare status file for overwrite. 
if ((ftruncate(xsddefault_status_log_fd, 0) == -1) || (fsync(xsddefault_status_log_fd) == -1) || (lseek(xsddefault_status_log_fd, 0, SEEK_SET) == (off_t)-1)) { char const* msg(strerror(errno)); engine_logger(engine::logging::log_runtime_error, engine::logging::basic) - << "Error: Unable to update status data file '" << config->status_file() + << "Error: Unable to update status data file '" << status_file << "': " << msg; runtime_logger->error("Error: Unable to update status data file '{}': {}", - config->status_file(), msg); + status_file, msg); return ERROR; } @@ -746,9 +798,9 @@ int xsddefault_save_status_data() { char const* msg(strerror(errno)); engine_logger(engine::logging::log_runtime_error, engine::logging::basic) << "Error: Unable to update status data file '" - << config->status_file() << "': " << msg; + << status_file << "': " << msg; runtime_logger->error("Error: Unable to update status data file '{}': {}", - config->status_file(), msg); + status_file, msg); return ERROR; } data_ptr += wb; diff --git a/engine/tests/CMakeLists.txt b/engine/tests/CMakeLists.txt old mode 100755 new mode 100644 index 56d3122b9ee..cd114a52211 --- a/engine/tests/CMakeLists.txt +++ b/engine/tests/CMakeLists.txt @@ -23,15 +23,15 @@ if(WITH_TESTING) set(INC_DIR "${MODULE_DIR}/inc/com/centreon/engine/modules/external_commands") set(MODULE_DIR_OTL "${PROJECT_SOURCE_DIR}/modules/opentelemetry") - include_directories(${PROJECT_SOURCE_DIR} ${MODULE_DIR}/inc - ${MODULE_DIR_OTL}/inc - ${CMAKE_SOURCE_DIR}/bbdo - ${CMAKE_SOURCE_DIR}/common/http/inc) + include_directories( + ${PROJECT_SOURCE_DIR} ${MODULE_DIR}/inc ${MODULE_DIR_OTL}/inc + ${CMAKE_SOURCE_DIR}/bbdo ${CMAKE_SOURCE_DIR}/common/http/inc) # Set directory. 
set(TESTS_DIR "${PROJECT_SOURCE_DIR}/tests") include_directories(${PROJECT_SOURCE_DIR}/enginerpc) - add_definitions(-DENGINERPC_TESTS_PATH="${TESTS_DIR}/enginerpc") + add_definitions("-DENGINERPC_TESTS_PATH=${TESTS_DIR}/enginerpc" + "-DENGINE_CFG_TEST=\"${TESTS_DIR}/cfg_files\"") add_executable(rpc_client_engine ${TESTS_DIR}/enginerpc/client.cc) @@ -53,94 +53,16 @@ if(WITH_TESTING) add_executable(bin_connector_test_run "${TESTS_DIR}/commands/bin_connector_test_run.cc") + if (LEGACY_ENGINE) target_link_libraries(bin_connector_test_run cce_core pthread) target_precompile_headers(bin_connector_test_run REUSE_FROM cce_core) - - set(ut_sources - # Sources. - "${TESTS_DIR}/parse-check-output.cc" - "${TESTS_DIR}/checks/service_check.cc" - "${TESTS_DIR}/checks/service_retention.cc" - "${TESTS_DIR}/checks/anomalydetection.cc" - "${TESTS_DIR}/commands/simple-command.cc" - "${TESTS_DIR}/commands/connector.cc" - "${TESTS_DIR}/commands/environment.cc" - "${TESTS_DIR}/configuration/applier/applier-anomalydetection.cc" - "${TESTS_DIR}/configuration/applier/applier-command.cc" - "${TESTS_DIR}/configuration/applier/applier-connector.cc" - "${TESTS_DIR}/configuration/applier/applier-contact.cc" - "${TESTS_DIR}/configuration/applier/applier-contactgroup.cc" - "${TESTS_DIR}/configuration/applier/applier-global.cc" - "${TESTS_DIR}/configuration/applier/applier-log.cc" - "${TESTS_DIR}/configuration/applier/applier-host.cc" - "${TESTS_DIR}/configuration/applier/applier-hostescalation.cc" - "${TESTS_DIR}/configuration/applier/applier-hostdependency.cc" - "${TESTS_DIR}/configuration/applier/applier-hostgroup.cc" - "${TESTS_DIR}/configuration/applier/applier-service.cc" - "${TESTS_DIR}/configuration/applier/applier-serviceescalation.cc" - "${TESTS_DIR}/configuration/applier/applier-servicegroup.cc" - "${TESTS_DIR}/configuration/applier/applier-state.cc" - "${TESTS_DIR}/configuration/contact.cc" - "${TESTS_DIR}/configuration/host.cc" - "${TESTS_DIR}/configuration/object.cc" - 
"${TESTS_DIR}/configuration/service.cc" - "${TESTS_DIR}/configuration/severity.cc" - "${TESTS_DIR}/configuration/tag.cc" - "${TESTS_DIR}/configuration/timeperiod-test.cc" - "${TESTS_DIR}/configuration/whitelist-test.cc" - "${TESTS_DIR}/contacts/contactgroup-config.cc" - "${TESTS_DIR}/contacts/simple-contactgroup.cc" - "${TESTS_DIR}/custom_vars/extcmd.cc" - "${TESTS_DIR}/downtimes/downtime.cc" - "${TESTS_DIR}/downtimes/downtime_finder.cc" - "${TESTS_DIR}/enginerpc/enginerpc.cc" - "${TESTS_DIR}/helper.cc" - "${TESTS_DIR}/macros/macro.cc" - "${TESTS_DIR}/macros/macro_hostname.cc" - "${TESTS_DIR}/macros/macro_service.cc" - "${TESTS_DIR}/external_commands/anomalydetection.cc" - "${TESTS_DIR}/external_commands/host.cc" - "${TESTS_DIR}/external_commands/service.cc" - "${TESTS_DIR}/main.cc" - "${TESTS_DIR}/loop/loop.cc" - "${TESTS_DIR}/notifications/host_downtime_notification.cc" - "${TESTS_DIR}/notifications/host_flapping_notification.cc" - "${TESTS_DIR}/notifications/host_normal_notification.cc" - "${TESTS_DIR}/notifications/host_recovery_notification.cc" - "${TESTS_DIR}/notifications/service_normal_notification.cc" - "${TESTS_DIR}/notifications/service_timeperiod_notification.cc" - "${TESTS_DIR}/notifications/service_flapping_notification.cc" - "${TESTS_DIR}/notifications/service_downtime_notification_test.cc" - "${TESTS_DIR}/opentelemetry/agent_check_result_builder_test.cc" - "${TESTS_DIR}/opentelemetry/grpc_config_test.cc" - "${TESTS_DIR}/opentelemetry/host_serv_extractor_test.cc" - "${TESTS_DIR}/opentelemetry/otl_server_test.cc" - "${TESTS_DIR}/opentelemetry/otl_converter_test.cc" - "${TESTS_DIR}/opentelemetry/open_telemetry_test.cc" - "${TESTS_DIR}/retention/host.cc" - "${TESTS_DIR}/retention/service.cc" - "${TESTS_DIR}/string/string.cc" - "${TESTS_DIR}/test_engine.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/between_two_years.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/calendar_date.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/dst_backward.cc" - 
"${TESTS_DIR}/timeperiod/get_next_valid_time/dst_forward.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/earliest_daterange_first.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/exclusion.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/exceptions_test.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/generic_month_date.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/normal_weekday.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/offset_weekday_of_generic_month.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/offset_weekday_of_specific_month.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/precedence.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/skip_interval.cc" - "${TESTS_DIR}/timeperiod/get_next_valid_time/specific_month_date.cc" - # # Headers. - "${TESTS_DIR}/test_engine.hh" - "${TESTS_DIR}/timeperiod/utils.hh") +else() + target_link_libraries(bin_connector_test_run cce_core pthread) + target_precompile_headers(bin_connector_test_run REUSE_FROM cce_core) +endif() # Unit test executable. - include_directories(${TESTS_DIR}) + include_directories(${TESTS_DIR} ${CMAKE_BINARY_DIR}) if(WITH_ASAN) set(CMAKE_BUILD_TYPE Debug) @@ -151,72 +73,307 @@ if(WITH_TESTING) ) endif() - # utils.cc can't be compiled with precomp headers so it's compiled apart - add_library(ut_engine_utils STATIC "${TESTS_DIR}/timeperiod/utils.cc") + # file used by timeperiod-test.cc + file(COPY ${TESTS_DIR}/configuration/timeperiods.cfg + DESTINATION ${CMAKE_BINARY_DIR}/tests/) - add_executable(ut_engine ${ut_sources}) - target_include_directories(ut_engine PRIVATE - ${MODULE_DIR_OTL}/src - ${CMAKE_SOURCE_DIR}/common/grpc/inc - ${CMAKE_SOURCE_DIR}/agent/inc - ${CMAKE_SOURCE_DIR}/agent/src) + if(LEGACY_ENGINE) + set(ut_sources_legacy + # Sources. 
+ "${TESTS_DIR}/parse-check-output.cc" + "${TESTS_DIR}/checks/service_check.cc" + "${TESTS_DIR}/checks/service_retention.cc" + "${TESTS_DIR}/checks/anomalydetection.cc" + "${TESTS_DIR}/commands/simple-command.cc" + "${TESTS_DIR}/commands/connector.cc" + "${TESTS_DIR}/commands/environment.cc" + "${TESTS_DIR}/configuration/applier/applier-anomalydetection.cc" + "${TESTS_DIR}/configuration/applier/applier-command.cc" + "${TESTS_DIR}/configuration/applier/applier-connector.cc" + "${TESTS_DIR}/configuration/applier/applier-contact.cc" + "${TESTS_DIR}/configuration/applier/applier-contactgroup.cc" + "${TESTS_DIR}/configuration/applier/applier-global.cc" + "${TESTS_DIR}/configuration/applier/applier-log.cc" + "${TESTS_DIR}/configuration/applier/applier-host.cc" + "${TESTS_DIR}/configuration/applier/applier-hostescalation.cc" + "${TESTS_DIR}/configuration/applier/applier-hostdependency.cc" + "${TESTS_DIR}/configuration/applier/applier-hostgroup.cc" + "${TESTS_DIR}/configuration/applier/applier-service.cc" + "${TESTS_DIR}/configuration/applier/applier-serviceescalation.cc" + "${TESTS_DIR}/configuration/applier/applier-servicegroup.cc" + "${TESTS_DIR}/configuration/applier/applier-state.cc" + "${TESTS_DIR}/configuration/contact.cc" + "${TESTS_DIR}/configuration/host.cc" + "${TESTS_DIR}/configuration/object.cc" + "${TESTS_DIR}/configuration/service.cc" + "${TESTS_DIR}/configuration/severity.cc" + "${TESTS_DIR}/configuration/tag.cc" + "${TESTS_DIR}/configuration/timeperiod-test.cc" + "${TESTS_DIR}/configuration/whitelist-test.cc" + "${TESTS_DIR}/contacts/contactgroup-config.cc" + "${TESTS_DIR}/contacts/simple-contactgroup.cc" + "${TESTS_DIR}/custom_vars/extcmd.cc" + "${TESTS_DIR}/downtimes/downtime.cc" + "${TESTS_DIR}/downtimes/downtime_finder.cc" + "${TESTS_DIR}/enginerpc/enginerpc.cc" + "${TESTS_DIR}/helper.cc" + "${TESTS_DIR}/macros/macro.cc" + "${TESTS_DIR}/macros/macro_hostname.cc" + "${TESTS_DIR}/macros/macro_service.cc" + 
"${TESTS_DIR}/external_commands/anomalydetection.cc" + "${TESTS_DIR}/external_commands/host.cc" + "${TESTS_DIR}/external_commands/service.cc" + "${TESTS_DIR}/main.cc" + "${TESTS_DIR}/loop/loop.cc" + "${TESTS_DIR}/notifications/host_downtime_notification.cc" + "${TESTS_DIR}/notifications/host_flapping_notification.cc" + "${TESTS_DIR}/notifications/host_normal_notification.cc" + "${TESTS_DIR}/notifications/host_recovery_notification.cc" + "${TESTS_DIR}/notifications/service_normal_notification.cc" + "${TESTS_DIR}/notifications/service_timeperiod_notification.cc" + "${TESTS_DIR}/notifications/service_flapping_notification.cc" + "${TESTS_DIR}/notifications/service_downtime_notification_test.cc" + "${TESTS_DIR}/opentelemetry/agent_check_result_builder_test.cc" + "${TESTS_DIR}/opentelemetry/agent_reverse_client_test.cc" + "${TESTS_DIR}/opentelemetry/agent_to_engine_test.cc" + "${TESTS_DIR}/opentelemetry/grpc_config_test.cc" + "${TESTS_DIR}/opentelemetry/host_serv_extractor_test.cc" + "${TESTS_DIR}/opentelemetry/otl_server_test.cc" + "${TESTS_DIR}/opentelemetry/otl_converter_test.cc" + "${TESTS_DIR}/opentelemetry/open_telemetry_test.cc" + "${TESTS_DIR}/retention/host.cc" + "${TESTS_DIR}/retention/service.cc" + "${TESTS_DIR}/string/string.cc" + "${TESTS_DIR}/test_engine.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/between_two_years.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/calendar_date.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/dst_backward.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/dst_forward.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/earliest_daterange_first.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/exclusion.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/exceptions_test.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/generic_month_date.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/normal_weekday.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/offset_weekday_of_generic_month.cc" + 
"${TESTS_DIR}/timeperiod/get_next_valid_time/offset_weekday_of_specific_month.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/precedence.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/skip_interval.cc" + "${TESTS_DIR}/timeperiod/get_next_valid_time/specific_month_date.cc" + # # Headers. + "${TESTS_DIR}/test_engine.hh" + "${TESTS_DIR}/timeperiod/utils.hh") - target_precompile_headers(ut_engine REUSE_FROM cce_core) + # utils.cc can't be compiled with precomp headers so it's compiled apart + add_library(ut_engine_utils_legacy STATIC + "${TESTS_DIR}/timeperiod/utils.cc") - set_target_properties( - ut_engine rpc_client_engine bin_connector_test_run - PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests) + add_executable(ut_engine ${ut_sources_legacy}) + target_include_directories( + ut_engine + PRIVATE ${MODULE_DIR_OTL}/src ${CMAKE_SOURCE_DIR}/common/grpc/inc + ${CMAKE_SOURCE_DIR}/agent/inc ${CMAKE_SOURCE_DIR}/agent/src) - # file used by timeperiod-test.cc - file(COPY ${TESTS_DIR}/configuration/timeperiods.cfg - DESTINATION ${CMAKE_BINARY_DIR}/tests/) + target_precompile_headers(ut_engine REUSE_FROM cce_core) - add_test(NAME tests COMMAND ut_engine) + set_target_properties( + ut_engine rpc_client_engine bin_connector_test_run + PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO + ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests) + add_test(NAME tests_legacy COMMAND ut_engine) - if(WITH_COVERAGE) - set(COVERAGE_EXCLUDES - '${PROJECT_BINARY_DIR}/*' '${PROJECT_SOURCE_DIR}/tests/*' - '/usr/include/*' 
'*/.conan/*') - setup_target_for_coverage(NAME engine-test-coverage EXECUTABLE ut_engine - DEPENDENCIES ut_engine) - set(GCOV gcov) - endif() + target_link_libraries( + ut_engine + PRIVATE -L${PROTOBUF_LIB_DIR} + enginerpc_legacy + ut_engine_utils_legacy + "-Wl,-whole-archive" + cce_core + log_v2 + opentelemetry + centagent_lib + "-Wl,-no-whole-archive" + pb_open_telemetry_lib + centreon_grpc + centreon_http + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_url + boost_program_options + pthread + ${GCOV} + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + gRPC::gpr + gRPC::grpc + gRPC::grpc++ + gRPC::grpc++_alts + crypto + ssl + z + fmt::fmt + ryml::ryml + stdc++fs + dl) - target_link_libraries( - ut_engine - PRIVATE -L${PROTOBUF_LIB_DIR} - ${ENGINERPC} - ut_engine_utils - "-Wl,-whole-archive" - cce_core - log_v2 - opentelemetry - "-Wl,-no-whole-archive" - pb_open_telemetry_lib - centreon_grpc - centreon_http - -L${Boost_LIBRARY_DIR_RELEASE} - boost_url - boost_program_options - pthread - ${GCOV} - GTest::gtest - GTest::gtest_main - GTest::gmock - GTest::gmock_main - gRPC::gpr - gRPC::grpc - gRPC::grpc++ - gRPC::grpc++_alts - crypto - ssl - z - fmt::fmt - ryml::ryml - stdc++fs - dl) + if(WITH_COVERAGE) + set(COVERAGE_EXCLUDES + '${PROJECT_BINARY_DIR}/*' '${PROJECT_SOURCE_DIR}/tests/*' + '/usr/include/*' '*/.conan/*') + setup_target_for_coverage(NAME engine-test-coverage EXECUTABLE + ut_engine DEPENDENCIES ut_engine) + set(GCOV gcov) + endif() + else() + set(ut_sources + # Sources. 
+ ${TESTS_DIR}/parse-check-output.cc + ${TESTS_DIR}/checks/pb_service_check.cc + ${TESTS_DIR}/checks/pb_service_retention.cc + ${TESTS_DIR}/checks/pb_anomalydetection.cc + ${TESTS_DIR}/commands/pbsimple-command.cc + ${TESTS_DIR}/commands/connector.cc + ${TESTS_DIR}/commands/environment.cc + ${TESTS_DIR}/configuration/applier/applier-pbanomalydetection.cc + ${TESTS_DIR}/configuration/applier/applier-pbcommand.cc + ${TESTS_DIR}/configuration/applier/applier-pbconnector.cc + ${TESTS_DIR}/configuration/applier/applier-pbcontact.cc + ${TESTS_DIR}/configuration/applier/applier-pbcontactgroup.cc + ${TESTS_DIR}/configuration/applier/applier-pbglobal.cc + ${TESTS_DIR}/configuration/applier/applier-pblog.cc + ${TESTS_DIR}/configuration/applier/applier-pbhost.cc + ${TESTS_DIR}/configuration/applier/applier-pbhostescalation.cc + ${TESTS_DIR}/configuration/applier/applier-pbhostdependency.cc + ${TESTS_DIR}/configuration/applier/applier-pbhostgroup.cc + ${TESTS_DIR}/configuration/applier/applier-pbservice.cc + ${TESTS_DIR}/configuration/applier/applier-pbserviceescalation.cc + ${TESTS_DIR}/configuration/applier/applier-pbservicegroup.cc + ${TESTS_DIR}/configuration/applier/applier-pbstate.cc + ${TESTS_DIR}/configuration/pbcontact.cc + ${TESTS_DIR}/configuration/pbhost.cc + ${TESTS_DIR}/configuration/pbservice.cc + ${TESTS_DIR}/configuration/pbseverity.cc + ${TESTS_DIR}/configuration/pbtag.cc + ${TESTS_DIR}/configuration/pbtimeperiod-test.cc + ${TESTS_DIR}/configuration/whitelist-test.cc + ${TESTS_DIR}/contacts/contactgroup-config.cc + ${TESTS_DIR}/contacts/simple-contactgroup.cc + ${TESTS_DIR}/custom_vars/pbextcmd.cc + ${TESTS_DIR}/downtimes/pbdowntime.cc + ${TESTS_DIR}/downtimes/pbdowntime_finder.cc + ${TESTS_DIR}/enginerpc/pbenginerpc.cc + ${TESTS_DIR}/helper.cc + ${TESTS_DIR}/macros/pbmacro.cc + ${TESTS_DIR}/macros/pbmacro_hostname.cc + ${TESTS_DIR}/macros/pbmacro_service.cc + ${TESTS_DIR}/external_commands/pbanomalydetection.cc + ${TESTS_DIR}/external_commands/pbhost.cc + 
${TESTS_DIR}/external_commands/pbservice.cc + ${TESTS_DIR}/main.cc + ${TESTS_DIR}/loop/loop.cc + ${TESTS_DIR}/notifications/host_downtime_notification.cc + ${TESTS_DIR}/notifications/host_flapping_notification.cc + ${TESTS_DIR}/notifications/host_normal_notification.cc + ${TESTS_DIR}/notifications/host_recovery_notification.cc + ${TESTS_DIR}/notifications/service_normal_notification.cc + ${TESTS_DIR}/notifications/service_timeperiod_notification.cc + ${TESTS_DIR}/notifications/service_flapping_notification.cc + ${TESTS_DIR}/notifications/service_downtime_notification_test.cc + ${TESTS_DIR}/opentelemetry/grpc_config_test.cc + ${TESTS_DIR}/opentelemetry/host_serv_extractor_test.cc + ${TESTS_DIR}/opentelemetry/otl_server_test.cc + ${TESTS_DIR}/opentelemetry/otl_converter_test.cc + ${TESTS_DIR}/opentelemetry/open_telemetry_test.cc + ${TESTS_DIR}/retention/host.cc + ${TESTS_DIR}/retention/service.cc + ${TESTS_DIR}/string/string.cc + ${TESTS_DIR}/test_engine.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/between_two_years.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/calendar_date.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/dst_backward.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/dst_forward.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/earliest_daterange_first.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/exclusion.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/exceptions_test.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/generic_month_date.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/normal_weekday.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/offset_weekday_of_generic_month.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/offset_weekday_of_specific_month.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/precedence.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/skip_interval.cc + ${TESTS_DIR}/timeperiod/get_next_valid_time/specific_month_date.cc + # Headers. 
+ "${TESTS_DIR}/test_engine.hh" + "${TESTS_DIR}/timeperiod/utils.hh") + add_library(ut_engine_utils STATIC "${TESTS_DIR}/timeperiod/utils.cc") + + add_executable(ut_engine ${ut_sources}) + target_include_directories( + ut_engine + PRIVATE ${MODULE_DIR_OTL}/src ${CMAKE_SOURCE_DIR}/common/grpc/inc + ${CMAKE_SOURCE_DIR}/agent/inc ${CMAKE_SOURCE_DIR}/agent/src) + + target_precompile_headers(ut_engine REUSE_FROM cce_core) + + set_target_properties( + ut_engine rpc_client_engine bin_connector_test_run + PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO + ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests) + + add_test(NAME tests COMMAND ut_engine) + + target_link_libraries( + ut_engine + PRIVATE -L${PROTOBUF_LIB_DIR} + enginerpc + ut_engine_utils + "-Wl,-whole-archive" + cce_core + log_v2 + opentelemetry + centagent_lib + "-Wl,-no-whole-archive" + pb_open_telemetry_lib + centreon_grpc + centreon_http + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_url + boost_program_options + pthread + ${GCOV} + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + gRPC::gpr + gRPC::grpc + gRPC::grpc++ + gRPC::grpc++_alts + crypto + ssl + z + fmt::fmt + ryml::ryml + stdc++fs + dl) + + if(WITH_COVERAGE) + set(COVERAGE_EXCLUDES + '${PROJECT_BINARY_DIR}/*' '${PROJECT_SOURCE_DIR}/tests/*' + '/usr/include/*' '*/.conan/*') + setup_target_for_coverage(NAME engine-test-coverage EXECUTABLE ut_engine + DEPENDENCIES ut_engine) + set(GCOV gcov) + endif() + endif() endif() diff --git a/engine/tests/cfg_files/conf1/centengine.cfg b/engine/tests/cfg_files/conf1/centengine.cfg new file mode 100644 index 00000000000..ec539359bf7 --- /dev/null +++ b/engine/tests/cfg_files/conf1/centengine.cfg @@ -0,0 +1,124 @@ 
+################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### +cfg_file=/tmp/hostTemplates.cfg +cfg_file=/tmp/hosts.cfg +cfg_file=/tmp/serviceTemplates.cfg +cfg_file=/tmp/services.cfg +cfg_file=/tmp/commands.cfg +cfg_file=/tmp/contactgroups.cfg +cfg_file=/tmp/contacts.cfg +cfg_file=/tmp/hostgroups.cfg +cfg_file=/tmp/servicegroups.cfg +cfg_file=/tmp/timeperiods.cfg +cfg_file=/tmp/escalations.cfg +cfg_file=/tmp/dependencies.cfg +cfg_file=/tmp/connectors.cfg +cfg_file=/tmp/meta_commands.cfg +cfg_file=/tmp/meta_timeperiod.cfg +cfg_file=/tmp/meta_host.cfg +cfg_file=/tmp/meta_services.cfg +cfg_file=/tmp/tags.cfg +cfg_file=/tmp/severities.cfg +broker_module=/usr/lib64/centreon-engine/externalcmd.so +broker_module=/usr/lib64/nagios/cbmod.so /etc/centreon-broker/central-module.json +interval_length=60 +use_timezone=:America/New_York +resource_file=/tmp/resource.cfg +log_file=/tmp/centengine.log +status_file=/tmp/status.dat +status_update_interval=60 +external_command_buffer_slots=4096 +command_check_interval=1s +command_file=/tmp/centengine.cmd +state_retention_file=/tmp/retention.dat +retention_update_interval=60 +service_inter_check_delay_method=s +host_inter_check_delay_method=s +service_interleave_factor=s +max_concurrent_checks=0 +max_service_check_spread=15 +max_host_check_spread=15 +check_result_reaper_frequency=5 +low_service_flap_threshold=25.0 +high_service_flap_threshold=50.0 +low_host_flap_threshold=25.0 +high_host_flap_threshold=50.0 +service_check_timeout=60 +host_check_timeout=12 +event_handler_timeout=30 +notification_timeout=30 +date_format=euro 
+illegal_object_name_chars=~!$%^&*"|'<>?,()= +illegal_macro_output_chars=`~$^&"|'<> +admin_email=admin@localhost +admin_pager=admin@localhost +event_broker_options=-1 +cached_host_check_horizon=15 +cached_service_check_horizon=15 +additional_freshness_latency=15 +debug_file=/var/log/centreon-engine/centengine.debug +debug_level=0 +debug_verbosity=1 +max_debug_file_size=1000000000 +log_pid=1 +enable_macros_filter=0 +grpc_port=50001 +log_v2_enabled=1 +log_legacy_enabled=0 +log_v2_logger=file +log_level_functions=warning +log_level_config=info +log_level_events=info +log_level_checks=info +log_level_notifications=info +log_level_eventbroker=warning +log_level_external_command=info +log_level_commands=warning +log_level_downtimes=info +log_level_comments=info +log_level_macros=warning +log_level_process=info +log_level_runtime=warning +instance_heartbeat_interval=30 +enable_notifications=1 +execute_service_checks=1 +accept_passive_service_checks=1 +execute_host_checks=1 +accept_passive_host_checks=1 +enable_event_handlers=1 +check_external_commands=1 +use_retained_program_state=1 +use_retained_scheduling_info=1 +use_syslog=0 +log_notifications=1 +log_service_retries=1 +log_host_retries=1 +log_event_handlers=1 +log_external_commands=1 +log_passive_checks=1 +auto_reschedule_checks=0 +soft_state_dependencies=0 +check_for_orphaned_services=1 +check_for_orphaned_hosts=1 +check_service_freshness=0 +check_host_freshness=0 +enable_flap_detection=0 +use_regexp_matching=0 +use_true_regexp_matching=0 +enable_predictive_host_dependency_checks=1 +enable_predictive_service_dependency_checks=1 +enable_environment_macros=0 diff --git a/engine/tests/cfg_files/conf1/commands.cfg b/engine/tests/cfg_files/conf1/commands.cfg new file mode 100644 index 00000000000..ebba74aba37 --- /dev/null +++ b/engine/tests/cfg_files/conf1/commands.cfg @@ -0,0 +1,91 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien 
Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define command { + command_name base_host_alive + command_line $USER1$/check_icmp -H $HOSTADDRESS$ -w 3000.0,80% -c 5000.0,100% -p 1 +} + +define command { + command_name host-notify-by-email + command_line /bin/sh -c '/usr/bin/printf "%b" "***** centreon Notification *****\n\nType:$NOTIFICATIONTYPE$\nHost: $HOSTNAME$\nState: $HOSTSTATE$\nAddress: $HOSTADDRESS$\nInfo: $HOSTOUTPUT$\nDate/Time: $DATE$" | /bin/mail -s "Host $HOSTSTATE$ alert for $HOSTNAME$!" $CONTACTEMAIL$' +} + +define command { + command_name OS-Linux-SNMP-Process-Generic + command_line $CENTREONPLUGINS$/centreon_linux_snmp.pl --plugin=os::linux::snmp::plugin --mode=processcount --hostname=$HOSTADDRESS$ --snmp-version='$_HOSTSNMPVERSION$' --snmp-community='$_HOSTSNMPCOMMUNITY$' $_HOSTSNMPEXTRAOPTIONS$ --process-name='$_SERVICEPROCESSNAME$' --process-path='$_SERVICEPROCESSPATH$' --process-args='$_SERVICEPROCESSARGS$' --regexp-name --regexp-path --regexp-args --warning='$_SERVICEWARNING$' --critical='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name base_centreon_ping + command_line $USER1$/check_icmp -H $HOSTADDRESS$ -n $_SERVICEPACKETNUMBER$ -w $_SERVICEWARNING$ -c $_SERVICECRITICAL$ $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name OS-Linux-SNMP-Swap + command_line $CENTREONPLUGINS$/centreon_linux_snmp.pl --plugin=os::linux::snmp::plugin --mode=swap --hostname=$HOSTADDRESS$ --snmp-version='$_HOSTSNMPVERSION$' --snmp-community='$_HOSTSNMPCOMMUNITY$' $_HOSTSNMPEXTRAOPTIONS$ --warning-usage-prct='$_SERVICEWARNING$' --critical-usage-prct='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name OS-Linux-SNMP-Memory + command_line 
$CENTREONPLUGINS$/centreon_linux_snmp.pl --plugin=os::linux::snmp::plugin --mode=memory --hostname=$HOSTADDRESS$ --snmp-version='$_HOSTSNMPVERSION$' --snmp-community='$_HOSTSNMPCOMMUNITY$' $_HOSTSNMPEXTRAOPTIONS$ --warning-usage='$_SERVICEWARNING$' --critical-usage='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name OS-Linux-SNMP-Load + command_line $CENTREONPLUGINS$/centreon_linux_snmp.pl --plugin=os::linux::snmp::plugin --mode=load --hostname=$HOSTADDRESS$ --snmp-version='$_HOSTSNMPVERSION$' --snmp-community='$_HOSTSNMPCOMMUNITY$' $_HOSTSNMPEXTRAOPTIONS$ --warning='$_SERVICEWARNING$' --critical='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name OS-Linux-SNMP-Cpu + command_line $CENTREONPLUGINS$/centreon_linux_snmp.pl --plugin=os::linux::snmp::plugin --mode=cpu --hostname=$HOSTADDRESS$ --snmp-version='$_HOSTSNMPVERSION$' --snmp-community='$_HOSTSNMPCOMMUNITY$' $_HOSTSNMPEXTRAOPTIONS$ --warning-average='$_SERVICEWARNING$' --critical-average='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name App-Centreon-MySQL-Partitioning + command_line $CENTREONPLUGINS$/centreon_centreon_database.pl --plugin=database::mysql::plugin --dyn-mode=apps::centreon::sql::mode::partitioning --host='$HOSTADDRESS$' --username='$_HOSTMYSQLUSERNAME$' --password='$_HOSTMYSQLPASSWORD$' --port='$_HOSTMYSQLPORT$' --tablename='$_SERVICETABLENAME1$' --tablename='$_SERVICETABLENAME2$' --tablename='$_SERVICETABLENAME3$' --tablename='$_SERVICETABLENAME4$' --warning='$_SERVICEWARNING$' --critical='$_SERVICECRITICAL$' +} + +define command { + command_name App-DB-MySQL + command_line $CENTREONPLUGINS$/centreon_mysql.pl --plugin=database::mysql::plugin --host=$HOSTADDRESS$ --username='$_HOSTMYSQLUSERNAME$' --password='$_HOSTMYSQLPASSWORD$' --port='$_HOSTMYSQLPORT$' --mode='$_SERVICEMODE$' --warning='$_SERVICEWARNING$' --critical='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name 
App-DB-MySQL-Queries + command_line $CENTREONPLUGINS$/centreon_mysql.pl --plugin=database::mysql::plugin --host=$HOSTADDRESS$ --username='$_HOSTMYSQLUSERNAME$' --password='$_HOSTMYSQLPASSWORD$' --port='$_HOSTMYSQLPORT$' --mode=queries --warning-total='$_SERVICEWARNING$' --critical-total='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name App-DB-MySQL-Database-Size + command_line $CENTREONPLUGINS$/centreon_mysql.pl --plugin=database::mysql::plugin --host=$HOSTADDRESS$ --username='$_HOSTMYSQLUSERNAME$' --password='$_HOSTMYSQLPASSWORD$' --port='$_HOSTMYSQLPORT$' --mode=databases-size --filter-database='$_SERVICEFILTERDATABASE$' --filter-perfdata='$_SERVICEFILTERPERFDATA$' --warning-table-usage='$_SERVICEWARNINGTABLEUSAGE$' --critical-table-usage='$_SERVICECRITICALTABLEUSAGE$' --warning-table-free='$_SERVICEWARNINGTABLEFREE$' --critical-table-free='$_SERVICECRITICALTABLEFREE$' --warning-table-frag='$_SERVICEWARNINGTABLEFRAG$' --critical-table-frag='$_SERVICECRITICALTABLEFRAG$' --warning-db-usage='$_SERVICEWARNINGDBUSAGE$' --critical-db-usage='$_SERVICECRITICALDBUSAGE$' --warning-db-free='$_SERVICEWARNINGDBFREE$' --critical-db-free='$_SERVICECRITICALDBFREE$' --warning-total-usage='$_SERVICEWARNINGTOTALUSAGE$' --critical-total-usage='$_SERVICECRITICALTOTALUSAGE$' --warning-total-free='$_SERVICEWARNINGTOTALFREE$' --critical-total-free='$_SERVICECRITICALTOTALFREE$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name App-DB-MySQL-Threads-Connected + command_line $CENTREONPLUGINS$/centreon_mysql.pl --plugin=database::mysql::plugin --host=$HOSTADDRESS$ --username='$_HOSTMYSQLUSERNAME$' --password='$_HOSTMYSQLPASSWORD$' --port='$_HOSTMYSQLPORT$' --mode=threads-connected --warning-usage='$_SERVICEWARNING$' --critical-usage='$_SERVICECRITICAL$' --warning-usage-prct='$_SERVICEWARNINGUSAGEPRCT$' --critical-usage-prct='$_SERVICECRITICALUSAGEPRCT$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name OS-Linux-SNMP-Disk-Name + 
command_line $CENTREONPLUGINS$/centreon_linux_snmp.pl --plugin=os::linux::snmp::plugin --mode=storage --hostname=$HOSTADDRESS$ --snmp-version='$_HOSTSNMPVERSION$' --snmp-community='$_HOSTSNMPCOMMUNITY$' $_HOSTSNMPEXTRAOPTIONS$ --storage '$_SERVICEDISKNAME$' --name --display-transform-src='$_SERVICETRANSFORMSRC$' --display-transform-dst='$_SERVICETRANSFORMDST$' --warning-usage='$_SERVICEWARNING$' --critical-usage='$_SERVICECRITICAL$' $_SERVICEEXTRAOPTIONS$ +} + +define command { + command_name App-Monitoring-Centreon-Central-Broker-Stats + command_line $CENTREONPLUGINS$/centreon_centreon_central.pl --plugin=apps::centreon::local::plugin --hostname=$HOSTADDRESS$ --mode=broker-stats --broker-stats-file='$_HOSTSQLSTATSFILE$' --broker-stats-file='$_HOSTRRDSTATSFILE$' --broker-stats-file='$_HOSTMODULESTATSFILE$' --filter-name='$_SERVICEFILTERNAME$' --warning-speed-events='$_SERVICEWARNINGSPEEDEVENTS$' --critical-speed-events='$_SERVICECRITICALSPEEDEVENTS$' --warning-queued-events='$_SERVICEWARNINGQUEUEDEVENTS$' --critical-queued-events='$_SERVICECRITICALQUEUEDEVENTS$' --warning-unacknowledged-events='$_SERVICEWARNINGUNACKNOWLEDGEDEVENTS$' --critical-unacknowledged-events='$_SERVICECRITICALUNACKNOWLEDGEDEVENTS$' --warning-status='$_SERVICEWARNINGSTATUS$' --critical-status='$_SERVICECRITICALSTATUS$' $_SERVICEEXTRAOPTIONS$ +} diff --git a/engine/tests/cfg_files/conf1/connectors.cfg b/engine/tests/cfg_files/conf1/connectors.cfg new file mode 100644 index 00000000000..f49cb9d4b58 --- /dev/null +++ b/engine/tests/cfg_files/conf1/connectors.cfg @@ -0,0 +1,26 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # 
+################################################################### + +define connector { + connector_name Perl Connector + connector_line /usr/lib64/centreon-connector/centreon_connector_perl --log-file=/var/log/centreon-engine/connector-perl.log +} + +define connector { + connector_name SSH Connector + connector_line /usr/lib64/centreon-connector/centreon_connector_ssh --log-file=/var/log/centreon-engine/connector-ssh.log +} diff --git a/engine/tests/cfg_files/conf1/contactgroups.cfg b/engine/tests/cfg_files/conf1/contactgroups.cfg new file mode 100644 index 00000000000..fd3dce0e64e --- /dev/null +++ b/engine/tests/cfg_files/conf1/contactgroups.cfg @@ -0,0 +1,27 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define contactgroup { + contactgroup_name Guest + alias Guests Group +} + +define contactgroup { + contactgroup_name Supervisors + alias Centreon supervisors + members John_Doe +} diff --git a/engine/tests/cfg_files/conf1/contacts.cfg b/engine/tests/cfg_files/conf1/contacts.cfg new file mode 100644 index 00000000000..2c592c985de --- /dev/null +++ b/engine/tests/cfg_files/conf1/contacts.cfg @@ -0,0 +1,31 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define 
contact { + contact_name John_Doe + alias admin + email admin@admin.tld + host_notification_period 24x7 + service_notification_period 24x7 + host_notification_options d,u,r + service_notification_options w,u,c + register 1 + host_notifications_enabled 1 + service_notifications_enabled 1 + host_notification_commands host-notify-by-email + service_notification_commands host-notify-by-email +} diff --git a/engine/tests/cfg_files/conf1/dependencies.cfg b/engine/tests/cfg_files/conf1/dependencies.cfg new file mode 100644 index 00000000000..d130bf9a6ba --- /dev/null +++ b/engine/tests/cfg_files/conf1/dependencies.cfg @@ -0,0 +1,56 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define hostdependency { + ;dependency_name hostdep1 + execution_failure_criteria o,d + notification_failure_criteria u,p + inherits_parent 1 + dependent_host_name Centreon-central_2,Centreon-central_3 + host_name Centreon-central,Centreon-central_1 +} + +define servicedependency { + ;dependency_name servicedep1 + execution_failure_criteria o,u + notification_failure_criteria w,c + inherits_parent 1 + dependent_host_name Centreon-central + host_name Centreon-central + dependent_service_description Connections-Number + service_description Cpu +} + +define servicedependency { + ;dependency_name servicedep1 + execution_failure_criteria o,u + notification_failure_criteria w,c + inherits_parent 1 + dependent_host_name Centreon-central + host_name Centreon-central + dependent_service_description Connections-Number + service_description Connection-Time +} + +define hostdependency { + ;dependency_name 
hostdep2 + execution_failure_criteria o,d + notification_failure_criteria d,u + inherits_parent 1 + dependent_hostgroup_name Centreon_platform + hostgroup_name hg1 +} diff --git a/engine/tests/cfg_files/conf1/escalations.cfg b/engine/tests/cfg_files/conf1/escalations.cfg new file mode 100644 index 00000000000..bcca495bbb2 --- /dev/null +++ b/engine/tests/cfg_files/conf1/escalations.cfg @@ -0,0 +1,128 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define hostescalation { + ;escalation_name hostescalation + first_notification 5 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options d,u + hostgroup_name hg1 + contact_groups Supervisors +} + +define hostescalation { + ;escalation_name mixedescalation + first_notification 2 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options d,u,r + hostgroup_name hg1 + contact_groups Supervisors +} + +define hostescalation { + ;escalation_name hostescalation + first_notification 5 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options d,u + host_name Centreon-central,Centreon-central_1 + contact_groups Supervisors +} + +define hostescalation { + ;escalation_name mixedescalation + first_notification 2 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options d,u,r + host_name Centreon-central_3,Centreon-central_10 + contact_groups Supervisors +} + +define serviceescalation { + ;escalation_name hostescalation + first_notification 5 + last_notification 
10 + notification_interval 5 + escalation_period nonworkhours + host_name Centreon-central + service_description Cpu + contact_groups Supervisors +} + +define serviceescalation { + ;escalation_name hostescalation + first_notification 5 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + host_name Centreon-central + service_description Database-Size + contact_groups Supervisors +} + +define serviceescalation { + ;escalation_name mixedescalation + first_notification 2 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options w,u,c + host_name Centreon-central + service_description Cpu + contact_groups Supervisors +} + +define serviceescalation { + ;escalation_name mixedescalation + first_notification 2 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options w,u,c + host_name Centreon-central + service_description Connection-Time + contact_groups Supervisors +} + +define serviceescalation { + ;escalation_name serviceescalation + first_notification 4 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options w,u,c + servicegroup_name Database-MySQL + contact_groups Supervisors +} + +define serviceescalation { + ;escalation_name mixedescalation + first_notification 2 + last_notification 10 + notification_interval 5 + escalation_period nonworkhours + escalation_options w,u,c + servicegroup_name Database-MySQL + contact_groups Supervisors +} diff --git a/engine/tests/cfg_files/conf1/hostTemplates.cfg b/engine/tests/cfg_files/conf1/hostTemplates.cfg new file mode 100644 index 00000000000..b6c9d770fde --- /dev/null +++ b/engine/tests/cfg_files/conf1/hostTemplates.cfg @@ -0,0 +1,72 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : 
contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define host { + name generic-active-host + alias generic-active-host + check_command base_host_alive + check_period 24x7 + max_check_attempts 3 + check_interval 5 + register 0 + active_checks_enabled 1 + passive_checks_enabled 0 + _SNMPCOMMUNITY public + _SNMPVERSION 2c +} + +define host { + name template_for_test + alias Template to check CV + register 0 + _CUSTOM_CV base_value +} + +define host { + name generic-active-host-custom + alias generic-active-host + register 0 + use generic-active-host +} + +define host { + name OS-Linux-SNMP + alias Template to check Linux server using SNMP protocol + register 0 + icon_image ppm/operatingsystems-linux-snmp-linux-128.png + icon_id 1 + use generic-active-host-custom +} + +define host { + name OS-Linux-SNMP-custom + alias Template to check Linux server using SNMP protocol + register 0 + use OS-Linux-SNMP +} + +define host { + name App-Monitoring-Centreon-Central + alias Template to check Centreon Central Server + register 0 + icon_image ppm/applications-monitoring-centreon-central-centreon-128-2.png + icon_id 2 + use generic-active-host-custom,OS-Linux-SNMP-custom + _SQLSTATSFILE /var/lib/centreon-broker/central-broker-master-stats.json + _RRDSTATSFILE /var/lib/centreon-broker/central-rrd-master-stats.json + _MODULESTATSFILE /var/lib/centreon-engine/central-module-master-stats.json +} diff --git a/engine/tests/cfg_files/conf1/hostgroups.cfg b/engine/tests/cfg_files/conf1/hostgroups.cfg new file mode 100644 index 00000000000..255272f2438 --- /dev/null +++ b/engine/tests/cfg_files/conf1/hostgroups.cfg @@ -0,0 +1,31 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# 
www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define hostgroup { + hostgroup_id 3 + hostgroup_name hg1 + alias hg1 + notes note_hg1 + members Centreon-central_2,Centreon-central_3,Centreon-central_4 +} + +define hostgroup { + hostgroup_id 2 + hostgroup_name Centreon_platform + alias Centreon_platform + members Centreon-central_5,Centreon-central_6,Centreon-central_7,Centreon-central_8,Centreon-central_9 +} diff --git a/engine/tests/cfg_files/conf1/hosts.cfg b/engine/tests/cfg_files/conf1/hosts.cfg new file mode 100644 index 00000000000..a0237f4b9d5 --- /dev/null +++ b/engine/tests/cfg_files/conf1/hosts.cfg @@ -0,0 +1,161 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define host { + host_name Centreon-central + alias Centreon-central + address localhost + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Central,template_for_test + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 30 + _CUSTOM_CV custom_value +} + +define host { + host_name Centreon-central_1 + alias Centreon-central + address 127.0.0.2 + register 1 + use App-Monitoring-Centreon-Central + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 31 +} + +define host { + host_name Centreon-central_2 + alias Centreon-central + address 127.0.0.4 + register 1 + use 
App-Monitoring-Centreon-Central + group_tags 3 + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 32 +} + +define host { + host_name Centreon-central_3 + alias Centreon-central + address 127.0.0.5 + register 1 + use App-Monitoring-Centreon-Central + group_tags 3 + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 33 +} + +define host { + host_name Centreon-central_4 + alias Centreon-central + address localhost + register 1 + use App-Monitoring-Centreon-Central + group_tags 3 + _SNMPEXTRAOPTIONS + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 34 +} + +define host { + host_name Centreon-central_5 + alias Centreon-central + address localhost + register 1 + use App-Monitoring-Centreon-Central + group_tags 2 + _SNMPEXTRAOPTIONS + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 35 +} + +define host { + host_name Centreon-central_6 + alias Centreon-central + address 127.0.0.6 + register 1 + use App-Monitoring-Centreon-Central + group_tags 2 + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 36 +} + +define host { + host_name Centreon-central_7 + alias Centreon-central + address 127.0.0.8 + register 1 + use App-Monitoring-Centreon-Central + group_tags 2 + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 37 +} + +define host { + host_name Centreon-central_8 + alias Centreon-central + address 127.0.0.8 + register 1 + use App-Monitoring-Centreon-Central + group_tags 2 + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 38 +} + +define host { + host_name Centreon-central_9 + alias Centreon-central + address 127.0.0.9 + register 1 + use App-Monitoring-Centreon-Central + group_tags 2 + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 39 +} + +define host { + host_name Centreon-central_10 + alias Centreon-central + address 127.0.0.3 + register 1 + use 
App-Monitoring-Centreon-Central + _MYSQLPASSWORD centreon + _SNMPCOMMUNITY public + _SNMPVERSION 2c + _HOST_ID 40 +} diff --git a/engine/tests/cfg_files/conf1/meta_commands.cfg b/engine/tests/cfg_files/conf1/meta_commands.cfg new file mode 100644 index 00000000000..6d764fbcabc --- /dev/null +++ b/engine/tests/cfg_files/conf1/meta_commands.cfg @@ -0,0 +1,16 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### diff --git a/engine/tests/cfg_files/conf1/meta_host.cfg b/engine/tests/cfg_files/conf1/meta_host.cfg new file mode 100644 index 00000000000..6d764fbcabc --- /dev/null +++ b/engine/tests/cfg_files/conf1/meta_host.cfg @@ -0,0 +1,16 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### diff --git a/engine/tests/cfg_files/conf1/meta_services.cfg b/engine/tests/cfg_files/conf1/meta_services.cfg new file mode 100644 index 00000000000..6d764fbcabc --- /dev/null +++ b/engine/tests/cfg_files/conf1/meta_services.cfg @@ -0,0 +1,16 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # 
+################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### diff --git a/engine/tests/cfg_files/conf1/meta_timeperiod.cfg b/engine/tests/cfg_files/conf1/meta_timeperiod.cfg new file mode 100644 index 00000000000..6d764fbcabc --- /dev/null +++ b/engine/tests/cfg_files/conf1/meta_timeperiod.cfg @@ -0,0 +1,16 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### diff --git a/engine/tests/cfg_files/conf1/resource.cfg b/engine/tests/cfg_files/conf1/resource.cfg new file mode 100644 index 00000000000..0e5cc4a1825 --- /dev/null +++ b/engine/tests/cfg_files/conf1/resource.cfg @@ -0,0 +1,18 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### +$USER1$=/usr/lib64/nagios/plugins +$CENTREONPLUGINS$=/usr/lib/centreon/plugins/ diff --git a/engine/tests/cfg_files/conf1/serviceTemplates.cfg b/engine/tests/cfg_files/conf1/serviceTemplates.cfg new file mode 100644 index 00000000000..536ea94421a --- /dev/null +++ b/engine/tests/cfg_files/conf1/serviceTemplates.cfg @@ -0,0 +1,478 @@ +################################################################### +# # +# 
GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define service { + service_description generic-active-service + name generic-active-service + check_period 24x7 + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + is_volatile 0 + active_checks_enabled 1 + passive_checks_enabled 0 +} + +define service { + service_description generic-active-service + name generic-active-service-custom + register 0 + use generic-active-service +} + +define service { + service_description Process-Generic + name OS-Linux-Process-Generic-SNMP + check_command OS-Linux-SNMP-Process-Generic + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _CRITICAL 1: + _EXTRAOPTIONS + _PROCESSARGS + _PROCESSNAME + _PROCESSPATH + _WARNING +} + +define service { + service_description Process-Generic + name OS-Linux-Process-Generic-SNMP-custom + register 0 + use OS-Linux-Process-Generic-SNMP +} + +define service { + service_description proc-sshd + name App-Monitoring-Centreon-Process-sshd + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME sshd +} + +define service { + service_description proc-sshd + name App-Monitoring-Centreon-Process-sshd-custom + register 0 + use App-Monitoring-Centreon-Process-sshd +} + +define service { + service_description proc-httpd + name App-Monitoring-Centreon-Process-httpd + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME httpd|apache2 +} + +define service { + service_description proc-httpd + name App-Monitoring-Centreon-Process-httpd-custom + register 0 + use App-Monitoring-Centreon-Process-httpd +} + +define service { + 
service_description proc-crond + name App-Monitoring-Centreon-Process-crond + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME crond|cron +} + +define service { + service_description proc-crond + name App-Monitoring-Centreon-Process-crond-custom + register 0 + use App-Monitoring-Centreon-Process-crond +} + +define service { + service_description proc-centengine + name App-Monitoring-Centreon-Process-centengine + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME centengine + _PROCESSPATH /usr/sbin/ + _CRITICAL 1:1 +} + +define service { + service_description proc-centengine + name App-Monitoring-Centreon-Process-centengine-custom + register 0 + use App-Monitoring-Centreon-Process-centengine +} + +define service { + service_description proc-centcore + name App-Monitoring-Centreon-Process-centcore + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME centcore + _CRITICAL 1:1 +} + +define service { + service_description proc-centcore + name App-Monitoring-Centreon-Process-centcore-custom + register 0 + use App-Monitoring-Centreon-Process-centcore +} + +define service { + service_description proc-broker-sql + name App-Monitoring-Centreon-Process-broker-sql + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME cbd + _PROCESSARGS '/etc/centreon-broker/central-broker(.xml|.json)' + _CRITICAL 1:1 + _EXTRAOPTIONS --cpu --memory +} + +define service { + service_description proc-broker-sql + name App-Monitoring-Centreon-Process-broker-sql-custom + register 0 + use App-Monitoring-Centreon-Process-broker-sql +} + +define service { + service_description proc-broker-rrd + name App-Monitoring-Centreon-Process-broker-rrd + register 0 + use OS-Linux-Process-Generic-SNMP-custom + _PROCESSNAME cbd + _PROCESSARGS '/etc/centreon-broker/central-rrd(.xml|.json)' + _CRITICAL 1:1 + _EXTRAOPTIONS --cpu --memory +} + +define service { + service_description proc-broker-rrd + name 
App-Monitoring-Centreon-Process-broker-rrd-custom + register 0 + use App-Monitoring-Centreon-Process-broker-rrd +} + +define service { + service_description Ping + name Base-Ping-LAN + check_command base_centreon_ping + register 0 + use generic-active-service-custom + _PACKETNUMBER 5 + _WARNING 200,20% + _CRITICAL 400,50% +} + +define service { + service_description Ping + name Base-Ping-LAN-custom + register 0 + use Base-Ping-LAN +} + +define service { + service_description Swap + name OS-Linux-Swap-SNMP + check_command OS-Linux-SNMP-Swap + max_check_attempts 3 + check_interval 15 + retry_interval 1 + register 0 + use generic-active-service-custom + _CRITICAL 30 + _EXTRAOPTIONS + _WARNING 10 +} + +define service { + service_description Swap + name OS-Linux-Swap-SNMP-custom + register 0 + use OS-Linux-Swap-SNMP +} + +define service { + service_description Memory + name OS-Linux-Memory-SNMP + check_command OS-Linux-SNMP-Memory + max_check_attempts 3 + check_interval 15 + retry_interval 1 + register 0 + use generic-active-service-custom + _CRITICAL 90 + _EXTRAOPTIONS + _WARNING 80 +} + +define service { + service_description Memory + name OS-Linux-Memory-SNMP-custom + register 0 + use OS-Linux-Memory-SNMP +} + +define service { + service_description Load + name OS-Linux-Load-SNMP + check_command OS-Linux-SNMP-Load + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _CRITICAL 6,5,4 + _EXTRAOPTIONS + _WARNING 4,3,2 +} + +define service { + service_description Load + name OS-Linux-Load-SNMP-custom + register 0 + use OS-Linux-Load-SNMP +} + +define service { + service_description Cpu + name OS-Linux-Cpu-SNMP + check_command OS-Linux-SNMP-Cpu + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _WARNING 80 + _CRITICAL 90 +} + +define service { + service_description Cpu + name OS-Linux-Cpu-SNMP-custom + register 0 + use OS-Linux-Cpu-SNMP +} + +define 
service { + service_description Partitioning + name App-Centreon-MySQL-Partitioning + check_command App-Centreon-MySQL-Partitioning + max_check_attempts 5 + check_interval 1440 + retry_interval 5 + register 0 + active_checks_enabled 1 + passive_checks_enabled 0 + use generic-active-service-custom + _WARNING 7: + _CRITICAL 3: + _TABLENAME1 centreon_storage.data_bin + _TABLENAME2 centreon_storage.logs + _TABLENAME3 centreon_storage.log_archive_service + _TABLENAME4 centreon_storage.log_archive_host + _CRITICALITY_LEVEL 2 + _CRITICALITY_ID 6 + severity 6 +} + +define service { + service_description Partitioning + name App-Centreon-MySQL-Partitioning-custom + register 0 + use App-Centreon-MySQL-Partitioning +} + +define service { + service_description Slowqueries + name App-DB-MySQL-Slowqueries + check_command App-DB-MySQL + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _MODE slow-queries + _WARNING 0 + _CRITICAL 0 +} + +define service { + service_description Slowqueries + name App-DB-MySQL-Slowqueries-custom + register 0 + use App-DB-MySQL-Slowqueries +} + +define service { + service_description Queries + name App-DB-MySQL-Queries + check_command App-DB-MySQL-Queries + register 0 + use generic-active-service-custom +} + +define service { + service_description Queries + name App-DB-MySQL-Queries-custom + register 0 + use App-DB-MySQL-Queries +} + +define service { + service_description Open-Files + name App-DB-MySQL-Open-Files + check_command App-DB-MySQL + max_check_attempts 3 + check_interval 15 + retry_interval 1 + register 0 + use generic-active-service-custom + _MODE open-files + _WARNING 80 + _CRITICAL 95 +} + +define service { + service_description Open-Files + name App-DB-MySQL-Open-Files-custom + register 0 + use App-DB-MySQL-Open-Files +} + +define service { + service_description Myisam-Keycache + name App-DB-MySQL-Myisam-Keycache + check_command App-DB-MySQL + max_check_attempts 3 + 
check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _MODE myisam-keycache-hitrate +} + +define service { + service_description Myisam-Keycache + name App-DB-MySQL-Myisam-Keycache-custom + register 0 + use App-DB-MySQL-Myisam-Keycache +} + +define service { + service_description Database-Size + name App-DB-MySQL-Database-Size + check_command App-DB-MySQL-Database-Size + register 0 + use generic-active-service-custom + _FILTERDATABASE ^(?!(information_schema|performance_schema|test)) + _FILTERPERFDATA database + _EXTRAOPTIONS --verbose +} + +define service { + service_description Database-Size + name App-DB-MySQL-Database-Size-custom + register 0 + use App-DB-MySQL-Database-Size +} + +define service { + service_description Connections-Number + name App-DB-MySQL-Connections-Number + check_command App-DB-MySQL-Threads-Connected + register 0 + use generic-active-service-custom +} + +define service { + service_description Connections-Number + name App-DB-MySQL-Connections-Number-custom + register 0 + use App-DB-MySQL-Connections-Number +} + +define service { + service_description Connection-Time + name App-DB-MySQL-Connection-Time + check_command App-DB-MySQL + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _MODE connection-time + _WARNING 1000 + _CRITICAL 5000 + _CRITICALITY_LEVEL 2 + _CRITICALITY_ID 6 + severity 6 +} + +define service { + service_description Connection-Time + name App-DB-MySQL-Connection-Time-custom + register 0 + use App-DB-MySQL-Connection-Time +} + +define service { + service_description Disk-Generic-Name + name OS-Linux-Disk-Generic-Name-SNMP + check_command OS-Linux-SNMP-Disk-Name + max_check_attempts 3 + check_interval 30 + retry_interval 1 + register 0 + use generic-active-service-custom + _WARNING 80 + _CRITICAL 90 + _EXTRAOPTIONS --filter-perfdata='storage.space|used|free' +} + +define service { + service_description Disk-Generic-Name + name 
OS-Linux-Disk-Generic-Name-SNMP-custom + register 0 + use OS-Linux-Disk-Generic-Name-SNMP +} + +define service { + service_description Broker-Stats + name App-Monitoring-Centreon-Broker-Stats-Central + check_command App-Monitoring-Centreon-Central-Broker-Stats + max_check_attempts 3 + check_interval 5 + retry_interval 1 + register 0 + use generic-active-service-custom + _CRITICALSTATUS %{type} eq "output" and %{queue_file_enabled} =~ /true|yes/i + _EXTRAOPTIONS --verbose +} + +define service { + service_description Broker-Stats + name App-Monitoring-Centreon-Broker-Stats-Central-custom + register 0 + use App-Monitoring-Centreon-Broker-Stats-Central +} diff --git a/engine/tests/cfg_files/conf1/servicegroups.cfg b/engine/tests/cfg_files/conf1/servicegroups.cfg new file mode 100644 index 00000000000..6fa39a187df --- /dev/null +++ b/engine/tests/cfg_files/conf1/servicegroups.cfg @@ -0,0 +1,23 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define servicegroup { + servicegroup_id 2 + servicegroup_name Database-MySQL + alias Database-MySQL + members 
Centreon-central,proc-sshd,Centreon-central,Partitioning,Centreon-central,Slowqueries,Centreon-central,Queries,Centreon-central,Myisam-Keycache,Centreon-central,Connections-Number,Centreon-central,Connection-Time,Centreon-central_1,Partitioning,Centreon-central_1,Slowqueries,Centreon-central_1,Queries,Centreon-central_1,Myisam-Keycache,Centreon-central_1,Connections-Number,Centreon-central_1,Connection-Time,Centreon-central_2,Partitioning,Centreon-central_2,Slowqueries,Centreon-central_2,Queries,Centreon-central_2,Myisam-Keycache,Centreon-central_2,Connections-Number,Centreon-central_2,Connection-Time,Centreon-central_3,Partitioning,Centreon-central_3,Slowqueries,Centreon-central_3,Queries,Centreon-central_3,Myisam-Keycache,Centreon-central_3,Connections-Number,Centreon-central_3,Connection-Time,Centreon-central_4,Partitioning,Centreon-central_4,Slowqueries,Centreon-central_4,Queries,Centreon-central_4,Myisam-Keycache,Centreon-central_4,Connections-Number,Centreon-central_4,Connection-Time,Centreon-central_5,Partitioning,Centreon-central_5,Slowqueries,Centreon-central_5,Queries,Centreon-central_5,Myisam-Keycache,Centreon-central_5,Connections-Number,Centreon-central_5,Connection-Time,Centreon-central_6,Partitioning,Centreon-central_6,Slowqueries,Centreon-central_6,Queries,Centreon-central_6,Myisam-Keycache,Centreon-central_6,Connections-Number,Centreon-central_6,Connection-Time,Centreon-central_7,Partitioning,Centreon-central_7,Slowqueries,Centreon-central_7,Queries,Centreon-central_7,Myisam-Keycache,Centreon-central_7,Connections-Number,Centreon-central_7,Connection-Time,Centreon-central_8,Partitioning,Centreon-central_8,Slowqueries,Centreon-central_8,Queries,Centreon-central_8,Myisam-Keycache,Centreon-central_8,Connections-Number,Centreon-central_8,Connection-Time,Centreon-central_9,Partitioning,Centreon-central_9,Slowqueries,Centreon-central_9,Queries,Centreon-central_9,Myisam-Keycache,Centreon-central_9,Connections-Number,Centreon-central_9,Connection-Time,Centr
eon-central_10,Partitioning,Centreon-central_10,Slowqueries,Centreon-central_10,Queries,Centreon-central_10,Myisam-Keycache,Centreon-central_10,Connections-Number,Centreon-central_10,Connection-Time +} diff --git a/engine/tests/cfg_files/conf1/services.cfg b/engine/tests/cfg_files/conf1/services.cfg new file mode 100644 index 00000000000..f9f13e8c3ac --- /dev/null +++ b/engine/tests/cfg_files/conf1/services.cfg @@ -0,0 +1,3282 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define service { + host_name Centreon-central + service_description proc-sshd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + group_tags 2 + _SERVICE_ID 196 +} + +define service { + host_name Centreon-central + service_description proc-httpd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 197 +} + +define service { + host_name Centreon-central + service_description proc-crond + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 198 +} + +define service { + host_name Centreon-central + service_description proc-centengine + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 199 +} + +define service { + host_name Centreon-central + service_description proc-gorgone + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + 
_CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 200 +} + +define service { + host_name Centreon-central + service_description proc-broker-sql + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 201 +} + +define service { + host_name Centreon-central + service_description proc-broker-rrd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 202 +} + +define service { + host_name Centreon-central + service_description Ping + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 203 +} + +define service { + host_name Centreon-central + service_description Swap + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 204 +} + +define service { + host_name Centreon-central + service_description Memory + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 205 +} + +define service { + host_name Centreon-central + service_description Load + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 206 +} + +define service { + host_name Centreon-central + service_description Cpu + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 207 +} + +define service { + host_name Centreon-central + service_description Partitioning + contacts John_Doe + contact_groups Guest,Supervisors + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 208 +} + +define service { + host_name Centreon-central + service_description Slowqueries + contacts 
John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 209 +} + +define service { + host_name Centreon-central + service_description Queries + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 210 +} + +define service { + host_name Centreon-central + service_description Open-Files + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 211 +} + +define service { + host_name Centreon-central + service_description Myisam-Keycache + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 212 +} + +define service { + host_name Centreon-central + service_description Database-Size + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 213 +} + +define service { + host_name Centreon-central + service_description Connections-Number + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 214 +} + +define service { + host_name Centreon-central + service_description Connection-Time + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 215 +} + +define service { + host_name Centreon-central + service_description Disk-/ + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 216 +} + +define service { + host_name Centreon-central + service_description Disk-/var/lib/mysql + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 217 +} + +define service { + host_name 
Centreon-central + service_description Disk-/var/cache/centreon/backup + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 218 +} + +define service { + host_name Centreon-central + service_description Disk-/var/lib/centreon-broker + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 219 +} + +define service { + host_name Centreon-central + service_description Disk-/var/log + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 220 +} + +define service { + host_name Centreon-central + service_description Disk-/var/lib/centreon + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 221 +} + +define service { + host_name Centreon-central + service_description proc-centreontrapd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 222 +} + +define service { + host_name Centreon-central + service_description proc-snmptrapd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 223 +} + +define service { + host_name Centreon-central + service_description proc-rsyslogd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 224 +} + +define service { + host_name Centreon-central + service_description proc-snmpd + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use 
App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 225 +} + +define service { + host_name Centreon-central + service_description proc-broker-watchdog + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 226 +} + +define service { + host_name Centreon-central + service_description proc-postfix + contacts John_Doe + contact_groups Guest,Supervisors + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 227 +} + +define service { + host_name Centreon-central + service_description Broker-Stats + contacts John_Doe + contact_groups Guest + notification_period 24x7 + notification_interval 5 + notification_options w,u,c + first_notification_delay 0 + recovery_notification_delay 0 + register 1 + notifications_enabled 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 228 + _CRITICALITY_LEVEL 1 + _CRITICALITY_ID 5 + severity 5 +} + +define service { + host_name Centreon-central_1 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 229 +} + +define service { + host_name Centreon-central_1 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 230 +} + +define service { + host_name Centreon-central_1 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 231 +} + +define service { + host_name Centreon-central_1 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 232 +} + +define service { + host_name Centreon-central_1 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 233 +} + +define 
service { + host_name Centreon-central_1 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 234 +} + +define service { + host_name Centreon-central_1 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 235 +} + +define service { + host_name Centreon-central_1 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 236 +} + +define service { + host_name Centreon-central_1 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 237 +} + +define service { + host_name Centreon-central_1 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 238 +} + +define service { + host_name Centreon-central_1 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 239 +} + +define service { + host_name Centreon-central_1 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 240 +} + +define service { + host_name Centreon-central_1 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 241 +} + +define service { + host_name Centreon-central_1 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 242 +} + +define service { + host_name Centreon-central_1 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 243 +} + +define service { + host_name Centreon-central_1 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 244 +} + +define service { + host_name Centreon-central_1 + service_description Myisam-Keycache + 
register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 245 +} + +define service { + host_name Centreon-central_1 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 246 +} + +define service { + host_name Centreon-central_1 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 247 +} + +define service { + host_name Centreon-central_1 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 248 +} + +define service { + host_name Centreon-central_1 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 249 +} + +define service { + host_name Centreon-central_1 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 250 +} + +define service { + host_name Centreon-central_1 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 251 +} + +define service { + host_name Centreon-central_1 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 252 +} + +define service { + host_name Centreon-central_1 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 253 +} + +define service { + host_name Centreon-central_1 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 254 +} + +define service { + host_name Centreon-central_1 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME 
centreontrapd + _PROCESSPATH + _SERVICE_ID 255 +} + +define service { + host_name Centreon-central_1 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 256 +} + +define service { + host_name Centreon-central_1 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 257 +} + +define service { + host_name Centreon-central_1 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 258 +} + +define service { + host_name Centreon-central_1 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 259 +} + +define service { + host_name Centreon-central_1 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 260 +} + +define service { + host_name Centreon-central_1 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 261 +} + +define service { + host_name Centreon-central_2 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 262 +} + +define service { + host_name Centreon-central_2 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 263 +} + +define service { + host_name Centreon-central_2 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 264 +} + +define service { + host_name Centreon-central_2 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 265 +} + +define service { + host_name 
Centreon-central_2 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 266 +} + +define service { + host_name Centreon-central_2 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 267 +} + +define service { + host_name Centreon-central_2 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 268 +} + +define service { + host_name Centreon-central_2 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 269 +} + +define service { + host_name Centreon-central_2 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 270 +} + +define service { + host_name Centreon-central_2 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 271 +} + +define service { + host_name Centreon-central_2 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 272 +} + +define service { + host_name Centreon-central_2 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 273 +} + +define service { + host_name Centreon-central_2 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 274 +} + +define service { + host_name Centreon-central_2 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 275 +} + +define service { + host_name Centreon-central_2 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 276 +} + +define service { + host_name Centreon-central_2 + 
service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 277 +} + +define service { + host_name Centreon-central_2 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 278 +} + +define service { + host_name Centreon-central_2 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 279 +} + +define service { + host_name Centreon-central_2 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 280 +} + +define service { + host_name Centreon-central_2 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 281 +} + +define service { + host_name Centreon-central_2 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 282 +} + +define service { + host_name Centreon-central_2 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 283 +} + +define service { + host_name Centreon-central_2 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 284 +} + +define service { + host_name Centreon-central_2 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 285 +} + +define service { + host_name Centreon-central_2 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 286 +} + +define service { + host_name Centreon-central_2 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + 
_SERVICE_ID 287 +} + +define service { + host_name Centreon-central_2 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 288 +} + +define service { + host_name Centreon-central_2 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 289 +} + +define service { + host_name Centreon-central_2 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 290 +} + +define service { + host_name Centreon-central_2 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 291 +} + +define service { + host_name Centreon-central_2 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 292 +} + +define service { + host_name Centreon-central_2 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 293 +} + +define service { + host_name Centreon-central_2 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 294 +} + +define service { + host_name Centreon-central_3 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 295 +} + +define service { + host_name Centreon-central_3 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 296 +} + +define service { + host_name Centreon-central_3 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 297 +} + +define service { + 
host_name Centreon-central_3 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 298 +} + +define service { + host_name Centreon-central_3 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 299 +} + +define service { + host_name Centreon-central_3 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 300 +} + +define service { + host_name Centreon-central_3 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 301 +} + +define service { + host_name Centreon-central_3 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 302 +} + +define service { + host_name Centreon-central_3 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 303 +} + +define service { + host_name Centreon-central_3 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 304 +} + +define service { + host_name Centreon-central_3 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 305 +} + +define service { + host_name Centreon-central_3 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 306 +} + +define service { + host_name Centreon-central_3 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 307 +} + +define service { + host_name Centreon-central_3 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 308 +} + +define service { + host_name 
Centreon-central_3 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 309 +} + +define service { + host_name Centreon-central_3 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 310 +} + +define service { + host_name Centreon-central_3 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 311 +} + +define service { + host_name Centreon-central_3 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 312 +} + +define service { + host_name Centreon-central_3 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 313 +} + +define service { + host_name Centreon-central_3 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 314 +} + +define service { + host_name Centreon-central_3 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 315 +} + +define service { + host_name Centreon-central_3 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 316 +} + +define service { + host_name Centreon-central_3 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 317 +} + +define service { + host_name Centreon-central_3 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 318 +} + +define service { + host_name Centreon-central_3 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 319 +} + 
+define service { + host_name Centreon-central_3 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 320 +} + +define service { + host_name Centreon-central_3 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 321 +} + +define service { + host_name Centreon-central_3 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 322 +} + +define service { + host_name Centreon-central_3 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 323 +} + +define service { + host_name Centreon-central_3 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 324 +} + +define service { + host_name Centreon-central_3 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 325 +} + +define service { + host_name Centreon-central_3 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 326 +} + +define service { + host_name Centreon-central_3 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 327 +} + +define service { + host_name Centreon-central_4 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 328 +} + +define service { + host_name Centreon-central_4 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 329 +} + +define 
service { + host_name Centreon-central_4 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 330 +} + +define service { + host_name Centreon-central_4 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 331 +} + +define service { + host_name Centreon-central_4 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 332 +} + +define service { + host_name Centreon-central_4 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 333 +} + +define service { + host_name Centreon-central_4 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 334 +} + +define service { + host_name Centreon-central_4 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 335 +} + +define service { + host_name Centreon-central_4 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 336 +} + +define service { + host_name Centreon-central_4 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 337 +} + +define service { + host_name Centreon-central_4 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 338 +} + +define service { + host_name Centreon-central_4 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 339 +} + +define service { + host_name Centreon-central_4 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 340 +} + +define service { + 
host_name Centreon-central_4 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 341 +} + +define service { + host_name Centreon-central_4 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 342 +} + +define service { + host_name Centreon-central_4 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 343 +} + +define service { + host_name Centreon-central_4 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 344 +} + +define service { + host_name Centreon-central_4 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 345 +} + +define service { + host_name Centreon-central_4 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 346 +} + +define service { + host_name Centreon-central_4 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 347 +} + +define service { + host_name Centreon-central_4 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 348 +} + +define service { + host_name Centreon-central_4 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 349 +} + +define service { + host_name Centreon-central_4 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 350 +} + +define service { + host_name Centreon-central_4 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 351 +} + +define 
service { + host_name Centreon-central_4 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 352 +} + +define service { + host_name Centreon-central_4 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 353 +} + +define service { + host_name Centreon-central_4 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 354 +} + +define service { + host_name Centreon-central_4 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 355 +} + +define service { + host_name Centreon-central_4 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 356 +} + +define service { + host_name Centreon-central_4 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 357 +} + +define service { + host_name Centreon-central_4 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 358 +} + +define service { + host_name Centreon-central_4 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 359 +} + +define service { + host_name Centreon-central_4 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 360 +} + +define service { + host_name Centreon-central_5 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 361 +} + 
+define service { + host_name Centreon-central_5 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 362 +} + +define service { + host_name Centreon-central_5 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 363 +} + +define service { + host_name Centreon-central_5 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 364 +} + +define service { + host_name Centreon-central_5 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 365 +} + +define service { + host_name Centreon-central_5 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 366 +} + +define service { + host_name Centreon-central_5 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 367 +} + +define service { + host_name Centreon-central_5 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 368 +} + +define service { + host_name Centreon-central_5 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 369 +} + +define service { + host_name Centreon-central_5 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 370 +} + +define service { + host_name Centreon-central_5 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 371 +} + +define service { + host_name Centreon-central_5 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 372 +} + +define service { + host_name Centreon-central_5 + 
service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 373 +} + +define service { + host_name Centreon-central_5 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 374 +} + +define service { + host_name Centreon-central_5 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 375 +} + +define service { + host_name Centreon-central_5 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 376 +} + +define service { + host_name Centreon-central_5 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 377 +} + +define service { + host_name Centreon-central_5 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 378 +} + +define service { + host_name Centreon-central_5 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 379 +} + +define service { + host_name Centreon-central_5 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 380 +} + +define service { + host_name Centreon-central_5 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 381 +} + +define service { + host_name Centreon-central_5 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 382 +} + +define service { + host_name Centreon-central_5 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 383 +} + +define service { + host_name Centreon-central_5 
+ service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 384 +} + +define service { + host_name Centreon-central_5 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 385 +} + +define service { + host_name Centreon-central_5 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 386 +} + +define service { + host_name Centreon-central_5 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 387 +} + +define service { + host_name Centreon-central_5 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 388 +} + +define service { + host_name Centreon-central_5 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 389 +} + +define service { + host_name Centreon-central_5 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 390 +} + +define service { + host_name Centreon-central_5 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 391 +} + +define service { + host_name Centreon-central_5 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 392 +} + +define service { + host_name Centreon-central_5 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + 
_SERVICE_ID 393 +} + +define service { + host_name Centreon-central_6 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 394 +} + +define service { + host_name Centreon-central_6 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 395 +} + +define service { + host_name Centreon-central_6 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 396 +} + +define service { + host_name Centreon-central_6 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 397 +} + +define service { + host_name Centreon-central_6 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 398 +} + +define service { + host_name Centreon-central_6 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 399 +} + +define service { + host_name Centreon-central_6 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 400 +} + +define service { + host_name Centreon-central_6 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 401 +} + +define service { + host_name Centreon-central_6 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 402 +} + +define service { + host_name Centreon-central_6 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 403 +} + +define service { + host_name Centreon-central_6 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 404 +} + +define 
service { + host_name Centreon-central_6 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 405 +} + +define service { + host_name Centreon-central_6 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 406 +} + +define service { + host_name Centreon-central_6 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 407 +} + +define service { + host_name Centreon-central_6 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 408 +} + +define service { + host_name Centreon-central_6 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 409 +} + +define service { + host_name Centreon-central_6 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 410 +} + +define service { + host_name Centreon-central_6 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 411 +} + +define service { + host_name Centreon-central_6 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 412 +} + +define service { + host_name Centreon-central_6 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 413 +} + +define service { + host_name Centreon-central_6 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 414 +} + +define service { + host_name Centreon-central_6 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 415 +} + +define service { + host_name Centreon-central_6 + service_description 
Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 416 +} + +define service { + host_name Centreon-central_6 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 417 +} + +define service { + host_name Centreon-central_6 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 418 +} + +define service { + host_name Centreon-central_6 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 419 +} + +define service { + host_name Centreon-central_6 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 420 +} + +define service { + host_name Centreon-central_6 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 421 +} + +define service { + host_name Centreon-central_6 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 422 +} + +define service { + host_name Centreon-central_6 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 423 +} + +define service { + host_name Centreon-central_6 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 424 +} + +define service { + host_name Centreon-central_6 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH 
/usr/libexec/postfix/master + _SERVICE_ID 425 +} + +define service { + host_name Centreon-central_6 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 426 +} + +define service { + host_name Centreon-central_7 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 427 +} + +define service { + host_name Centreon-central_7 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 428 +} + +define service { + host_name Centreon-central_7 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 429 +} + +define service { + host_name Centreon-central_7 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 430 +} + +define service { + host_name Centreon-central_7 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 431 +} + +define service { + host_name Centreon-central_7 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 432 +} + +define service { + host_name Centreon-central_7 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 433 +} + +define service { + host_name Centreon-central_7 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 434 +} + +define service { + host_name Centreon-central_7 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 435 +} + +define service { + host_name Centreon-central_7 + service_description Memory + register 1 + use 
OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 436 +} + +define service { + host_name Centreon-central_7 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 437 +} + +define service { + host_name Centreon-central_7 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 438 +} + +define service { + host_name Centreon-central_7 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 439 +} + +define service { + host_name Centreon-central_7 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 440 +} + +define service { + host_name Centreon-central_7 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 441 +} + +define service { + host_name Centreon-central_7 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 442 +} + +define service { + host_name Centreon-central_7 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 443 +} + +define service { + host_name Centreon-central_7 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 444 +} + +define service { + host_name Centreon-central_7 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 445 +} + +define service { + host_name Centreon-central_7 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 446 +} + +define service { + host_name Centreon-central_7 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 447 +} + +define service { + host_name Centreon-central_7 + 
service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 448 +} + +define service { + host_name Centreon-central_7 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 449 +} + +define service { + host_name Centreon-central_7 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 450 +} + +define service { + host_name Centreon-central_7 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 451 +} + +define service { + host_name Centreon-central_7 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 452 +} + +define service { + host_name Centreon-central_7 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 453 +} + +define service { + host_name Centreon-central_7 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 454 +} + +define service { + host_name Centreon-central_7 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 455 +} + +define service { + host_name Centreon-central_7 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 456 +} + +define service { + host_name Centreon-central_7 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + 
_SERVICE_ID 457 +} + +define service { + host_name Centreon-central_7 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 458 +} + +define service { + host_name Centreon-central_7 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 459 +} + +define service { + host_name Centreon-central_8 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 460 +} + +define service { + host_name Centreon-central_8 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 461 +} + +define service { + host_name Centreon-central_8 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 462 +} + +define service { + host_name Centreon-central_8 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 463 +} + +define service { + host_name Centreon-central_8 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 464 +} + +define service { + host_name Centreon-central_8 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 465 +} + +define service { + host_name Centreon-central_8 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 466 +} + +define service { + host_name Centreon-central_8 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 467 +} + +define service { + host_name Centreon-central_8 + 
service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 468 +} + +define service { + host_name Centreon-central_8 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 469 +} + +define service { + host_name Centreon-central_8 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 470 +} + +define service { + host_name Centreon-central_8 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 471 +} + +define service { + host_name Centreon-central_8 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 472 +} + +define service { + host_name Centreon-central_8 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 473 +} + +define service { + host_name Centreon-central_8 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 474 +} + +define service { + host_name Centreon-central_8 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 475 +} + +define service { + host_name Centreon-central_8 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 476 +} + +define service { + host_name Centreon-central_8 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 477 +} + +define service { + host_name Centreon-central_8 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 478 +} + +define service { + host_name Centreon-central_8 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 479 +} + +define service { + host_name 
Centreon-central_8 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 480 +} + +define service { + host_name Centreon-central_8 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 481 +} + +define service { + host_name Centreon-central_8 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 482 +} + +define service { + host_name Centreon-central_8 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 483 +} + +define service { + host_name Centreon-central_8 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 484 +} + +define service { + host_name Centreon-central_8 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 485 +} + +define service { + host_name Centreon-central_8 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 486 +} + +define service { + host_name Centreon-central_8 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 487 +} + +define service { + host_name Centreon-central_8 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 488 +} + +define service { + host_name Centreon-central_8 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 489 +} 
+ +define service { + host_name Centreon-central_8 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 490 +} + +define service { + host_name Centreon-central_8 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 491 +} + +define service { + host_name Centreon-central_8 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 492 +} + +define service { + host_name Centreon-central_9 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 493 +} + +define service { + host_name Centreon-central_9 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 494 +} + +define service { + host_name Centreon-central_9 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 495 +} + +define service { + host_name Centreon-central_9 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 496 +} + +define service { + host_name Centreon-central_9 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 497 +} + +define service { + host_name Centreon-central_9 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 498 +} + +define service { + host_name Centreon-central_9 + service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 499 +} + +define 
service { + host_name Centreon-central_9 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 500 +} + +define service { + host_name Centreon-central_9 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 501 +} + +define service { + host_name Centreon-central_9 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 502 +} + +define service { + host_name Centreon-central_9 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 503 +} + +define service { + host_name Centreon-central_9 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 504 +} + +define service { + host_name Centreon-central_9 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 505 +} + +define service { + host_name Centreon-central_9 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 506 +} + +define service { + host_name Centreon-central_9 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 507 +} + +define service { + host_name Centreon-central_9 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 508 +} + +define service { + host_name Centreon-central_9 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 509 +} + +define service { + host_name Centreon-central_9 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 510 +} + +define service { + host_name Centreon-central_9 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 511 +} + +define service { + host_name 
Centreon-central_9 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 512 +} + +define service { + host_name Centreon-central_9 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 513 +} + +define service { + host_name Centreon-central_9 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 514 +} + +define service { + host_name Centreon-central_9 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 515 +} + +define service { + host_name Centreon-central_9 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 516 +} + +define service { + host_name Centreon-central_9 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 517 +} + +define service { + host_name Centreon-central_9 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 518 +} + +define service { + host_name Centreon-central_9 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 519 +} + +define service { + host_name Centreon-central_9 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 520 +} + +define service { + host_name Centreon-central_9 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 521 +} + +define 
service { + host_name Centreon-central_9 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 522 +} + +define service { + host_name Centreon-central_9 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 523 +} + +define service { + host_name Centreon-central_9 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 524 +} + +define service { + host_name Centreon-central_9 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 525 +} + +define service { + host_name Centreon-central_10 + service_description proc-sshd + register 1 + use App-Monitoring-Centreon-Process-sshd-custom + _SERVICE_ID 526 +} + +define service { + host_name Centreon-central_10 + service_description proc-httpd + register 1 + use App-Monitoring-Centreon-Process-httpd-custom + _SERVICE_ID 527 +} + +define service { + host_name Centreon-central_10 + service_description proc-crond + register 1 + use App-Monitoring-Centreon-Process-crond-custom + _SERVICE_ID 528 +} + +define service { + host_name Centreon-central_10 + service_description proc-centengine + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _SERVICE_ID 529 +} + +define service { + host_name Centreon-central_10 + service_description proc-gorgone + register 1 + use App-Monitoring-Centreon-Process-centcore-custom + _CRITICAL 1: + _PROCESSNAME gorgone-.* + _SERVICE_ID 530 +} + +define service { + host_name Centreon-central_10 + service_description proc-broker-sql + register 1 + use App-Monitoring-Centreon-Process-broker-sql-custom + _PROCESSARGS /etc/centreon-broker/central-broker.json + _SERVICE_ID 531 +} + +define service { + host_name Centreon-central_10 
+ service_description proc-broker-rrd + register 1 + use App-Monitoring-Centreon-Process-broker-rrd-custom + _PROCESSARGS /etc/centreon-broker/central-rrd.json + _SERVICE_ID 532 +} + +define service { + host_name Centreon-central_10 + service_description Ping + register 1 + use Base-Ping-LAN-custom + _SERVICE_ID 533 +} + +define service { + host_name Centreon-central_10 + service_description Swap + register 1 + use OS-Linux-Swap-SNMP-custom + _SERVICE_ID 534 +} + +define service { + host_name Centreon-central_10 + service_description Memory + register 1 + use OS-Linux-Memory-SNMP-custom + _EXTRAOPTIONS '--redhat' + _SERVICE_ID 535 +} + +define service { + host_name Centreon-central_10 + service_description Load + register 1 + use OS-Linux-Load-SNMP-custom + _SERVICE_ID 536 +} + +define service { + host_name Centreon-central_10 + service_description Cpu + register 1 + use OS-Linux-Cpu-SNMP-custom + _SERVICE_ID 537 +} + +define service { + host_name Centreon-central_10 + service_description Partitioning + notification_interval 1440 + register 1 + use App-Centreon-MySQL-Partitioning-custom + group_tags 2 + _SERVICE_ID 538 +} + +define service { + host_name Centreon-central_10 + service_description Slowqueries + register 1 + use App-DB-MySQL-Slowqueries-custom + group_tags 2 + _SERVICE_ID 539 +} + +define service { + host_name Centreon-central_10 + service_description Queries + register 1 + use App-DB-MySQL-Queries-custom + group_tags 2 + _SERVICE_ID 540 +} + +define service { + host_name Centreon-central_10 + service_description Open-Files + register 1 + use App-DB-MySQL-Open-Files-custom + _SERVICE_ID 541 +} + +define service { + host_name Centreon-central_10 + service_description Myisam-Keycache + register 1 + use App-DB-MySQL-Myisam-Keycache-custom + group_tags 2 + _SERVICE_ID 542 +} + +define service { + host_name Centreon-central_10 + service_description Database-Size + register 1 + use App-DB-MySQL-Database-Size-custom + _SERVICE_ID 543 +} + +define service { + 
host_name Centreon-central_10 + service_description Connections-Number + register 1 + use App-DB-MySQL-Connections-Number-custom + group_tags 2 + _SERVICE_ID 544 +} + +define service { + host_name Centreon-central_10 + service_description Connection-Time + register 1 + use App-DB-MySQL-Connection-Time-custom + group_tags 2 + _SERVICE_ID 545 +} + +define service { + host_name Centreon-central_10 + service_description Disk-/ + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME / + _SERVICE_ID 546 +} + +define service { + host_name Centreon-central_10 + service_description Disk-/var/lib/mysql + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/mysql + _SERVICE_ID 547 +} + +define service { + host_name Centreon-central_10 + service_description Disk-/var/cache/centreon/backup + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/cache/centreon/backup + _SERVICE_ID 548 +} + +define service { + host_name Centreon-central_10 + service_description Disk-/var/lib/centreon-broker + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon-broker + _SERVICE_ID 549 +} + +define service { + host_name Centreon-central_10 + service_description Disk-/var/log + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/log + _SERVICE_ID 550 +} + +define service { + host_name Centreon-central_10 + service_description Disk-/var/lib/centreon + register 1 + use OS-Linux-Disk-Generic-Name-SNMP-custom + _DISKNAME /var/lib/centreon + _SERVICE_ID 551 +} + +define service { + host_name Centreon-central_10 + service_description proc-centreontrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME centreontrapd + _PROCESSPATH + _SERVICE_ID 552 +} + +define service { + host_name Centreon-central_10 + service_description proc-snmptrapd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmptrapd + _SERVICE_ID 553 +} + +define 
service { + host_name Centreon-central_10 + service_description proc-rsyslogd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME rsyslogd + _SERVICE_ID 554 +} + +define service { + host_name Centreon-central_10 + service_description proc-snmpd + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME snmpd + _SERVICE_ID 555 +} + +define service { + host_name Centreon-central_10 + service_description proc-broker-watchdog + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME cbwd + _SERVICE_ID 556 +} + +define service { + host_name Centreon-central_10 + service_description proc-postfix + register 1 + use App-Monitoring-Centreon-Process-centengine-custom + _PROCESSNAME master + _PROCESSPATH /usr/libexec/postfix/master + _SERVICE_ID 557 +} + +define service { + host_name Centreon-central_10 + service_description Broker-Stats + register 1 + use App-Monitoring-Centreon-Broker-Stats-Central-custom + _SERVICE_ID 558 +} diff --git a/engine/tests/cfg_files/conf1/severities.cfg b/engine/tests/cfg_files/conf1/severities.cfg new file mode 100644 index 00000000000..e3a700cfb14 --- /dev/null +++ b/engine/tests/cfg_files/conf1/severities.cfg @@ -0,0 +1,32 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define severity { + id 6 + severity_name severity2 + level 2 + icon_id 2 + type service +} + +define severity { + id 5 + severity_name severity1 + level 1 + icon_id 3 + type service +} diff --git a/engine/tests/cfg_files/conf1/tags.cfg b/engine/tests/cfg_files/conf1/tags.cfg new file mode 100644 
index 00000000000..a4a247dd93c --- /dev/null +++ b/engine/tests/cfg_files/conf1/tags.cfg @@ -0,0 +1,34 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define tag { + id 3 + tag_name hg1 + type hostgroup +} + +define tag { + id 2 + tag_name Centreon_platform + type hostgroup +} + +define tag { + id 2 + tag_name Database-MySQL + type servicegroup +} diff --git a/engine/tests/cfg_files/conf1/timeperiods.cfg b/engine/tests/cfg_files/conf1/timeperiods.cfg new file mode 100644 index 00000000000..6ef1dd017b7 --- /dev/null +++ b/engine/tests/cfg_files/conf1/timeperiods.cfg @@ -0,0 +1,42 @@ +################################################################### +# # +# GENERATED BY CENTREON # +# # +# Developed by : # +# - Julien Mathis # +# - Romain Le Merlus # +# # +# www.centreon.com # +# For information : contact@centreon.com # +################################################################### +# # +# Last modification 2023-05-31 18:43 # +# By John_Doe # +# # +################################################################### + +define timeperiod { + name 24x7 + timeperiod_name 24x7 + alias 24_Hours_A_Day,_7_Days_A_Week + sunday 00:00-24:00 + monday 00:00-24:00 + tuesday 00:00-24:00 + wednesday 00:00-24:00 + thursday 00:00-24:00 + friday 00:00-24:00 + saturday 00:00-24:00 +} + +define timeperiod { + name nonworkhours + timeperiod_name nonworkhours + alias Non-Work Hours + sunday 00:00-24:00 + monday 00:00-09:00,17:00-24:00 + tuesday 00:00-09:00,17:00-24:00 + wednesday 00:00-09:00,17:00-24:00 + thursday 00:00-09:00,17:00-24:00 + friday 00:00-09:00,17:00-24:00 + saturday 
00:00-24:00 +} diff --git a/engine/tests/checks/anomalydetection.cc b/engine/tests/checks/anomalydetection.cc index 973241f2cef..27e7984fd6d 100644 --- a/engine/tests/checks/anomalydetection.cc +++ b/engine/tests/checks/anomalydetection.cc @@ -36,8 +36,10 @@ #include "com/centreon/engine/configuration/applier/servicedependency.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/globals.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" +#endif #include "helper.hh" using namespace com::centreon; diff --git a/engine/tests/checks/pb_anomalydetection.cc b/engine/tests/checks/pb_anomalydetection.cc new file mode 100644 index 00000000000..3463de27033 --- /dev/null +++ b/engine/tests/checks/pb_anomalydetection.cc @@ -0,0 +1,1106 @@ +/* + * Copyright 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include "com/centreon/engine/configuration/applier/anomalydetection.hh" + +#include +#include + +#include + +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/anomalydetection.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/servicedependency.hh" +#include "com/centreon/engine/globals.hh" +#include "common/engine_conf/message_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class PbAnomalydetectionCheck : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + init_loggers(); + checks_logger->set_level(spdlog::level::trace); + commands_logger->set_level(spdlog::level::trace); + + configuration::applier::contact ct_aply; + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin", 8)}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + configuration::Anomalydetection 
ad{new_pb_configuration_anomalydetection( + "test_host", "test_ad", "admin", 9, 8, + "/tmp/thresholds_status_change.json")}; + configuration::applier::anomalydetection ad_aply; + ad_aply.add_object(ad); + + ad_aply.resolve_object(ad, err); + + host_map const& hm{engine::host::hosts}; + _host = hm.begin()->second; + _host->set_current_state(engine::host::state_up); + _host->set_state_type(checkable::hard); + _host->set_acknowledgement(AckType::NONE); + _host->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + for (auto& p : sm) { + std::shared_ptr svc = p.second; + if (svc->service_id() == 8) + _svc = svc; + else + _ad = std::static_pointer_cast(svc); + } + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + } + + void TearDown() override { + _host.reset(); + _svc.reset(); + _ad.reset(); + deinit_config_state(); + } + + void CreateFile(std::string const& filename, std::string const& content) { + std::ofstream oss(filename); + oss << content; + oss.close(); + } + + protected: + std::shared_ptr _host; + std::shared_ptr _svc; + std::shared_ptr _ad; +}; + +// clang-format off + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | perf data in range | ad State | ad state type | ad do check + * -------------------------------------------------------------------------------------------------------------------- + * | 0 | 1 | OK | HARD | No | Y | OK | H | N + * | 1 | 1 | CRTCL | SOFT | Yes | Y | OK | H | N + * | 2 | 2 | CRTCL | SOFT | No | Y | OK | H | N + * | 3 | 3 | CRTCL | HARD | Yes | Y | OK | H | N + * | 4 | 3 | OK | HARD | Yes | Y | OK | H | N + * | 5 | 3 | OK | HARD | No | N | CRTCL | S | Y + * | 6 | 1 | OK | HARD | No | N | CRTCL | S | Y + * | 7 | 1 | OK | HARD | No | N | CRTCL | H | Y + * | 8 | 1 | OK | HARD | No | Y 
| OK | H | Y + * | 9 | 1 | OK | HARD | No | Y | OK | H | N + * -------------------------------------------------------------------------------------------------------------------- + */ + +// clang-format on + +enum class e_json_version { V1, V2 }; + +class PbAnomalydetectionCheckStatusChange + : public PbAnomalydetectionCheck, + public testing::WithParamInterface< + std::pair> {}; + +TEST_P(PbAnomalydetectionCheckStatusChange, StatusChanges) { + CreateFile("/tmp/thresholds_status_change.json", GetParam().second); + _ad->init_thresholds(); + _ad->set_status_change(true); + + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(50000); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(50000); + _ad->set_last_state_change(50000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + _ad->set_last_check(50000); + + // --- 1 ---- + set_time(50500); + time_t now = std::time(nullptr); + std::string cmd(fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| metric=80;25;60", + now)); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_svc->get_plugin_output(), "service critical"); + ASSERT_EQ(_svc->get_perf_data(), "metric=80;25;60"); + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + 
checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80 metric_lower_thresholds=73.31 " + "metric_upper_thresholds=83.26 metric_fit=78.26 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80 metric_lower_thresholds=73.31 " + "metric_upper_thresholds=83.26 metric_fit=78.26 " + "metric_lower_margin=-4.95 metric_upper_margin=5.00"); + } + + // --- 2 ---- + set_time(51000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| metric=80;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), 50500); + ASSERT_EQ(_svc->get_current_attempt(), 2); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80 metric_lower_thresholds=72.62 " + "metric_upper_thresholds=82.52 metric_fit=77.52 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80 
metric_lower_thresholds=72.62 " + "metric_upper_thresholds=82.52 metric_fit=77.52 " + "metric_lower_margin=-4.90 metric_upper_margin=5.00"); + } + // --- 3 ---- + set_time(51250); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| metric=80;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), 50500); + ASSERT_EQ(_svc->get_current_attempt(), 3); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80 metric_lower_thresholds=72.28 " + "metric_upper_thresholds=82.15 metric_fit=77.15 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80 metric_lower_thresholds=72.28 " + "metric_upper_thresholds=82.15 metric_fit=77.15 " + "metric_lower_margin=-4.88 metric_upper_margin=5.00"); + } + // --- 4 ---- + set_time(52000); + + now = std::time(nullptr); + time_t previous = now; + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service " + "ok| metric=80foo;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + 
ASSERT_EQ(_svc->get_current_attempt(), 1); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00foo"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80foo metric_lower_thresholds=71.24foo " + "metric_upper_thresholds=81.04foo metric_fit=76.04foo " + "metric_lower_margin=0.00foo metric_upper_margin=0.00foo"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=80foo metric_lower_thresholds=71.24foo " + "metric_upper_thresholds=81.04foo metric_fit=76.04foo " + "metric_lower_margin=-4.80foo metric_upper_margin=5.00foo"); + } + // --- 5 ---- + set_time(52500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " + "metric=30%;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), 52000); + ASSERT_EQ(_svc->get_last_state_change(), 52000); + ASSERT_EQ(_svc->get_current_attempt(), 1); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 30.00% " + "which is outside the forecasting range [70.55% : 80.30%]"); + if (GetParam().first == 
e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=30% metric_lower_thresholds=70.55% " + "metric_upper_thresholds=80.30% metric_fit=75.30% " + "metric_lower_margin=0.00% metric_upper_margin=0.00%"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=30% metric_lower_thresholds=70.55% " + "metric_upper_thresholds=80.30% metric_fit=75.30% " + "metric_lower_margin=-4.75% metric_upper_margin=5.00%"); + } + + ASSERT_EQ(_ad->get_current_attempt(), 1); + + // --- 6 ---- + set_time(53000); + + previous = now; + now = std::time(nullptr); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), previous); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 12.00 " + "which is outside the forecasting range [69.86 : 79.56]"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.86 " + "metric_upper_thresholds=79.56 metric_fit=74.56 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.86 " + "metric_upper_thresholds=79.56 metric_fit=74.56 " + "metric_lower_margin=-4.70 metric_upper_margin=5.00"); + } + ASSERT_EQ(_ad->get_current_attempt(), 2); + + // --- 7 ---- + set_time(53500); + + previous = now; + now = std::time(nullptr); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + 
ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_hard_state_change(), now); + ASSERT_EQ(_ad->get_last_state_change(), 52500); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 12.00 " + "which is outside the forecasting range [69.17 : 78.82]"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.17 " + "metric_upper_thresholds=78.82 metric_fit=73.82 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.17 " + "metric_upper_thresholds=78.82 metric_fit=73.82 " + "metric_lower_margin=-4.65 metric_upper_margin=5.00"); + } + ASSERT_EQ(_ad->get_current_attempt(), 3); + + // --- 8 ---- + set_time(54000); + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + previous = now; + now = std::time(nullptr); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_hard_state_change(), now); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=70.00%"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=70% metric_lower_thresholds=68.48% " + "metric_upper_thresholds=78.08% metric_fit=73.08% " + "metric_lower_margin=0.00% metric_upper_margin=0.00%"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=70% metric_lower_thresholds=68.48% " + "metric_upper_thresholds=78.08% metric_fit=73.08% " + "metric_lower_margin=-4.60% metric_upper_margin=5.00%"); + } + 
ASSERT_EQ(_ad->get_current_attempt(), 1); + + // --- 9 ---- + set_time(54500); + + previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;4;service unknown", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_svc->get_last_hard_state_change(), 52000); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_svc->get_plugin_output(), "service unknown"); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_hard_state_change(), 54000); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), + "UNKNOWN: Unknown activity, metric did not return any values"); + ASSERT_EQ(_ad->get_current_attempt(), 1); + + ::unlink("/tmp/thresholds_status_change.json"); +} + +TEST_P(PbAnomalydetectionCheckStatusChange, StatusChangesWithType) { + CreateFile("/tmp/thresholds_status_change.json", GetParam().second); + _ad->init_thresholds(); + _ad->set_status_change(true); + + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(50000); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(50000); + _ad->set_last_state_change(50000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + 
_ad->set_last_check(50000); + + // --- 1 ---- + set_time(50500); + time_t now = std::time(nullptr); + std::string cmd(fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| 'g[metric]'=80;25;60", + now)); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_svc->get_plugin_output(), "service critical"); + ASSERT_EQ(_svc->get_perf_data(), "'g[metric]'=80;25;60"); + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80 metric_lower_thresholds=73.31 " + "metric_upper_thresholds=83.26 metric_fit=78.26 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80 metric_lower_thresholds=73.31 " + "metric_upper_thresholds=83.26 metric_fit=78.26 " + "metric_lower_margin=-4.95 metric_upper_margin=5.00"); + } + + // --- 2 ---- + set_time(51000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| 'g[metric]'=80;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), 
engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), 50500); + ASSERT_EQ(_svc->get_current_attempt(), 2); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80 metric_lower_thresholds=72.62 " + "metric_upper_thresholds=82.52 metric_fit=77.52 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80 metric_lower_thresholds=72.62 " + "metric_upper_thresholds=82.52 metric_fit=77.52 " + "metric_lower_margin=-4.90 metric_upper_margin=5.00"); + } + // --- 3 ---- + set_time(51250); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| 'g[metric]'=80;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), 50500); + ASSERT_EQ(_svc->get_current_attempt(), 3); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); + if (GetParam().first == e_json_version::V1) { + 
ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80 metric_lower_thresholds=72.28 " + "metric_upper_thresholds=82.15 metric_fit=77.15 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80 metric_lower_thresholds=72.28 " + "metric_upper_thresholds=82.15 metric_fit=77.15 " + "metric_lower_margin=-4.88 metric_upper_margin=5.00"); + } + // --- 4 ---- + set_time(52000); + + now = std::time(nullptr); + time_t previous = now; + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service " + "ok| 'g[metric]'=80foo;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00foo"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80foo metric_lower_thresholds=71.24foo " + "metric_upper_thresholds=81.04foo metric_fit=76.04foo " + "metric_lower_margin=0.00foo metric_upper_margin=0.00foo"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=80foo metric_lower_thresholds=71.24foo " + "metric_upper_thresholds=81.04foo metric_fit=76.04foo " + "metric_lower_margin=-4.80foo metric_upper_margin=5.00foo"); + } + // --- 5 ---- + set_time(52500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " + 
"'g[metric]'=30%;25;60", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), 52000); + ASSERT_EQ(_svc->get_last_state_change(), 52000); + ASSERT_EQ(_svc->get_current_attempt(), 1); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 30.00% " + "which is outside the forecasting range [70.55% : 80.30%]"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=30% metric_lower_thresholds=70.55% " + "metric_upper_thresholds=80.30% metric_fit=75.30% " + "metric_lower_margin=0.00% metric_upper_margin=0.00%"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "'g[metric]'=30% metric_lower_thresholds=70.55% " + "metric_upper_thresholds=80.30% metric_fit=75.30% " + "metric_lower_margin=-4.75% metric_upper_margin=5.00%"); + } + + ASSERT_EQ(_ad->get_current_attempt(), 1); + + // --- 6 ---- + set_time(53000); + + previous = now; + now = std::time(nullptr); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), previous); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 12.00 " + "which is outside the forecasting 
range [69.86 : 79.56]"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.86 " + "metric_upper_thresholds=79.56 metric_fit=74.56 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.86 " + "metric_upper_thresholds=79.56 metric_fit=74.56 " + "metric_lower_margin=-4.70 metric_upper_margin=5.00"); + } + ASSERT_EQ(_ad->get_current_attempt(), 2); + + // --- 7 ---- + set_time(53500); + + previous = now; + now = std::time(nullptr); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_hard_state_change(), now); + ASSERT_EQ(_ad->get_last_state_change(), 52500); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 12.00 " + "which is outside the forecasting range [69.17 : 78.82]"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.17 " + "metric_upper_thresholds=78.82 metric_fit=73.82 " + "metric_lower_margin=0.00 metric_upper_margin=0.00"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 metric_lower_thresholds=69.17 " + "metric_upper_thresholds=78.82 metric_fit=73.82 " + "metric_lower_margin=-4.65 metric_upper_margin=5.00"); + } + ASSERT_EQ(_ad->get_current_attempt(), 3); + + // --- 8 ---- + set_time(54000); + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + previous = now; + now = std::time(nullptr); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + 
checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_hard_state_change(), now); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=70.00%"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "metric=70% metric_lower_thresholds=68.48% " + "metric_upper_thresholds=78.08% metric_fit=73.08% " + "metric_lower_margin=0.00% metric_upper_margin=0.00%"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "metric=70% metric_lower_thresholds=68.48% " + "metric_upper_thresholds=78.08% metric_fit=73.08% " + "metric_lower_margin=-4.60% metric_upper_margin=5.00%"); + } + ASSERT_EQ(_ad->get_current_attempt(), 1); + + // --- 9 ---- + set_time(54500); + + previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;4;service unknown", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_svc->get_last_hard_state_change(), 52000); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_svc->get_plugin_output(), "service unknown"); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_hard_state_change(), 54000); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), + "UNKNOWN: Unknown activity, metric did not return any values"); + 
ASSERT_EQ(_ad->get_current_attempt(), 1); + + ::unlink("/tmp/thresholds_status_change.json"); +} + +INSTANTIATE_TEST_SUITE_P( + PbAnomalydetectionCheckStatusChange, + PbAnomalydetectionCheckStatusChange, + testing::Values( + std::make_pair( + e_json_version::V1, + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n " + "\"metric_name\": " + "\"metric\",\n \"predict\": [{\n \"timestamp\": 50000,\n " + "\"upper\": " + "84,\n \"lower\": 74,\n \"fit\": 79\n }, {\n \"timestamp\": " + "100000,\n " + "\"upper\": 10,\n \"lower\": 5,\n \"fit\": 5\n }, {\n " + "\"timestamp\": " + "150000,\n \"upper\": 100,\n \"lower\": 93,\n \"fit\": 96.5\n }, " + "{\n " + "\"timestamp\": 200000,\n \"upper\": 100,\n \"lower\": 97,\n " + "\"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper\": 100,\n " + "\"lower\": " + "21,\n \"fit\": 60.5\n }\n]}]"), + std::make_pair( + e_json_version::V2, + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n " + "\"metric_name\": " + "\"metric\",\n \"sensitivity\":1,\n \"predict\": [{\n " + "\"timestamp\": " + "50000,\n \"upper_margin\": " + "5,\n \"lower_margin\": -5,\n \"fit\": 79\n }, {\n \"timestamp\": " + "100000,\n " + "\"upper_margin\": 5,\n \"lower_margin\": 0,\n \"fit\": 5\n }, {\n " + "\"timestamp\": " + "150000,\n \"upper\": 3.5,\n \"lower\": -3.5,\n \"fit\": 96.5\n }, " + "{\n " + "\"timestamp\": 200000,\n \"upper_margin\": 1.5,\n " + "\"lower_margin\": " + "-1.5,\n \"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper_margin\": 39.5,\n " + "\"lower_margin\": " + "-39.5,\n \"fit\": 60.5\n }\n]}]"))); + +class PbAnomalydetectionCheckMetricWithQuotes + : public PbAnomalydetectionCheck, + public testing::WithParamInterface< + std::pair> {}; + +TEST_P(PbAnomalydetectionCheckMetricWithQuotes, MetricWithQuotes) { + CreateFile("/tmp/thresholds_status_change.json", GetParam().second); + + _ad->init_thresholds(); + _ad->set_status_change(true); + + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + 
_svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(50000); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(50000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + _ad->set_last_check(50000); + + set_time(50500); + std::ostringstream oss; + std::time_t now{std::time(nullptr)}; + oss << '[' << now << ']' + << " PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical| " + "'metric'=90MT;25;60;0;100"; + std::string cmd{oss.str()}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_svc->get_plugin_output(), "service critical"); + ASSERT_EQ(_svc->get_perf_data(), "'metric'=90MT;25;60;0;100"); + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 90.00MT " + "which is outside the forecasting range [73.31MT : 83.26MT]"); + if (GetParam().first == e_json_version::V1) { + ASSERT_EQ(_ad->get_perf_data(), + "'metric'=90MT;;;0;100 
metric_lower_thresholds=73.31MT;;;0;100 " + "metric_upper_thresholds=83.26MT;;;0;100 " + "metric_fit=78.26MT;;;0;100 metric_lower_margin=0.00MT;;;0;100 " + "metric_upper_margin=0.00MT;;;0;100"); + } else { + ASSERT_EQ(_ad->get_perf_data(), + "'metric'=90MT;;;0;100 metric_lower_thresholds=73.31MT;;;0;100 " + "metric_upper_thresholds=83.26MT;;;0;100 " + "metric_fit=78.26MT;;;0;100 metric_lower_margin=-4.95MT;;;0;100 " + "metric_upper_margin=5.00MT;;;0;100"); + } + + ::unlink("/tmp/thresholds_status_change.json"); +} + +INSTANTIATE_TEST_SUITE_P( + PbAnomalydetectionCheckMetricWithQuotes, + PbAnomalydetectionCheckMetricWithQuotes, + testing::Values( + std::make_pair( + e_json_version::V1, + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n " + "\"metric_name\": " + "\"metric\",\n \"predict\": [{\n \"timestamp\": 50000,\n " + "\"upper\": " + "84,\n \"lower\": 74,\n \"fit\": 79\n }, {\n \"timestamp\": " + "100000,\n " + "\"upper\": 10,\n \"lower\": 5,\n \"fit\": 5\n }, {\n " + "\"timestamp\": " + "150000,\n \"upper\": 100,\n \"lower\": 93,\n \"fit\": 96.5\n }, " + "{\n " + "\"timestamp\": 200000,\n \"upper\": 100,\n \"lower\": 97,\n " + "\"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper\": 100,\n " + "\"lower\": " + "21,\n \"fit\": 60.5\n }\n]}]"), + std::make_pair( + e_json_version::V2, + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n " + "\"metric_name\": " + "\"metric\",\n \"sensitivity\":1,\n \"predict\": [{\n " + "\"timestamp\": " + "50000,\n \"upper_margin\": " + "5,\n \"lower_margin\": -5,\n \"fit\": 79\n }, {\n \"timestamp\": " + "100000,\n " + "\"upper_margin\": 5,\n \"lower_margin\": 0,\n \"fit\": 5\n }, {\n " + "\"timestamp\": " + "150000,\n \"upper_margin\": 3.5,\n \"lower_margin\": -3.5,\n " + "\"fit\": " + "96.5\n }, {\n " + "\"timestamp\": 200000,\n \"upper_margin\": 1.5,\n " + "\"lower_margin\": " + "-1.5,\n \"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper_margin\": 39.5,\n " + "\"lower_margin\": " + "-39.5,\n \"fit\": 
60.5\n }\n]}]"))); + +TEST_F(PbAnomalydetectionCheck, BadThresholdsFile) { + ::unlink("/tmp/thresholds_status_change.json"); + set_time(50000); + std::time_t now{std::time(nullptr)}; + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(50000); + _svc->set_perf_data("metric=90MT;25;60;0;100"); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(50000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + _ad->set_last_check(50000); + + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is not viable for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=90MT;25;60;0;100"); + + set_time(51000); + now = std::time(nullptr); + // _ad is not OK so _ad will do the check + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + 
ASSERT_EQ(_ad->get_current_attempt(), 2); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is not viable for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=70%;50;75"); + + ::unlink("/tmp/thresholds_status_change.json"); +} + +class PbAnomalydetectionCheckFileTooOld + : public PbAnomalydetectionCheck, + public testing::WithParamInterface< + std::pair> {}; + +TEST_P(PbAnomalydetectionCheckFileTooOld, FileTooOld) { + CreateFile("/tmp/thresholds_status_change.json", GetParam().second); + _ad->init_thresholds(); + _ad->set_status_change(true); + + set_time(300000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(300000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(300000); + _svc->set_perf_data("metric=90MT;25;60;0;100"); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(300000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + _ad->set_last_check(300000); + + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), 300000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is too old compared to the check timestamp " + "300000 for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=90MT;25;60;0;100"); + + set_time(301000); + // _ad is not OK so _ad will do the check + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + + 
_ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion( + checks::checker::e_completion_filter::service); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), 300000); + ASSERT_EQ(_ad->get_current_attempt(), 2); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is too old compared to the check timestamp " + "301000 for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=70%;50;75"); + + ::unlink("/tmp/thresholds_status_change.json"); +} + +INSTANTIATE_TEST_SUITE_P( + FileTooOld, + PbAnomalydetectionCheckFileTooOld, + testing::Values( + std::make_pair( + e_json_version::V1, + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n " + "\"metric_name\": " + "\"metric\",\n \"predict\": [{\n \"timestamp\": 50000,\n " + "\"upper\": " + "84,\n \"lower\": 74,\n \"fit\": 79\n }, {\n \"timestamp\": " + "100000,\n " + "\"upper\": 10,\n \"lower\": 5,\n \"fit\": 51.5\n }, {\n " + "\"timestamp\": " + "150000,\n \"upper\": 100,\n \"lower\": 93,\n \"fit\": 96.5\n }, " + "{\n " + "\"timestamp\": 200000,\n \"upper\": 100,\n \"lower\": 97,\n " + "\"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper\": 100,\n " + "\"lower\": " + "21,\n \"fit\": 60.5\n }\n]}]"), + std::make_pair( + e_json_version::V2, + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n " + "\"metric_name\": " + "\"metric\",\n \"sensitivity\":1,\n \"predict\": [{\n " + "\"timestamp\": " + "50000,\n \"upper_margin\": " + "5,\n \"lower_margin\": -5,\n \"fit\": 79\n }, {\n \"timestamp\": " + "100000,\n " + "\"upper_margin\": 5,\n \"lower\": 0,\n \"fit\": 5\n }, {\n " + "\"timestamp\": " + "150000,\n \"upper_margin\": 3.5,\n \"lower_margin\": -3.5,\n " + "\"fit\": " + "96.5\n }, {\n " + "\"timestamp\": 200000,\n \"upper_margin\": 1.5,\n " + "\"lower_margin\": " + 
"-1.5,\n \"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper_margin\": 39.5,\n " + "\"lower_margin\": " + "-39.5,\n \"fit\": 60.5\n }\n]}]"))); diff --git a/engine/tests/checks/pb_service_check.cc b/engine/tests/checks/pb_service_check.cc new file mode 100644 index 00000000000..fb04308b8ac --- /dev/null +++ b/engine/tests/checks/pb_service_check.cc @@ -0,0 +1,598 @@ +/* + * Copyright 2020-2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include + +#include + +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/servicedependency.hh" +#include "com/centreon/engine/configuration/applier/serviceescalation.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/serviceescalation.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "common/engine_conf/message_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +extern configuration::State pb_config; + +class PbServiceCheck : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + pb_config.clear_contacts(); + configuration::applier::contact ct_aply; + configuration::Contact ctct = new_pb_configuration_contact("admin", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + configuration::Host hst = new_pb_configuration_host("test_host", "admin"); + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc = + new_pb_configuration_service("test_host", "test_svc", "admin"); + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + 
hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host = hm.begin()->second; + _host->set_current_state(engine::host::state_up); + _host->set_state_type(checkable::hard); + _host->set_acknowledgement(AckType::NONE); + _host->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + // This is to not be bothered by host checks during service checks + pb_config.set_host_check_timeout(10000); + } + + void TearDown() override { + _host.reset(); + _svc.reset(); + deinit_config_state(); + } + + protected: + std::shared_ptr _host; + std::shared_ptr _svc; +}; + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 1 | OK | HARD | No | + * | 1 | 1 | CRTCL | SOFT | Yes | + * | 2 | 2 | WARN | SOFT | Yes | + * | 3 | 3 | CRTCL | HARD | Yes | + * | 4 | 3 | WARN | HARD | Yes | + * | 5 | 3 | WARN | HARD | No | + * | 6 | 1 | OK | HARD | Yes | + * | 7 | 1 | OK | HARD | No | + * | 8 | 1 | UNKNWN| SOFT | Yes | + * | 9 | 2 | OK | SOFT | Yes | + * | 10 | 1 | OK | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, SimpleCheck) { + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + + set_time(50500); + + std::time_t now{std::time(nullptr)}; + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + 
now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(51000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(51500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(52000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(52500); + + time_t previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + 
ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(53000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(53500); + + previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(54000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;4;service unknown", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_svc->get_last_hard_state_change(), now - 1000); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(54500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + 
ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(55000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); +} + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 1 | OK | HARD | No | + * | 1 | 1 | CRTCL | SOFT | Yes | + * | 2 | 2 | CRTCL | SOFT | No | + * | 3 | 3 | CRTCL | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, OkCritical) { + set_time(55000); + + time_t now = std::time(nullptr); + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(55500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56000); + + time_t previous = now; + now = std::time(nullptr); + cmd = 
fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(56500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); +} + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 2 | OK | SOFT | No | + * | 1 | 1 | CRTCL | SOFT | Yes | + * | 2 | 2 | CRTCL | SOFT | No | + * | 3 | 3 | CRTCL | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, OkSoft_Critical) { + set_time(55000); + + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_state_change(55000); + _svc->set_current_attempt(2); + _svc->set_state_type(checkable::soft); + _svc->set_accept_passive_checks(true); + + set_time(55500); + + time_t now = std::time(nullptr); + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + 
ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56000); + + time_t previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(56500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); +} + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 1 | OK | HARD | No | + * | 1 | 2 | OK | HARD | No | + * | 2 | 3 | WARN | HARD | Yes | + * | 3 | 4 | CRTCL | HARD | Yes | + * | 4 | 5 | CRTCL | HARD | Yes | + * | 5 | 6 | CRTCL | HARD | Yes | + * | 6 | 7 | CRTCL | HARD | No | + * | 7 | 8 | CRTCL | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, OkCriticalStalking) { + set_time(55000); + + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_state_change(55000); + _svc->set_current_attempt(2); + _svc->set_state_type(checkable::soft); + _svc->set_accept_passive_checks(true); + _svc->set_stalk_on(static_cast(-1)); + + set_time(55500); + testing::internal::CaptureStdout(); + time_t now = std::time(nullptr); + std::string cmd{fmt::format( + "[{}] 
PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;RAID array " + "optimal", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56000); + time_t previous = now; + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;RAID array " + "optimal", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56500); + for (int i = 0; i < 3; i++) { + // When i == 0, the state_critical is soft => no notification + // When i == 1, the state_critical is soft => no notification + // When i == 2, the state_critical is hard down => notification + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;RAID array " + "degraded (1 drive bad, 1 hot spare rebuilding)", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + } + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(57000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "degraded (2 drives bad, 1 host spare online, 1 hot spare rebuilding)", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + 
ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(57500); + previous = now; + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "degraded (3 drives bad, 2 hot spares online)", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(58000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "failed", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(58500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "failed", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(59000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "failed", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), 
engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + std::string out{testing::internal::GetCapturedStdout()}; + std::cout << out << std::endl; + ASSERT_NE( + out.find( + "SERVICE ALERT: test_host;test_svc;OK;HARD;1;RAID array optimal"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;WARNING;HARD;3;RAID " + "array degraded (1 drive bad, 1 hot spare rebuilding)"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;CRITICAL;HARD;3;RAID " + "array degraded (2 drives bad, 1 host spare online, 1 hot " + "spare rebuilding)"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;CRITICAL;HARD;3;RAID " + "array degraded (3 drives bad, 2 hot spares online"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;CRITICAL;HARD;3;RAID " + "array failed"), + std::string::npos); +} + +TEST_F(PbServiceCheck, CheckRemoveCheck) { + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + + set_time(50500); + std::time_t now{std::time(nullptr)}; + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now)}; + process_external_command(cmd.c_str()); + + /* We simulate a reload that destroyed the service */ + engine::service::services.clear(); + engine::service::services_by_id.clear(); + _svc.reset(); + + checks::checker::instance().reap(); +} + +TEST_F(PbServiceCheck, CheckUpdateMultilineOutput) { + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + 
_svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + + set_time(50500); + std::time_t now{std::time(nullptr)}; + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical\\nline2\\nline3\\nline4\\nline5|res;2;5;5\\n", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_plugin_output(), "service critical"); + ASSERT_EQ(_svc->get_long_plugin_output(), "line2\\nline3\\nline4\\nline5"); + ASSERT_EQ(_svc->get_perf_data(), "res;2;5;5"); +} diff --git a/engine/tests/checks/pb_service_retention.cc b/engine/tests/checks/pb_service_retention.cc new file mode 100644 index 00000000000..451267290bc --- /dev/null +++ b/engine/tests/checks/pb_service_retention.cc @@ -0,0 +1,562 @@ +/* + * Copyright 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include + +#include + +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/clib.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/servicedependency.hh" +#include "com/centreon/engine/configuration/applier/serviceescalation.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/retention/dump.hh" +#include "com/centreon/engine/serviceescalation.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "common/engine_conf/message_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +extern configuration::State pb_config; + +class PbServiceRetention : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + pb_config.clear_contacts(); + configuration::applier::contact ct_aply; + configuration::Contact ctct = new_pb_configuration_contact("admin", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + configuration::Host hst = new_pb_configuration_host("test_host", "admin"); + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc = + new_pb_configuration_service("test_host", "test_svc", "admin"); 
+ configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host = hm.begin()->second; + _host->set_current_state(engine::host::state_up); + _host->set_state_type(checkable::hard); + _host->set_acknowledgement(AckType::NONE); + _host->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + } + + void TearDown() override { + _host.reset(); + _svc.reset(); + deinit_config_state(); + } + + protected: + std::shared_ptr _host; + std::shared_ptr _svc; +}; + +TEST_F(PbServiceRetention, RetentionWithMultilineOutput) { + std::ostringstream oss; + set_time(55000); + + time_t now = std::time(nullptr); + oss.str(""); + oss << '[' << now << ']' + << " PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;" + "OK: Response time 0.123s | 'time'=0.123s;0:3;0:5;0; " + "'size'=81439B;;;0;\n" + "Kibana
Loading Kibana

Please " + "upgrade your browser

This Kibana " + "installation has strict security requirements enabled that your " + "current browser does not meet.
"; + + std::string cmd = oss.str(); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_perf_data(), + "'time'=0.123s;0:3;0:5;0; 'size'=81439B;;;0;"); + ASSERT_EQ(_svc->get_current_attempt(), 1); + oss.str(""); + retention::dump::services(oss); + std::string str(oss.str()); + ASSERT_NE( + str.find( + "performance_data='time'=0.123s;0:3;0:5;0; 'size'=81439B;;;0;\n"), + std::string::npos); + + std::shared_ptr cmt = std::make_shared( + comment::service, comment::flapping, _svc->host_id(), _svc->service_id(), + time(nullptr), "test1", "test2", false, comment::internal, false, + (time_t)0); + + comment::comments.insert({cmt->get_comment_id(), cmt}); + + oss.str(""); + retention::dump::comments(oss); + ASSERT_NE(str.find("host_name=test_host"), std::string::npos); + ASSERT_NE(str.find("service_description=test_svc"), std::string::npos); +} diff --git a/engine/tests/commands/bin_connector_test_run.cc b/engine/tests/commands/bin_connector_test_run.cc index b899d59a165..3258433846b 100644 --- a/engine/tests/commands/bin_connector_test_run.cc +++ b/engine/tests/commands/bin_connector_test_run.cc @@ -1,21 +1,21 @@ /** -* Copyright 2011-2013 Merethis -* -* This file is part of Centreon Engine. -* -* Centreon Engine is free software: you can redistribute it and/or -* modify it under the terms of the GNU General Public License version 2 -* as published by the Free Software Foundation. -* -* Centreon Engine is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* General Public License for more details. -* -* You should have received a copy of the GNU General Public License -* along with Centreon Engine. If not, see -* . 
-*/ + * Copyright 2011-2013 Merethis + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ #include #include diff --git a/engine/tests/commands/pbsimple-command.cc b/engine/tests/commands/pbsimple-command.cc new file mode 100644 index 00000000000..514b63bad00 --- /dev/null +++ b/engine/tests/commands/pbsimple-command.cc @@ -0,0 +1,267 @@ +/* + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include + +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/commands/raw.hh" +#include "common/log_v2/log_v2.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::commands; +using com::centreon::common::log_v2::log_v2; + +static void CreateFile(const std::string& filename, + const std::string& content) { + std::ofstream oss(filename); + oss << content; +} + +class PbSimpleCommand : public ::testing::Test { + protected: + std::shared_ptr logger; + + public: + void SetUp() override { + logger = log_v2::instance().get(log_v2::COMMANDS); + set_time(-1); + init_config_state(); +#ifdef LEGACY_CONF + config->interval_length(1); +#else + pb_config.set_interval_length(1); +#endif + } + + void TearDown() override { + deinit_config_state(); + } +}; + +class my_listener : public commands::command_listener { + mutable std::mutex _mutex; + commands::result _res; + + public: + result& get_result() { + std::lock_guard guard(_mutex); + return _res; + } + + void finished(result const& res) throw() override { + std::lock_guard guard(_mutex); + _res = res; + } +}; + +// Given an empty name +// When the add_command method is called with it as argument, +// Then it returns a NULL pointer. +TEST_F(PbSimpleCommand, NewCommandWithNoName) { + ASSERT_THROW(new commands::raw("", "bar"), std::exception); +} + +// Given a command to store, +// When the add_command method is called with an empty value, +// Then it returns a NULL pointer. +TEST_F(PbSimpleCommand, NewCommandWithNoValue) { + std::unique_ptr cmd; + ASSERT_THROW(cmd.reset(new commands::raw("foo", "")), std::exception); +} + +// Given an already existing command +// When the add_command method is called with the same name +// Then it returns a NULL pointer. 
+TEST_F(PbSimpleCommand, CommandAlreadyExisting) { + std::unique_ptr cmd; + ASSERT_NO_THROW(cmd.reset(new commands::raw("toto", "/bin/ls"))); +} + +// Given a name and a command line +// When the add_command method is called +// Then a new raw command is built +// When sync executed +// Then we have the output in the result class. +TEST_F(PbSimpleCommand, NewCommandSync) { + std::unique_ptr cmd{ + new commands::raw("test", "/bin/echo bonjour")}; + nagios_macros* mac(get_global_macros()); + commands::result res; + std::string cc(cmd->process_cmd(mac)); + ASSERT_EQ(cc, "/bin/echo bonjour"); + cmd->run(cc, *mac, 2, res); + ASSERT_EQ(res.output, "bonjour\n"); +} + +// Given a name and a command line +// When the add_command method is called +// Then a new raw command is built +// When async executed +// Then we have the output in the result class. +TEST_F(PbSimpleCommand, NewCommandAsync) { + std::unique_ptr lstnr(new my_listener); + std::unique_ptr cmd{ + new commands::raw("test", "/bin/echo bonjour")}; + cmd->set_listener(lstnr.get()); + nagios_macros* mac(get_global_macros()); + std::string cc(cmd->process_cmd(mac)); + ASSERT_EQ(cc, "/bin/echo bonjour"); + cmd->run(cc, *mac, 2, std::make_shared()); + int timeout{0}; + int max_timeout{3000}; + while (timeout < max_timeout && lstnr->get_result().output == "") { + usleep(100000); + ++timeout; + } + ASSERT_TRUE(timeout < max_timeout); + ASSERT_EQ(lstnr->get_result().output, "bonjour\n"); +} + +TEST_F(PbSimpleCommand, LongCommandAsync) { + std::unique_ptr lstnr(new my_listener); + std::unique_ptr cmd{ + new commands::raw("test", "/bin/sleep 10")}; + cmd->set_listener(lstnr.get()); + nagios_macros* mac(get_global_macros()); + std::string cc(cmd->process_cmd(mac)); + ASSERT_EQ(cc, "/bin/sleep 10"); + + // We force the time to be coherent with now because the function gettimeofday + // that is not simulated. 
+ set_time(std::time(nullptr)); + cmd->run(cc, *mac, 2, std::make_shared()); + int timeout{0}; + int max_timeout{15}; + while (timeout < max_timeout && lstnr->get_result().output == "") { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + set_time(std::time(nullptr) + 1); + ++timeout; + } + ASSERT_EQ(lstnr->get_result().output, "(Process Timeout)"); +} + +TEST_F(PbSimpleCommand, TooRecentDoubleCommand) { + logger->set_level(spdlog::level::trace); + CreateFile("/tmp/TooRecentDoubleCommand.sh", + "echo -n tutu | tee -a /tmp/TooRecentDoubleCommand;"); + + const char* path = "/tmp/TooRecentDoubleCommand"; + ::unlink(path); + std::unique_ptr lstnr(std::make_unique()); + std::unique_ptr cmd{std::make_unique( + "test", "/bin/sh /tmp/TooRecentDoubleCommand.sh")}; + cmd->set_listener(lstnr.get()); + const void* caller[] = {nullptr, path}; + cmd->add_caller_group(caller, caller + 2); + nagios_macros* mac(get_global_macros()); + std::string cc(cmd->process_cmd(mac)); + time_t now = 10000; + set_time(now); + cmd->run(cc, *mac, 2, std::make_shared(), caller[0]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + std::this_thread::sleep_for(std::chrono::seconds(1)); + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + struct stat file_stat; + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 4); + ++now; + cmd->run(cc, *mac, 2, std::make_shared(), caller[1]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + ASSERT_EQ(stat(path, &file_stat), 0); + 
ASSERT_EQ(file_stat.st_size, 4); +} + +TEST_F(PbSimpleCommand, SufficientOldDoubleCommand) { + logger->set_level(spdlog::level::trace); + CreateFile("/tmp/TooRecentDoubleCommand.sh", + "echo -n tutu | tee -a /tmp/TooRecentDoubleCommand;"); + + const char* path = "/tmp/TooRecentDoubleCommand"; + ::unlink(path); + std::unique_ptr lstnr(std::make_unique()); + std::unique_ptr cmd{std::make_unique( + "test", "/bin/sh /tmp/TooRecentDoubleCommand.sh")}; + cmd->set_listener(lstnr.get()); + const void* caller[] = {nullptr, path}; + cmd->add_caller_group(caller, caller + 2); + nagios_macros* mac(get_global_macros()); + std::string cc(cmd->process_cmd(mac)); + time_t now = 10000; + set_time(now); + cmd->run(cc, *mac, 2, std::make_shared(), caller[0]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + std::this_thread::sleep_for(std::chrono::seconds(1)); + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + struct stat file_stat; + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 4); + now += 10; + set_time(now); + lstnr->get_result().output = ""; + cmd->run(cc, *mac, 2, std::make_shared(), caller[1]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 8); +} + +TEST_F(PbSimpleCommand, WithOneArgument) { + auto lstnr = std::make_unique(); + std::unique_ptr cmd{ + std::make_unique("test", "/bin/echo $ARG1$")}; + cmd->set_listener(lstnr.get()); + nagios_macros* mac(get_global_macros()); + 
mac->argv[0] = "Hello"; + mac->argv[1] = ""; + std::string cc(cmd->process_cmd(mac)); + ASSERT_EQ(cc, "/bin/echo Hello"); + cmd->run(cc, *mac, 2, std::make_shared()); + int timeout{0}; + int max_timeout{3000}; + while (timeout < max_timeout && lstnr->get_result().output == "") { + usleep(100000); + ++timeout; + } + ASSERT_TRUE(timeout < max_timeout); + ASSERT_EQ(lstnr->get_result().output, "Hello\n"); +} diff --git a/engine/tests/configuration/applier-severity.cc b/engine/tests/configuration/applier-severity.cc index 8c47ac08900..930c1e410a0 100644 --- a/engine/tests/configuration/applier-severity.cc +++ b/engine/tests/configuration/applier-severity.cc @@ -28,11 +28,7 @@ using namespace com::centreon::engine::configuration::applier; class ApplierSeverity : public ::testing::Test { public: - void SetUp() override { - config_errors = 0; - config_warnings = 0; - init_config_state(); - } + void SetUp() override { init_config_state(); } void TearDown() override { deinit_config_state(); } diff --git a/engine/tests/configuration/applier/applier-anomalydetection.cc b/engine/tests/configuration/applier/applier-anomalydetection.cc index a30aa2686f0..2a99bb12eee 100644 --- a/engine/tests/configuration/applier/applier-anomalydetection.cc +++ b/engine/tests/configuration/applier/applier-anomalydetection.cc @@ -60,7 +60,6 @@ TEST_F(ApplierAnomalydetection, // Then the applier add_object throws an exception. 
TEST_F(ApplierAnomalydetection, NewHostWithoutHostId) { configuration::applier::host hst_aply; - configuration::applier::service ad_aply; configuration::anomalydetection ad; configuration::host hst; ASSERT_TRUE(hst.parse("host_name", "test_host")); diff --git a/engine/tests/configuration/applier/applier-command.cc b/engine/tests/configuration/applier/applier-command.cc index 99dd042d60e..87baf5c07e2 100644 --- a/engine/tests/configuration/applier/applier-command.cc +++ b/engine/tests/configuration/applier/applier-command.cc @@ -26,8 +26,10 @@ #include "com/centreon/engine/configuration/applier/contact.hh" #include "com/centreon/engine/configuration/applier/host.hh" #include "com/centreon/engine/macros/grab_host.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/command.hh" #include "common/engine_legacy_conf/connector.hh" +#endif #include "helper.hh" using namespace com::centreon; diff --git a/engine/tests/configuration/applier/applier-contactgroup.cc b/engine/tests/configuration/applier/applier-contactgroup.cc index 906e0e5e275..6253a7601dc 100644 --- a/engine/tests/configuration/applier/applier-contactgroup.cc +++ b/engine/tests/configuration/applier/applier-contactgroup.cc @@ -22,7 +22,9 @@ #include "com/centreon/engine/configuration/applier/contact.hh" #include "com/centreon/engine/configuration/applier/contactgroup.hh" #include "com/centreon/engine/contactgroup.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/contact.hh" +#endif #include "helper.hh" using namespace com::centreon; diff --git a/engine/tests/configuration/applier/applier-log.cc b/engine/tests/configuration/applier/applier-log.cc index 50b014bd6a3..854a1139b64 100644 --- a/engine/tests/configuration/applier/applier-log.cc +++ b/engine/tests/configuration/applier/applier-log.cc @@ -21,7 +21,6 @@ #include #include "com/centreon/engine/configuration/applier/hostescalation.hh" #include "common/engine_legacy_conf/parser.hh" - #include "common/engine_legacy_conf/state.hh" #include 
"helper.hh" diff --git a/engine/tests/configuration/applier/applier-pbanomalydetection.cc b/engine/tests/configuration/applier/applier-pbanomalydetection.cc new file mode 100644 index 00000000000..918b37d2246 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbanomalydetection.cc @@ -0,0 +1,257 @@ +/** + * Copyright 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include "../../test_engine.hh" +#include "../../timeperiod/utils.hh" +#include "com/centreon/engine/anomalydetection.hh" +#include "com/centreon/engine/configuration/applier/anomalydetection.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "common/engine_conf/anomalydetection_helper.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierPbAnomalydetection : public 
TestEngine { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given an AD configuration with a host not defined +// Then the applier add_object throws an exception because it needs a service +// command. +TEST_F(ApplierPbAnomalydetection, + PbNewAnomalydetectionWithHostNotDefinedFromConfig) { + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper hlp(&ad); + ad.set_host_name("test_host"); + ad.set_service_description("test description"); + ASSERT_THROW(ad_aply.add_object(ad), std::exception); +} + +// Given host configuration without host_id +// Then the applier add_object throws an exception. +TEST_F(ApplierPbAnomalydetection, PbNewHostWithoutHostId) { + configuration::applier::host hst_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper hlp(&ad); + configuration::Host hst; + configuration::host_helper hhlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + ASSERT_THROW(hst_aply.add_object(hst), std::exception); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierPbAnomalydetection, PbNewADFromConfig) { + configuration::applier::host hst_aply; + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hmlp(&svc); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + 
svc.set_host_id(12); + svc.set_service_id(13); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 'output| metric=12;50;75'"); + svc.set_check_command("cmd"); + + configuration::applier::command cmd_aply; + cmd_aply.add_object(cmd); + ASSERT_NO_THROW(svc_aply.add_object(svc)); + + ad.set_service_description("test description"); + ad.set_internal_id(112); + ad.set_dependent_service_id(13); + ad.set_service_id(4); + ad.set_host_id(12); + ad.set_host_name("test_host"); + ad.set_metric_name("foo"); + ad.set_thresholds_file("/etc/centreon-broker/thresholds.json"); + + // No need here to call ad_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. + ad_aply.add_object(ad); + service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 2u); + auto my_ad = sm.find({12u, 4u}); + ASSERT_EQ(my_ad->first.first, 12u); + ASSERT_EQ(my_ad->first.second, 4u); + + // Service is not resolved, host is null now. 
+ ASSERT_TRUE(!my_ad->second->get_host_ptr()); + ASSERT_EQ(std::static_pointer_cast( + my_ad->second) + ->get_internal_id(), + 112u); + ASSERT_TRUE(my_ad->second->description() == "test description"); +} + +// Given service configuration without service_id +// Then the applier add_object throws an exception +TEST_F(ApplierPbAnomalydetection, PbNewADNoServiceId) { + configuration::applier::host hst_aply; + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ad.set_service_description("test description"); + ad.set_host_id(1); + ad.set_host_name("test_host"); + + // No need here to call ad_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. 
+ ASSERT_THROW(ad_aply.add_object(ad), std::exception); +} + +// Given service configuration without host_id +// Then the applier add_object throws an exception +TEST_F(ApplierPbAnomalydetection, PbNewADNoHostId) { + configuration::applier::host hst_aply; + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ad.set_service_description("test description"); + ad.set_service_id(4); + ad.set_host_name("test_host"); + + ASSERT_THROW(ad_aply.add_object(ad), std::exception); +} + +// Given service configuration with bad host_id +// Then the applier add_object throws an exception +TEST_F(ApplierPbAnomalydetection, PbNewADBadHostId) { + configuration::applier::host hst_aply; + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ad.set_service_description("test description"); + ad.set_host_id(2); + ad.set_service_id(2); + ad.set_dependent_service_id(3); + ad.set_host_name("test_host"); + + // No need here to call ad_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. 
+ ASSERT_THROW(ad_aply.add_object(ad), std::exception); +} + +// Given service configuration without metric_name +// Then the applier add_object throws an exception +TEST_F(ApplierPbAnomalydetection, PbNewADNoMetric) { + configuration::applier::host hst_aply; + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ad.set_service_description("test description"); + ad.set_host_id(1); + ad.set_service_id(4); + ad.set_dependent_service_id(3); + ad.set_host_name("test_host"); + + // No need here to call ad_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. + ASSERT_THROW(ad_aply.add_object(ad), std::exception); +} + +// Given service configuration without metric_name +// Then the applier add_object throws an exception +TEST_F(ApplierPbAnomalydetection, PbNewADNoThresholds) { + configuration::applier::host hst_aply; + configuration::applier::anomalydetection ad_aply; + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ad.set_service_description("test description"); + ad.set_host_id(1); + ad.set_service_id(4); + ad.set_dependent_service_id(3); + ad.set_host_name("test_host"); + ad.set_metric_name("bar"); + + // No need here to call ad_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. 
+ ASSERT_THROW(ad_aply.add_object(ad), std::exception); +} diff --git a/engine/tests/configuration/applier/applier-pbcommand.cc b/engine/tests/configuration/applier/applier-pbcommand.cc new file mode 100644 index 00000000000..9eec7e0f60f --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbcommand.cc @@ -0,0 +1,353 @@ +/* + * Copyright 2017-2019,2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include +#include + +#include "com/centreon/engine/commands/command.hh" +#include "com/centreon/engine/commands/connector.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/connector.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/macros/grab_host.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/connector_helper.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierPbCommand : public ::testing::Test { + public: + void SetUp() 
override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a command applier +// And a configuration command just with a name +// Then the applier add_object adds the command in the configuration set +// but not in the commands map (the command is unusable). +TEST_F(ApplierPbCommand, PbUnusableCommandFromConfig) { + configuration::applier::command aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + ASSERT_THROW(aply.add_object(cmd), std::exception); + ASSERT_EQ(pb_config.commands().size(), 1u); + ASSERT_EQ(commands::command::commands.size(), 0u); +} + +// Given a command applier +// And a configuration command with a name and a command line +// Then the applier add_object adds the command into the configuration set +// and the commands map (accessible from commands::set::instance()). +TEST_F(ApplierPbCommand, PbNewCommandFromConfig) { + configuration::applier::command aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + aply.add_object(cmd); + ASSERT_EQ(pb_config.commands().size(), 1u); + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_FALSE(found == commands::command::commands.end()); + ASSERT_FALSE(!found->second); + ASSERT_EQ(found->second->get_name(), "cmd"); + ASSERT_EQ(found->second->get_command_line(), "echo 1"); +} +// Given a command applier +// And a configuration command with a name, a command line and a connector +// Then the applier add_object adds the command into the configuration set +// but not in the commands map (the connector is not defined). 
+TEST_F(ApplierPbCommand, PbNewCommandWithEmptyConnectorFromConfig) { + configuration::applier::command aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd.set_connector("perl"); + ASSERT_THROW(aply.add_object(cmd), std::exception); + ASSERT_EQ(pb_config.commands().size(), 1u); + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_TRUE(found == commands::command::commands.end()); +} + +// Given a command applier +// And a configuration command with a name, a command line and a connector +// And the connector is well defined. +// Then the applier add_object adds the command into the configuration set +// but not in the commands map (the connector is not defined). +TEST_F(ApplierPbCommand, PbNewCommandWithConnectorFromConfig) { + configuration::error_cnt err; + configuration::applier::command aply; + configuration::applier::connector cnn_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd.set_connector("perl"); + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("perl"); + + cnn_aply.add_object(cnn); + aply.add_object(cmd); + + ASSERT_EQ(pb_config.commands().size(), 1u); + command_map::iterator found = commands::command::commands.find("cmd"); + ASSERT_EQ(found->second->get_name(), "cmd"); + ASSERT_EQ(found->second->get_command_line(), "echo 1"); + ASSERT_NO_THROW(aply.resolve_object(cmd, err)); +} + +// Given some command/connector appliers +// And a configuration command +// And a connector with the same name. +// Then the applier add_object adds the command into the configuration set +// but not in the commands map (the connector is not defined). 
+TEST_F(ApplierPbCommand, PbNewCommandAndConnectorWithSameName) { + configuration::error_cnt err; + configuration::applier::command aply; + configuration::applier::connector cnn_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("cmd"); + cnn.set_connector_line("echo 2"); + + cnn_aply.add_object(cnn); + aply.add_object(cmd); + + ASSERT_EQ(pb_config.commands().size(), 1u); + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_FALSE(found == commands::command::commands.end()); + ASSERT_FALSE(!found->second); + + ASSERT_EQ(found->second->get_name(), "cmd"); + ASSERT_EQ(found->second->get_command_line(), "echo 1"); + + aply.resolve_object(cmd, err); + connector_map::iterator found_con{ + commands::connector::connectors.find("cmd")}; + ASSERT_TRUE(found_con != commands::connector::connectors.end()); + ASSERT_TRUE(found_con->second); + + found = commands::command::commands.find("cmd"); + ASSERT_TRUE(found != commands::command::commands.end()); +} + +// Given some command and connector appliers already applied with +// all objects created. +// When the command is changed from the configuration, +// Then the modify_object() method updated correctly the command. 
+TEST_F(ApplierPbCommand, PbModifyCommandWithConnector) { + configuration::applier::command aply; + configuration::applier::connector cnn_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd.set_connector("perl"); + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("perl"); + + cnn_aply.add_object(cnn); + aply.add_object(cmd); + + configuration::Command* to_modify = &pb_config.mutable_commands()->at(0); + cmd.set_command_line("date"); + aply.modify_object(to_modify, cmd); + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_EQ(found->second->get_name(), "cmd"); + ASSERT_EQ(found->second->get_command_line(), "date"); +} + +// Given simple command (without connector) applier already applied with +// all objects created. +// When the command is removed from the configuration, +// Then the command is totally removed. +TEST_F(ApplierPbCommand, PbRemoveCommand) { + configuration::applier::command aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + + aply.add_object(cmd); + + aply.remove_object(0); + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_EQ(found, commands::command::commands.end()); + ASSERT_TRUE(pb_config.commands().size() == 0); +} + +// Given some command and connector appliers already applied with +// all objects created. +// When the command is removed from the configuration, +// Then the command is totally removed. 
+TEST_F(ApplierPbCommand, PbRemoveCommandWithConnector) { + configuration::applier::command aply; + configuration::applier::connector cnn_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd.set_connector("perl"); + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("perl"); + + cnn_aply.add_object(cnn); + aply.add_object(cmd); + + aply.remove_object(0); + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_EQ(found, commands::command::commands.end()); + ASSERT_TRUE(pb_config.commands().size() == 0); +} + +// Given simple command (without connector) applier already applied with +// all objects created. +// When the command is removed from the configuration, +// Then the command is totally removed. +TEST_F(ApplierPbCommand, PbComplexCommand) { + configuration::error_cnt err; + configuration::applier::command cmd_aply; + configuration::applier::host hst_aply; + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("base_centreon_ping"); + cmd.set_command_line( + "$USER1$/check_icmp -H $HOSTADDRESS$ -n $_HOSTPACKETNUMBER$ -w " + "$_HOSTWARNING$ -c $_HOSTCRITICAL$"); + cmd_aply.add_object(cmd); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("hst_test"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + hst.set_host_id(1); + configuration::CustomVariable* cv = hst.add_customvariables(); + cv->set_name("PACKETNUMBER"); + cv->set_value("42"); + cv = hst.add_customvariables(); + cv->set_name("WARNING"); + cv->set_value("200,20%"); + cv = hst.add_customvariables(); + cv->set_name("CRITICAL"); + cv->set_value("400,50%"); + hst.set_check_command("base_centreon_ping"); + hst_aply.add_object(hst); + + command_map::iterator cmd_found{ + commands::command::commands.find("base_centreon_ping")}; + 
ASSERT_NE(cmd_found, commands::command::commands.end()); + ASSERT_TRUE(pb_config.commands().size() == 1); + + host_map::iterator hst_found{engine::host::hosts.find("hst_test")}; + ASSERT_NE(hst_found, engine::host::hosts.end()); + ASSERT_TRUE(pb_config.hosts().size() == 1); + + hst_aply.expand_objects(pb_config); + hst_aply.resolve_object(hst, err); + ASSERT_TRUE(hst_found->second->custom_variables.size() == 3); + nagios_macros* macros(get_global_macros()); + grab_host_macros_r(macros, hst_found->second.get()); + std::string processed_cmd( + hst_found->second->get_check_command_ptr()->process_cmd(macros)); + ASSERT_EQ(processed_cmd, + "/check_icmp -H 127.0.0.1 -n 42 -w 200,20% -c 400,50%"); +} + +// Given simple command (without connector) applier already applied with +// all objects created. +// When the command is removed from the configuration, +// Then the command is totally removed. +TEST_F(ApplierPbCommand, PbComplexCommandWithContact) { + configuration::error_cnt err; + configuration::applier::command cmd_aply; + configuration::applier::host hst_aply; + configuration::applier::contact cnt_aply; + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("base_centreon_ping"); + cmd.set_command_line( + "$USER1$/check_icmp -H $HOSTADDRESS$ -n $_HOSTPACKETNUMBER$ -w " + "$_HOSTWARNING$ -c $_HOSTCRITICAL$ $CONTACTNAME$"); + cmd_aply.add_object(cmd); + + configuration::Contact cnt; + configuration::contact_helper cnt_hlp(&cnt); + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt.set_host_notification_period("24x7"); + cnt.set_service_notification_period("24x7"); + cnt_aply.add_object(cnt); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("hst_test"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + auto* cv = hst.add_customvariables(); + cv->set_name("PACKETNUMBER"); + cv->set_value("42"); + cv = 
hst.add_customvariables(); + cv->set_name("WARNING"); + cv->set_value("200,20%"); + cv = hst.add_customvariables(); + cv->set_name("CRITICAL"); + cv->set_value("400,50%"); + hst.set_check_command("base_centreon_ping"); + fill_string_group(hst.mutable_contacts(), "user"); + hst_aply.add_object(hst); + + command_map::iterator cmd_found = + commands::command::commands.find("base_centreon_ping"); + ASSERT_NE(cmd_found, commands::command::commands.end()); + ASSERT_TRUE(pb_config.commands().size() == 1); + + host_map::iterator hst_found = engine::host::hosts.find("hst_test"); + ASSERT_NE(hst_found, engine::host::hosts.end()); + ASSERT_TRUE(pb_config.hosts().size() == 1); + + hst_aply.expand_objects(pb_config); + hst_aply.resolve_object(hst, err); + ASSERT_TRUE(hst_found->second->custom_variables.size() == 3); + nagios_macros* macros(get_global_macros()); + grab_host_macros_r(macros, hst_found->second.get()); + std::string processed_cmd( + hst_found->second->get_check_command_ptr()->process_cmd(macros)); + ASSERT_EQ(processed_cmd, + "/check_icmp -H 127.0.0.1 -n 42 -w 200,20% -c 400,50% user"); +} diff --git a/engine/tests/configuration/applier/applier-pbconnector.cc b/engine/tests/configuration/applier/applier-pbconnector.cc new file mode 100644 index 00000000000..10474d42f65 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbconnector.cc @@ -0,0 +1,92 @@ +/* + * Copyright 2017-2019,2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ +#include +#include "../../timeperiod/utils.hh" +#include "com/centreon/engine/commands/connector.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/connector.hh" +#include "com/centreon/engine/globals.hh" +#include "common/engine_conf/connector_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierPbConnector : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a connector applier +// And a configuration connector just with a name +// Then the applier add_object adds the connector in the configuration set +// and in the connectors map. +TEST_F(ApplierPbConnector, PbUnusableConnectorFromConfig) { + configuration::applier::connector aply; + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("connector"); + aply.add_object(cnn); + ASSERT_EQ(commands::connector::connectors.size(), 1u); +} + +// Given a connector applier already applied +// When the connector is modified from the configuration, +// Then the modify_object() method updated correctly the connector. 
+TEST_F(ApplierPbConnector, PbModifyConnector) { + configuration::applier::connector aply; + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("connector"); + cnn.set_connector_line("perl"); + + aply.add_object(cnn); + + cnn.set_connector_line("date"); + configuration::Connector* old = &pb_config.mutable_connectors()->at(0); + aply.modify_object(old, cnn); + + connector_map::iterator found_con = + commands::connector::connectors.find("connector"); + ASSERT_FALSE(found_con == commands::connector::connectors.end()); + ASSERT_FALSE(!found_con->second); + + ASSERT_EQ(found_con->second->get_name(), "connector"); + ASSERT_EQ(found_con->second->get_command_line(), "date"); +} + +// Given simple connector applier already applied +// When the connector is removed from the configuration, +// Then the connector is totally removed. +TEST_F(ApplierPbConnector, PbRemoveConnector) { + configuration::applier::connector aply; + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("connector"); + cnn.set_connector_line("echo 1"); + + aply.add_object(cnn); + aply.remove_object(0); + ASSERT_TRUE(pb_config.connectors().size() == 0); + ASSERT_TRUE(commands::connector::connectors.size() == 0); +} diff --git a/engine/tests/configuration/applier/applier-pbcontact.cc b/engine/tests/configuration/applier/applier-pbcontact.cc new file mode 100644 index 00000000000..d7e1e730815 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbcontact.cc @@ -0,0 +1,452 @@ +/** + * Copyright 2017-2019,2023-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include "com/centreon/engine/commands/command.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/connector.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/contact.hh" +#include "com/centreon/engine/contactgroup.hh" +#include "com/centreon/engine/globals.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/connector_helper.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/contactgroup_helper.hh" +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/timeperiod_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierPbContact : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } + + configuration::Contact valid_pb_contact_config() const { + // Add command. 
+ { + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("true"); + configuration::applier::command aplyr; + aplyr.add_object(cmd); + } + // Add timeperiod. + { + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tp_help(&tperiod); + tperiod.set_timeperiod_name("24x7"); + tperiod.set_alias("24x7"); + configuration::Timerange* tr = tperiod.mutable_timeranges()->add_monday(); + // monday: 00:00-24:00 + tr->set_range_start(0); + tr->set_range_end(24 * 3600); + configuration::applier::timeperiod aplyr; + aplyr.add_object(tperiod); + } + // Valid contact configuration + // (will generate 0 warnings or 0 errors). + configuration::Contact ctct; + ctct.set_contact_name("admin"); + ctct.set_host_notification_period("24x7"); + ctct.set_service_notification_period("24x7"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd"); + fill_string_group(ctct.mutable_service_notification_commands(), "cmd"); + return ctct; + } +}; + +// Given a contact applier +// And a configuration contact +// When we modify the contact configuration with an unexisting contact +// Then an exception is thrown. +TEST_F(ApplierPbContact, PbModifyUnexistingContactFromConfig) { + configuration::applier::contact aply; + configuration::Contact ctct; + configuration::contact_helper hlp(&ctct); + ctct.set_contact_name("test"); + fill_string_group(ctct.mutable_contactgroups(), "test_group"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd1,cmd2"); + configuration::Contact* cfg = pb_config.add_contacts(); + cfg->CopyFrom(ctct); + ASSERT_THROW(aply.modify_object(cfg, ctct), std::exception); +} + +// Given contactgroup / contact appliers +// And a configuration contactgroup and a configuration contact +// that are already in configuration +// When we remove the contact configuration applier +// Then it is really removed from the configuration applier. 
+TEST_F(ApplierPbContact, PbRemoveContactFromConfig) { + configuration::applier::contact aply; + configuration::applier::contactgroup aply_grp; + + configuration::Contactgroup grp; + grp.set_contactgroup_name("test_group"); + + configuration::Contact ctct; + configuration::contact_helper c_helper(&ctct); + ctct.set_contact_name("test"); + ctct.add_address("coucou"); + ctct.add_address("foo"); + ctct.add_address("bar"); + fill_string_group(ctct.mutable_contactgroups(), "test_group"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd1"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd2"); + fill_string_group(ctct.mutable_service_notification_commands(), "svc1"); + fill_string_group(ctct.mutable_service_notification_commands(), "svc2"); + configuration::CustomVariable* cv = ctct.add_customvariables(); + cv->set_name("superVar"); + cv->set_value("superValue"); + aply_grp.add_object(grp); + aply.add_object(ctct); + aply.expand_objects(pb_config); + engine::contact* my_contact = engine::contact::contacts.begin()->second.get(); + ASSERT_EQ(my_contact->get_addresses().size(), 3u); + int idx; + bool found = false; + for (idx = 0; idx < pb_config.contacts().size(); idx++) { + if (pb_config.contacts()[idx].contact_name() == "test") { + found = true; + break; + } + } + ASSERT_TRUE(found); + aply.remove_object(idx); + ASSERT_TRUE(engine::contact::contacts.empty()); +} + +TEST_F(ApplierPbContact, PbModifyContactFromConfig) { + configuration::applier::contact aply; + configuration::applier::contactgroup aply_grp; + configuration::Contactgroup grp; + configuration::contactgroup_helper grp_hlp(&grp); + grp.set_contactgroup_name("test_group"); + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test"); + fill_string_group(ctct.mutable_contactgroups(), "test_group"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd1,cmd2"); + 
fill_string_group(ctct.mutable_service_notification_commands(), "svc1,svc2"); + ASSERT_TRUE(ctct_hlp.insert_customvariable("_superVar", "SuperValue")); + ASSERT_TRUE(ctct.customvariables().size() == 1); + + configuration::applier::command cmd_aply; + configuration::applier::connector cnn_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd.set_connector("perl"); + configuration::Connector cnn; + configuration::connector_helper cnn_hlp(&cnn); + cnn.set_connector_name("perl"); + cnn_aply.add_object(cnn); + cmd_aply.add_object(cmd); + + aply_grp.add_object(grp); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ctct_hlp.hook("host_notification_commands", "cmd"); + ctct_hlp.hook("service_notification_commands", "svc1,svc2"); + ASSERT_TRUE(ctct_hlp.insert_customvariable("_superVar", "Super")); + ASSERT_TRUE(ctct_hlp.insert_customvariable("_superVar1", "Super1")); + ctct.set_alias("newAlias"); + ASSERT_EQ(ctct.customvariables().size(), 2u); + ctct_hlp.hook("service_notification_options", "n"); + aply.modify_object(&*pb_config.mutable_contacts()->begin(), ctct); + contact_map::const_iterator ct_it{engine::contact::contacts.find("test")}; + ASSERT_TRUE(ct_it != engine::contact::contacts.end()); + ASSERT_EQ(ct_it->second->get_custom_variables().size(), 2u); + ASSERT_EQ(ct_it->second->get_custom_variables()["superVar"].value(), + std::string_view("Super")); + ASSERT_EQ(ct_it->second->get_custom_variables()["superVar1"].value(), + std::string_view("Super1")); + ASSERT_EQ(ct_it->second->get_alias(), std::string_view("newAlias")); + ASSERT_FALSE(ct_it->second->notify_on(notifier::service_notification, + notifier::unknown)); + + bool found = false; + for (auto it = (*pb_config.mutable_commands()).begin(); + it != (*pb_config.mutable_commands()).end(); ++it) { + if (it->command_name() == "cmd") { + pb_config.mutable_commands()->erase(it); + found = true; + break; + } + 
} + ASSERT_TRUE(found) + << "Command 'cmd' not found among the configuration commands"; + + cmd.set_command_name("cmd"); + cmd.set_command_line("bar"); + configuration::applier::command aplyr; + aplyr.add_object(cmd); + ctct_hlp.hook("host_notification_commands", "cmd"); + auto* old_ct = &pb_config.mutable_contacts()->at(0); + ASSERT_TRUE(old_ct->contact_name() == "test"); + aply.modify_object(old_ct, ctct); + { + command_map::iterator found{commands::command::commands.find("cmd")}; + ASSERT_TRUE(found != commands::command::commands.end()); + ASSERT_TRUE(found->second); + ASSERT_TRUE(found->second->get_command_line() == "bar"); + } +} + +// Given contactgroup / contact appliers +// And a configuration contactgroup and a configuration contact +// that are already in configuration +// When we resolve the contact configuration +// Then the contact contactgroups is cleared, nothing more if the +// contact check is OK. Here, since notification commands are empty, +// an exception is thrown. +TEST_F(ApplierPbContact, PbResolveContactFromConfig) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::applier::contactgroup aply_grp; + configuration::Contactgroup grp; + configuration::contactgroup_helper cg_hlp(&grp); + grp.set_contactgroup_name("test_group"); + + configuration::Contact ctct; + configuration::contact_helper ct_hlp(&ctct); + ctct.set_contact_name("test"); + fill_string_group(ctct.mutable_contactgroups(), "test_group"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd1"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd2"); + aply_grp.add_object(grp); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_THROW(aply.resolve_object(ctct, err), std::exception); +} + +// Given a contact +// And an applier +// When the contact is resolved by the applier +// Then an exception is thrown +// And 2 warnings and 2 errors are returned: +// * error 1 => no service notification command 
+// * error 2 => no host notification command +// * warning 1 => no service notification period +// * warning 2 => no host notification period +TEST_F(ApplierPbContact, PbResolveContactNoNotification) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test"); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_THROW(aply.resolve_object(ctct, err), std::exception); + ASSERT_EQ(err.config_warnings, 2); + ASSERT_EQ(err.config_errors, 2); +} + +// Given a valid contact +// - valid host notification period +// - valid service notification period +// - valid host notification command +// - valid service notification command +// And an applier +// When resolve_object() is called +// Then no exception is thrown +// And no errors are returned +// And links are properly resolved +TEST_F(ApplierPbContact, PbResolveValidContact) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_NO_THROW(aply.resolve_object(ctct, err)); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 0); +} + +// Given a valid contact +// And an applier +// When adding a non-existing service notification period to the contact +// Then the resolve method throws +// And returns 1 error +TEST_F(ApplierPbContact, PbResolveNonExistingServiceNotificationTimeperiod) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + ctct.set_service_notification_period("non_existing_period"); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_THROW(aply.resolve_object(ctct, err), std::exception); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 1); +} + +// Given a valid contact +// And an applier +// When 
adding a non-existing host notification period to the contact +// Then the resolve method throws +// And returns 1 error +TEST_F(ApplierPbContact, PbResolveNonExistingHostNotificationTimeperiod) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + ctct.set_host_notification_period("non_existing_period"); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_THROW(aply.resolve_object(ctct, err), std::exception); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 1); +} + +// Given a valid contact +// And an applier +// When adding a non-existing service command to the contact +// Then the resolve method throws +// And returns 1 error +TEST_F(ApplierPbContact, PbResolveNonExistingServiceCommand) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + fill_string_group(ctct.mutable_service_notification_commands(), + "non_existing_command"); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_THROW(aply.resolve_object(ctct, err), std::exception); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 1); +} + +// Given a valid contact +// And an applier +// When adding a non-existing host command to the contact +// Then the resolve method throws +// And returns 1 error +TEST_F(ApplierPbContact, PbResolveNonExistingHostCommand) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + fill_string_group(ctct.mutable_host_notification_commands(), + "non_existing_command"); + aply.add_object(ctct); + aply.expand_objects(pb_config); + ASSERT_THROW(aply.resolve_object(ctct, err), std::exception); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 1); +} + +// Given a valid contact configuration +// And the contact has multiple host notification commands +// When the 
applier resolve_object() method is called +// Then the contact has the multiple host notification commands +// TEST_F(ApplierPbContact, ResolveContactWithMultipleHostNotificationCommand) { +// // Given +// configuration::contact ctct(valid_contact_config()); +// +// // And +// { +// configuration::applier::command aplyr; +// for (int i(0); i < 3; ++i) { +// std::ostringstream cmdname; +// cmdname << "command" << i + 1; +// configuration::command cmd; +// cmd.parse("command_name", cmdname.str().c_str()); +// cmd.parse("command_line", "true"); +// aplyr.add_object(cmd); +// } +// aplyr.expand_objects(*config); +// } +// ctct.parse("host_notification_commands", +// "command1!ARG1,command2,command3!ARG3"); configuration::applier::contact +// aplyr; aplyr.add_object(ctct); aplyr.expand_objects(*config); +// +// // When +// aplyr.resolve_object(ctct); +// +// // Then +// std::list > const& +// commands(configuration::applier::state::instance().contacts_find( +// ctct.contact_name())->get_host_notification_commands()); +// ASSERT_EQ(commands.size(), 3); +// std::list >::const_iterator +// it(commands.begin()), +// end(commands.end()); +// ASSERT_EQ(it->first->get_name(), "command1"); +// ASSERT_EQ(it->second, "command1!ARG1"); +// ++it; +// ASSERT_EQ(it->first->get_name(), "command2"); +// ASSERT_EQ(it->second, "command2"); +// ++it; +// ASSERT_EQ(it->first->get_name(), "command3"); +// ASSERT_EQ(it->second, "command3!ARG3"); +//} + +// Given a valid contact +// And the contact is notified on host recovery +// But not on down or unreachable host +// When resolve_object() is called +// Then a warning is returned +TEST_F(ApplierPbContact, PbContactWithOnlyHostRecoveryNotification) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + uint16_t options; + fill_host_notification_options(&options, "r"); + ctct.set_host_notification_options(options); + fill_service_notification_options(&options, 
"n"); + ctct.set_service_notification_options(options); + ctct.set_host_notifications_enabled("1"); + ctct.set_service_notifications_enabled("1"); + aply.add_object(ctct); + aply.expand_objects(pb_config); + aply.resolve_object(ctct, err); + ASSERT_EQ(err.config_warnings, 1); + ASSERT_EQ(err.config_errors, 0); +} + +// Given a valid contact +// And the contact is notified on service recovery +// But not on critical, warning or unknown service +// When resolve_object() is called +// Then a warning is returned +TEST_F(ApplierPbContact, PbContactWithOnlyServiceRecoveryNotification) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::Contact ctct(valid_pb_contact_config()); + uint16_t options; + fill_host_notification_options(&options, "n"); + ctct.set_host_notification_options(options); + fill_service_notification_options(&options, "r"); + ctct.set_service_notification_options(options); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); + aply.add_object(ctct); + aply.expand_objects(pb_config); + aply.resolve_object(ctct, err); + ASSERT_EQ(err.config_warnings, 1); + ASSERT_EQ(err.config_errors, 0); +} diff --git a/engine/tests/configuration/applier/applier-pbcontactgroup.cc b/engine/tests/configuration/applier/applier-pbcontactgroup.cc new file mode 100644 index 00000000000..dc76c3073e7 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbcontactgroup.cc @@ -0,0 +1,298 @@ +/* + * Copyright 2018 - 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/contactgroup.hh" +#include "com/centreon/engine/globals.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/contactgroup_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierPbContactgroup : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a contactgroup applier +// And a configuration contactgroup +// When we modify the contactgroup configuration with a non existing +// contactgroup +// Then an exception is thrown. 
+TEST_F(ApplierPbContactgroup, ModifyUnexistingContactgroupFromConfig) { + configuration::applier::contactgroup aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper hlp(&cg); + cg.set_contactgroup_name("test"); + fill_string_group(cg.mutable_members(), "contact"); + auto* new_cg = pb_config.add_contactgroups(); + new_cg->CopyFrom(cg); + ASSERT_THROW(aply.modify_object(new_cg, cg), std::exception); +} + +// Given a contactgroup applier +// And a configuration contactgroup in configuration +// When we modify the contactgroup configuration +// Then the applier modify_object updates the contactgroup. +TEST_F(ApplierPbContactgroup, ModifyContactgroupFromConfig) { + configuration::applier::contact caply; + configuration::applier::contactgroup aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + cg.set_contactgroup_name("test"); + configuration::Contact ct; + configuration::contact_helper ct_hlp(&ct); + ct.set_contact_name("contact"); + + caply.add_object(ct); + + fill_string_group(cg.mutable_members(), "contact"); + cg.set_alias("test"); + aply.add_object(cg); + auto it = std::find_if(pb_config.mutable_contactgroups()->begin(), + pb_config.mutable_contactgroups()->end(), + [](const configuration::Contactgroup& cg) { + return cg.contactgroup_name() == "test"; + }); + + ASSERT_TRUE(it->alias() == "test"); + + cg.set_alias("test_renamed"); + aply.modify_object(&*it, cg); + + it = std::find_if(pb_config.mutable_contactgroups()->begin(), + pb_config.mutable_contactgroups()->end(), + [](const configuration::Contactgroup& cg) { + return cg.contactgroup_name() == "test"; + }); + ASSERT_TRUE(it->alias() == "test_renamed"); +} + +// Given a contactgroup applier +// And a configuration contactgroup in configuration +// When we change remove the configuration +// Then it is really removed +TEST_F(ApplierPbContactgroup, RemoveContactgroupFromConfig) { + configuration::applier::contact caply; + 
configuration::applier::contactgroup aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + cg.set_contactgroup_name("test"); + configuration::Contact ct; + configuration::contact_helper ct_hlp(&ct); + ct.set_contact_name("contact"); + + caply.add_object(ct); + fill_string_group(cg.mutable_members(), "contact"); + aply.add_object(cg); + ASSERT_FALSE(engine::contactgroup::contactgroups.empty()); + + aply.remove_object(0); + ASSERT_TRUE(engine::contactgroup::contactgroups.empty()); +} + +// Given an empty contactgroup +// When the resolve_object() method is called +// Then no warning, nor error are given +TEST_F(ApplierPbContactgroup, ResolveEmptyContactgroup) { + configuration::error_cnt err; + configuration::applier::contactgroup aplyr; + configuration::Contactgroup grp; + configuration::contactgroup_helper hlp(&grp); + grp.set_contactgroup_name("test"); + aplyr.add_object(grp); + aplyr.expand_objects(pb_config); + aplyr.resolve_object(grp, err); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 0); +} + +// Given a contactgroup with a non-existing contact +// When the resolve_object() method is called +// Then an exception is thrown +// And the method returns 1 error +TEST_F(ApplierPbContactgroup, ResolveInexistentContact) { + configuration::applier::contactgroup aplyr; + configuration::Contactgroup grp; + configuration::contactgroup_helper grp_hlp(&grp); + grp.set_contactgroup_name("test"); + fill_string_group(grp.mutable_members(), "non_existing_contact"); + ASSERT_THROW(aplyr.add_object(grp), std::exception); +} + +// Given a contactgroup with a contact +// When the resolve_object() method is called +// Then the contact is really added to the contact group. 
+TEST_F(ApplierPbContactgroup, ResolveContactgroup) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::applier::contactgroup aply_grp; + configuration::Contactgroup grp; + configuration::contactgroup_helper hlp(&grp); + grp.set_contactgroup_name("test_group"); + configuration::Contact ctct; + configuration::contact_helper c_hlp(&ctct); + ctct.set_contact_name("test"); + aply.add_object(ctct); + fill_string_group(ctct.mutable_contactgroups(), "test_group"); + fill_string_group(grp.mutable_members(), "test"); + aply_grp.add_object(grp); + aply_grp.expand_objects(pb_config); + ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +} + +// Given a contactgroup with a contact already configured +// And a second contactgroup configuration +// When we set the first one as contactgroup member to the second +// Then the parse method returns true and set the first one contacts +// to the second one. +TEST_F(ApplierPbContactgroup, SetContactgroupMembers) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::applier::contactgroup aply_grp; + configuration::Contactgroup grp; + configuration::contactgroup_helper grp_hlp(&grp); + grp.set_contactgroup_name("test_group"); + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test"); + aply.add_object(ctct); + fill_string_group(grp.mutable_members(), "test"); + aply_grp.add_object(grp); + aply_grp.expand_objects(pb_config); + aply_grp.resolve_object(grp, err); + ASSERT_EQ(grp.members().data().size(), 1); + + configuration::Contactgroup grp1; + configuration::contactgroup_helper grp1_hlp(&grp1); + grp1.set_contactgroup_name("big_group"); + fill_string_group(grp1.mutable_contactgroup_members(), "test_group"); + aply_grp.add_object(grp1); + aply_grp.expand_objects(pb_config); + + // grp1 must be reload because the expand_objects reload them totally. 
+ bool found = false; + for (auto& cg : pb_config.contactgroups()) { + if (cg.contactgroup_name() == "big_group") { + ASSERT_EQ(cg.members().data().size(), 1); + found = true; + break; + } + } + ASSERT_TRUE(found); +} + +TEST_F(ApplierPbContactgroup, ContactRemove) { + configuration::error_cnt err; + configuration::applier::contact aply; + configuration::applier::contactgroup aply_grp; + configuration::Contactgroup grp; + configuration::contactgroup_helper grp_hlp(&grp); + grp.set_contactgroup_name("test_group"); + + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test"); + aply.add_object(ctct); + + configuration::Contact ctct2; + configuration::contact_helper ctct2_hlp(&ctct2); + ctct2.set_contact_name("test2"); + aply.add_object(ctct2); + + grp_hlp.hook("members", "test, test2"); + aply_grp.add_object(grp); + aply_grp.expand_objects(pb_config); + aply_grp.resolve_object(grp, err); + ASSERT_EQ( + engine::contactgroup::contactgroups["test_group"]->get_members().size(), + 2u); + + int idx2 = 0; + while (pb_config.contacts()[idx2].contact_name() != "test2") { + idx2++; + ASSERT_LE(idx2, pb_config.contacts().size()); + } + + aply.remove_object(idx2); + ASSERT_EQ( + engine::contactgroup::contactgroups["test_group"]->get_members().size(), + 1u); + grp_hlp.hook("members", "test"); + // grp.parse("members", "test"); + int idx = 0; + while (pb_config.contactgroups()[idx].contactgroup_name() != "test_group") { + idx++; + ASSERT_LE(idx, pb_config.contactgroups().size()); + } + aply_grp.modify_object(&pb_config.mutable_contactgroups()->at(idx), grp); +} + +// Given a contactgroup applier +// And a configuration contactgroup in configuration +// When we modify members in the contactgroup configuration +// Then the applier modify_object updates the contactgroup. 
+TEST_F(ApplierPbContactgroup, ModifyMembersContactgroupFromConfig) { + configuration::applier::contact caply; + configuration::applier::contactgroup aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + cg.set_contactgroup_name("test"); + configuration::Contact ct; + configuration::contact_helper ct_hlp(&ct); + ct.set_contact_name("contact"); + + configuration::Contact ct1; + configuration::contact_helper ct_hlp1(&ct1); + ct1.set_contact_name("contact1"); + caply.add_object(ct); + caply.add_object(ct1); + + fill_string_group(cg.mutable_members(), "contact,contact1"); + cg.set_alias("test"); + aply.add_object(cg); + auto it = std::find_if(pb_config.mutable_contactgroups()->begin(), + pb_config.mutable_contactgroups()->end(), + [](const configuration::Contactgroup& cg) { + return cg.contactgroup_name() == "test"; + }); + + ASSERT_TRUE(it->alias() == "test"); + + fill_string_group(cg.mutable_members(), "contact1"); + aply.modify_object(&*it, cg); + + it = std::find_if(pb_config.mutable_contactgroups()->begin(), + pb_config.mutable_contactgroups()->end(), + [](const configuration::Contactgroup& cg) { + return cg.contactgroup_name() == "test"; + }); + ASSERT_TRUE(it->members().data().size() == 2); + ASSERT_TRUE(it->members().data()[0] == "contact"); + ASSERT_TRUE(it->members().data()[1] == "contact1"); +} diff --git a/engine/tests/configuration/applier/applier-pbglobal.cc b/engine/tests/configuration/applier/applier-pbglobal.cc new file mode 100644 index 00000000000..be25e9b5fa9 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbglobal.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include +#include "com/centreon/engine/configuration/applier/hostescalation.hh" +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class ApplierGlobal : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a default State configuration whose poller_name is "unknown" +// Then the parser fills poller_name from the configuration file. 
+TEST_F(ApplierGlobal, PbPollerName) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + + ASSERT_EQ(st.poller_name(), "unknown"); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "poller_name=poller-test" << std::endl; + ofs.close(); + configuration::error_cnt err; + + parser.parse("/tmp/test-config.cfg", &st, err); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.poller_name(), "poller-test"); +} diff --git a/engine/tests/configuration/applier/applier-pbhost.cc b/engine/tests/configuration/applier/applier-pbhost.cc new file mode 100644 index 00000000000..5a35c5c4c9f --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbhost.cc @@ -0,0 +1,160 @@ +/** + * Copyright 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include + +#include "../../timeperiod/utils.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierPbHost : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given host configuration without host_id +// Then the applier add_object throws an exception. +TEST_F(ApplierPbHost, PbNewHostWithoutHostId) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + ASSERT_THROW(hst_aply.add_object(hst), std::exception); +} + +// Given a host configuration +// When we change the host name in the configuration +// Then the applier modify_object changes the host name without changing +// the host id. 
+TEST_F(ApplierPbHost, HostRenamed) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + host_map const& hm(engine::host::hosts); + ASSERT_EQ(hm.size(), 1u); + std::shared_ptr h1(hm.begin()->second); + ASSERT_TRUE(h1->name() == "test_host"); + + hst.set_host_name("test_host1"); + hst_aply.modify_object(&pb_config.mutable_hosts()->at(0), hst); + ASSERT_EQ(hm.size(), 1u); + h1 = hm.begin()->second; + ASSERT_TRUE(h1->name() == "test_host1"); + ASSERT_EQ(get_host_id(h1->name()), 12u); +} + +TEST_F(ApplierPbHost, PbHostRemoved) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + host_map const& hm(engine::host::hosts); + ASSERT_EQ(hm.size(), 1u); + std::shared_ptr h1(hm.begin()->second); + ASSERT_TRUE(h1->name() == "test_host"); + + hst_aply.remove_object(0); + + ASSERT_EQ(hm.size(), 0u); + hst.set_host_name("test_host1"); + hst_aply.add_object(hst); + h1 = hm.begin()->second; + ASSERT_EQ(hm.size(), 1u); + ASSERT_TRUE(h1->name() == "test_host1"); + ASSERT_EQ(get_host_id(h1->name()), 12u); +} + +TEST_F(ApplierPbHost, PbHostParentChildUnreachable) { + configuration::error_cnt err; + configuration::applier::host hst_aply; + configuration::applier::command cmd_aply; + configuration::Host hst_child; + configuration::host_helper hst_child_hlp(&hst_child); + configuration::Host hst_parent; + configuration::host_helper hst_parent_hlp(&hst_parent); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("base_centreon_ping"); + cmd.set_command_line( + "$USER1$/check_icmp -H $HOSTADDRESS$ -n $_HOSTPACKETNUMBER$ -w " + "$_HOSTWARNING$ -c $_HOSTCRITICAL$"); + 
cmd_aply.add_object(cmd); + + hst_child.set_host_name("child_host"); + hst_child.set_address("127.0.0.1"); + hst_child_hlp.hook("parents", "parent_host"); + hst_child.set_host_id(1); + hst_child_hlp.hook("_PACKETNUMBER", "42"); + hst_child_hlp.hook("_WARNING", "200,20%"); + hst_child_hlp.hook("_CRITICAL", "400,50%"); + hst_child.set_check_command("base_centreon_ping"); + hst_aply.add_object(hst_child); + + hst_parent.set_host_name("parent_host"); + hst_parent.set_address("127.0.0.1"); + hst_parent.set_host_id(2); + hst_parent_hlp.hook("_PACKETNUMBER", "42"); + hst_parent_hlp.hook("_WARNING", "200,20%"); + hst_parent_hlp.hook("_CRITICAL", "400,50%"); + hst_parent.set_check_command("base_centreon_ping"); + hst_aply.add_object(hst_parent); + + ASSERT_EQ(engine::host::hosts.size(), 2u); + + hst_aply.expand_objects(pb_config); + hst_aply.resolve_object(hst_child, err); + hst_aply.resolve_object(hst_parent, err); + + host_map::iterator child = engine::host::hosts.find("child_host"); + host_map::iterator parent = engine::host::hosts.find("parent_host"); + + ASSERT_EQ(parent->second->child_hosts.size(), 1u); + ASSERT_EQ(child->second->parent_hosts.size(), 1u); + + engine::host::host_state result; + parent->second->run_sync_check_3x(&result, 0, 0, 0); + ASSERT_EQ(parent->second->get_current_state(), engine::host::state_down); + child->second->run_sync_check_3x(&result, 0, 0, 0); + ASSERT_EQ(child->second->get_current_state(), + engine::host::state_unreachable); +} diff --git a/engine/tests/configuration/applier/applier-pbhostdependency.cc b/engine/tests/configuration/applier/applier-pbhostdependency.cc new file mode 100644 index 00000000000..664aac6f274 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbhostdependency.cc @@ -0,0 +1,155 @@ +/** + * Copyright 2019, 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include + +#include "../../test_engine.hh" +#include "../../timeperiod/utils.hh" +#include "com/centreon/clib.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/config.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostdependency.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/serviceescalation.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/hostdependency_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "common/engine_conf/state_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +extern configuration::state* config; + +class HostDependency : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + configuration::applier::contact ct_aply; + 
configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + configuration::error_cnt err; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct, err); + + configuration::applier::host hst_aply; + + configuration::Host hst1{new_pb_configuration_host("host1", "admin", 18)}; + hst_aply.add_object(hst1); + hst_aply.resolve_object(hst1, err); + + configuration::Host hst2{new_pb_configuration_host("host2", "admin", 19)}; + hst_aply.add_object(hst2); + hst_aply.resolve_object(hst2, err); + + configuration::Host hst3{new_pb_configuration_host("host3", "admin", 20)}; + hst_aply.add_object(hst3); + hst_aply.resolve_object(hst3, err); + } + + void TearDown() override { deinit_config_state(); } +}; + +TEST_F(HostDependency, PbCircularDependency2) { + configuration::error_cnt err; + configuration::applier::hostdependency hd_aply; + configuration::Hostdependency hd1{ + new_pb_configuration_hostdependency("host1", "host2")}; + hd_aply.expand_objects(pb_config); + hd_aply.add_object(hd1); + hd_aply.resolve_object(hd1, err); + + configuration::Hostdependency hd2{ + new_pb_configuration_hostdependency("host2", "host1")}; + hd_aply.expand_objects(pb_config); + hd_aply.add_object(hd2); + hd_aply.resolve_object(hd2, err); + + ASSERT_EQ(pre_flight_circular_check(&err.config_warnings, &err.config_errors), + ERROR); +} + +TEST_F(HostDependency, PbCircularDependency3) { + configuration::applier::hostdependency hd_aply; + configuration::Hostdependency hd1{ + new_pb_configuration_hostdependency("host1", "host2")}; + hd_aply.expand_objects(pb_config); + hd_aply.add_object(hd1); + configuration::error_cnt err; + hd_aply.resolve_object(hd1, err); + + configuration::Hostdependency hd2{ + new_pb_configuration_hostdependency("host2", "host3")}; + hd_aply.expand_objects(pb_config); + hd_aply.add_object(hd2); + hd_aply.resolve_object(hd2, err); + + configuration::Hostdependency hd3{ + new_pb_configuration_hostdependency("host3", "host1")}; + 
hd_aply.expand_objects(pb_config); + hd_aply.add_object(hd3); + hd_aply.resolve_object(hd3, err); + + ASSERT_EQ(pre_flight_circular_check(&err.config_warnings, &err.config_errors), + ERROR); +} + +TEST_F(HostDependency, PbRemoveHostdependency) { + configuration::applier::hostdependency hd_aply; + configuration::Hostdependency hd1{ + new_pb_configuration_hostdependency("host1", "host2")}; + hd_aply.expand_objects(pb_config); + hd_aply.add_object(hd1); + configuration::error_cnt err; + hd_aply.resolve_object(hd1, err); + + ASSERT_EQ(engine::hostdependency::hostdependencies.size(), 1); + hd_aply.remove_object(0); + ASSERT_EQ(engine::hostdependency::hostdependencies.size(), 0); +} + +TEST_F(HostDependency, PbExpandHostdependency) { + configuration::State s; + configuration::Hostdependency hd{ + new_pb_configuration_hostdependency("host1,host3,host5", "host2,host6")}; + auto* new_hd = s.add_hostdependencies(); + new_hd->CopyFrom(std::move(hd)); + configuration::applier::hostdependency hd_aply; + hd_aply.expand_objects(s); + ASSERT_EQ(s.hostdependencies().size(), 6); + ASSERT_TRUE(std::all_of(s.hostdependencies().begin(), + s.hostdependencies().end(), [](const auto& hd) { + return hd.hostgroups().data().empty() && + hd.dependent_hostgroups().data().empty(); + })); +} diff --git a/engine/tests/configuration/applier/applier-pbhostescalation.cc b/engine/tests/configuration/applier/applier-pbhostescalation.cc new file mode 100644 index 00000000000..36ec197ed10 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbhostescalation.cc @@ -0,0 +1,118 @@ +/* + * Copyright 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostescalation.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/hostescalation.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/hostescalation_helper.hh" +#include "common/engine_conf/state_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class PbApplierHostEscalation : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +TEST_F(PbApplierHostEscalation, PbAddEscalation) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + ASSERT_EQ(host::hosts.size(), 1u); + + configuration::applier::hostescalation he_apply; + configuration::Hostescalation he; + configuration::hostescalation_helper he_hlp(&he); + he_hlp.hook("host_name", "test_host"); + he.set_first_notification(4); + he_apply.add_object(he); + ASSERT_EQ(hostescalation::hostescalations.size(), 1u); + he.set_first_notification(8); + he_apply.add_object(he); + ASSERT_EQ(hostescalation::hostescalations.size(), 2u); +} + +TEST_F(PbApplierHostEscalation, PbRemoveEscalation) { + configuration::applier::host hst_aply; + 
configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + ASSERT_EQ(host::hosts.size(), 1u); + + configuration::applier::hostescalation he_apply; + configuration::Hostescalation he; + configuration::hostescalation_helper he_hlp(&he); + he_hlp.hook("host_name", "test_host"); + he.set_first_notification(4); + he_apply.add_object(he); + ASSERT_EQ(hostescalation::hostescalations.size(), 1u); + he.set_first_notification(8); + he_apply.add_object(he); + ASSERT_EQ(hostescalation::hostescalations.size(), 2u); + + he_apply.remove_object(1); + ASSERT_EQ(hostescalation::hostescalations.size(), 1u); + he_apply.remove_object(0); + ASSERT_EQ(hostescalation::hostescalations.size(), 0u); +} + +TEST_F(PbApplierHostEscalation, RemoveEscalationFromRemovedHost) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + ASSERT_EQ(host::hosts.size(), 1u); + + configuration::applier::hostescalation he_apply; + configuration::Hostescalation he; + configuration::hostescalation_helper he_hlp(&he); + he_hlp.hook("host_name", "test_host"); + he.set_first_notification(4); + he_apply.add_object(he); + ASSERT_EQ(hostescalation::hostescalations.size(), 1u); + he.set_first_notification(8); + he_apply.add_object(he); + ASSERT_EQ(hostescalation::hostescalations.size(), 2u); + + hst_aply.remove_object(0); + ASSERT_EQ(host::hosts.size(), 0u); + + he_apply.remove_object(0); + ASSERT_EQ(hostescalation::hostescalations.size(), 1u); + he_apply.remove_object(0); + ASSERT_EQ(hostescalation::hostescalations.size(), 0u); +} diff --git a/engine/tests/configuration/applier/applier-pbhostgroup.cc b/engine/tests/configuration/applier/applier-pbhostgroup.cc new file mode 100644 index 00000000000..ca84fd35c4d --- 
/dev/null +++ b/engine/tests/configuration/applier/applier-pbhostgroup.cc @@ -0,0 +1,190 @@ +/* + * Copyright 2017 - 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostgroup.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/macros/grab_host.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/hostgroup_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierHostGroup : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a hostgroup configuration with three member hosts +// Then the applier creates the hostgroup with all of its members. 
+TEST_F(ApplierHostGroup, PbNewHostGroup) { + configuration::error_cnt err; + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_b; + configuration::host_helper hst_b_hlp(&hst_b); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_host_id(1); + hst_a.set_address("127.0.0.1"); + + hst_b.set_host_name("b"); + hst_b.set_host_id(2); + hst_b.set_address("127.0.0.1"); + + hst_c.set_host_name("c"); + hst_c.set_host_id(3); + hst_c.set_address("127.0.0.1"); + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_b); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,b,c"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_b, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + ASSERT_EQ(engine::hostgroup::hostgroups.size(), 1u); + ASSERT_EQ(engine::hostgroup::hostgroups.begin()->second->members.size(), 3u); +} + +// Given a hostgroup configuration +// When we change the members of the hostgroup in the configuration +// Then the applier modify_object updates the hostgroup members without +// changing the hostgroup name. 
+TEST_F(ApplierHostGroup, PbHostRenamed) { + configuration::error_cnt err; + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_host_id(1); + hst_a.set_address("127.0.0.1"); + + hst_c.set_host_name("c"); + hst_c.set_host_id(2); + hst_c.set_address("127.0.0.1"); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + hg.mutable_members()->clear_data(); + hg_hlp.hook("members", "c"); + hg_aply.modify_object(&pb_config.mutable_hostgroups()->at(0), hg); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + ASSERT_EQ(engine::hostgroup::hostgroups.size(), 1u); + ASSERT_EQ(engine::hostgroup::hostgroups.begin()->second->members.size(), 1u); + ASSERT_EQ(engine::hostgroup::hostgroups.begin()->second->get_group_name(), + "temphg"); +} + +TEST_F(ApplierHostGroup, PbHostRemoved) { + configuration::error_cnt err; + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_c; + configuration::host_helper 
hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_host_id(1); + hst_a.set_address("127.0.0.1"); + + hst_c.set_host_name("c"); + hst_c.set_host_id(2); + hst_c.set_address("127.0.0.1"); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + engine::hostgroup* hg_obj{engine::hostgroup::hostgroups["temphg"].get()}; + ASSERT_EQ(hg_obj->members.size(), 2u); + ASSERT_NO_THROW(hst_aply.remove_object(0)); + ASSERT_EQ(hg_obj->members.size(), 1u); + + hg.mutable_members()->clear_data(); + hg_hlp.hook("members", "c"); + ASSERT_NO_THROW( + hg_aply.modify_object(&pb_config.mutable_hostgroups()->at(0), hg)); + + hg_aply.remove_object(0); + ASSERT_TRUE(pb_config.hostgroups().empty()); + ASSERT_TRUE(engine::hostgroup::hostgroups.empty()); +} diff --git a/engine/tests/configuration/applier/applier-pblog.cc b/engine/tests/configuration/applier/applier-pblog.cc new file mode 100644 index 00000000000..ff384aa2825 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pblog.cc @@ -0,0 +1,578 @@ +/** + * Copyright 2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include "com/centreon/engine/configuration/applier/hostescalation.hh" +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state.pb.h" +#include "common/engine_conf/state_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class ApplierLog : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a default State configuration (log_v2_enabled is true) +// Then the parser applies log_v2_enabled from the configuration file. +TEST_F(ApplierLog, logV2Enabled) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_v2_enabled(), true); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_v2_enabled=0" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.log_v2_enabled(), false); +} + +TEST_F(ApplierLog, logLegacyEnabled) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_legacy_enabled(), true); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_legacy_enabled=0" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.log_legacy_enabled(), false); +} + +TEST_F(ApplierLog, logV2Logger) { + configuration::error_cnt err; + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + + ASSERT_EQ(st.log_v2_logger(), "file"); + + 
std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_v2_logger=syslog" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.log_v2_logger(), "syslog"); +} + +TEST_F(ApplierLog, logLevelFunctions) { + configuration::error_cnt err; + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + + ASSERT_EQ(st.log_level_functions(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_functions=trace" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_functions(), configuration::LogLevel::trace); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_functions=tracerrrr" << std::endl; + ofs.close(); + + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); +} + +TEST_F(ApplierLog, logLevelConfig) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_config(), configuration::LogLevel::info); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_config=debug" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_config(), configuration::LogLevel::debug); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_config=tracerrrr" << std::endl; + ofs.close(); + + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout 
<< out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_config")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_config(), "debug"); +} + +TEST_F(ApplierLog, logLevelEvents) { + configuration::error_cnt err; + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + + ASSERT_EQ(st.log_level_events(), configuration::LogLevel::info); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_events=warning" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_events(), configuration::LogLevel::warning); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_events=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_events")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_events(), "warning"); +} + +TEST_F(ApplierLog, logLevelChecks) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_checks(), configuration::LogLevel::info); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_checks=error" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_checks(), configuration::LogLevel::error); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_checks=tracerrrr" << 
std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_checks")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_checks(), "error"); +} + +TEST_F(ApplierLog, logLevelNotifications) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_notifications(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_notifications=off" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_notifications(), configuration::LogLevel::off); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_notifications=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_notifications")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_notifications(), "off"); +} + +TEST_F(ApplierLog, logLevelEventBroker) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + 
ASSERT_EQ(st.log_level_eventbroker(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_eventbroker=critical" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_eventbroker(), configuration::LogLevel::critical); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_eventbroker=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_eventbroker")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_eventbroker(), "critical"); +} + +TEST_F(ApplierLog, logLevelExternalCommand) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_external_command(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_external_command=trace" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_external_command(), configuration::LogLevel::trace); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_external_command=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string 
out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_external_command")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_external_command(), "trace"); +} + +TEST_F(ApplierLog, logLevelCommands) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_commands(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_commands=debug" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_commands(), configuration::LogLevel::debug); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_commands=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_commands")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_commands(), "debug"); +} + +TEST_F(ApplierLog, logLevelDowntimes) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_downtimes(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_downtimes=warning" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_downtimes(), 
configuration::LogLevel::warning); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_downtimes=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_downtimes")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_downtimes(), "warning"); +} + +TEST_F(ApplierLog, logLevelComments) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_comments(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_comments=error" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_comments(), configuration::LogLevel::error); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_comments=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_comments")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_comments(), "error"); +} + +TEST_F(ApplierLog, logLevelMacros) { + configuration::parser parser; + configuration::State st; + 
configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_macros(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_macros=critical" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_macros(), configuration::LogLevel::critical); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_macros=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_macros")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_macros(), "critical"); +} + +TEST_F(ApplierLog, logLevelProcess) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_process(), configuration::LogLevel::info); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_process=off" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_process(), configuration::LogLevel::off); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_process=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string 
out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_process")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_process(), "off"); +} + +TEST_F(ApplierLog, logLevelRuntime) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_level_runtime(), configuration::LogLevel::error); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_level_runtime=off" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + + ASSERT_EQ(st.log_level_runtime(), configuration::LogLevel::off); + + ofs.open("/tmp/test-config.cfg"); + ofs << "log_level_runtime=tracerrrr" << std::endl; + ofs.close(); + ASSERT_THROW(parser.parse("/tmp/test-config.cfg", &st, err), std::exception); + std::remove("/tmp/test-config.cfg"); + // testing::internal::CaptureStdout(); + // parser.parse("/tmp/test-config.cfg", st); + // std::remove("/tmp/test-config.cfg"); + + // std::string out{testing::internal::GetCapturedStdout()}; + // std::cout << out << std::endl; + // size_t step1{ + // out.find("[config] [error] error wrong level setted for " + // "log_level_runtime")}; + // ASSERT_NE(step1, std::string::npos); + // ASSERT_EQ(st.log_level_runtime(), "off"); +} + +TEST_F(ApplierLog, logFile) { + configuration::parser parser; + configuration::State st; + configuration::state_helper st_hlp(&st); + configuration::error_cnt err; + + ASSERT_EQ(st.log_file(), DEFAULT_LOG_FILE); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_file=/tmp/centengine.log" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", &st, err); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.log_file(), "/tmp/centengine.log"); +} diff --git 
a/engine/tests/configuration/applier/applier-pbservice.cc b/engine/tests/configuration/applier/applier-pbservice.cc new file mode 100644 index 00000000000..62045656d29 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbservice.cc @@ -0,0 +1,838 @@ +/* + * Copyright 2019, 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include "../../test_engine.hh" +#include "../../timeperiod/utils.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/tag.hh" +#include "com/centreon/engine/contact.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/service.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/tag_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace 
com::centreon::engine::configuration::applier; + +class ApplierService : public TestEngine { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given service configuration with an host not defined +// Then the applier add_object throws an exception because it needs a service +// command. +TEST_F(ApplierService, PbNewServiceWithHostNotDefinedFromConfig) { + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc_hlp.hook("_TEST", "Value1"); + ASSERT_THROW(svc_aply.add_object(svc), std::exception); +} + +// Given host configuration without host_id +// Then the applier add_object throws an exception. +TEST_F(ApplierService, PbNewHostWithoutHostId) { + configuration::applier::host hst_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + ASSERT_THROW(hst_aply.add_object(hst), std::exception); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierService, PbNewServiceFromConfig) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + 
configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + // No need here to call svc_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. + svc.set_host_id(1); + svc_aply.add_object(svc); + service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. + ASSERT_TRUE(!sm.begin()->second->get_host_ptr()); + ASSERT_TRUE(sm.begin()->second->description() == "test_description"); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierService, PbRenameServiceFromConfig) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + svc.set_service_description("test_description2"); + svc_aply.modify_object(pb_config.mutable_services(0), svc); + svc_aply.expand_objects(pb_config); + 
+ service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. + ASSERT_TRUE(!sm.begin()->second->get_host_ptr()); + ASSERT_TRUE(sm.begin()->second->description() == "test_description2"); + + std::string s{engine::service::services[{"test_host", "test_description2"}] + ->description()}; + ASSERT_TRUE(s == "test_description2"); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierService, PbRemoveServiceFromConfig) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + ASSERT_EQ(engine::service::services_by_id.size(), 1u); + svc_aply.remove_object(0); + ASSERT_EQ(engine::service::services_by_id.size(), 0u); + + svc.set_service_description("test_description2"); + + // We have to fake the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 1u); + 
ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. + ASSERT_TRUE(!sm.begin()->second->get_host_ptr()); + ASSERT_TRUE(sm.begin()->second->description() == "test_description2"); + + std::string s{engine::service::services[{"test_host", "test_description2"}] + ->description()}; + ASSERT_TRUE(s == "test_description2"); +} + +// Given a service configuration applied to a service, +// When the check_validity() method is executed on the configuration, +// Then it throws an exception because: +// 1. it does not provide a service description +// 2. it is not attached to a host +// 3. the service does not contain any check command. +TEST_F(ApplierService, PbServicesCheckValidity) { + configuration::error_cnt err; + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + + // No service description + ASSERT_THROW(csvc_hlp.check_validity(err), std::exception); + + csvc.set_service_description("check_description"); + csvc.set_service_id(53); + + // No host attached to + ASSERT_THROW(csvc_hlp.check_validity(err), std::exception); + + csvc.set_host_name("test_host"); + + // No check command attached to + ASSERT_THROW(csvc_hlp.check_validity(err), std::exception); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + csvc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("10.11.12.13"); + hst.set_host_id(124); + hst_aply.add_object(hst); + + // We fake here the expand_object on configuration::service + csvc.set_host_id(124); + + svc_aply.add_object(csvc); + csvc.set_service_description("foo"); + + // No check command + 
ASSERT_NO_THROW(csvc_hlp.check_validity(err)); + svc_aply.resolve_object(csvc, err); + + service_map const& sm(engine::service::services); + ASSERT_EQ(sm.size(), 1u); + + host_map const& hm(engine::host::hosts); + ASSERT_EQ(sm.begin()->second->get_host_ptr(), hm.begin()->second.get()); +} + +// Given a service configuration, +// When the flap_detection_options is set to none, +// Then it is well recorded with only none. +TEST_F(ApplierService, PbServicesFlapOptionsNone) { + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + + csvc.set_service_description("test_description"); + csvc.set_host_name("test_host"); + + csvc_hlp.hook("flap_detection_options", "n"); + ASSERT_EQ(csvc.flap_detection_options(), action_svc_none); +} + +// Given a service configuration, +// When the flap_detection_options is set to all, +// Then it is well recorded with all. +TEST_F(ApplierService, PbServicesFlapOptionsAll) { + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + csvc_hlp.hook("flap_detection_options", "a"); + ASSERT_EQ(csvc.flap_detection_options(), action_svc_ok | action_svc_warning | + action_svc_critical | + action_svc_unknown); +} + +// Given a service configuration, +// When the initial_state value is set to unknown, +// Then it is well recorded with unknown. +// When the initial_state value is set to whatever +// Then the parse method returns false. 
+TEST_F(ApplierService, PbServicesInitialState) { + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + csvc_hlp.hook("initial_state", "u"); + ASSERT_EQ(csvc.initial_state(), engine::service::state_unknown); + ASSERT_FALSE(csvc_hlp.hook("initial_state", "g")); +} + +// Given a service configuration, +// When the stalking options are set to "c,w", +// Then they are well recorded with "critical | warning" +// When the initial_state value is set to "a" +// Then they are well recorded with "ok | warning | unknown | critical" +TEST_F(ApplierService, PbServicesStalkingOptions) { + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + ASSERT_TRUE(csvc_hlp.hook("stalking_options", "c,w")); + ASSERT_EQ(csvc.stalking_options(), action_svc_critical | action_svc_warning); + + ASSERT_TRUE(csvc_hlp.hook("stalking_options", "a")); + ASSERT_EQ(csvc.stalking_options(), action_svc_ok | action_svc_warning | + action_svc_unknown | + action_svc_critical); +} + +// Given a viable contact +// When it is added to a contactgroup +// And when this contactgroup is added to a service +// Then after the service resolution, we can see the contactgroup stored in the +// service with the contact inside it. 
+TEST_F(ApplierService, PbContactgroupResolution) { + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + configuration::applier::contact ct_aply; + ct_aply.add_object(ctct); + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + cg.set_contactgroup_name("contactgroup_test"); + fill_string_group(cg.mutable_members(), "admin"); + configuration::applier::contactgroup cg_aply; + cg_aply.add_object(cg); + configuration::error_cnt err; + cg_aply.resolve_object(cg, err); + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + hst_aply.add_object(hst); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + fill_string_group(svc.mutable_contactgroups(), "contactgroup_test"); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + svc_aply.resolve_object(svc, err); + service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + contactgroup_map_unsafe cgs{sm.begin()->second->get_contactgroups()}; + ASSERT_EQ(cgs.size(), 1u); + ASSERT_EQ(cgs.begin()->first, "contactgroup_test"); + contact_map_unsafe::iterator itt{ + cgs.begin()->second->get_members().find("admin")}; + + ASSERT_NE(itt, cgs.begin()->second->get_members().end()); + + contact_map::const_iterator 
it{engine::contact::contacts.find("admin")}; + ASSERT_NE(it, engine::contact::contacts.end()); + + ASSERT_EQ(itt->second, it->second.get()); +} + +TEST_F(ApplierService, PbStalkingOptionsWhenServiceIsModified) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::Host hst; + configuration::service_helper svc_hlp(&svc); + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + svc_hlp.hook("stalking_options", ""); + svc_hlp.hook("notification_options", "a"); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + service_id_map const& sm(engine::service::services_by_id); + std::shared_ptr serv = sm.begin()->second; + + ASSERT_FALSE(serv->get_stalk_on(engine::service::ok)); + ASSERT_FALSE(serv->get_stalk_on(engine::service::warning)); + ASSERT_FALSE(serv->get_stalk_on(engine::service::critical)); + ASSERT_FALSE(serv->get_stalk_on(engine::service::unknown)); + + ASSERT_TRUE(serv->get_notify_on(engine::service::ok)); + ASSERT_TRUE(serv->get_notify_on(engine::service::warning)); + ASSERT_TRUE(serv->get_notify_on(engine::service::critical)); + ASSERT_TRUE(serv->get_notify_on(engine::service::unknown)); + + svc.set_service_description("test_description2"); + svc_aply.modify_object(pb_config.mutable_services(0), svc); + svc_aply.expand_objects(pb_config); + + 
ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. + serv = sm.begin()->second; + + ASSERT_TRUE(!serv->get_host_ptr()); + ASSERT_TRUE(serv->description() == "test_description2"); + + std::string s{engine::service::services[{"test_host", "test_description2"}] + ->description()}; + ASSERT_TRUE(s == "test_description2"); + + ASSERT_FALSE(serv->get_stalk_on(engine::service::ok)); + ASSERT_FALSE(serv->get_stalk_on(engine::service::warning)); + ASSERT_FALSE(serv->get_stalk_on(engine::service::critical)); + ASSERT_FALSE(serv->get_stalk_on(engine::service::unknown)); + + ASSERT_TRUE(serv->get_notify_on(engine::service::ok)); + ASSERT_TRUE(serv->get_notify_on(engine::service::warning)); + ASSERT_TRUE(serv->get_notify_on(engine::service::critical)); + ASSERT_TRUE(serv->get_notify_on(engine::service::unknown)); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierService, PbNewServiceFromConfigTags) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::Host hst; + configuration::service_helper svc_hlp(&svc); + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + svc_hlp.hook("group_tags", "1,2"); + svc_hlp.hook("category_tags", "3"); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + configuration::Tag tag; 
+ configuration::tag_helper tag_hlp(&tag); + configuration::applier::tag tag_aply; + tag.mutable_key()->set_id(1); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar1"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(2); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar2"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(3); + tag.mutable_key()->set_type(tag_servicecategory); + tag.set_tag_name("foobar3"); + tag_aply.add_object(tag); + + // No need here to call svc_aply.expand_objects(*config) because the + // configuration service is not stored in configuration::state. We just have + // to set the host_id manually. + svc.set_host_id(1); + svc_aply.add_object(svc); + service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. + ASSERT_TRUE(!sm.begin()->second->get_host_ptr()); + ASSERT_TRUE(sm.begin()->second->description() == "test_description"); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierService, PbRenameServiceFromConfigTags) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + svc_hlp.hook("group_tags", "1,2"); + svc_hlp.hook("category_tags", "3"); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + 
configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + configuration::Tag tag; + configuration::tag_helper tag_hlp(&tag); + configuration::applier::tag tag_aply; + tag.mutable_key()->set_id(1); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar1"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(2); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar2"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(3); + tag.mutable_key()->set_type(tag_servicecategory); + tag.set_tag_name("foobar3"); + tag_aply.add_object(tag); + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + svc.set_service_description("test_description2"); + svc_aply.modify_object(pb_config.mutable_services(0), svc); + svc_aply.expand_objects(pb_config); + + const service_id_map& sm = engine::service::services_by_id; + ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. 
+ ASSERT_TRUE(!sm.begin()->second->get_host_ptr()); + ASSERT_TRUE(sm.begin()->second->description() == "test_description2"); + + std::string s{engine::service::services[{"test_host", "test_description2"}] + ->description()}; + ASSERT_TRUE(s == "test_description2"); +} + +// Given service configuration with a host defined +// Then the applier add_object creates the service +TEST_F(ApplierService, PbRemoveServiceFromConfigTags) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + // The host id is not given + ASSERT_THROW(hst_aply.add_object(hst), std::exception); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + svc_hlp.hook("group_tags", "1,2"); + svc_hlp.hook("category_tags", "3"); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + configuration::Tag tag; + configuration::tag_helper tag_hlp(&tag); + configuration::applier::tag tag_aply; + tag.mutable_key()->set_id(1); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar1"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(2); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar2"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(3); + tag.mutable_key()->set_type(tag_servicecategory); + tag.set_tag_name("foobar3"); + tag_aply.add_object(tag); + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + 
ASSERT_EQ(engine::service::services_by_id.size(), 1u); + svc_aply.remove_object(0); + ASSERT_EQ(engine::service::services_by_id.size(), 0u); + + svc.set_service_description("test_description2"); + + // We have to fake the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + service_id_map const& sm(engine::service::services_by_id); + ASSERT_EQ(sm.size(), 1u); + ASSERT_EQ(sm.begin()->first.first, 1u); + ASSERT_EQ(sm.begin()->first.second, 3u); + + // Service is not resolved, host is null now. + ASSERT_TRUE(!sm.begin()->second->get_host_ptr()); + ASSERT_TRUE(sm.begin()->second->description() == "test_description2"); + + std::string s{engine::service::services[{"test_host", "test_description2"}] + ->description()}; + ASSERT_TRUE(s == "test_description2"); +} + +// Given a service configuration, +// When we duplicate it, we get a configuration equal to the previous one. +// When two services are generated from the same configuration +// Then they are equal. +// When Modifying a configuration changes, +// Then the '!=' effect on configurations. 
+TEST_F(ApplierService, PbServicesEqualityTags) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + hst_aply.add_object(hst); + csvc.set_host_name("test_host"); + csvc.set_service_description("test_description1"); + csvc.set_service_id(12345); + csvc.set_acknowledgement_timeout(21); + csvc_hlp.hook("group_tags", "1,2"); + csvc_hlp.hook("category_tags", "3"); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + csvc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + configuration::Tag tag; + configuration::tag_helper tag_hlp(&tag); + configuration::applier::tag tag_aply; + tag.mutable_key()->set_id(1); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar1"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(2); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar2"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(3); + tag.mutable_key()->set_type(tag_servicecategory); + tag.set_tag_name("foobar3"); + tag_aply.add_object(tag); + // We have to fake the expand_object on configuration::service + csvc.set_host_id(1); + + svc_aply.add_object(csvc); + csvc.set_service_description("test_description2"); + csvc.set_service_id(12346); + ASSERT_NO_THROW(svc_aply.add_object(csvc)); + service_map const& sm(engine::service::services); + ASSERT_EQ(sm.size(), 2u); + service_map::const_iterator it(sm.begin()); + std::shared_ptr svc1(it->second); + ++it; + std::shared_ptr svc2(it->second); + configuration::Service csvc1; + configuration::service_helper csvc1_hlp(&csvc1); + csvc1.CopyFrom(csvc); + 
csvc1_hlp.hook("group_tags", "6,8,9"); + csvc_hlp.hook("category_tags", "15,26,34"); + csvc_hlp.hook("group_tags", "6,8,9"); + + ASSERT_TRUE(svc1 != svc2); +} + +// Given a service configuration applied to a service, +// When the check_validity() method is executed on the configuration, +// Then it throws an exception because: +// 1. it does not provide a service description +// 2. it is not attached to a host +// 3. the service does not contain any check command. +TEST_F(ApplierService, PbServicesCheckValidityTags) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::Service csvc; + configuration::service_helper csvc_hlp(&csvc); + configuration::error_cnt err; + + // No service description + ASSERT_THROW(csvc_hlp.check_validity(err), std::exception); + + csvc.set_service_description("check_description"); + csvc.set_service_id(53); + csvc_hlp.hook("group_tags", "1,2"); + csvc_hlp.hook("category_tags", "3"); + + // No host attached to + ASSERT_THROW(csvc_hlp.check_validity(err), std::exception); + + csvc.set_host_name("test_host"); + + // No check command attached to + ASSERT_THROW(csvc_hlp.check_validity(err), std::exception); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + csvc.set_check_command("cmd"); + cmd_aply.add_object(cmd); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("10.11.12.13"); + hst.set_host_id(124); + hst_aply.add_object(hst); + + configuration::Tag tag; + configuration::tag_helper tag_hlp(&tag); + configuration::applier::tag tag_aply; + + tag.mutable_key()->set_id(1); + tag.mutable_key()->set_type(tag_servicegroup); + tag.set_tag_name("foobar1"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(2); + tag.mutable_key()->set_type(tag_servicegroup); + 
tag.set_tag_name("foobar2"); + tag_aply.add_object(tag); + + tag.mutable_key()->set_id(3); + tag.mutable_key()->set_type(tag_servicecategory); + tag.set_tag_name("foobar3"); + tag_aply.add_object(tag); + + tag_aply.add_object(tag); + // We fake here the expand_object on configuration::service + csvc.set_host_id(124); + + svc_aply.add_object(csvc); + csvc.set_service_description("foo"); + + // No check command + ASSERT_NO_THROW(csvc_hlp.check_validity(err)); + svc_aply.resolve_object(csvc, err); + + service_map const& sm(engine::service::services); + ASSERT_EQ(sm.size(), 1u); + + host_map const& hm(engine::host::hosts); + ASSERT_EQ(sm.begin()->second->get_host_ptr(), hm.begin()->second.get()); +} diff --git a/engine/tests/configuration/applier/applier-pbserviceescalation.cc b/engine/tests/configuration/applier/applier-pbserviceescalation.cc new file mode 100644 index 00000000000..ecde8d72239 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbserviceescalation.cc @@ -0,0 +1,182 @@ +/* + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "common/engine_conf/serviceescalation_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class ApplierServiceEscalation : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +TEST_F(ApplierServiceEscalation, PbAddEscalation) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + ASSERT_EQ(host::hosts.size(), 1u); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd_aply.add_object(cmd); + + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_host_name("test_host"); + svc.set_service_description("test_svc"); + svc.set_service_id(12); + svc.set_check_command("cmd"); + svc.set_host_id(12); + svc_aply.add_object(svc); + ASSERT_EQ(service::services.size(), 1u); + + configuration::applier::serviceescalation se_apply; + configuration::Serviceescalation se; + configuration::serviceescalation_helper se_hlp(&se); + se_hlp.hook("hosts", "test_host"); + se_hlp.hook("service_description", "test_svc"); + se.set_first_notification(4); + se_apply.add_object(se); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 1u); + se.set_first_notification(8); + se_apply.add_object(se); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 2u); +} + 
+TEST_F(ApplierServiceEscalation, PbRemoveEscalation) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + ASSERT_EQ(host::hosts.size(), 1u); + + configuration::applier::command cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd_aply.add_object(cmd); + + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_host_name("test_host"); + svc.set_service_description("test_svc"); + svc.set_service_id(12); + svc.set_check_command("cmd"); + svc.set_host_id(12); + svc_aply.add_object(svc); + ASSERT_EQ(service::services.size(), 1u); + + configuration::applier::serviceescalation se_apply; + configuration::Serviceescalation se1, se2; + configuration::serviceescalation_helper se1_hlp(&se1), se2_hlp(&se2); + se1_hlp.hook("hosts", "test_host"); + se1_hlp.hook("service_description", "test_svc"); + se1.set_first_notification(4); + se_apply.add_object(se1); + se2_hlp.hook("hosts", "test_host"); + se2_hlp.hook("service_description", "test_svc"); + se2.set_first_notification(8); + se_apply.add_object(se2); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 2u); + + se_apply.remove_object(0); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 1u); + se_apply.remove_object(0); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 0u); +} + +TEST_F(ApplierServiceEscalation, PbRemoveEscalationFromRemovedService) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_aply.add_object(hst); + ASSERT_EQ(host::hosts.size(), 1u); + + configuration::applier::command 
cmd_aply; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + cmd_aply.add_object(cmd); + + configuration::applier::service svc_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_host_name("test_host"); + svc.set_service_description("test_svc"); + svc.set_service_id(12); + svc.set_check_command("cmd"); + svc.set_host_id(12); + svc_aply.add_object(svc); + ASSERT_EQ(service::services.size(), 1u); + + configuration::applier::serviceescalation se_apply; + configuration::Serviceescalation se1, se2; + configuration::serviceescalation_helper se1_hlp(&se1), se2_hlp(&se2); + se1_hlp.hook("hosts", "test_host"); + se1_hlp.hook("service_description", "test_svc"); + se1.set_first_notification(4); + se_apply.add_object(se1); + se2_hlp.hook("hosts", "test_host"); + se2_hlp.hook("service_description", "test_svc"); + se2.set_first_notification(8); + se_apply.add_object(se2); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 2u); + + hst_aply.remove_object(0); + ASSERT_EQ(host::hosts.size(), 0u); + svc_aply.remove_object(0); + ASSERT_EQ(service::services.size(), 0u); + + se_apply.remove_object(0); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 1u); + se_apply.remove_object(0); + ASSERT_EQ(serviceescalation::serviceescalations.size(), 0u); +} diff --git a/engine/tests/configuration/applier/applier-pbservicegroup.cc b/engine/tests/configuration/applier/applier-pbservicegroup.cc new file mode 100644 index 00000000000..64a92486a6b --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbservicegroup.cc @@ -0,0 +1,343 @@ +/* + * Copyright 2018 - 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include "../../timeperiod/utils.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/servicegroup.hh" +#include "com/centreon/engine/servicegroup.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "common/engine_conf/servicegroup_helper.hh" +#include "common/engine_conf/state.pb.h" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class ApplierServicegroup : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// Given a servicegroup applier +// And a configuration servicegroup +// When we modify the servicegroup configuration with a non existing +// servicegroup +// Then an exception is thrown. 
+TEST_F(ApplierServicegroup, PbModifyUnexistingServicegroupFromConfig) { + configuration::applier::servicegroup aply; + configuration::Servicegroup sg; + configuration::servicegroup_helper sg_hlp(&sg); + sg.set_servicegroup_name("test"); + fill_pair_string_group(sg.mutable_members(), "host1,service1"); + configuration::Servicegroup* new_sg = pb_config.add_servicegroups(); + new_sg->CopyFrom(sg); + ASSERT_THROW(aply.modify_object(new_sg, sg), std::exception); +} + +// Given a servicegroup applier +// And a configuration servicegroup in configuration +// When we modify the servicegroup configuration +// Then the applier modify_object updates the servicegroup. +TEST_F(ApplierServicegroup, PbModifyServicegroupFromConfig) { + configuration::applier::servicegroup aply; + configuration::Servicegroup sg; + configuration::servicegroup_helper sg_hlp(&sg); + sg.set_servicegroup_name("test"); + fill_pair_string_group(sg.mutable_members(), "host1,service1"); + aply.add_object(sg); + auto it = engine::servicegroup::servicegroups.find("test"); + ASSERT_TRUE(it->second->get_alias() == "test"); + + sg.set_alias("test_renamed"); + aply.modify_object(pb_config.mutable_servicegroups(0), sg); + it = engine::servicegroup::servicegroups.find("test"); + ASSERT_TRUE(it->second->get_alias() == "test_renamed"); +} + +// Given an empty servicegroup +// When the resolve_object() method is called +// Then no warning, nor error are given +TEST_F(ApplierServicegroup, PbResolveEmptyservicegroup) { + configuration::error_cnt err; + configuration::applier::servicegroup aplyr; + configuration::Servicegroup grp; + configuration::servicegroup_helper grp_hlp(&grp); + grp.set_servicegroup_name("test"); + aplyr.add_object(grp); + aplyr.expand_objects(pb_config); + aplyr.resolve_object(grp, err); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 0); +} + +// Given a servicegroup with a non-existing service +// When the resolve_object() method is called +// Then an exception is thrown +// 
And the method returns 1 error +TEST_F(ApplierServicegroup, PbResolveInexistentService) { + configuration::error_cnt err; + configuration::applier::servicegroup aplyr; + configuration::Servicegroup grp; + configuration::servicegroup_helper grp_helper(&grp); + grp.set_servicegroup_name("test"); + fill_pair_string_group(grp.mutable_members(), "host1,non_existing_service"); + aplyr.add_object(grp); + aplyr.expand_objects(pb_config); + ASSERT_THROW(aplyr.resolve_object(grp, err), std::exception); + ASSERT_EQ(err.config_warnings, 0); + ASSERT_EQ(err.config_errors, 1); +} + +// Given a servicegroup with a service and a host +// When the resolve_object() method is called +// Then the service is really added to the service group. +TEST_F(ApplierServicegroup, PbResolveServicegroup) { + configuration::error_cnt err; + configuration::applier::host aply_hst; + configuration::applier::service aply_svc; + configuration::applier::command aply_cmd; + configuration::applier::servicegroup aply_grp; + configuration::Servicegroup grp; + configuration::servicegroup_helper grp_hlp(&grp); + grp.set_servicegroup_name("test_group"); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + aply_hst.add_object(hst); + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_service_description("test"); + svc.set_host_name("test_host"); + svc.set_service_id(18); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + aply_cmd.add_object(cmd); + + // We fake here the expand_object on configuration::service + svc.set_host_id(12); + + aply_svc.add_object(svc); + fill_string_group(svc.mutable_servicegroups(), "test_group"); + fill_pair_string_group(grp.mutable_members(), "test_host,test"); + aply_grp.add_object(grp); + 
aply_grp.expand_objects(pb_config); + ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +} + +// Given a servicegroup with a service already configured +// And a second servicegroup configuration +// When we set the first one as servicegroup member to the second +// Then the parse method returns true and set the first one service +// to the second one. +TEST_F(ApplierServicegroup, PbSetServicegroupMembers) { + configuration::applier::host aply_hst; + configuration::applier::service aply_svc; + configuration::applier::command aply_cmd; + configuration::applier::servicegroup aply_grp; + configuration::Servicegroup grp; + configuration::servicegroup_helper grp_hlp(&grp); + grp.set_servicegroup_name("test_group"); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + aply_hst.add_object(hst); + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_service_description("test"); + svc.set_host_name("test_host"); + svc.set_service_id(18); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + aply_cmd.add_object(cmd); + + // We fake here the expand_object on configuration::service + svc.set_host_id(12); + + configuration::error_cnt err; + aply_svc.add_object(svc); + fill_string_group(svc.mutable_servicegroups(), "test_group"); + fill_pair_string_group(grp.mutable_members(), "test_host,test"); + aply_grp.add_object(grp); + aply_grp.expand_objects(pb_config); + aply_grp.resolve_object(grp, err); + ASSERT_TRUE(grp.members().data().size() == 1); + + configuration::Servicegroup grp1; + configuration::servicegroup_helper grp1_hlp(&grp1); + grp1.set_servicegroup_name("big_group"); + fill_string_group(grp1.mutable_servicegroup_members(), "test_group"); + aply_grp.add_object(grp1); + aply_grp.expand_objects(pb_config); + + 
// grp1 must be reload because the expand_objects reload them totally. + auto found = std::find_if(pb_config.servicegroups().begin(), + pb_config.servicegroups().end(), + [](const configuration::Servicegroup& sg) { + return sg.servicegroup_name() == "big_group"; + }); + ASSERT_TRUE(found != pb_config.servicegroups().end()); + ASSERT_EQ(found->members().data().size(), 1); +} + +// Given a servicegroup applier +// And a configuration servicegroup in configuration +// When we remove the configuration +// Then it is really removed +TEST_F(ApplierServicegroup, PbRemoveServicegroupFromConfig) { + configuration::applier::host aply_hst; + configuration::applier::service aply_svc; + configuration::applier::command aply_cmd; + configuration::applier::servicegroup aply_grp; + configuration::Servicegroup grp; + configuration::servicegroup_helper grp_hlp(&grp); + grp.set_servicegroup_name("test_group"); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + aply_hst.add_object(hst); + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_service_description("test"); + svc.set_host_name("test_host"); + svc.set_service_id(18); + cmd.set_command_line("echo 1"); + svc.set_check_command("cmd"); + aply_cmd.add_object(cmd); + + // We fake here the expand_object on configuration::service + svc.set_host_id(12); + + aply_svc.add_object(svc); + fill_string_group(svc.mutable_servicegroups(), "test_group"); + fill_pair_string_group(grp.mutable_members(), "test_host,test"); + aply_grp.add_object(grp); + aply_grp.expand_objects(pb_config); + configuration::error_cnt err; + aply_grp.resolve_object(grp, err); + ASSERT_EQ(grp.members().data().size(), 1); + + configuration::Servicegroup grp1; + configuration::servicegroup_helper grp1_hlp(&grp1); + 
grp1.set_servicegroup_name("big_group"); + fill_string_group(grp1.mutable_servicegroup_members(), "test_group"); + aply_grp.add_object(grp1); + aply_grp.expand_objects(pb_config); + auto found = std::find_if(pb_config.servicegroups().begin(), + pb_config.servicegroups().end(), + [](const configuration::Servicegroup& sg) { + return sg.servicegroup_name() == "big_group"; + }); + ASSERT_TRUE(found != pb_config.servicegroups().end()); + ASSERT_EQ(found->members().data().size(), 1); + + ASSERT_EQ(engine::servicegroup::servicegroups.size(), 2u); + aply_grp.remove_object(0); + ASSERT_EQ(engine::servicegroup::servicegroups.size(), 1u); +} + +// Given a servicegroup applier +// And a configuration servicegroup in configuration +// When we remove the configuration +// Then it is really removed +TEST_F(ApplierServicegroup, PbRemoveServiceFromGroup) { + configuration::applier::host aply_hst; + configuration::applier::service aply_svc; + configuration::applier::command aply_cmd; + configuration::applier::servicegroup aply_grp; + configuration::Servicegroup grp; + configuration::servicegroup_helper grp_hlp(&grp); + grp.set_servicegroup_name("test_group"); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); + aply_cmd.add_object(cmd); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + aply_hst.add_object(hst); + + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_service_description("test"); + svc_hlp.hook("service_description", "test"); + svc.set_host_name("test_host"); + svc.set_service_id(18); + svc.set_check_command("cmd"); + // We fake here the expand_object on configuration::service + svc.set_host_id(12); + aply_svc.add_object(svc); + svc_hlp.hook("servicegroups", "test_group"); + + svc.set_service_description("test2"); + 
svc.set_host_name("test_host"); + svc.set_service_id(19); + svc.set_check_command("cmd"); + // We fake here the expand_object on configuration::service + svc.set_host_id(12); + aply_svc.add_object(svc); + svc_hlp.hook("servicegroups", "test_group"); + + grp_hlp.hook("members", "test_host,test,test_host,test2"); + aply_grp.add_object(grp); + aply_grp.expand_objects(pb_config); + configuration::error_cnt err; + aply_grp.resolve_object(grp, err); + ASSERT_EQ(grp.members().data().size(), 2); + + engine::servicegroup* sg = + engine::servicegroup::servicegroups["test_group"].get(); + ASSERT_EQ(sg->members.size(), 2u); + aply_svc.remove_object(1); + ASSERT_EQ(sg->members.size(), 1u); + + grp_hlp.hook("members", "test_host,test,test_host,test2"); + aply_grp.modify_object(pb_config.mutable_servicegroups(0), grp); + + ASSERT_EQ(engine::servicegroup::servicegroups.size(), 1u); +} diff --git a/engine/tests/configuration/applier/applier-pbstate.cc b/engine/tests/configuration/applier/applier-pbstate.cc new file mode 100644 index 00000000000..8586252ba44 --- /dev/null +++ b/engine/tests/configuration/applier/applier-pbstate.cc @@ -0,0 +1,1001 @@ +/* + * Copyright 2023 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include +#include +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/globals.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/state.pb.h" +#include "tests/helper.hh" + +using namespace com::centreon::engine; +using com::centreon::engine::configuration::TagType; + +extern configuration::State pb_config; + +class ApplierState : public ::testing::Test { + protected: + public: + void SetUp() override { + init_config_state(); + auto tps = pb_config.mutable_timeperiods(); + for (int i = 0; i < 10; i++) { + auto* tp = tps->Add(); + tp->set_alias(fmt::format("timeperiod {}", i)); + tp->set_timeperiod_name(fmt::format("Timeperiod {}", i)); + } + for (int i = 0; i < 5; i++) { + configuration::Contact* ct = pb_config.add_contacts(); + configuration::contact_helper ct_hlp(ct); + std::string name(fmt::format("name{:2}", i)); + ct->set_contact_name(name); + ct->set_alias(fmt::format("alias{:2}", i)); + for (int j = 0; j < 3; j++) + ct->add_address(fmt::format("address{:2}", j)); + for (int j = 0; j < 10; j++) { + configuration::CustomVariable* cv = ct->add_customvariables(); + cv->set_name(fmt::format("key_{}_{}", name, j)); + cv->set_value(fmt::format("value_{}_{}", name, j)); + } + } + } + + void TearDown() override { deinit_config_state(); } +}; + +using MessageDifferencer = ::google::protobuf::util::MessageDifferencer; + +static void CreateFile(const std::string& filename, + const std::string& content) { + std::ofstream oss(filename); + oss << content; + oss.close(); +} + +static void AddCfgFile(const std::string& filename) { + std::ifstream ss("/tmp/centengine.cfg"); + std::list lines; + std::string s; + while (getline(ss, s)) { + lines.push_back(std::move(s)); + } + for (auto it = lines.begin(); it != lines.end(); ++it) { + 
if (it->find("cfg_file") == 0) { + lines.insert(it, fmt::format("cfg_file={}", filename)); + break; + } + } + std::ofstream oss("/tmp/centengine.cfg"); + for (auto& l : lines) + oss << l << std::endl; +} + +static void RmConf() { + std::remove("/tmp/ad.cfg"); + std::remove("/tmp/centengine.cfg"); + std::remove("/tmp/commands.cfg"); + std::remove("/tmp/connectors.cfg"); + std::remove("/tmp/contactgroups.cfg"); + std::remove("/tmp/contacts.cfg"); + std::remove("/tmp/dependencies.cfg"); + std::remove("/tmp/hostescalations.cfg"); + std::remove("/tmp/hostgroups.cfg"); + std::remove("/tmp/hosts.cfg"); + std::remove("/tmp/resource.cfg"); + std::remove("/tmp/servicedependencies.cfg"); + std::remove("/tmp/serviceescalations.cfg"); + std::remove("/tmp/servicegroups.cfg"); + std::remove("/tmp/services.cfg"); + std::remove("/tmp/severities.cfg"); + std::remove("/tmp/tags.cfg"); + std::remove("/tmp/test-config.cfg"); + std::remove("/tmp/timeperiods.cfg"); +} + +enum class ConfigurationObject { + ANOMALYDETECTION = 0, + CONTACTGROUP = 1, + DEPENDENCY = 2, + ESCALATION = 3, + SERVICEGROUP = 4, + SEVERITY = 5, + TAG = 6, + CONTACTGROUP_NE = 7, +}; + +static void CreateConf(int idx) { + constexpr const char* cmd1 = + "for i in " ENGINE_CFG_TEST "/conf1/*.cfg ; do cp $i /tmp ; done"; + switch (idx) { + case 1: + system(cmd1); + break; + default: + ASSERT_EQ(1, 0); + break; + } +} + +static void CreateBadConf(ConfigurationObject obj) { + CreateConf(1); + switch (obj) { + case ConfigurationObject::SERVICEGROUP: + CreateFile("/tmp/servicegroups.cfg", + "define servicegroup {\n" + " servicegroup_id 1000\n" + " name sg_tpl\n" + " members " + "host_1,service_1,host_1,service_2,host_1,service_4\n" + " notes notes for sg template\n" + "}\n" + "define servicegroup {\n" + " servicegroup_id 1\n" + " alias sg1\n" + " members " + "host_1,service_1,host_1,service_2,host_1,service_3\n" + " notes notes for sg1\n" + " notes_url notes url for sg1\n" + " action_url action url for sg1\n" + " use 
sg_tpl\n" + "}\n"); + break; + case ConfigurationObject::TAG: + CreateFile("/tmp/tags.cfg", + "define tag {\n" + " tag_id 1\n" + " tag_name tag1\n" + "}\n"); + break; + case ConfigurationObject::ANOMALYDETECTION: + CreateFile("/tmp/ad.cfg", + "define anomalydetection {\n" + " service_description service_ad\n" + " host_name Centreon-central\n" + " service_id 2000\n" + " register 1\n" + " dependent_service_id 1\n" + " thresholds_file /tmp/toto\n" + "}\n"); + AddCfgFile("/tmp/ad.cfg"); + break; + case ConfigurationObject::CONTACTGROUP: + CreateFile("/tmp/contactgroups.cfg", + "define contactgroup {\n" + " name cg_tpl\n" + " members user1,user2,user3\n" + " contactgroup_members cg2\n" + "}\n"); + break; + case ConfigurationObject::CONTACTGROUP_NE: + CreateFile("/tmp/contactgroups.cfg", + "define contactgroup {\n" + " contactgroup_name cg1\n" + " alias cg1_a\n" + " members user1,user2\n" + " contactgroup_members +cg3\n" + "}\n" + "define contactgroup {\n" + " contactgroup_name cg2\n" + " alias cg2_a\n" + " members user1,user2\n" + " contactgroup_members cg3\n" + "}\n" + "define contactgroup {\n" + " contactgroup_name cg3\n" + " alias cg3_a\n" + " use cg_tpl\n" + " members +user3\n" + " contactgroup_members cg3\n" + "}\n" + "define contactgroup {\n" + " contactgroup_name cg_tpl\n" + " name cg_tpl\n" + " members user1,user2\n" + " contactgroup_members cg2\n" + "}\n"); + break; + case ConfigurationObject::SEVERITY: + CreateFile("/tmp/severities.cfg", + "define severity {\n" + " severity_name sev1\n" + " id 3\n" + " level 14\n" + " icon_id 123\n" + "}\n"); + break; + case ConfigurationObject::ESCALATION: + CreateFile("/tmp/escalations.cfg", + "define serviceescalation {\n" + " host_name host_1\n" + " description service_2\n" + "}\n" + "define serviceescalation {\n" + " host_name host_1\n" + " contact_groups cg1\n" + "}\n" + "define hostescalation {\n" + " contact_groups cg1,cg2\n" + " name he_tmpl\n" + "}\n" + "define hostescalation {\n" + " contact_groups +cg1\n" + " 
hostgroup_name hg1,hg2\n" + " use he_tmpl\n" + "}\n"); + break; + case ConfigurationObject::DEPENDENCY: + CreateFile("/tmp/dependencies.cfg", + "define hostdependency {\n" + " dependent_hostgroup_name hg1,hg2\n" + " dependent_host_name host_2\n" + " name hd_tmpl\n" + "}\n" + "define servicedependency {\n" + " servicegroup_name sg1\n" + " host host_1\n" + " dependent_hostgroup_name host_3\n" + " dependent_host_name host_2\n" + " use hd_tmpl\n" + "}\n" + "define servicedependency {\n" + " service_description service_2\n" + " dependent_description service_1\n" + "}\n"); + break; + default: + break; + } +} + +constexpr size_t CFG_FILES = 19u; +constexpr size_t RES_FILES = 1u; +constexpr size_t HOSTS = 11u; +constexpr size_t SERVICES = 363u; +constexpr size_t TIMEPERIODS = 2u; +constexpr size_t CONTACTS = 1u; +constexpr size_t HOSTGROUPS = 2u; +constexpr size_t SERVICEGROUPS = 1u; +constexpr size_t HOSTDEPENDENCIES = 2u; + +// TEST_F(ApplierState, DiffOnTimeperiod) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// EXPECT_TRUE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_TRUE(dstate.to_add().empty()); +// ASSERT_TRUE(dstate.to_remove().empty()); +// ASSERT_TRUE(dstate.to_modify().empty()); +// } +// +// TEST_F(ApplierState, DiffOnTimeperiodOneRemoved) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// new_config.mutable_timeperiods()->RemoveLast(); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // 
util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_EQ(dstate.to_remove().size(), 1u); +// // Number 142 is to remove. +// ASSERT_EQ(dstate.to_remove()[0].key()[0].i32(), 143); +// ASSERT_EQ(dstate.to_remove()[0].key()[1].i32(), 9); +// ASSERT_EQ(dstate.to_remove()[0].key().size(), 2); +// ASSERT_TRUE(dstate.to_add().empty()); +// ASSERT_TRUE(dstate.to_modify().empty()); +// } + +// TEST_F(ApplierState, DiffOnTimeperiodNewOne) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// auto tps = new_config.mutable_timeperiods(); +// auto* tp = tps->Add(); +// tp->set_alias("timeperiod 11"); +// tp->set_timeperiod_name("Timeperiod 11"); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_TRUE(dstate.to_remove().empty()); +// ASSERT_TRUE(dstate.to_modify().empty()); +// ASSERT_EQ(dstate.to_add().size(), 1u); +// ASSERT_TRUE(dstate.to_add()[0].val().has_value_tp()); +// const configuration::Timeperiod& new_tp = +// dstate.to_add()[0].val().value_tp(); ASSERT_EQ(new_tp.alias(), +// std::string("timeperiod 11")); ASSERT_EQ(new_tp.timeperiod_name(), +// std::string("Timeperiod 11")); +// } + +// TEST_F(ApplierState, DiffOnTimeperiodAliasRenamed) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// auto tps = 
new_config.mutable_timeperiods(); +// tps->at(7).set_alias("timeperiod changed"); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_TRUE(dstate.to_remove().empty()); +// ASSERT_TRUE(dstate.to_add().empty()); +// ASSERT_EQ(dstate.to_modify().size(), 1u); +// const configuration::PathWithValue& path = dstate.to_modify()[0]; +// ASSERT_EQ(path.path().key().size(), 4u); +// // number 142 => timeperiods +// ASSERT_EQ(path.path().key()[0].i32(), 143); +// // index 7 => timeperiods[7] +// ASSERT_EQ(path.path().key()[1].i32(), 7); +// // number 2 => timeperiods.alias +// ASSERT_EQ(path.path().key()[2].i32(), 2); +// // No more key... 
+// ASSERT_EQ(path.path().key()[3].i32(), -1); +// ASSERT_TRUE(path.val().has_value_str()); +// // The new value of timeperiods[7].alias is "timeperiod changed" +// ASSERT_EQ(path.val().value_str(), std::string("timeperiod changed")); +// } + +// TEST_F(ApplierState, DiffOnContactOneRemoved) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// new_config.mutable_contacts()->DeleteSubrange(4, 1); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_EQ(dstate.to_remove().size(), 1u); +// +// ASSERT_EQ(dstate.to_remove()[0].key().size(), 2); +// // number 131 => for contacts +// ASSERT_EQ(dstate.to_remove()[0].key()[0].i32(), 132); +// // "name 4" => contacts["name 4"] +// ASSERT_EQ(dstate.to_remove()[0].key()[1].i32(), 4); +// +// ASSERT_TRUE(dstate.to_add().empty()); +// ASSERT_TRUE(dstate.to_modify().empty()); +// } + +// TEST_F(ApplierState, DiffOnContactOneAdded) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// pb_config.mutable_contacts()->DeleteSubrange(4, 1); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// 
new_config); +// ASSERT_TRUE(dstate.to_remove().empty()); +// ASSERT_TRUE(dstate.to_modify().empty()); +// ASSERT_EQ(dstate.to_add().size(), 1u); +// const configuration::PathWithValue& to_add = dstate.to_add()[0]; +// ASSERT_EQ(to_add.path().key().size(), 2u); +// // Contact -> number 131 +// ASSERT_EQ(to_add.path().key()[0].i32(), 132); +// // ASSERT_EQ(to_add.path().key()[1].str(), std::string("name 4")); +// ASSERT_TRUE(to_add.val().has_value_ct()); +// } + +/** + * @brief Contact "name 3" has a new address added. Addresses are stored in + * an array. We don't have the information if an address is added or removed + * so we send all the addresses in the difference. That's why the difference + * tells about 4 addresses as difference. + */ +// TEST_F(ApplierState, DiffOnContactOneNewAddress) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// auto& ct = new_config.mutable_contacts()->at(3); +// ct.add_address("new address"); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_EQ(dstate.to_add().size(), 1u); +// ASSERT_TRUE(dstate.to_modify().empty()); +// ASSERT_TRUE(dstate.to_remove().empty()); +// ASSERT_EQ(dstate.to_add()[0].path().key().size(), 4u); +// // Number of Contacts in State +// ASSERT_EQ(dstate.to_add()[0].path().key()[0].i32(), 132); +// // Key to the context to change +// ASSERT_EQ(dstate.to_add()[0].path().key()[1].i32(), 3); +// // Number of the object to modify +// ASSERT_EQ(dstate.to_add()[0].path().key()[2].i32(), 2); +// // Index of the new object to add. 
+// ASSERT_EQ(dstate.to_add()[0].path().key()[3].i32(), 3); +// +// ASSERT_EQ(dstate.to_add()[0].val().value_str(), std::string("new +// address")); +// } + +/** + * @brief Contact "name 3" has its first address removed. Addresses are stored + * in an array. We don't have the information if an address is added or removed + * so we send all the addresses in the difference. That's why the difference + * tells about 4 addresses as difference. + */ +// TEST_F(ApplierState, DiffOnContactFirstAddressRemoved) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// auto& ct = new_config.mutable_contacts()->at(3); +// ct.mutable_address()->erase(ct.mutable_address()->begin()); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// ASSERT_TRUE(dstate.to_add().empty()); +// ASSERT_EQ(dstate.to_modify().size(), 2u); +// ASSERT_EQ(dstate.to_remove().size(), 1u); +// ASSERT_EQ(dstate.to_modify()[0].path().key().size(), 4u); +// // Number of contacts in State +// ASSERT_EQ(dstate.to_modify()[0].path().key()[0].i32(), 132); +// // Key "name 3" to the good contact +// ASSERT_EQ(dstate.to_modify()[0].path().key()[1].i32(), 3); +// // Number of addresses in Contact +// ASSERT_EQ(dstate.to_modify()[0].path().key()[2].i32(), 2); +// // Index of the address to modify +// ASSERT_EQ(dstate.to_modify()[0].path().key()[3].i32(), 0); +// // New value of the address +// ASSERT_EQ(dstate.to_modify()[0].val().value_str(), std::string("address +// 1")); +// +// ASSERT_EQ(dstate.to_remove()[0].key().size(), 4u); +// // Number of 
contacts in State +// ASSERT_EQ(dstate.to_remove()[0].key()[0].i32(), 132); +// // Key "name 3" to the good contact +// ASSERT_EQ(dstate.to_remove()[0].key()[1].i32(), 3); +// // Number of addresses in Contact +// ASSERT_EQ(dstate.to_remove()[0].key()[2].i32(), 2); +// // Index of the address to remove +// ASSERT_EQ(dstate.to_remove()[0].key()[3].i32(), 2); +// } + +/** + * @brief Contact "name 3" has its first address removed. Addresses are stored + * in an array. We don't have the information if an address is added or removed + * so we send all the addresses in the difference. That's why the difference + * tells about 4 addresses as difference. + */ +// TEST_F(ApplierState, DiffOnContactSecondAddressUpdated) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// auto& ct = new_config.mutable_contacts()->at(3); +// (*ct.mutable_address())[1] = "this address is different"; +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// // ASSERT_TRUE(dstate.dcontacts().to_add().empty()); +// // ASSERT_TRUE(dstate.dcontacts().to_remove().empty()); +// // ASSERT_EQ(dstate.dcontacts().to_modify().size(), 1u); +// // auto to_modify = dstate.dcontacts().to_modify(); +// // ASSERT_EQ(to_modify["name 3"].list().begin()->id(), 2); +// // ASSERT_EQ(to_modify["name 3"].list().begin()->value_str(), "address +// 2"); +// } + +// TEST_F(ApplierState, DiffOnContactRemoveCustomvariable) { +// configuration::State new_config; +// new_config.CopyFrom(pb_config); +// auto& ct = new_config.mutable_contacts()->at(3); +// 
ct.mutable_customvariables()->erase(ct.mutable_customvariables()->begin()); +// +// std::string output; +// MessageDifferencer differencer; +// differencer.set_report_matches(false); +// differencer.ReportDifferencesToString(&output); +// // differencer.set_repeated_field_comparison( +// // util::MessageDifferencer::AS_SMART_LIST); +// EXPECT_FALSE(differencer.Compare(pb_config, new_config)); +// std::cout << "Output= " << output << std::endl; +// +// configuration::DiffState dstate = +// configuration::applier::state::instance().build_difference(pb_config, +// new_config); +// // ASSERT_TRUE(dstate.dcontacts().to_add().empty()); +// // ASSERT_TRUE(dstate.dcontacts().to_remove().empty()); +// // ASSERT_EQ(dstate.dcontacts().to_modify().size(), 1u); +// // auto to_modify = dstate.dcontacts().to_modify(); +// // ASSERT_EQ(to_modify["name 3"].list().begin()->id(), 2); +// // ASSERT_EQ(to_modify["name 3"].list().begin()->value_str(), "address +// 2"); +// } + +TEST_F(ApplierState, StateParsing) { + configuration::error_cnt err; + configuration::State cfg; + configuration::parser p; + CreateConf(1); + p.parse("/tmp/centengine.cfg", &cfg, err); + ASSERT_EQ(cfg.check_service_freshness(), false); + ASSERT_EQ(cfg.enable_flap_detection(), false); + ASSERT_EQ(cfg.instance_heartbeat_interval(), 30); + ASSERT_EQ(cfg.log_level_functions(), configuration::LogLevel::warning); + ASSERT_EQ(cfg.cfg_file().size(), CFG_FILES); + ASSERT_EQ(cfg.resource_file().size(), RES_FILES); + ASSERT_EQ(cfg.hosts().size(), HOSTS); + ASSERT_EQ(cfg.hosts()[0].host_name(), std::string("Centreon-central")); + ASSERT_TRUE(cfg.hosts()[0].obj().register_()); + ASSERT_EQ(cfg.hosts()[0].host_id(), 30); + ASSERT_EQ(cfg.hosts()[1].host_name(), std::string("Centreon-central_1")); + ASSERT_TRUE(cfg.hosts()[1].obj().register_()); + ASSERT_EQ(cfg.hosts()[1].host_id(), 31); + ASSERT_EQ(cfg.hosts()[2].host_name(), std::string("Centreon-central_2")); + ASSERT_TRUE(cfg.hosts()[2].obj().register_()); + 
ASSERT_EQ(cfg.hosts()[2].host_id(), 32); + ASSERT_EQ(cfg.hosts()[3].host_name(), std::string("Centreon-central_3")); + ASSERT_TRUE(cfg.hosts()[3].obj().register_()); + ASSERT_EQ(cfg.hosts()[3].host_id(), 33); + + /* Service */ + ASSERT_EQ(cfg.services().size(), SERVICES); + ASSERT_EQ(cfg.services()[0].service_id(), 196); + ASSERT_TRUE(cfg.services()[0].obj().register_()); + ASSERT_TRUE(cfg.services()[0].checks_active()); + ASSERT_EQ(cfg.services()[0].host_name(), + std::string_view("Centreon-central")); + ASSERT_EQ(cfg.services()[0].service_description(), + std::string_view("proc-sshd")); + ASSERT_EQ(cfg.services()[0].contactgroups().data().size(), 2u); + EXPECT_EQ(cfg.services()[0].contactgroups().data()[0], + std::string_view("Guest")); + EXPECT_EQ(cfg.services()[0].contactgroups().data()[1], + std::string_view("Supervisors")); + + EXPECT_EQ(cfg.services()[0].contacts().data().size(), 1u); + EXPECT_EQ(cfg.services()[0].contacts().data()[0], std::string("John_Doe")); + EXPECT_EQ(cfg.services()[0].notification_options(), 0x3f); + std::set> exp{{2, tag::servicegroup}}; + std::set> res; + for (auto& t : cfg.services()[0].tags()) { + uint16_t c; + switch (t.second()) { + case TagType::tag_servicegroup: + c = tag::servicegroup; + break; + case TagType::tag_hostgroup: + c = tag::hostgroup; + break; + case TagType::tag_servicecategory: + c = tag::servicecategory; + break; + case TagType::tag_hostcategory: + c = tag::hostcategory; + break; + default: + assert("Should not be raised" == nullptr); + } + res.emplace(t.first(), c); + } + EXPECT_EQ(res, exp); + + ASSERT_EQ(cfg.commands().size(), 15u); + auto fnd_cmd = + std::find_if(cfg.commands().begin(), cfg.commands().end(), + [](const configuration::Command& cmd) { + return cmd.command_name() == + std::string_view("App-Centreon-MySQL-Partitioning"); + }); + ASSERT_TRUE(fnd_cmd != cfg.commands().end()); + ASSERT_EQ( + fnd_cmd->command_line(), + std::string_view( + "$CENTREONPLUGINS$/centreon_centreon_database.pl " + 
"--plugin=database::mysql::plugin " + "--dyn-mode=apps::centreon::sql::mode::partitioning " + "--host='$HOSTADDRESS$' --username='$_HOSTMYSQLUSERNAME$' " + "--password='$_HOSTMYSQLPASSWORD$' --port='$_HOSTMYSQLPORT$' " + "--tablename='$_SERVICETABLENAME1$' " + "--tablename='$_SERVICETABLENAME2$' " + "--tablename='$_SERVICETABLENAME3$' " + "--tablename='$_SERVICETABLENAME4$' --warning='$_SERVICEWARNING$' " + "--critical='$_SERVICECRITICAL$'")); + + /* One command inherits from command_template */ + auto cit = std::find_if(cfg.commands().begin(), cfg.commands().end(), + [](const configuration::Command& cmd) { + return cmd.command_name() == + std::string_view("base_host_alive"); + }); + + ASSERT_NE(cit, cfg.commands().end()); + ASSERT_EQ(cit->command_name(), std::string_view("base_host_alive")); + ASSERT_EQ(cit->command_line(), + std::string_view("$USER1$/check_icmp -H $HOSTADDRESS$ -w " + "3000.0,80% -c 5000.0,100% -p 1")); + + ASSERT_EQ(cfg.timeperiods().size(), TIMEPERIODS); + auto tit = + std::find_if(cfg.timeperiods().begin(), cfg.timeperiods().end(), + [](const configuration::Timeperiod& tp) { + return tp.timeperiod_name() == std::string_view("24x7"); + }); + ASSERT_NE(tit, cfg.timeperiods().end()); + EXPECT_EQ(tit->alias(), std::string_view("24_Hours_A_Day,_7_Days_A_Week")); + EXPECT_EQ(tit->timeranges().sunday().size(), + 1u); // std::string("00:00-24:00")); + EXPECT_EQ(tit->timeranges().sunday()[0].range_start(), 0); + EXPECT_EQ(tit->timeranges().sunday()[0].range_end(), 3600 * 24); + EXPECT_EQ(tit->timeranges().monday().size(), 1u); + EXPECT_EQ(tit->timeranges().monday()[0].range_start(), 0); + EXPECT_EQ(tit->timeranges().monday()[0].range_end(), 86400); + EXPECT_EQ(tit->timeranges().monday().size(), 1); + EXPECT_EQ(tit->timeranges().tuesday().size(), 1u); + EXPECT_EQ(tit->timeranges().wednesday().size(), 1u); + EXPECT_EQ(tit->timeranges().thursday().size(), 1u); + EXPECT_EQ(tit->timeranges().friday().size(), 1u); + 
EXPECT_EQ(tit->timeranges().saturday().size(), 1u); + + ASSERT_EQ(cfg.contacts().size(), CONTACTS); + const auto& ct = cfg.contacts().at(0); + EXPECT_EQ(ct.contact_name(), std::string("John_Doe")); + EXPECT_TRUE(ct.can_submit_commands()); + EXPECT_TRUE(ct.host_notifications_enabled()); + EXPECT_EQ(ct.host_notification_options(), + configuration::action_hst_up | configuration::action_hst_down | + configuration::action_hst_unreachable); + EXPECT_TRUE(ct.retain_nonstatus_information()); + EXPECT_TRUE(ct.retain_status_information()); + EXPECT_TRUE(ct.service_notifications_enabled()); + EXPECT_EQ(ct.service_notification_options(), + configuration::action_svc_warning | + configuration::action_svc_unknown | + configuration::action_svc_critical); + EXPECT_EQ(ct.alias(), std::string_view("admin")); + EXPECT_EQ(ct.contactgroups().data().size(), 0u); + + ASSERT_EQ(cfg.hostgroups().size(), HOSTGROUPS); + auto hgit = cfg.hostgroups().begin(); + while (hgit != cfg.hostgroups().end() && hgit->hostgroup_name() != "hg1") + ++hgit; + ASSERT_TRUE(hgit != cfg.hostgroups().end()); + const auto hg = *hgit; + ASSERT_EQ(hg.hostgroup_id(), 3u); + ASSERT_EQ(hg.hostgroup_name(), std::string_view("hg1")); + ASSERT_EQ(hg.alias(), std::string_view("hg1")); + ASSERT_EQ(hg.members().data().size(), 3u); + { + auto it = hg.members().data().begin(); + ASSERT_EQ(*it, std::string_view("Centreon-central_2")); + ++it; + ASSERT_EQ(*it, std::string_view("Centreon-central_3")); + ++it; + ASSERT_EQ(*it, std::string_view("Centreon-central_4")); + } + ASSERT_EQ(hg.notes(), std::string_view("note_hg1")); + ASSERT_EQ(hg.notes_url(), std::string_view()); + ASSERT_EQ(hg.action_url(), std::string_view()); + + ASSERT_EQ(cfg.servicegroups().size(), SERVICEGROUPS); + auto sgit = cfg.servicegroups().begin(); + while (sgit != cfg.servicegroups().end() && + sgit->servicegroup_name() != "Database-MySQL") + ++sgit; + ASSERT_TRUE(sgit != cfg.servicegroups().end()); + const auto sg = *sgit; + ASSERT_EQ(sg.servicegroup_id(), 
2u); + ASSERT_EQ(sg.servicegroup_name(), std::string_view("Database-MySQL")); + ASSERT_EQ(sg.alias(), std::string_view("Database-MySQL")); + ASSERT_EQ(sg.members().data().size(), 67u); + { + auto find_pair = [&data = sg.members().data()](std::string_view first, + std::string_view second) { + auto retval = std::find_if( + data.begin(), data.end(), + [&first, &second](const configuration::PairStringSet_Pair& m) { + return m.first() == first && m.second() == second; + }); + return retval; + }; + + auto it = sg.members().data().begin(); + it = find_pair("Centreon-central", "Connection-Time"); + ASSERT_NE(it, sg.members().data().end()); + + it = find_pair("Centreon-central", "Connections-Number"); + ASSERT_NE(it, sg.members().data().end()); + + it = find_pair("Centreon-central", "Myisam-Keycache"); + ASSERT_NE(it, sg.members().data().end()); + } + ASSERT_EQ(sg.notes(), std::string_view()); + ASSERT_EQ(sg.notes_url(), std::string_view()); + ASSERT_EQ(sg.action_url(), std::string_view()); + + auto sdit = cfg.servicedependencies().begin(); + while (sdit != cfg.servicedependencies().end() && + std::find(sdit->servicegroups().data().begin(), + sdit->servicegroups().data().end(), + "sg1") != sdit->servicegroups().data().end()) + ++sdit; + ASSERT_TRUE(sdit != cfg.servicedependencies().end()); + ASSERT_TRUE(*sdit->hosts().data().begin() == + std::string_view("Centreon-central")); + ASSERT_TRUE(*sdit->dependent_service_description().data().begin() == + std::string_view("Connections-Number")); + ASSERT_TRUE(*sdit->dependent_hosts().data().begin() == + std::string_view("Centreon-central")); + ASSERT_TRUE(sdit->inherits_parent()); + ASSERT_EQ(sdit->execution_failure_options(), + configuration::action_sd_unknown | configuration::action_sd_ok); + ASSERT_EQ( + sdit->notification_failure_options(), + configuration::action_sd_warning | configuration::action_sd_critical); + + // Anomalydetections + ASSERT_TRUE(cfg.anomalydetections().empty()); + // auto adit = 
cfg.anomalydetections().begin(); + // while (adit != cfg.anomalydetections().end() && + // (adit->service_id() != 2001 || adit->host_id() != 1)) + // ++adit; + // ASSERT_TRUE(adit != cfg.anomalydetections().end()); + // ASSERT_TRUE(adit->service_description() == "service_ad2"); + // ASSERT_EQ(adit->dependent_service_id(), 1); + // ASSERT_TRUE(adit->metric_name() == "metric2"); + // ASSERT_EQ(adit->customvariables().size(), 1); + // ASSERT_EQ(adit->customvariables().at(0).value(), + // std::string("this_is_a_test")); + // ASSERT_EQ(adit->contactgroups().data().size(), 2); + // ASSERT_EQ(adit->contacts().data().size(), 1); + // ASSERT_EQ(adit->servicegroups().data().size(), 2); + + auto cgit = cfg.contactgroups().begin(); + ASSERT_EQ(cfg.contactgroups().size(), 2u); + ASSERT_EQ(cgit->contactgroup_name(), std::string_view("Guest")); + ASSERT_EQ(cgit->alias(), std::string_view("Guests Group")); + ASSERT_EQ(cgit->members().data().size(), 0u); + ASSERT_EQ(cgit->contactgroup_members().data().size(), 0u); + + ++cgit; + ASSERT_TRUE(cgit != cfg.contactgroups().end()); + ASSERT_EQ(cgit->contactgroup_name(), std::string_view("Supervisors")); + ASSERT_EQ(cgit->alias(), std::string_view("Centreon supervisors")); + ASSERT_EQ(cgit->members().data().size(), 1u); + ASSERT_EQ(*cgit->members().data().begin(), "John_Doe"); + ASSERT_EQ(cgit->contactgroup_members().data().size(), 0u); + + ++cgit; + ASSERT_TRUE(cgit == cfg.contactgroups().end()); + + ASSERT_EQ(cfg.connectors().size(), 2); + auto cnit = cfg.connectors().begin(); + ASSERT_TRUE(cnit != cfg.connectors().end()); + ASSERT_EQ(cnit->connector_name(), std::string_view("Perl Connector")); + ASSERT_EQ(cnit->connector_line(), + std::string_view( + "/usr/lib64/centreon-connector/centreon_connector_perl " + "--log-file=/var/log/centreon-engine/connector-perl.log")); + ++cnit; + ASSERT_EQ(cnit->connector_name(), std::string_view("SSH Connector")); + ASSERT_EQ(cnit->connector_line(), + std::string_view( + 
"/usr/lib64/centreon-connector/centreon_connector_ssh " + "--log-file=/var/log/centreon-engine/connector-ssh.log")); + ++cnit; + ASSERT_TRUE(cnit == cfg.connectors().end()); + + /* Severities */ + ASSERT_EQ(cfg.severities().size(), 2); + auto svit = cfg.severities().begin(); + ++svit; + ASSERT_TRUE(svit != cfg.severities().end()); + ASSERT_EQ(svit->severity_name(), std::string_view("severity1")); + EXPECT_EQ(svit->key().id(), 5); + EXPECT_EQ(svit->level(), 1); + EXPECT_EQ(svit->icon_id(), 3); + ASSERT_EQ(svit->key().type(), configuration::SeverityType::service); + + /* Serviceescalations */ + ASSERT_EQ(cfg.serviceescalations().size(), 6); + auto seit = cfg.serviceescalations().begin(); + EXPECT_TRUE(seit != cfg.serviceescalations().end()); + EXPECT_EQ(*seit->hosts().data().begin(), + std::string_view("Centreon-central")); + ASSERT_EQ(*seit->service_description().data().begin(), + std::string_view("Cpu")); + ++seit; + ASSERT_EQ(seit->hosts().data().size(), 1); + ASSERT_EQ(seit->contactgroups().data().size(), 1); + EXPECT_EQ(*seit->contactgroups().data().begin(), "Supervisors"); + ASSERT_EQ(seit->servicegroups().data().size(), 0); + std::list se_names; + std::list se_base{"Connection-Time", "Cpu", "Cpu", + "Database-Size"}; + for (auto& se : cfg.serviceescalations()) { + if (se.service_description().data().size()) { + ASSERT_EQ(se.service_description().data().size(), 1); + se_names.push_back(*se.service_description().data().begin()); + } + } + se_names.sort(); + ASSERT_EQ(se_names, se_base); + + /*Hostescalations */ + auto heit = cfg.hostescalations().begin(); + ASSERT_TRUE(heit != cfg.hostescalations().end()); + std::set cts{"Supervisors"}; + std::set he_cts; + for (auto& cg : heit->contactgroups().data()) + he_cts.insert(cg); + ASSERT_EQ(he_cts, cts); + ++heit; + + std::set hgs{"hg1"}; + std::set he_hgs; + for (auto& hg : heit->hostgroups().data()) + he_hgs.insert(hg); + ASSERT_EQ(he_hgs, hgs); + + /*Hostdependencies */ + ASSERT_EQ(cfg.hostdependencies().size(), 
HOSTDEPENDENCIES); + + configuration::applier::state::instance().apply(cfg, err); + + ASSERT_TRUE(std::all_of(cfg.hostdependencies().begin(), + cfg.hostdependencies().end(), [](const auto& hd) { + return hd.hostgroups().data().empty() && + hd.dependent_hostgroups().data().empty(); + })); + + RmConf(); + auto hst = host::hosts_by_id[30]; + ASSERT_TRUE(hst->custom_variables.find("CUSTOM_CV") != + hst->custom_variables.end()); + ASSERT_EQ(hst->custom_variables["CUSTOM_CV"].value(), + std::string_view("custom_value")); +} + +TEST_F(ApplierState, StateParsingServicegroupValidityFailed) { + configuration::State config; + configuration::parser p; + CreateBadConf(ConfigurationObject::SERVICEGROUP); + configuration::error_cnt err; + ASSERT_THROW(p.parse("/tmp/centengine.cfg", &config, err), std::exception); +} + +TEST_F(ApplierState, StateParsingTagValidityFailed) { + configuration::State config; + configuration::parser p; + CreateBadConf(ConfigurationObject::TAG); + configuration::error_cnt err; + ASSERT_THROW(p.parse("/tmp/centengine.cfg", &config, err), std::exception); +} + +TEST_F(ApplierState, StateParsingAnomalydetectionValidityFailed) { + configuration::State config; + configuration::parser p; + CreateBadConf(ConfigurationObject::ANOMALYDETECTION); + configuration::error_cnt err; + ASSERT_THROW(p.parse("/tmp/centengine.cfg", &config, err), std::exception); +} + +TEST_F(ApplierState, StateParsingSeverityWithoutType) { + configuration::State config; + configuration::parser p; + CreateBadConf(ConfigurationObject::SEVERITY); + configuration::error_cnt err; + ASSERT_THROW(p.parse("/tmp/centengine.cfg", &config, err), std::exception); +} + +TEST_F(ApplierState, StateParsingHostdependencyWithoutHost) { + configuration::State config; + configuration::parser p; + CreateBadConf(ConfigurationObject::DEPENDENCY); + configuration::error_cnt err; + ASSERT_THROW(p.parse("/tmp/centengine.cfg", &config, err), std::exception); +} + +TEST_F(ApplierState, 
StateParsingNonexistingContactgroup) { + configuration::State cfg; + configuration::parser p; + CreateBadConf(ConfigurationObject::CONTACTGROUP_NE); + configuration::error_cnt err; + p.parse("/tmp/centengine.cfg", &cfg, err); + ASSERT_THROW(configuration::applier::state::instance().apply(cfg, err), + std::exception); +} + +TEST_F(ApplierState, StateParsingContactgroupWithoutName) { + configuration::State cfg; + configuration::parser p; + CreateBadConf(ConfigurationObject::CONTACTGROUP); + configuration::error_cnt err; + ASSERT_THROW(p.parse("/tmp/centengine.cfg", &cfg, err), std::exception); +} diff --git a/engine/tests/configuration/applier/applier-service.cc b/engine/tests/configuration/applier/applier-service.cc index 12a4788ccff..4833395a9b8 100644 --- a/engine/tests/configuration/applier/applier-service.cc +++ b/engine/tests/configuration/applier/applier-service.cc @@ -64,7 +64,6 @@ TEST_F(ApplierService, NewServiceWithHostNotDefinedFromConfig) { // Then the applier add_object throws an exception. TEST_F(ApplierService, NewHostWithoutHostId) { configuration::applier::host hst_aply; - configuration::applier::service svc_aply; configuration::service svc; configuration::host hst; ASSERT_TRUE(hst.parse("host_name", "test_host")); diff --git a/engine/tests/configuration/object.cc b/engine/tests/configuration/object.cc index dbf6de5cffe..1079c7df33e 100644 --- a/engine/tests/configuration/object.cc +++ b/engine/tests/configuration/object.cc @@ -1,26 +1,27 @@ /** - * Copyright 2016 Centreon + * Copyright 2016-2024 Centreon * - * This file is part of Centreon Engine. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . */ - #include +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/parser.hh" #include "common/engine_legacy_conf/service.hh" +#endif using namespace com::centreon; using namespace com::centreon::engine; diff --git a/engine/tests/configuration/pbcontact.cc b/engine/tests/configuration/pbcontact.cc new file mode 100644 index 00000000000..130e5e0382a --- /dev/null +++ b/engine/tests/configuration/pbcontact.cc @@ -0,0 +1,43 @@ +/* + * Copyright 2017 - 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include "common/engine_conf/contact_helper.hh" + +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; + +class ConfigContact : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// When I create a configuration::Contact with an empty name +// Then an exception is thrown. +TEST_F(ConfigContact, NewPbContactWithNoName) { + configuration::error_cnt err; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ASSERT_THROW(ctct_hlp.check_validity(err), std::exception); +} diff --git a/engine/tests/configuration/pbhost.cc b/engine/tests/configuration/pbhost.cc new file mode 100644 index 00000000000..3a5d656f839 --- /dev/null +++ b/engine/tests/configuration/pbhost.cc @@ -0,0 +1,33 @@ +/** + * Copyright 2016 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ + +#include + +#include "common/engine_conf/host_helper.hh" + +using namespace com::centreon::engine; + +// Given a host configuration object +// When it is default constructed +// Then its acknowledgements timeout is set to 0 +TEST(ConfigurationHostAcknowledgementTimeoutTest, PbDefaultConstruction) { + configuration::Host h; + configuration::host_helper hlp(&h); + ASSERT_EQ(0, h.acknowledgement_timeout()); +} diff --git a/engine/tests/configuration/pbservice.cc b/engine/tests/configuration/pbservice.cc new file mode 100644 index 00000000000..67316ac9649 --- /dev/null +++ b/engine/tests/configuration/pbservice.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2016 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ + +#include +#ifdef LEGACY_CONF +#include "common/engine_legacy_conf/service.hh" +#else +#include "common/engine_conf/service_helper.hh" +#endif + +using namespace com::centreon::engine; + +// Given a service configuration object +// When it is default constructed +// Then its acknowledgements timeout is set to 0 +TEST(ConfigurationServiceAcknowledgementTimeoutTest, PbDefaultConstruction) { + configuration::Service s; + configuration::service_helper hlp(&s); + ASSERT_EQ(0, s.acknowledgement_timeout()); +} + +TEST(ConfigurationServiceParseProperties, SetCustomVariable) { + configuration::Service s; + configuration::service_helper s_hlp(&s); + ASSERT_TRUE(s_hlp.insert_customvariable("_VARNAME", "TEST1")); +} diff --git a/engine/tests/configuration/pbseverity.cc b/engine/tests/configuration/pbseverity.cc new file mode 100644 index 00000000000..ba73839e434 --- /dev/null +++ b/engine/tests/configuration/pbseverity.cc @@ -0,0 +1,104 @@ +/** + * Copyright 2021 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include + +#include "common/engine_conf/severity_helper.hh" +#include "common/engine_conf/state.pb.h" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; + +class ConfigSeverity : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// When I create a configuration::severity with a null id +// Then an exception is thrown. +TEST_F(ConfigSeverity, PbNewSeverityWithNoKey) { + configuration::error_cnt err; + configuration::Severity sv; + configuration::severity_helper sev_hlp(&sv); + sev_hlp.hook("severity_id", "0"); + sev_hlp.hook("severity_type", "service"); + ASSERT_THROW(sev_hlp.check_validity(err), std::exception); +} + +// When I create a configuration::severity with a null level +// Then an exception is thrown. +TEST_F(ConfigSeverity, PbNewSeverityWithNoLevel) { + configuration::error_cnt err; + configuration::Severity sv; + configuration::severity_helper sv_hlp(&sv); + sv_hlp.hook("severity_id", "1"); + sv_hlp.hook("severity_type", "service"); + ASSERT_THROW(sv_hlp.check_validity(err), std::exception); +} + +// When I create a configuration::severity with an empty name +// Then an exception is thrown. +TEST_F(ConfigSeverity, PbNewSeverityWithNoName) { + configuration::Severity sv; + configuration::severity_helper sv_hlp(&sv); + sv_hlp.hook("severity_id", "1"); + sv_hlp.hook("severity_type", "service"); + sv.set_level(2); + configuration::error_cnt err; + ASSERT_THROW(sv_hlp.check_validity(err), std::exception); +} + +// When I create a configuration::severity with a non empty name, +// non null id and non null level. +// Then no exception is thrown. 
+TEST_F(ConfigSeverity, PbNewSeverityWellFilled) { + configuration::error_cnt err; + configuration::Severity sv; + configuration::severity_helper sv_hlp(&sv); + sv_hlp.hook("severity_id", "1"); + sv_hlp.hook("severity_type", "service"); + sv.set_level(2); + sv.set_severity_name("foobar"); + ASSERT_EQ(sv.key().id(), 1); + ASSERT_EQ(sv.level(), 2); + ASSERT_EQ(sv.severity_name(), "foobar"); + ASSERT_EQ(sv.key().type(), configuration::SeverityType::service); + ASSERT_NO_THROW(sv_hlp.check_validity(err)); +} + +// When I create a configuration::severity with an icon id. +// Then we can get its value. +TEST_F(ConfigSeverity, PbNewSeverityIconId) { + configuration::error_cnt err; + configuration::Severity sv; + configuration::severity_helper sv_hlp(&sv); + sv_hlp.hook("severity_id", "1"); + sv_hlp.hook("severity_type", "host"); + sv.set_level(2); + sv.set_severity_name("foobar"); + ASSERT_EQ(sv.key().id(), 1); + ASSERT_EQ(sv.level(), 2); + ASSERT_EQ(sv.severity_name(), "foobar"); + sv.set_icon_id(18); + ASSERT_NO_THROW(sv_hlp.check_validity(err)); +} diff --git a/engine/tests/configuration/pbtag.cc b/engine/tests/configuration/pbtag.cc new file mode 100644 index 00000000000..eeefd2b8d9a --- /dev/null +++ b/engine/tests/configuration/pbtag.cc @@ -0,0 +1,100 @@ +/** + * Copyright 2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include "common/engine_conf/tag_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; + +extern configuration::State pb_config; + +class ConfigTag : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +// When I create a configuration::tag with a null id +// Then an exception is thrown. +TEST_F(ConfigTag, NewTagWithNoKey) { + configuration::error_cnt err; + configuration::Tag tg; + configuration::tag_helper tag_hlp(&tg); + tg.mutable_key()->set_id(0); + tg.mutable_key()->set_type(0); + ASSERT_THROW(tag_hlp.check_validity(err), std::exception); +} + +// When I create a configuration::tag with a null type +// Then an exception is thrown. +TEST_F(ConfigTag, NewTagWithNoLevel) { + configuration::error_cnt err; + configuration::Tag tg; + configuration::tag_helper tg_hlp(&tg); + tg.mutable_key()->set_id(1); + tg.mutable_key()->set_type(0); + ASSERT_THROW(tg_hlp.check_validity(err), std::exception); +} + +// When I create a configuration::tag with an empty name +// Then an exception is thrown. +TEST_F(ConfigTag, NewTagWithNoName) { + configuration::error_cnt err; + configuration::Tag tg; + configuration::tag_helper tg_hlp(&tg); + tg.mutable_key()->set_id(1); + tg_hlp.hook("type", "hostcategory"); + ASSERT_THROW(tg_hlp.check_validity(err), std::exception); +} + +// When I create a configuration::tag with a non empty name, +// non null id and non null type +// Then no exception is thrown. 
+TEST_F(ConfigTag, NewTagWellFilled) { + configuration::error_cnt err; + configuration::Tag tg; + configuration::tag_helper tg_hlp(&tg); + tg.mutable_key()->set_id(1); + tg.mutable_key()->set_type(0); + tg_hlp.hook("type", "servicegroup"); + tg.set_tag_name("foobar"); + ASSERT_EQ(tg.key().id(), 1); + ASSERT_EQ(tg.key().type(), tag_servicegroup); + ASSERT_EQ(tg.tag_name(), "foobar"); + ASSERT_NO_THROW(tg_hlp.check_validity(err)); +} + +// When I create a configuration::tag with a non empty name, +// non null id and non null type. +// Then we can get the type value. +TEST_F(ConfigTag, NewTagIconId) { + configuration::error_cnt err; + configuration::Tag tg; + configuration::tag_helper tg_hlp(&tg); + tg.mutable_key()->set_id(1); + tg.mutable_key()->set_type(0); + tg_hlp.hook("type", "hostgroup"); + tg.set_tag_name("foobar"); + ASSERT_EQ(tg.key().type(), tag_hostgroup); + ASSERT_NO_THROW(tg_hlp.check_validity(err)); +} diff --git a/engine/tests/configuration/pbtimeperiod-test.cc b/engine/tests/configuration/pbtimeperiod-test.cc new file mode 100644 index 00000000000..8e14c422905 --- /dev/null +++ b/engine/tests/configuration/pbtimeperiod-test.cc @@ -0,0 +1,958 @@ +/** + * Copyright 2022-2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ +#include + +#include + +#include +#include + +#include "com/centreon/engine/common.hh" +#include "com/centreon/engine/configuration/applier/pb_difference.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/globals.hh" +#include "common/engine_conf/timeperiod_helper.hh" + +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine; + +namespace com::centreon::engine::configuration { +class time_period_comparator { + static const std::regex name_extractor, alias_extractor, skip_extractor, + day_extractor, date_extractor, date_range1_extractor, + date_range2_extractor, range_extractor, full_date_extractor, + full_date_range_extractor, n_th_day_of_month_extractor, + n_th_day_of_month_range_extractor, n_th_day_of_week_extractor, + n_th_day_of_week_range_extractor, n_th_day_of_week_of_month_extractor, + n_th_day_of_week_of_month_range_extractor, exclude_extractor; + + static const std::map day_to_index, month_to_index; + + const configuration::Timeperiod& _conf_tp; + std::shared_ptr _result; + + static std::list extract_timerange( + const std::string& line_content, + uint32_t offset, + const std::smatch& datas); + std::string name, alias; + + /* days_array */ + std::array, 7> _timeranges; + std::array, + configuration::daterange::daterange_types> + _exceptions; + + std::set _exclude; + + public: + time_period_comparator(const configuration::Timeperiod& conf_tp, + const std::vector& timeperiod_content); + + static void extract_skip(const std::smatch matchs, + unsigned match_index, + Daterange& date_range); + bool is_equal() const; + + bool is_result_equal() const; + + const std::string get_name() const { return name; } +}; + +const std::string one_range("\\d\\d:\\d\\d\\-\\d\\d:\\d\\d"); +const std::string plage_regex("(" + one_range + ")(," + one_range + ")*"); +const std::string full_date("(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)"); 
+const std::string months( + "(january|february|march|april|may|june|july|august|september|october|" + "november|december)"); +const std::string days( + "(sunday|monday|tuesday|wednesday|thursday|friday|saturday)"); + +const std::string skip("(\\s+|\\s*/\\s+\\d+)\\s*"); +const std::regex time_period_comparator::name_extractor( + "^name\\s+(\\S+[\\s\\S]+\\S+)"); +const std::regex time_period_comparator::alias_extractor( + "^alias\\s+(\\S+[\\s\\S]+\\S+)"); + +const std::regex time_period_comparator::skip_extractor("/\\s*(\\d+)"); + +const std::regex time_period_comparator::day_extractor("^" + days + "\\s+" + + plage_regex); + +const std::regex time_period_comparator::date_extractor("^" + months + + "\\s+(\\-*\\d+)\\s+" + + plage_regex); + +const std::regex time_period_comparator::date_range1_extractor( + "^" + months + "\\s+(\\-*\\d+)\\s+\\-\\s+(\\-*\\d+)" + skip + plage_regex); + +const std::regex time_period_comparator::date_range2_extractor( + "^" + months + "\\s+(\\-*\\d+)\\s+\\-\\s+" + months + "\\s+(\\-*\\d+)" + + skip + plage_regex); + +const std::regex time_period_comparator::range_extractor( + "(\\d\\d):(\\d\\d)\\-(\\d\\d):(\\d\\d)"); + +const std::regex time_period_comparator::full_date_extractor("^" + full_date + + skip + + plage_regex); + +const std::regex time_period_comparator::full_date_range_extractor( + "^" + full_date + "\\s*\\-\\s*" + full_date + skip + plage_regex); + +const std::regex time_period_comparator::n_th_day_of_month_extractor( + "^day\\s+(\\-*\\d+)\\s+" + plage_regex); + +const std::regex time_period_comparator::n_th_day_of_month_range_extractor( + "^day\\s+(\\-*\\d+)\\s+\\-\\s+(\\-*\\d+)" + skip + plage_regex); + +const std::regex time_period_comparator::n_th_day_of_week_extractor( + "^" + days + "\\s+(\\-*\\d+)\\s+" + plage_regex); + +const std::regex time_period_comparator::n_th_day_of_week_range_extractor( + "^" + days + "\\s+(\\-*\\d+)\\s+\\-\\s+" + days + "\\s+(\\-*\\d+)" + skip + + plage_regex); + +const std::regex 
time_period_comparator::n_th_day_of_week_of_month_extractor( + "^" + days + "\\s+(\\-*\\d+)\\s+" + months + "\\s+" + plage_regex); + +const std::regex + time_period_comparator::n_th_day_of_week_of_month_range_extractor( + "^" + days + "\\s+(\\-*\\d+)\\s+" + months + "\\s+\\-\\s+" + days + + "\\s+(\\-*\\d+)\\s+" + months + skip + plage_regex); + +const std::regex time_period_comparator::exclude_extractor( + "^exclude\\s+([\\w\\-]+)(,[\\w\\-]+)*"); + +const std::map time_period_comparator::day_to_index = { + {"sunday", 0}, {"monday", 1}, {"tuesday", 2}, {"wednesday", 3}, + {"thursday", 4}, {"friday", 5}, {"saturday", 6}}; + +const std::map time_period_comparator::month_to_index = { + {"january", 0}, {"february", 1}, {"march", 2}, {"april", 3}, + {"may", 4}, {"june", 5}, {"july", 6}, {"august", 7}, + {"september", 8}, {"october", 9}, {"november", 10}, {"december", 11}}; + +void time_period_comparator::extract_skip(const std::smatch matchs, + unsigned match_index, + Daterange& date_range) { + std::smatch skip_extract; + std::string skip_data = matchs[match_index].str(); + if (std::regex_search(skip_data, skip_extract, skip_extractor)) { + date_range.set_skip_interval(atoi(skip_extract[1].str().c_str())); + } +} + +time_period_comparator::time_period_comparator( + const configuration::Timeperiod& conf_tp, + const std::vector& timeperiod_content) + : _conf_tp(conf_tp) { + com::centreon::engine::configuration::applier::timeperiod applier; + + com::centreon::engine::timeperiod::timeperiods.clear(); + + for (const std::string& line : timeperiod_content) { + if (line[0] == '#') + continue; + + { // name + std::smatch line_extract; + if (std::regex_search(line, line_extract, name_extractor)) { + name = line_extract[1]; + std::cout << " test " << name << std::endl; + continue; + } + } + { // alias + std::smatch line_extract; + if (std::regex_search(line, line_extract, alias_extractor)) { + alias = line_extract[1]; + continue; + } + } + { // day of week "monday 08:00-12:00" + 
std::smatch line_extract; + if (std::regex_search(line, line_extract, day_extractor)) { + unsigned day_index = day_to_index.find(line_extract[1].str())->second; + std::list time_intervals = + extract_timerange(line, 2, line_extract); + _timeranges[day_index] = time_intervals; + continue; + } + } + { // exception "january 1 08:00-12:00" + std::smatch line_extract; + if (std::regex_search(line, line_extract, date_extractor)) { + std::list time_intervals = + extract_timerange(line, 3, line_extract); + int day_of_month = atoi(line_extract[2].str().c_str()); + unsigned month_index = + month_to_index.find(line_extract[1].str())->second; + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_date); + toadd.set_smon(month_index); + toadd.set_smday(day_of_month); + toadd.set_emon(month_index); + toadd.set_emday(day_of_month); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::month_date].push_front(toadd); + continue; + } + } + { // exception july 10 - 15 / 2 00:00-24:00 + std::smatch line_extract; + if (std::regex_search(line, line_extract, date_range1_extractor)) { + std::list time_intervals = + extract_timerange(line, 5, line_extract); + int day_of_month_start = atoi(line_extract[2].str().c_str()); + int day_of_month_end = atoi(line_extract[3].str().c_str()); + unsigned month_index = + month_to_index.find(line_extract[1].str())->second; + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_date); + extract_skip(line_extract, 4, toadd); + toadd.set_smon(month_index); + toadd.set_smday(day_of_month_start); + toadd.set_emon(month_index); + toadd.set_emday(day_of_month_end); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::month_date].push_front(toadd); + continue; + } + } + { // exception april 10 - may 15 /2 00:00-24:00 + std::smatch line_extract; + if (std::regex_search(line, line_extract, 
date_range2_extractor)) { + std::list time_intervals = + extract_timerange(line, 6, line_extract); + int day_of_month_start = atoi(line_extract[2].str().c_str()); + unsigned month_index_start = + month_to_index.find(line_extract[1].str())->second; + int day_of_month_end = atoi(line_extract[4].str().c_str()); + unsigned month_index_end = + month_to_index.find(line_extract[3].str())->second; + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_date); + extract_skip(line_extract, 5, toadd); + toadd.set_smon(month_index_start); + toadd.set_smday(day_of_month_start); + toadd.set_emon(month_index_end); + toadd.set_emday(day_of_month_end); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::month_date].push_front(toadd); + continue; + } + } + { // exception "2022-04-05 /5 08:00-12:00" + std::smatch line_extract; + if (std::regex_search(line, line_extract, full_date_extractor)) { + unsigned year = atoi(line_extract[1].str().c_str()); + unsigned month = atoi(line_extract[2].str().c_str()) - 1; + unsigned day_of_month = atoi(line_extract[3].str().c_str()); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_calendar_date); + extract_skip(line_extract, 4, toadd); + std::list time_intervals = + extract_timerange(line, 5, line_extract); + toadd.set_syear(year); + toadd.set_eyear(year); + toadd.set_smon(month); + toadd.set_emon(month); + toadd.set_smday(day_of_month); + toadd.set_emday(day_of_month); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::calendar_date].push_front(toadd); + continue; + } + } + { // exception "2007-01-01 - 2008-02-01 /3 00:00-24:00" + std::smatch line_extract; + if (std::regex_search(line, line_extract, full_date_range_extractor)) { + std::list time_intervals = + extract_timerange(line, 8, line_extract); + unsigned year_start = atoi(line_extract[1].str().c_str()); + unsigned 
month_start = atoi(line_extract[2].str().c_str()) - 1; + unsigned day_of_month_start = atoi(line_extract[3].str().c_str()); + unsigned year_end = atoi(line_extract[4].str().c_str()); + unsigned month_end = atoi(line_extract[5].str().c_str()) - 1; + unsigned day_of_month_end = atoi(line_extract[6].str().c_str()); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_calendar_date); + extract_skip(line_extract, 7, toadd); + toadd.set_syear(year_start); + toadd.set_eyear(year_end); + toadd.set_smon(month_start); + toadd.set_emon(month_end); + toadd.set_smday(day_of_month_start); + toadd.set_emday(day_of_month_end); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::calendar_date].push_front(toadd); + continue; + } + } + { // exception day -1 + std::smatch line_extract; + if (std::regex_search(line, line_extract, n_th_day_of_month_extractor)) { + std::list time_intervals = + extract_timerange(line, 2, line_extract); + unsigned day_of_month = atoi(line_extract[1].str().c_str()); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_day); + toadd.set_smday(day_of_month); + toadd.set_emday(day_of_month); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::month_day].push_front(toadd); + continue; + } + } + { // exception day -1 + std::smatch line_extract; + if (std::regex_search(line, line_extract, + n_th_day_of_month_range_extractor)) { + std::list time_intervals = + extract_timerange(line, 4, line_extract); + unsigned day_of_month_start = atoi(line_extract[1].str().c_str()); + unsigned day_of_month_end = atoi(line_extract[2].str().c_str()); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_day); + extract_skip(line_extract, 3, toadd); + toadd.set_smday(day_of_month_start); + toadd.set_emday(day_of_month_end); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + 
new_tr->CopyFrom(tr); + } + + _exceptions[daterange::month_day].push_front(toadd); + continue; + } + } + { // exception monday 3 00:00-24:00 + std::smatch line_extract; + if (std::regex_search(line, line_extract, n_th_day_of_week_extractor)) { + std::list time_intervals = + extract_timerange(line, 3, line_extract); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_week_day); + unsigned week_day_index = + day_to_index.find(line_extract[1].str())->second; + int day_month_index = atoi(line_extract[2].str().c_str()); + toadd.set_swday(week_day_index); + toadd.set_ewday(week_day_index); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + toadd.set_swday_offset(day_month_index); + toadd.set_ewday_offset(day_month_index); + + _exceptions[daterange::week_day].push_front(toadd); + continue; + } + } + { // exception monday 3 - thursday 4 / 2 00:00-24:00 + std::smatch line_extract; + if (std::regex_search(line, line_extract, + n_th_day_of_week_range_extractor)) { + std::list time_intervals = + extract_timerange(line, 6, line_extract); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_week_day); + extract_skip(line_extract, 5, toadd); + unsigned week_day_index_start = + day_to_index.find(line_extract[1].str())->second; + int day_month_index_start = atoi(line_extract[2].str().c_str()); + unsigned week_day_index_end = + day_to_index.find(line_extract[3].str())->second; + int day_month_index_end = atoi(line_extract[4].str().c_str()); + toadd.set_swday(week_day_index_start); + toadd.set_ewday(week_day_index_end); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + toadd.set_swday_offset(day_month_index_start); + toadd.set_ewday_offset(day_month_index_end); + + _exceptions[daterange::week_day].push_front(toadd); + continue; + } + } + { // exception thursday -1 november 00:00-24:00 + std::smatch line_extract; + if (std::regex_search(line, line_extract, + 
n_th_day_of_week_of_month_extractor)) { + std::list time_intervals = + extract_timerange(line, 4, line_extract); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_week_day); + unsigned month_index = + month_to_index.find(line_extract[3].str())->second; + unsigned week_day_index = + day_to_index.find(line_extract[1].str())->second; + int day_month_index = atoi(line_extract[2].str().c_str()); + toadd.set_smon(month_index); + toadd.set_emon(month_index); + toadd.set_swday(week_day_index); + toadd.set_ewday(week_day_index); + toadd.set_swday_offset(day_month_index); + toadd.set_ewday_offset(day_month_index); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + } + + _exceptions[daterange::month_week_day].push_front(toadd); + continue; + } + } + { // exception tuesday 1 april - friday 2 may / 6 00:00-24:00 + std::smatch line_extract; + if (std::regex_search(line, line_extract, + n_th_day_of_week_of_month_range_extractor)) { + std::list time_intervals = + extract_timerange(line, 8, line_extract); + Daterange toadd; + toadd.set_type(Daterange_TypeRange_month_week_day); + unsigned month_index_start = + month_to_index.find(line_extract[3].str())->second; + unsigned week_day_index_start = + day_to_index.find(line_extract[1].str())->second; + int day_month_index_start = atoi(line_extract[2].str().c_str()); + unsigned month_index_end = + month_to_index.find(line_extract[6].str())->second; + unsigned week_day_index_end = + day_to_index.find(line_extract[4].str())->second; + int day_month_index_end = atoi(line_extract[5].str().c_str()); + extract_skip(line_extract, 7, toadd); + toadd.set_smon(month_index_start); + toadd.set_emon(month_index_end); + toadd.set_swday(week_day_index_start); + toadd.set_ewday(week_day_index_end); + toadd.set_swday_offset(day_month_index_start); + toadd.set_ewday_offset(day_month_index_end); + for (auto& tr : time_intervals) { + auto* new_tr = toadd.add_timerange(); + new_tr->CopyFrom(tr); + 
} + + _exceptions[daterange::month_week_day].push_front(toadd); + continue; + } + } + { + std::smatch line_extract; + if (std::regex_search(line, line_extract, exclude_extractor)) { + for (std::string field : line_extract) { + if (field == line_extract[0]) { + continue; + } + if (field.empty()) { + continue; + } + if (field[0] == ',') { + _exclude.insert(field.substr(1)); + } else { + _exclude.insert(field); + } + } + continue; + } + } + std::cerr << "no match " << line << std::endl; + } + + applier.add_object(conf_tp); + _result = + com::centreon::engine::timeperiod::timeperiods[conf_tp.timeperiod_name()]; +} + +std::ostream& operator<<(std::ostream& s, + const std::set& to_dump) { + for (const std::string& elem : to_dump) { + s << ' ' << elem; + } + return s; +} + +static constexpr std::array day_label{ + "sunday", "monday", "tuesday", "wednesday", + "thursday", "friday", "saturday"}; + +static std::ostream& operator<<(std::ostream& s, + const std::list& tr) { + s << '('; + for (auto& t : tr) { + s << t.DebugString() << ", "; + } + s << ')'; + return s; +} + +static std::ostream& operator<<(std::ostream& s, + const std::list& dr) { + s << '('; + for (auto& d : dr) { + s << d.DebugString() << ", "; + } + s << ')'; + return s; +} + +static std::ostream& operator<<( + std::ostream& s, + const std::array, 7>& timeranges) { + s << '['; + for (unsigned day_ind = 0; day_ind < 7; ++day_ind) + s << '{' << day_label[day_ind] << ", " << timeranges[day_ind] << "},"; + s << ']'; + return s; +} + +static std::ostream& operator<<( + std::ostream& s, + const std::array, + daterange::daterange_types>& dateranges) { + s << '['; + for (unsigned day_ind = 0; day_ind < daterange::daterange_types; ++day_ind) + s << '{' << day_label[day_ind] << ", " << dateranges[day_ind] << "},"; + s << ']'; + return s; +} + +static bool operator==( + const std::set& excl1, + const std::unordered_multimap& excl2) { + if (excl1.size() != excl2.size()) { + std::cerr << "Exclude arrays have not the same 
size." << std::endl; + return false; + } + return true; +} + +static bool operator==( + const std::array, 7>& timerange1, + const std::array, 7>& timerange2) { + auto check_timeranges = [](const std::string_view day, auto& day1, + auto& day2) -> bool { + if (day1.size() != day2.size()) { + std::cerr << day << " timeranges have not the same size: first size: " + << day1.size() << " ; second size: " << day2.size() + << std::endl; + return false; + } + for (auto& tr2 : day2) { + bool found = false; + for (auto& tr1 : day1) { + if (tr1.range_start() == tr2.get_range_start() && + tr1.range_end() == tr2.get_range_end()) { + found = true; + break; + } + } + if (!found) { + std::cerr << day << " timeranges are not the same." << std::endl; + return false; + } + } + return true; + }; + for (int32_t i = 0; i < 7; i++) { + if (!check_timeranges(day_label[i], timerange1[i], timerange2[i])) + return false; + } + return true; +} + +static bool operator==( + const std::array, 7>& timerange1, + const DaysArray& timerange2) { + auto check_timeranges = [](const std::string_view day, auto& day1, + auto& day2) -> bool { + if (static_cast(day1.size()) != day2.size()) { + std::cerr << "sunday timeranges have not the same size." << std::endl; + return false; + } + for (auto& tr2 : day2) { + bool found = false; + for (auto& tr1 : day1) { + if (tr1.range_start() == tr2.range_start() && + tr1.range_end() == tr2.range_end()) { + found = true; + break; + } + } + if (!found) { + std::cerr << day << " timeranges are not the same." 
<< std::endl; + return false; + } + } + return true; + }; + if (!check_timeranges("sunday", timerange1[0], timerange2.sunday()) || + !check_timeranges("monday", timerange1[1], timerange2.monday()) || + !check_timeranges("tuesday", timerange1[2], timerange2.tuesday()) || + !check_timeranges("wednesday", timerange1[3], timerange2.wednesday()) || + !check_timeranges("thursday", timerange1[4], timerange2.thursday()) || + !check_timeranges("friday", timerange1[5], timerange2.friday()) || + !check_timeranges("saturday", timerange1[6], timerange2.saturday())) + return false; + return true; +} + +static bool operator==(const std::set& exclude1, + const configuration::StringSet& exclude2) { + if (static_cast(exclude1.size()) != exclude2.data().size()) { + std::cerr << "exclude arrays have not the same size " << exclude1.size() + << " <> " << exclude2.data().size() << std::endl; + return false; + } + for (auto& s : exclude1) { + bool found = false; + for (auto& ss : exclude2.data()) + if (ss == s) { + found = true; + break; + } + if (!found) { + std::cerr << "exclude sets do not contain the same strings." << std::endl; + return false; + } + } + return true; +} + +static bool operator!=(const std::set& exclude1, + const configuration::StringSet& exclude2) { + return !(exclude1 == exclude2); +} + +static bool operator==(const Daterange& dr1, const engine::daterange& dr2) { + if (static_cast(dr1.type()) != + static_cast(dr2.get_type())) { + std::cerr << "Dateranges not of the same type." 
<< std::endl; + return false; + } + bool retval = + dr1.syear() == dr2.get_syear() && dr1.smon() == dr2.get_smon() && + dr1.smday() == dr2.get_smday() && dr1.swday() == dr2.get_swday() && + dr1.swday_offset() == dr2.get_swday_offset() && + dr1.eyear() == dr2.get_eyear() && dr1.emon() == dr2.get_emon() && + dr1.emday() == dr2.get_emday() && dr1.ewday() == dr2.get_ewday() && + dr1.ewday_offset() == dr2.get_ewday_offset(); + + return retval; +} + +static bool operator==( + const std::array, + configuration::daterange::daterange_types>& exc1, + const std::array, daterange::daterange_types>& + exc2) { + auto compare_dateranges = + [](int32_t idx, const std::list& lst1, + const std::list& lst2) -> bool { + for (auto& dr1 : lst1) { + bool found = false; + for (auto& dr2 : lst2) { + if (dr1 == dr2) { + found = true; + break; + } + } + if (!found) { + std::cerr << "Dateranges at index " << idx + << " are not equals in exception arrays" << std::endl; + return false; + } + } + return true; + }; + for (uint32_t idx = 0; idx < exc1.size(); idx++) { + if (!compare_dateranges(idx, exc1[idx], exc2[idx])) + return false; + } + return true; +} + +static bool operator==( + const std::array, + configuration::daterange::daterange_types>& exc1, + const configuration::ExceptionArray& exc2) { + auto it_exc1 = exc1.begin(); + auto compare_dateranges = + [](const std::string_view& name, + const std::list& lst, + const google::protobuf::RepeatedPtrField& + rep) -> bool { + for (auto& dr1 : lst) { + bool found = false; + for (auto& dr2 : rep) { + found = MessageDifferencer::Equals(dr1, dr2); + if (found) + break; + } + if (!found) { + std::cerr << "Dateranges '" << name + << "' are not equals in exception arrays" << std::endl; + return false; + } + } + return true; + }; + if (!compare_dateranges("calendar_date", *it_exc1, exc2.calendar_date())) + return false; + ++it_exc1; + if (!compare_dateranges("month_date", *it_exc1, exc2.month_date())) + return false; + ++it_exc1; + if 
(!compare_dateranges("month_day", *it_exc1, exc2.month_day())) + return false; + ++it_exc1; + if (!compare_dateranges("month_week_day", *it_exc1, exc2.month_week_day())) + return false; + ++it_exc1; + if (!compare_dateranges("week_day", *it_exc1, exc2.week_day())) + return false; + return true; +} + +static std::ostream& operator<<(std::ostream& s, + const configuration::StringSet& exclude) { + s << exclude.DebugString(); + return s; +} + +bool time_period_comparator::is_equal() const { + if (name != _conf_tp.timeperiod_name()) { + std::cerr << "different name: " << name << " <> " + << _conf_tp.timeperiod_name() << std::endl; + return false; + } + if (alias != _conf_tp.alias()) { + std::cerr << "different alias: " << alias << " <> " << _conf_tp.alias() + << std::endl; + return false; + } + + if (!(_timeranges == _conf_tp.timeranges())) { + std::cerr << "timeranges difference" << std::endl; + std::cerr << "_timeranges=" << _timeranges << std::endl; + std::cerr << "_conf_tp.timeranges= " << _conf_tp.timeranges().DebugString() + << std::endl; + return false; + } + + if (!(_exceptions == _conf_tp.exceptions())) { + std::cerr << "exception difference" << std::endl; + std::cerr << "_exceptions= " << _exceptions << std::endl; + std::cerr << "_conf_tp.exceptions= " << _conf_tp.exceptions().DebugString() + << std::endl; + return false; + } + + if (_exclude != _conf_tp.exclude()) { + std::cerr << "exception exclude" << std::endl; + std::cerr << "_exclude=" << _exclude << std::endl; + std::cerr << "_conf_tp.exclude=" << _conf_tp.exclude() << std::endl; + return false; + } + + return true; +} + +bool time_period_comparator::is_result_equal() const { + if (name != _result->get_name()) { + std::cerr << "different name: " << name << " <> " << _result->get_name() + << std::endl; + return false; + } + if (alias != _result->get_alias()) { + std::cerr << "different alias: " << alias << " <> " << _result->get_alias() + << std::endl; + return false; + } + + if (!(_timeranges == 
_result->days)) { + std::cerr << "timeranges difference" << std::endl; + // std::cerr << "_timeranges= " << _timeranges << std::endl; + std::cerr << "_conf_tp.timeranges= " << _conf_tp.timeranges().DebugString() + << std::endl; + return false; + } + + if (!(_exceptions == _result->exceptions)) { + std::cerr << "exception difference" << std::endl; + // std::cerr << "_exceptions= " << _exceptions << std::endl; + std::cerr << "_conf_tp.exceptions= " << _conf_tp.exceptions().DebugString() + << std::endl; + return false; + } + + if (!(_exclude == _result->get_exclusions())) { + std::cerr << "exception exclude" << std::endl; + std::cerr << "_exclude=" << _exclude << std::endl; + std::cerr << "_conf_tp.exclude=" << _conf_tp.exclude() << std::endl; + return false; + } + + return true; +} + +std::list time_period_comparator::extract_timerange( + const std::string& line_content, + uint32_t offset, + const std::smatch& datas) { + std::list ret; + for (; offset < datas.size(); ++offset) { + std::smatch range; + std::string ranges = datas[offset].str(); + if (ranges.empty()) { + continue; + } + if (std::regex_search(ranges, range, range_extractor)) { + configuration::Timerange t; + t.set_range_start(atoi(range[1].str().c_str()) * 3600 + + atoi(range[2].str().c_str()) * 60); + t.set_range_end(atoi(range[3].str().c_str()) * 3600 + + atoi(range[4].str().c_str()) * 60); + ret.push_back(std::move(t)); + } else { + std::cerr << "fail to parse timerange: " << line_content << std::endl; + } + } + return ret; +} + +} // namespace com::centreon::engine::configuration + +std::vector> parse_timeperiods_cfg( + const std::string& file_path) { + std::vector> ret; + + std::ifstream f(file_path); + std::string line; + + bool wait_time_period_begin = true; + + std::vector current; + while (!f.eof()) { + std::getline(f, line); + + if (line.empty() || line[0] == '#') { + continue; + } + + if (wait_time_period_begin) { + wait_time_period_begin = + line.find("define timeperiod {") == 
std::string::npos; + } else { + if (line[0] == '}') { + wait_time_period_begin = true; + ret.push_back(current); + current.clear(); + continue; + } + + absl::StripAsciiWhitespace(&line); + current.push_back(std::move(line)); + } + } + + return ret; +} + +std::vector> file_content = + parse_timeperiods_cfg("tests/timeperiods.cfg"); + +class timeperiod_config_parser_test + : public ::testing::TestWithParam> { + protected: + public: + static void SetUpTestSuite() { pb_config.Clear(); } + static void TearDownTestSuite(){}; + + protected: + void SetUp() override {} + + void TearDown() override {} +}; + +INSTANTIATE_TEST_SUITE_P(timeperiod_config_parser_test, + timeperiod_config_parser_test, + ::testing::ValuesIn(file_content)); + +TEST_P(timeperiod_config_parser_test, VerifyParserContent) { + const std::vector period_content = GetParam(); + + configuration::Timeperiod conf_tp; + configuration::timeperiod_helper conf_tp_hlp(&conf_tp); + + for (const std::string& to_parse : period_content) { + std::string_view line_view = absl::StripAsciiWhitespace(to_parse); + if (line_view[0] == '#') + continue; + std::vector v = + absl::StrSplit(line_view, absl::MaxSplits(absl::ByAnyChar(" \t"), 1), + absl::SkipWhitespace()); + if (v.size() != 2) + abort(); + + std::string_view key = absl::StripAsciiWhitespace(v[0]); + std::string_view value = absl::StripAsciiWhitespace(v[1]); + bool retval = false; + /* particular cases with hook */ + retval = conf_tp_hlp.hook(key, value); + if (!retval) + retval = conf_tp_hlp.set(key, value); + if (!retval) { + std::cout << "Unable to parse <<" << to_parse << ">>" << std::endl; + abort(); + } + } + + time_period_comparator comparator(conf_tp, period_content); + + ASSERT_TRUE(comparator.is_equal()); + ASSERT_TRUE(comparator.is_result_equal()); +} diff --git a/engine/tests/configuration/tag.cc b/engine/tests/configuration/tag.cc index 1ac709690e9..0bd66438c8e 100644 --- a/engine/tests/configuration/tag.cc +++ b/engine/tests/configuration/tag.cc @@ 
-19,7 +19,6 @@ #include "common/engine_legacy_conf/tag.hh" #include - #include "common/engine_legacy_conf/object.hh" #include "helper.hh" diff --git a/engine/tests/custom_vars/pbextcmd.cc b/engine/tests/custom_vars/pbextcmd.cc new file mode 100644 index 00000000000..dafcf08e824 --- /dev/null +++ b/engine/tests/custom_vars/pbextcmd.cc @@ -0,0 +1,111 @@ +/** + * Copyright 2005 - 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/commands/command.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/macros.hh" +#include "com/centreon/engine/macros/grab_host.hh" +#include "common/engine_conf/command_helper.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/message_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; + +class PbCustomVar : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + 
+// Given simple command (without connector) applier already applied with +// all objects created. +// When the command is removed from the configuration, +// Then the command is totally removed. +TEST_F(PbCustomVar, UpdateHostCustomVar) { + configuration::applier::command cmd_aply; + configuration::applier::host hst_aply; + configuration::applier::contact cnt_aply; + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("base_centreon_ping"); + cmd.set_command_line( + "$USER1$/check_icmp -H $HOSTADDRESS$ -n $_HOSTPACKETNUMBER$ -w " + "$_HOSTWARNING$ -c $_HOSTCRITICAL$ $CONTACTNAME$"); + cmd_aply.add_object(cmd); + + configuration::Contact cnt; + configuration::contact_helper cnt_hlp(&cnt); + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt.set_host_notification_period("24x7"); + cnt.set_service_notification_period("24x7"); + cnt_aply.add_object(cnt); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("hst_test"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + hst_hlp.insert_customvariable("_PACKETNUMBER", "42"); + hst_hlp.insert_customvariable("_WARNING", "200,20%"); + hst_hlp.insert_customvariable("_CRITICAL", "400,50%"); + hst.set_check_command("base_centreon_ping"); + hst.mutable_contacts()->add_data("user"); + hst_aply.add_object(hst); + + command_map::iterator cmd_found{ + commands::command::commands.find("base_centreon_ping")}; + ASSERT_NE(cmd_found, commands::command::commands.end()); + ASSERT_TRUE(pb_config.commands().size() == 1); + + host_map::iterator hst_found{engine::host::hosts.find("hst_test")}; + ASSERT_NE(hst_found, engine::host::hosts.end()); + ASSERT_TRUE(pb_config.hosts().size() == 1); + + hst_aply.expand_objects(pb_config); + configuration::error_cnt err; + hst_aply.resolve_object(hst, err); + ASSERT_EQ(hst_found->second->custom_variables.size(), 3); + nagios_macros* 
macros(get_global_macros()); + grab_host_macros_r(macros, hst_found->second.get()); + std::string processed_cmd( + hst_found->second->get_check_command_ptr()->process_cmd(macros)); + ASSERT_EQ(processed_cmd, + "/check_icmp -H 127.0.0.1 -n 42 -w 200,20% -c 400,50% user"); + + char* msg = strdupa("hst_test;PACKETNUMBER;44"); + cmd_change_object_custom_var(CMD_CHANGE_CUSTOM_HOST_VAR, msg); + grab_host_macros_r(macros, hst_found->second.get()); + std::string processed_cmd2( + hst_found->second->get_check_command_ptr()->process_cmd(macros)); + ASSERT_EQ(processed_cmd2, + "/check_icmp -H 127.0.0.1 -n 44 -w 200,20% -c 400,50% user"); +} diff --git a/engine/tests/downtimes/pbdowntime.cc b/engine/tests/downtimes/pbdowntime.cc new file mode 100644 index 00000000000..80bee03b9fa --- /dev/null +++ b/engine/tests/downtimes/pbdowntime.cc @@ -0,0 +1,111 @@ +/** + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include + +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/downtimes/downtime_manager.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::downtimes; + +class DowntimeExternalCommand : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { + downtime_manager::instance().clear_scheduled_downtimes(); + deinit_config_state(); + } +}; + +TEST_F(DowntimeExternalCommand, AddUnkownHostDowntime) { + set_time(20000); + + time_t now = time(nullptr); + + std::stringstream s; + s << "SCHEDULE_HOST_DOWNTIME;test_srv;" << now << ";" << now + << ";1;0;7200;admin;host"; + + ASSERT_EQ(cmd_schedule_downtime(CMD_SCHEDULE_HOST_DOWNTIME, now, + const_cast(s.str().c_str())), + ERROR); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); +} + +TEST_F(DowntimeExternalCommand, AddHostDowntime) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_srv"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + + set_time(20000); + + time_t now = time(nullptr); + + std::string query{ + fmt::format("test_srv;{};{};1;0;1;admin;host", now, now + 1)}; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + ASSERT_EQ(cmd_schedule_downtime(CMD_SCHEDULE_HOST_DOWNTIME, now, + const_cast(query.c_str())), + OK); + + ASSERT_EQ(1u, downtime_manager::instance().get_scheduled_downtimes().size()); + ASSERT_EQ( + downtime_manager::instance().get_scheduled_downtimes().begin()->first, + 20000); + ASSERT_EQ(downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + 
->second->host_id(), + 1); + ASSERT_EQ(downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_duration(), + 1); + ASSERT_EQ(downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_end_time(), + 20001); + ASSERT_EQ(downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->handle(), + OK); + set_time(20001); + ASSERT_EQ(downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->handle(), + OK); + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); +} diff --git a/engine/tests/downtimes/pbdowntime_finder.cc b/engine/tests/downtimes/pbdowntime_finder.cc new file mode 100644 index 00000000000..3161d997009 --- /dev/null +++ b/engine/tests/downtimes/pbdowntime_finder.cc @@ -0,0 +1,371 @@ +/** + * Copyright 2019-2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include "com/centreon/engine/downtimes/downtime_finder.hh" + +#include "com/centreon/clib.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/downtimes/downtime.hh" +#include "com/centreon/engine/downtimes/downtime_manager.hh" +#include "com/centreon/engine/downtimes/service_downtime.hh" +#include "helper.hh" +#include "test_engine.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::downtimes; + +class DowntimeFinderFindMatchingAllTest : public TestEngine { + public: + void SetUp() override { + configuration::error_cnt err; + init_config_state(); + configuration::Contact ctc{ + new_pb_configuration_contact("admin", false, "a")}; + configuration::applier::contact ctc_aply; + ctc_aply.add_object(ctc); + + configuration::Host hst{new_pb_configuration_host("test_host", "admin", 1)}; + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Host hst1{ + new_pb_configuration_host("first_host", "admin", 12)}; + hst_aply.add_object(hst1); + + configuration::Host hst2{ + new_pb_configuration_host("other_host", "admin", 2)}; + hst_aply.add_object(hst2); + + hst_aply.resolve_object(hst, err); + hst_aply.resolve_object(hst1, err); + + configuration::Service svc{ + new_pb_configuration_service("first_host", "test_service", "admin", 8)}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + configuration::Service svc1{ + new_pb_configuration_service("first_host", "other_svc", "admin", 9)}; + svc_aply.add_object(svc1); + + configuration::Service svc2{ + new_pb_configuration_service("test_host", "new_svc", "admin", 10)}; + svc_aply.add_object(svc2); + + configuration::Service svc3{ + 
new_pb_configuration_service("test_host", "new_svc1", "admin", 11)}; + svc_aply.add_object(svc3); + + configuration::Service svc4{ + new_pb_configuration_service("test_host", "new_svc2", "admin", 12)}; + svc_aply.add_object(svc4); + + svc_aply.resolve_object(svc, err); + svc_aply.resolve_object(svc1, err); + svc_aply.resolve_object(svc2, err); + svc_aply.resolve_object(svc3, err); + svc_aply.resolve_object(svc4, err); + + downtime_manager::instance().clear_scheduled_downtimes(); + downtime_manager::instance().initialize_downtime_data(); + new_downtime(1, 1, 10, 234567891, 734567892, 1, 0, 84, "other_author", + "test_comment"); + // OK + new_downtime(2, 12, 8, 123456789, 134567892, 1, 0, 42, "test_author", + "other_comment"); + // OK + new_downtime(3, 12, 9, 123456789, 345678921, 0, 1, 42, "", "test_comment"); + new_downtime(4, 1, 10, 123456789, 345678921, 0, 1, 84, "test_author", ""); + // OK + new_downtime(5, 1, 11, 123456789, 134567892, 1, 1, 42, "test_author", + "test_comment"); + // OK + new_downtime(6, 1, 12, 7265943625, 7297479625, 1, 2, 31626500, "out_author", + "out_comment"); + _dtf = std::make_unique( + downtime_manager::instance().get_scheduled_downtimes()); + } + + void TearDown() override { + _dtf.reset(); + downtime_manager::instance().clear_scheduled_downtimes(); + downtime_manager::instance().initialize_downtime_data(); + deinit_config_state(); + } + + void new_downtime(uint64_t downtime_id, + const uint64_t host_id, + const uint64_t service_id, + time_t start, + time_t end, + int fixed, + unsigned long triggered_by, + int32_t duration, + std::string const& author, + std::string const& comment) { + downtime_manager::instance().schedule_downtime( + downtime::service_downtime, host_id, service_id, start, author.c_str(), + comment.c_str(), start, end, fixed, triggered_by, duration, + &downtime_id); + } + + protected: + std::unique_ptr _dtf; + downtime* dtl; + downtime_finder::criteria_set criterias; + downtime_finder::result_set result; + 
downtime_finder::result_set expected; +}; + +// Given a downtime_finder object with a NULL downtime list +// When find_matching_all() is called +// Then an empty result_set is returned +TEST_F(DowntimeFinderFindMatchingAllTest, NullDowntimeList) { + std::multimap> map; + downtime_finder local_dtf(map); + criterias.push_back(downtime_finder::criteria("host", "test_host")); + result = local_dtf.find_matching_all(criterias); + ASSERT_TRUE(result.empty()); +} + +// Given a downtime_finder object with the test downtime list +// And a downtime of the test list has a null host_name +// When find_matching_all() is called with criteria ("host", "anyhost") +// Then an empty result_set is returned +TEST_F(DowntimeFinderFindMatchingAllTest, NullHostNotFound) { + criterias.push_back(downtime_finder::criteria("host", "anyhost")); + result = _dtf->find_matching_all(criterias); + ASSERT_TRUE(result.empty()); +} + +// Given a downtime finder object with the test downtime list +// And a downtime of the test list has a null service_description +// When find_matching_all() is called with criteria ("service", "anyservice") +// Then an empty result_set is returned +TEST_F(DowntimeFinderFindMatchingAllTest, NullServiceNotFound) { + criterias.push_back(downtime_finder::criteria("service", "anyservice")); + result = _dtf->find_matching_all(criterias); + ASSERT_TRUE(result.empty()); +} + +// Given a downtime finder object with the test downtime list +// And a downtime the test list has a null service_description +// When find_matching_all() is called with the criteria ("service", "") +// Then the result_set contains the downtime +TEST_F(DowntimeFinderFindMatchingAllTest, NullServiceFound) { + criterias.push_back(downtime_finder::criteria("service", "")); + result = _dtf->find_matching_all(criterias); + ASSERT_TRUE(result.empty()); +} + +// Given a downtime_finder object with the test downtime list +// And a downtime of the test list has a null author +// When find_matching_all() is called 
with the criteria ("author", +// "anyauthor") +// Then an empty result_set is returned +TEST_F(DowntimeFinderFindMatchingAllTest, NullAuthorNotFound) { + criterias.push_back(downtime_finder::criteria("author", "anyauthor")); + result = _dtf->find_matching_all(criterias); + ASSERT_TRUE(result.empty()); +} + +// Given a downtime_finder object with the test downtime list +// And a downtime of the test list has a null author +// When find_matching_all() is called with the criteria ("author", "") +// Then the result_set contains the downtime +TEST_F(DowntimeFinderFindMatchingAllTest, NullAuthorFound) { + criterias.push_back(downtime_finder::criteria("author", "")); + result = _dtf->find_matching_all(criterias); + expected.push_back(3); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// And a downtime of the test list has a null comment +// When find_matching_all() is called with the criteria ("comment", +// "anycomment") Then an empty result_set is returned +TEST_F(DowntimeFinderFindMatchingAllTest, NullCommentNotFound) { + criterias.push_back(downtime_finder::criteria("comment", "anycomment")); + result = _dtf->find_matching_all(criterias); + ASSERT_TRUE(result.empty()); +} + +// Given a downtime_finder object with the test downtime list +// And a downtime of the test list has a null comment +// When find_matching_all() is called with the criteria ("comment", "") +// Then the result_set contains the downtime +TEST_F(DowntimeFinderFindMatchingAllTest, NullCommentFound) { + criterias.push_back(downtime_finder::criteria("comment", "")); + result = _dtf->find_matching_all(criterias); + expected.push_back(4); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("host", "test_host") +// Then all downtimes of host /test_host/ are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleHosts) { + 
criterias.push_back(downtime_finder::criteria("host", "test_host")); + result = _dtf->find_matching_all(criterias); + expected.push_back(4); + expected.push_back(5); + expected.push_back(1); + expected.push_back(6); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("service", +// "test_service") Then all downtimes of service /test_service/ are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleServices) { + criterias.push_back(downtime_finder::criteria("service", "test_service")); + result = _dtf->find_matching_all(criterias); + expected.push_back(2); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("start", "123456789") +// Then all downtimes with 123456789 as start time are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleStart) { + criterias.push_back(downtime_finder::criteria("start", "123456789")); + result = _dtf->find_matching_all(criterias); + expected.push_back(2); + expected.push_back(3); + expected.push_back(4); + expected.push_back(5); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("end", "134567892") +// Then all downtimes with 134567892 as end time are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleEnd) { + criterias.push_back(downtime_finder::criteria("end", "134567892")); + result = _dtf->find_matching_all(criterias); + expected.push_back(2); + expected.push_back(5); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("fixed", "0") +// Then all downtimes that are not fixed are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleFixed) { + 
criterias.push_back(downtime_finder::criteria("fixed", "0")); + result = _dtf->find_matching_all(criterias); + expected.push_back(3); + expected.push_back(4); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("triggered_by", "0") +// Then all downtimes that are not triggered by other downtimes are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleTriggeredBy) { + criterias.push_back(downtime_finder::criteria("triggered_by", "0")); + result = _dtf->find_matching_all(criterias); + expected.push_back(2); + expected.push_back(1); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("duration", "42") +// Then all downtimes with a duration of 42 seconds are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleDuration) { + criterias.push_back(downtime_finder::criteria("duration", "42")); + result = _dtf->find_matching_all(criterias); + expected.push_back(2); + expected.push_back(3); + expected.push_back(5); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("author", +// "test_author") Then all downtimes from author /test_author/ are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleAuthor) { + criterias.push_back(downtime_finder::criteria("author", "test_author")); + result = _dtf->find_matching_all(criterias); + expected.push_back(2); + expected.push_back(4); + expected.push_back(5); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("comment", +// "test_comment") Then all downtimes with comment "test_comment" are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleComment) { + 
criterias.push_back(downtime_finder::criteria("comment", "test_comment")); + result = _dtf->find_matching_all(criterias); + expected.push_back(3); + expected.push_back(5); + expected.push_back(1); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When findMatchinAll() is called with criterias ("author", "test_author"), +// ("duration", "42") and ("comment", "test_comment") Then all downtimes +// matching the criterias are returned +TEST_F(DowntimeFinderFindMatchingAllTest, MultipleCriterias) { + criterias.push_back(downtime_finder::criteria("author", "test_author")); + criterias.push_back(downtime_finder::criteria("duration", "42")); + criterias.push_back(downtime_finder::criteria("comment", "test_comment")); + result = _dtf->find_matching_all(criterias); + expected.push_back(5); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("end", "4102441200") +// Then all downtimes with 4102441200 as end time are returned +TEST_F(DowntimeFinderFindMatchingAllTest, OutOfRangeEnd) { + criterias.push_back(downtime_finder::criteria("end", "4102441200")); + result = _dtf->find_matching_all(criterias); + expected.push_back(6); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("start", +// "4102441200") +// Then all downtimes with 4102441200 as end time are returned +TEST_F(DowntimeFinderFindMatchingAllTest, OutOfRangeStart) { + criterias.push_back(downtime_finder::criteria("start", "4102441200")); + result = _dtf->find_matching_all(criterias); + expected.push_back(6); + ASSERT_EQ(result, expected); +} + +// Given a downtime_finder object with the test downtime list +// When find_matching_all() is called with the criteria ("duration", +// "4102441200") Then all downtimes with 31622400 as end time are returned 
+TEST_F(DowntimeFinderFindMatchingAllTest, OutOfRangeDuration) { + criterias.push_back(downtime_finder::criteria("duration", "31622400")); + result = _dtf->find_matching_all(criterias); + expected.push_back(6); + ASSERT_EQ(result, expected); +} diff --git a/engine/tests/enginerpc/pbenginerpc.cc b/engine/tests/enginerpc/pbenginerpc.cc new file mode 100644 index 00000000000..de2275e057e --- /dev/null +++ b/engine/tests/enginerpc/pbenginerpc.cc @@ -0,0 +1,1813 @@ +/** + * Copyright 2019-2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include + +#include + +#include +#include + +#include "com/centreon/engine/host.hh" + +#include "com/centreon/engine/enginerpc.hh" + +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/anomalydetection.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/command_manager.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/comment.hh" +#include "com/centreon/engine/configuration/applier/anomalydetection.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostgroup.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/servicegroup.hh" +#include "com/centreon/engine/downtimes/downtime_manager.hh" +#include "com/centreon/engine/events/loop.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "com/centreon/engine/version.hh" +#include "common/engine_conf/hostgroup_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::downtimes; +using namespace com::centreon::engine::configuration; +using namespace com::centreon::engine::configuration::applier; + +class EngineRpc : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + // Do not unload this in the tear down function, it is done by the + // other unload function... 
:-( + + pb_config.set_execute_service_checks(true); + + configuration::error_cnt err; + /* contact */ + configuration::applier::contact ct_aply; + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct, err); + + /* hosts */ + configuration::Host hst_child; + configuration::host_helper hst_child_hlp(&hst_child); + configuration::applier::host hst_aply2; + hst_child.set_host_name("child_host"); + hst_child.set_address("127.0.0.1"); + hst_child_hlp.hook("parents", "test_host"); + hst_child.set_host_id(42); + hst_aply2.add_object(hst_child); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst = new_pb_configuration_host("test_host", "admin"); + configuration::applier::host hst_aply; + hst.set_host_id(12); + hst_aply.add_object(hst); + + hst_aply.resolve_object(hst, err); + hst_aply2.resolve_object(hst_child, err); + + ASSERT_EQ(engine::host::hosts.size(), 2u); + + host_map::iterator child = engine::host::hosts.find("child_host"); + host_map::iterator parent = engine::host::hosts.find("test_host"); + + ASSERT_EQ(child->second->parent_hosts.size(), 1u); + ASSERT_EQ(parent->second->child_hosts.size(), 1u); + + /* hostgroup */ + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::applier::hostgroup hg_aply; + hg.set_hostgroup_name("test_hg"); + hg_hlp.hook("members", "test_host"); + hg_aply.add_object(hg); + hg_aply.expand_objects(pb_config); + hg_aply.resolve_object(hg, err); + + /* service */ + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin")}; + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("/bin/sh -c 'echo \"test_cmd\"'"); + svc.set_check_command("cmd"); + configuration::applier::command cmd_aply; + + configuration::applier::service svc_aply; + 
svc_aply.add_object(svc); + cmd_aply.add_object(cmd); + + svc_aply.resolve_object(svc, err); + + configuration::Anomalydetection ad{new_pb_configuration_anomalydetection( + "test_host", "test_ad", "admin", + 12, // service_id of the anomalydetection + 13, // service_id of the dependent service + "/tmp/thresholds_status_change.json")}; + configuration::applier::anomalydetection ad_aply; + ad_aply.add_object(ad); + + ad_aply.resolve_object(ad, err); + + host_map const& hm{engine::host::hosts}; + _host = hm.begin()->second; + _host->set_current_state(engine::host::state_down); + _host->set_state_type(checkable::hard); + _host->set_acknowledgement(AckType::NONE); + _host->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + for (auto& p : sm) { + std::shared_ptr svc = p.second; + if (svc->service_id() == 12) + _ad = std::static_pointer_cast(svc); + else + _svc = svc; + } + _svc->set_current_state(engine::service::state_critical); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + contact_map const& cm{engine::contact::contacts}; + _contact = cm.begin()->second; + + /* servicegroup */ + configuration::Servicegroup sg; + configuration::servicegroup_helper sg_hlp(&sg); + sg.set_servicegroup_name("test_sg"); + configuration::applier::servicegroup sg_aply; + sg_hlp.hook("members", "test_host,test_svc"); + + sg_aply.add_object(sg); + sg_aply.expand_objects(pb_config); + sg_aply.resolve_object(sg, err); + } + + void TearDown() override { + _host.reset(); + _svc.reset(); + _ad.reset(); + deinit_config_state(); + } + + std::list execute(const std::string& command) { + std::list retval; + char path[1024]; + std::ostringstream oss; + oss << "tests/rpc_client_engine " << command; + + FILE* fp = popen(oss.str().c_str(), "r"); + while (fgets(path, sizeof(path), fp) != nullptr) { + size_t count = strlen(path); + if (count > 0) + --count; + 
retval.push_back(std::string(path, count)); + } + pclose(fp); + return retval; + } + + void CreateFile(std::string const& filename, std::string const& content) { + std::ofstream oss(filename); + oss << content; + } + + protected: + std::shared_ptr _host; + std::shared_ptr _contact; + std::shared_ptr _svc; + std::shared_ptr _ad; +}; + +/* calls command manager in another thread (function is used for unit tests) + */ +static void call_command_manager(std::unique_ptr& th, + std::condition_variable* condvar, + std::mutex* mutex, + bool* continuerunning) { + auto fn = [continuerunning, mutex, condvar]() { + std::unique_lock lock(*mutex); + for (;;) { + command_manager::instance().execute(); + if (condvar->wait_for( + lock, std::chrono::milliseconds(50), + [continuerunning]() -> bool { return *continuerunning; })) { + break; + } + } + }; + + th.reset(new std::thread(fn)); +} + +TEST_F(EngineRpc, StartStop) { + enginerpc erpc("0.0.0.0", 40001); + ASSERT_NO_THROW(erpc.shutdown()); +} + +TEST_F(EngineRpc, GetVersion) { + std::ostringstream oss; + oss << "GetVersion: major: " << CENTREON_ENGINE_VERSION_MAJOR; + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("GetVersion"); + ASSERT_EQ(output.front(), oss.str()); + if (output.size() == 2u) { + oss.str(""); + oss << "minor: " << CENTREON_ENGINE_VERSION_MINOR; + ASSERT_EQ(output.back(), oss.str()); + } else { + oss.str(""); + oss << "patch: " << CENTREON_ENGINE_VERSION_PATCH; + ASSERT_EQ(output.back(), oss.str()); + } + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetHost) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + std::vector vectests = { + "GetHost", + fmt::format("Host name: {}", _host->name()), + fmt::format("Host alias: {}", _host->get_alias()), + fmt::format("Host id: {}", _host->host_id()), + "Host address: 127.0.0.1", + "Host state: 1", + "Host period: test_period"}; + 
_host->set_current_state(engine::host::state_down); + _host->set_check_period("test_period"); + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(fmt::format("GetHost byhostid {}", _host->host_id())); + auto output2 = execute(fmt::format("GetHost byhostname {}", _host->name())); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + std::vector result_ids(output.size()); + std::copy(output.begin(), output.end(), result_ids.begin()); + + ASSERT_EQ(vectests, result_ids); + + std::vector result_names(output2.size()); + std::copy(output2.begin(), output2.end(), result_names.begin()); + + ASSERT_EQ(vectests, result_names); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetWrongHost) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + std::vector vectests = {"GetHostByHostName rpc engine failed", + "GetHost", + "Host name: ", + "Host alias: ", + "Host id: 0", + "Host address: ", + "Host state: 0", + "Host period: "}; + _host->set_current_state(engine::host::state_down); + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetHost byhostname wrong_host"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + std::vector result_names(output.size()); + std::copy(output.begin(), output.end(), result_names.begin()); + + ASSERT_EQ(vectests, result_names); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetService) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + std::vector vectests = {"GetService", + "Host id: 12", + "Service id: 13", + "Host name: test_host", + "Serv desc: test_svc", + "Service state: 2", + "Service period: test_period"}; + _svc->set_current_state(engine::service::state_critical); + 
_svc->set_check_period("test_period"); + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetService bynames test_host test_svc"); + auto output2 = execute("GetService byids 12 13"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + std::vector result_names(output.size()); + std::copy(output.begin(), output.end(), result_names.begin()); + + ASSERT_EQ(vectests, result_names); + + std::vector result_ids(output2.size()); + std::copy(output2.begin(), output2.end(), result_ids.begin()); + + ASSERT_EQ(vectests, result_ids); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetWrongService) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + std::vector vectests = {"GetService rpc engine failed", + "GetService", + "Host id: 0", + "Service id: 0", + "Host name: ", + "Serv desc: ", + "Service state: 0", + "Service period: "}; + + _svc->set_current_state(engine::service::state_critical); + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetService bynames wrong_host wrong_svc"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + std::vector result_names(output.size()); + std::copy(output.begin(), output.end(), result_names.begin()); + + ASSERT_EQ(vectests, result_names); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetContact) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + std::vector vectests = {"GetContact", "admin", "admin", + "admin@centreon.com"}; + _contact->set_email("admin@centreon.com"); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetContact admin"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + 
th->join(); + + std::vector result_names(output.size()); + std::copy(output.begin(), output.end(), result_names.begin()); + + ASSERT_EQ(vectests, result_names); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetWrongContact) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + std::vector vectests = {"GetContact rpc engine failed", + "GetContact", "", "", ""}; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetContact wrong_contactadmin"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + std::vector result_names(output.size()); + std::copy(output.begin(), output.end(), result_names.begin()); + + ASSERT_EQ(vectests, result_names); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetHostsCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetHostsCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "2"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetContactsCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetContactsCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "1"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetServicesCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, 
&continuerunning); + + auto output = execute("GetServicesCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "2"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetServiceGroupsCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetServiceGroupsCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "1"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetContactGroupsCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetContactGroupsCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "0"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetHostGroupsCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetHostGroupsCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "1"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetServiceDependenciesCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetServiceDependenciesCount"); + { + std::lock_guard lock(mutex); + 
continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "0"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, GetHostDependenciesCount) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("GetHostDependenciesCount"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(output.back(), "0"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, AddHostComment) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(comment::comments.size(), 0u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = + execute("AddHostComment test_host test-admin mycomment 1 10000"); + ASSERT_EQ(comment::comments.size(), 1u); + + output = execute("DeleteComment 1"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, AddServiceComment) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(comment::comments.size(), 0u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute( + "AddServiceComment test_host test_svc test-admin mycomment 1 10000"); + ASSERT_EQ(comment::comments.size(), 1u); + + output = execute("DeleteComment 1"); + ASSERT_EQ(comment::comments.size(), 0u); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, DeleteComment) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + 
std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(comment::comments.size(), 0u); + // create comment + std::ostringstream oss; + oss << "my comment "; + auto cmt = std::make_shared( + comment::host, comment::user, _host->host_id(), 0, 10000, "test-admin", + oss.str(), true, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("DeleteComment 1"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, DeleteWrongComment) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + std::vector vectests{ + "DeleteComment failed.", + "DeleteComment 0", + }; + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("DeleteComment 999"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + std::vector results(output.size()); + std::copy(output.begin(), output.end(), results.begin()); + + ASSERT_EQ(vectests, results); + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, DeleteAllHostComments) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + // first test + ASSERT_EQ(comment::comments.size(), 0u); + // create some comments + for (int i = 0; i < 10; ++i) { + std::string cmt_str{fmt::format("my host comment {}", i)}; + auto cmt = std::make_shared( + comment::host, comment::user, _host->host_id(), 0, 10000, "test-admin", + cmt_str, true, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + } + ASSERT_EQ(comment::comments.size(), 
10u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute( + fmt::format("DeleteAllHostComments byhostid {}", _host->host_id())); + + ASSERT_EQ(comment::comments.size(), 0u); + // second test + for (int i = 0; i < 10; ++i) { + std::string cmt_str{fmt::format("my host comment {}", i)}; + auto cmt = std::make_shared( + comment::host, comment::user, _host->host_id(), 0, 10000, "test-admin", + cmt_str, true, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + } + ASSERT_EQ(comment::comments.size(), 10u); + output = execute( + fmt::format("DeleteAllHostComments byhostname {}", _host->name())); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, DeleteAllServiceComments) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + auto svc = _svc; + auto hit = engine::host::hosts_by_id.find(svc->host_id()); + auto hst = hit->second; + + // first test + ASSERT_EQ(comment::comments.size(), 0u); + // create some comments + for (int i = 0; i < 10; ++i) { + std::string cmt_str{fmt::format("my service comment {} on service ({}, {})", + i, svc->host_id(), svc->service_id())}; + auto cmt = std::make_shared( + comment::service, comment::user, svc->host_id(), svc->service_id(), + 10000, "test-admin", cmt_str, true, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + } + ASSERT_EQ(comment::comments.size(), 10u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute(fmt::format("DeleteAllServiceComments byids {} {}", + svc->host_id(), svc->service_id())); + + ASSERT_EQ(comment::comments.size(), 0u); + // second test + for (int i = 0; i < 10; ++i) { + std::string cmt_str{fmt::format("my service comment {}", i)}; + 
auto cmt = std::make_shared( + comment::service, comment::user, svc->host_id(), svc->service_id(), + 10000, "test-admin", cmt_str, true, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + } + ASSERT_EQ(comment::comments.size(), 10u); + output = execute(fmt::format("DeleteAllServiceComments bynames {} {}", + hst->name(), svc->description())); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, RemoveHostAcknowledgement) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + oss << "my comment "; + // first test + _host->set_acknowledgement(AckType::NORMAL); + // create comment + auto cmt = std::make_shared( + comment::host, comment::acknowledgment, _host->host_id(), 0, 10000, + "test-admin", oss.str(), false, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute( + fmt::format("RemoveHostAcknowledgement byhostid {}", _host->host_id())); + + ASSERT_EQ(_host->problem_has_been_acknowledged(), false); + ASSERT_EQ(comment::comments.size(), 0u); + // second test + _host->set_acknowledgement(AckType::NORMAL); + cmt = std::make_shared( + comment::host, comment::acknowledgment, _host->host_id(), 0, 10000, + "test-admin", oss.str(), false, comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + + output = execute( + fmt::format("RemoveHostAcknowledgement byhostname {}", _host->name())); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(_host->problem_has_been_acknowledged(), false); + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + 
+TEST_F(EngineRpc, RemoveServiceAcknowledgement) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + auto svc = _svc; + auto hit = engine::host::hosts_by_id.find(svc->host_id()); + auto hst = hit->second; + std::string ack_str{"my comment"}; + _svc->set_acknowledgement(AckType::NORMAL); + auto cmt = std::make_shared( + comment::service, comment::acknowledgment, hst->host_id(), + svc->service_id(), 10000, "test-admin", ack_str, false, comment::external, + false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = + execute("RemoveServiceAcknowledgement bynames test_host test_svc"); + + ASSERT_EQ(comment::comments.size(), 0u); + ASSERT_EQ(svc->problem_has_been_acknowledged(), false); + + svc->set_acknowledgement(AckType::NORMAL); + cmt = std::make_shared(comment::service, comment::acknowledgment, + hst->host_id(), svc->service_id(), 10000, + "test-admin", ack_str, false, + comment::external, false, 0); + comment::comments.insert({cmt->get_comment_id(), cmt}); + + output = execute("RemoveServiceAcknowledgement byids 12 13"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(comment::comments.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, AcknowledgementHostProblem) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(_host->problem_has_been_acknowledged(), false); + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(fmt::format( + "AcknowledgementHostProblem {} admin test 1 0 0", _host->name())); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(_host->problem_has_been_acknowledged(), true); + 
erpc.shutdown(); +} + +TEST_F(EngineRpc, AcknowledgementServiceProblem) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(_svc->problem_has_been_acknowledged(), false); + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute( + "AcknowledgementServiceProblem test_host test_svc admin test 1 0 0"); + ; + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(_svc->problem_has_been_acknowledged(), true); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleHostDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleHostDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + // we fake a wrong test with an undefined parameter + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleHostDowntime 0", output.back()); + oss.str(""); + + // we make the right test + oss << "ScheduleHostDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(1u, downtime_manager::instance().get_scheduled_downtimes().size()); + ASSERT_EQ("ScheduleHostDowntime 1", output.back()); + + // deleting the current downtime + uint64_t id = downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_downtime_id(); + oss.str(""); + oss << "DeleteDowntime " << id; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, 
downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleWrongHostDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleHostDowntime test_host " << now + 1 << " " << now + << " 0 0 10000 admin host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + // we fake a wrong test with an + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleHostDowntime 0", output.back()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleServiceDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleServiceDowntime test_host test_svc " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleServiceDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleServiceDowntime test_host test_svc " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(2u, downtime_manager::instance() + .get_scheduled_downtimes() + .size()); // one for service and one for ano + ASSERT_EQ("ScheduleServiceDowntime 1", output.back()); + + oss.str(""); + uint64_t id = downtime_manager::instance() + 
.get_scheduled_downtimes() + .begin() + ->second->get_downtime_id(); + oss << "DeleteDowntime " << id; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleWrongServiceDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleServiceDowntime test_host test_svc " << now + 1 << " " << now + << " 0 0 10000 admin host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleServiceDowntime 0", output.back()); + oss.str(""); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleHostServicesDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + std::ostringstream oss2; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleHostServicesDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleHostServicesDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleHostServicesDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + 
output = execute(oss.str()); + ASSERT_EQ(3u, downtime_manager::instance() + .get_scheduled_downtimes() + .size()); // one for service and one for ano + ASSERT_EQ("ScheduleHostServicesDowntime 1", output.back()); + + oss2 << "DeleteServiceDowntimeFull test_host undef undef undef" + " undef undef undef undef undef"; + + output = execute(oss2.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleHostGroupHostsDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleHostGroupHostsDowntime test_hg " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleHostGroupHostsDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleHostGroupHostsDowntime test_hg " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(1u, downtime_manager::instance().get_scheduled_downtimes().size()); + uint64_t id = downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_downtime_id(); + oss.str(""); + oss << "DeleteDowntime " << id; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleHostGroupServicesDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + 
std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + set_time(20000); + time_t now = time(nullptr); + + oss << "ScheduleHostGroupServicesDowntime test_hg " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleHostGroupServicesDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleHostGroupServicesDowntime test_hg " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(3u, downtime_manager::instance() + .get_scheduled_downtimes() + .size()); // one for service and one for ano + ASSERT_EQ("ScheduleHostGroupServicesDowntime 1", output.back()); + + oss.str(""); + oss << "DeleteServiceDowntimeFull test_host undef undef undef" + " undef undef undef undef undef"; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleServiceGroupHostsDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + std::ostringstream oss2; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + set_time(20000); + time_t now = time(nullptr); + oss << "ScheduleServiceGroupHostsDowntime test_sg " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleServiceGroupHostsDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleServiceGroupHostsDowntime test_sg " << now << " " << now + 1 + << " 0 0 10000 admin 
host " << now; + output = execute(oss.str()); + ASSERT_EQ(1u, downtime_manager::instance().get_scheduled_downtimes().size()); + ASSERT_EQ("ScheduleServiceGroupHostsDowntime 1", output.back()); + + // deleting current downtime + uint64_t id = downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_downtime_id(); + oss.str(""); + oss << "DeleteDowntime " << id; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleServiceGroupServicesDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + std::ostringstream oss2; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + set_time(20000); + time_t now = time(nullptr); + oss << "ScheduleServiceGroupServicesDowntime test_sg " << now << " " + << now + 1 << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleServiceGroupServicesDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleServiceGroupServicesDowntime test_sg " << now << " " + << now + 1 << " 0 0 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(2u, downtime_manager::instance() + .get_scheduled_downtimes() + .size()); // one for service and one for ano + ASSERT_EQ("ScheduleServiceGroupServicesDowntime 1", output.back()); + + uint64_t id = downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_downtime_id(); + oss2 << "DeleteDowntime " << id; + output = execute(oss2.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, 
downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleAndPropagateHostDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + oss << "ScheduleAndPropagateHostDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 undef host " << now; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ("ScheduleAndPropagateHostDowntime 0", output.back()); + oss.str(""); + + oss << "ScheduleAndPropagateHostDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(2u, downtime_manager::instance().get_scheduled_downtimes().size()); + ASSERT_EQ("ScheduleAndPropagateHostDowntime 1", output.back()); + + oss.str(""); + oss << "DeleteDowntimeByHostName test_host undef undef undef"; + output = execute(oss.str()); + + oss.str(""); + oss << "DeleteDowntimeByHostName child_host undef undef undef"; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ScheduleAndPropagateTriggeredHostDowntime) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + std::ostringstream oss; + std::ostringstream oss2; + bool continuerunning = false; + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + + set_time(20000); + time_t now = time(nullptr); + oss << "ScheduleHostDowntime test_host " << now << " " << now + 1 + << " 0 0 10000 admin host " << now; + + 
call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute(oss.str()); + ASSERT_EQ(1u, downtime_manager::instance().get_scheduled_downtimes().size()); + uint64_t id = downtime_manager::instance() + .get_scheduled_downtimes() + .begin() + ->second->get_downtime_id(); + oss.str(""); + oss << "ScheduleAndPropagateTriggeredHostDowntime test_host " << now << " " + << now + 1 << " 0 " << id << " 10000 admin host " << now; + output = execute(oss.str()); + ASSERT_EQ(3u, downtime_manager::instance().get_scheduled_downtimes().size()); + + oss.str(""); + oss << "DeleteDowntime " << id; + output = execute(oss.str()); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(0u, downtime_manager::instance().get_scheduled_downtimes().size()); + erpc.shutdown(); +} + +TEST_F(EngineRpc, DelayHostNotification) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + auto hst = engine::host::hosts_by_id.find(12)->second; + + ASSERT_EQ(hst->get_next_notification(), 0); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("DelayHostNotification byhostid 12 20"); + ASSERT_EQ(hst->get_next_notification(), 20); + + output = execute( + fmt::format("DelayHostNotification byhostname {} 10", hst->name())); + ASSERT_EQ(hst->get_next_notification(), 10); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, DelayServiceNotification) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(_host->get_next_notification(), 0); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("DelayServiceNotification byids 12 13 20"); + 
ASSERT_EQ(_svc->get_next_notification(), 20); + + output = execute("DelayServiceNotification bynames test_host test_svc 10"); + ASSERT_EQ(_svc->get_next_notification(), 10); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeHostObjectIntVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = + execute(fmt::format("ChangeHostObjectIntVar {} 0 1 1.0", _host->name())); + ASSERT_EQ(_host->check_interval(), 1u); + output = + execute(fmt::format("ChangeHostObjectIntVar {} 1 1 2.0", _host->name())); + ASSERT_EQ(_host->retry_interval(), 2u); + output = + execute(fmt::format("ChangeHostObjectIntVar {} 2 1 1.0", _host->name())); + ASSERT_EQ(_host->max_check_attempts(), 1); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeServiceObjectIntVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute( + "ChangeServiceObjectIntVar" + " test_host test_svc 0 1 1.0"); + ASSERT_EQ(_svc->check_interval(), 1u); + output = execute( + "ChangeServiceObjectIntVar" + " test_host test_svc 1 1 2.0"); + ASSERT_EQ(_svc->retry_interval(), 2u); + output = execute( + "ChangeServiceObjectIntVar" + " test_host test_svc 2 1 1.0"); + ASSERT_EQ(_svc->max_check_attempts(), 1); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeContactObjectIntVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + 
std::mutex mutex; + bool continuerunning = false; + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute( + "ChangeContactObjectIntVar" + " admin 0 1 1.0"); + ASSERT_EQ(_contact->get_modified_attributes(), 1u); + output = execute( + "ChangeContactObjectIntVar" + " admin 1 2 1.0"); + ASSERT_EQ(_contact->get_modified_host_attributes(), 2u); + output = execute( + "ChangeContactObjectIntVar" + " admin 2 3 1.0"); + ASSERT_EQ(_contact->get_modified_service_attributes(), 3u); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeHostObjectCharVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(engine::timeperiod::timeperiods.size(), 1u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("ChangeHostObjectCharVar null 0 cmd"); + ASSERT_EQ(output.back(), "ChangeHostObjectCharVar 1"); + output = + execute(fmt::format("ChangeHostObjectCharVar {} 1 cmd", _host->name())); + ASSERT_EQ(_host->event_handler(), "cmd"); + output = + execute(fmt::format("ChangeHostObjectCharVar {} 2 cmd", _host->name())); + ASSERT_EQ(_host->check_command(), "cmd"); + output = + execute(fmt::format("ChangeHostObjectCharVar {} 3 24x7", _host->name())); + ASSERT_EQ(_host->check_period(), "24x7"); + output = + execute(fmt::format("ChangeHostObjectCharVar {} 4 24x7", _host->name())); + ASSERT_EQ(_host->notification_period(), "24x7"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeServiceObjectCharVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + auto svc = _svc; + auto hit = 
engine::host::hosts_by_id.find(svc->host_id()); + auto hst = hit->second; + + ASSERT_EQ(engine::timeperiod::timeperiods.size(), 1u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute("ChangeServiceObjectCharVar null null 0 cmd"); + ASSERT_EQ(output.back(), "ChangeServiceObjectCharVar 1"); + output = execute(fmt::format("ChangeServiceObjectCharVar {} {} 1 cmd", + hst->name(), svc->description())); + ASSERT_EQ(_svc->event_handler(), "cmd"); + output = execute(fmt::format("ChangeServiceObjectCharVar {} {} 2 cmd", + hst->name(), svc->description())); + ASSERT_EQ(_svc->check_command(), "cmd"); + output = execute(fmt::format("ChangeServiceObjectCharVar {} {} 3 24x7", + hst->name(), svc->description())); + ASSERT_EQ(_svc->check_period(), "24x7"); + output = execute(fmt::format("ChangeServiceObjectCharVar {} {} 4 24x7", + hst->name(), svc->description())); + ASSERT_EQ(_svc->notification_period(), "24x7"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeContactObjectCharVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + ASSERT_EQ(engine::timeperiod::timeperiods.size(), 1u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + + auto output = execute( + "ChangeContactObjectCharVar" + " admin 0 24x7"); + ASSERT_EQ(_contact->get_host_notification_period(), "24x7"); + output = execute( + "ChangeContactObjectCharVar" + " admin 1 24x7"); + ASSERT_EQ(_contact->get_service_notification_period(), "24x7"); + + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeHostObjectCustomVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = 
false; + + ASSERT_EQ(_host->custom_variables.size(), 0u); + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute(fmt::format( + "ChangeHostObjectCustomVar {} test_var test_val", _host->name())); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(_host->custom_variables.size(), 1u); + ASSERT_EQ(_host->custom_variables["TEST_VAR"].value(), "test_val"); + _host->custom_variables.clear(); + ASSERT_EQ(_host->custom_variables.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeServiceObjectCustomVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + + _svc->custom_variables.clear(); + ASSERT_EQ(_svc->custom_variables.size(), 0u); + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute( + "ChangeServiceObjectCustomVar" + " test_host test_svc test_var test_val"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + + ASSERT_EQ(_svc->custom_variables.size(), 1u); + ASSERT_EQ(_svc->custom_variables["TEST_VAR"].value(), "test_val"); + _svc->custom_variables.clear(); + ASSERT_EQ(_svc->custom_variables.size(), 0u); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ChangeContactObjectCustomVar) { + enginerpc erpc("0.0.0.0", 40001); + std::unique_ptr th; + std::condition_variable condvar; + std::mutex mutex; + bool continuerunning = false; + ASSERT_EQ(_contact->get_custom_variables().size(), 0u); + + call_command_manager(th, &condvar, &mutex, &continuerunning); + auto output = execute( + "ChangeContactObjectCustomVar" + " admin test_var test_val"); + { + std::lock_guard lock(mutex); + continuerunning = true; + } + condvar.notify_one(); + th->join(); + ASSERT_EQ(_contact->get_custom_variables().size(), 1u); + ASSERT_EQ(_contact->get_custom_variables()["TEST_VAR"].value(), "test_val"); + + 
erpc.shutdown(); +} + +TEST_F(EngineRpc, ProcessServiceCheckResult) { + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("ProcessServiceCheckResult test_host test_svc 0"); + ASSERT_EQ(output.size(), 1u); + ASSERT_EQ(output.front(), "ProcessServiceCheckResult: 0"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ProcessServiceCheckResultBadHost) { + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("ProcessServiceCheckResult \"\" test_svc 0"); + ASSERT_EQ(output.size(), 2u); + ASSERT_EQ(output.front(), "ProcessServiceCheckResult failed."); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ProcessServiceCheckResultBadService) { + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("ProcessServiceCheckResult test_host \"\" 0"); + ASSERT_EQ(output.size(), 2u); + ASSERT_EQ(output.front(), "ProcessServiceCheckResult failed."); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ProcessHostCheckResult) { + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("ProcessHostCheckResult test_host 0"); + ASSERT_EQ(output.size(), 1u); + ASSERT_EQ(output.front(), "ProcessHostCheckResult: 0"); + erpc.shutdown(); +} + +TEST_F(EngineRpc, ProcessHostCheckResultBadHost) { + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("ProcessHostCheckResult '' 0"); + ASSERT_EQ(output.size(), 2u); + ASSERT_EQ(output.front(), "ProcessHostCheckResult failed."); + erpc.shutdown(); +} + +TEST_F(EngineRpc, NewThresholdsFile) { + CreateFile( + "/tmp/thresholds_file.json", + "[{\n \"host_id\": \"12\",\n \"service_id\": \"12\",\n \"metric_name\": " + "\"metric\",\n \"predict\": [{\n \"timestamp\": 50000,\n \"upper\": " + "84,\n \"lower\": 74,\n \"fit\": 79\n }, {\n \"timestamp\": 100000,\n " + "\"upper\": 10,\n \"lower\": 5,\n \"fit\": 51.5\n }, {\n \"timestamp\": " + "150000,\n \"upper\": 100,\n \"lower\": 93,\n \"fit\": 96.5\n }, {\n " + "\"timestamp\": 200000,\n \"upper\": 100,\n \"lower\": 97,\n \"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper\": 100,\n \"lower\": " + 
"21,\n \"fit\": 60.5\n }\n]}]"); + enginerpc erpc("0.0.0.0", 40001); + auto output = execute("NewThresholdsFile /tmp/thresholds_file.json"); + ASSERT_EQ(output.size(), 1u); + ASSERT_EQ(output.front(), "NewThresholdsFile: 0"); + command_manager::instance().execute(); + ASSERT_EQ(_ad->get_thresholds_file(), "/tmp/thresholds_file.json"); +} diff --git a/engine/tests/external_commands/pbanomalydetection.cc b/engine/tests/external_commands/pbanomalydetection.cc new file mode 100644 index 00000000000..374587e7805 --- /dev/null +++ b/engine/tests/external_commands/pbanomalydetection.cc @@ -0,0 +1,142 @@ +/** + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include "com/centreon/engine/anomalydetection.hh" + +#include + +#include "../test_engine.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/command_manager.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/anomalydetection.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/events/loop.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "com/centreon/engine/version.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class ADExtCmd : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + configuration::error_cnt err; + configuration::applier::contact ct_aply; + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct, err); + + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc{new_pb_configuration_service("test_host", "test_svc", "admin")}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + configuration::Anomalydetection ad{new_pb_configuration_anomalydetection( + "test_host", "test_ad", "admin", + 12, // service_id of the anomalydetection + 13, // service_id of the dependent service + "/tmp/thresholds_status_change.json")}; + configuration::applier::anomalydetection ad_aply; + ad_aply.add_object(ad); + + ad_aply.resolve_object(ad, err); + + host_map const& hm{engine::host::hosts}; + _host = hm.begin()->second; + 
_host->set_current_state(engine::host::state_up); + _host->set_state_type(checkable::hard); + _host->set_acknowledgement(AckType::NONE); + _host->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + for (auto& p : sm) { + std::shared_ptr svc = p.second; + if (svc->service_id() == 12) + _ad = std::static_pointer_cast(svc); + else + _svc = svc; + } + } + + void TearDown() override { + _host.reset(); + _svc.reset(); + _ad.reset(); + deinit_config_state(); + } + + std::list execute(const std::string& command) { + std::list retval; + char path[1024]; + std::ostringstream oss; + oss << "tests/rpc_client " << command; + + FILE* fp = popen(oss.str().c_str(), "r"); + while (fgets(path, sizeof(path), fp) != nullptr) { + size_t count = strlen(path); + if (count > 0) + --count; + retval.push_back(std::string(path, count)); + } + pclose(fp); + return retval; + } + + void CreateFile(std::string const& filename, std::string const& content) { + std::ofstream oss(filename); + oss << content; + } + + protected: + std::shared_ptr _host; + std::shared_ptr _svc; + std::shared_ptr _ad; +}; + +TEST_F(ADExtCmd, NewThresholdsFile) { + CreateFile( + "/tmp/thresholds_file.json", + "[{\n \"host_id\": \"12\",\n \"service_id\": \"12\",\n \"metric_name\": " + "\"metric\",\n \"predict\": [{\n \"timestamp\": 50000,\n \"upper\": " + "84,\n \"lower\": 74,\n \"fit\": 79\n }, {\n \"timestamp\": 100000,\n " + "\"upper\": 10,\n \"lower\": 5,\n \"fit\": 51.5\n }, {\n \"timestamp\": " + "150000,\n \"upper\": 100,\n \"lower\": 93,\n \"fit\": 96.5\n }, {\n " + "\"timestamp\": 200000,\n \"upper\": 100,\n \"lower\": 97,\n \"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper\": 100,\n \"lower\": " + "21,\n \"fit\": 60.5\n }\n]}]"); + std::ostringstream oss; + oss << '[' << std::time(nullptr) << ']' + << " NEW_THRESHOLDS_FILE;/tmp/thresholds_file.json"; + process_external_command(oss.str().c_str()); + checks::checker::instance().reap(); + 
ASSERT_EQ(_ad->get_thresholds_file(), "/tmp/thresholds_file.json"); +} diff --git a/engine/tests/external_commands/pbhost.cc b/engine/tests/external_commands/pbhost.cc new file mode 100644 index 00000000000..4d18edb280f --- /dev/null +++ b/engine/tests/external_commands/pbhost.cc @@ -0,0 +1,150 @@ +/** + * Copyright 2005 - 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "common/engine_conf/host_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class HostExternalCommand : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { deinit_config_state(); } +}; + +TEST_F(HostExternalCommand, AddHostDowntime) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_srv"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + + set_time(20000); + time_t now = time(nullptr); + + testing::internal::CaptureStdout(); + + for (int i = 0; i < 3; i++) { + 
now += 300; + std::string cmd{"test_srv;1;|"}; + set_time(now); + cmd_process_host_check_result(CMD_PROCESS_HOST_CHECK_RESULT, now, + const_cast(cmd.c_str())); + checks::checker::instance().reap(); + } + + std::string const& out{testing::internal::GetCapturedStdout()}; + std::cout << out << std::endl; + ASSERT_NE(out.find("PASSIVE HOST CHECK"), std::string::npos); + ASSERT_NE(out.find("HOST ALERT"), std::string::npos); +} + +TEST_F(HostExternalCommand, AddHostDowntimeByIpAddress) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_srv"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + + set_time(20000); + time_t now = time(nullptr); + + testing::internal::CaptureStdout(); + for (int i = 0; i < 3; i++) { + now += 300; + std::string cmd{"127.0.0.1;1;|"}; + set_time(now); + cmd_process_host_check_result(CMD_PROCESS_HOST_CHECK_RESULT, now, + const_cast(cmd.c_str())); + checks::checker::instance().reap(); + } + + std::string const& out{testing::internal::GetCapturedStdout()}; + + ASSERT_NE(out.find("PASSIVE HOST CHECK"), std::string::npos); + ASSERT_NE(out.find("HOST ALERT"), std::string::npos); +} + +TEST_F(HostExternalCommand, AddHostComment) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Host hst2; + configuration::host_helper hst2_hlp(&hst2); + + hst.set_host_name("test_srv"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + + hst2.set_host_name("test_srv2"); + hst2.set_address("127.0.0.1"); + hst2.set_host_id(2); + ASSERT_NO_THROW(hst_aply.add_object(hst2)); + + set_time(20000); + time_t now = time(nullptr); + + std::string cmd_com1{"test_srv;1;user;this is a first comment"}; + std::string cmd_com2{"test_srv;1;user;this is a second comment"}; + std::string cmd_com3{"test_srv;1;user;this is a 
third comment"}; + std::string cmd_com4{"test_srv;1;user;this is a fourth comment"}; + std::string cmd_com5{"test_srv2;1;user;this is a fifth comment"}; + std::string cmd_del{"1"}; + std::string cmd_del_last{"5"}; + std::string cmd_del_all{"test_srv"}; + + cmd_add_comment(CMD_ADD_HOST_COMMENT, now, + const_cast(cmd_com1.c_str())); + ASSERT_EQ(comment::comments.size(), 1u); + cmd_add_comment(CMD_ADD_HOST_COMMENT, now, + const_cast(cmd_com2.c_str())); + ASSERT_EQ(comment::comments.size(), 2u); + cmd_add_comment(CMD_ADD_HOST_COMMENT, now, + const_cast(cmd_com3.c_str())); + ASSERT_EQ(comment::comments.size(), 3u); + cmd_add_comment(CMD_ADD_HOST_COMMENT, now, + const_cast(cmd_com4.c_str())); + ASSERT_EQ(comment::comments.size(), 4u); + cmd_add_comment(CMD_ADD_HOST_COMMENT, now, + const_cast(cmd_com5.c_str())); + ASSERT_EQ(comment::comments.size(), 5u); + cmd_delete_comment(CMD_DEL_HOST_COMMENT, const_cast(cmd_del.c_str())); + ASSERT_EQ(comment::comments.size(), 4u); + cmd_delete_all_comments(CMD_DEL_ALL_HOST_COMMENTS, + const_cast(cmd_del_all.c_str())); + ASSERT_EQ(comment::comments.size(), 1u); + cmd_delete_comment(CMD_DEL_HOST_COMMENT, + const_cast(cmd_del_last.c_str())); + ASSERT_EQ(comment::comments.size(), 0u); +} diff --git a/engine/tests/external_commands/pbservice.cc b/engine/tests/external_commands/pbservice.cc new file mode 100644 index 00000000000..6ed78ec3acf --- /dev/null +++ b/engine/tests/external_commands/pbservice.cc @@ -0,0 +1,227 @@ +/** + * Copyright 2005 - 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/events/loop.hh" +#include "com/centreon/process_manager.hh" +#include "common/engine_conf/service_helper.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class ServiceExternalCommand : public ::testing::Test { + public: + void SetUp() override { init_config_state(); } + + void TearDown() override { + deinit_config_state(); + events::loop::instance().clear(); + } +}; + +TEST_F(ServiceExternalCommand, AddServiceDowntime) { + configuration::error_cnt err; + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::applier::command cmd_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + cmd.set_command_line("/usr/bin/echo 1"); + cmd_aply.add_object(cmd); + + hst.set_check_command("cmd"); + svc.set_check_command("cmd"); + + hst_aply.add_object(hst); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + hst_aply.expand_objects(pb_config); + 
svc_aply.expand_objects(pb_config); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + set_time(20000); + time_t now = time(nullptr); + + std::string str{"test_host;test_description;1;|"}; + + testing::internal::CaptureStdout(); + cmd_process_service_check_result(CMD_PROCESS_SERVICE_CHECK_RESULT, now, + const_cast(str.c_str())); + checks::checker::instance().reap(); + + std::string const& out{testing::internal::GetCapturedStdout()}; + + ASSERT_NE(out.find("PASSIVE SERVICE CHECK"), std::string::npos); +} + +TEST_F(ServiceExternalCommand, AddServiceDowntimeByHostIpAddress) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::applier::command cmd_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.3"); + hst.set_host_id(1); + + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + cmd.set_command_line("/usr/bin/echo 1"); + cmd_aply.add_object(cmd); + + hst.set_check_command("cmd"); + svc.set_check_command("cmd"); + + hst_aply.add_object(hst); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + hst_aply.expand_objects(pb_config); + svc_aply.expand_objects(pb_config); + + configuration::error_cnt err; + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + set_time(20000); + time_t now = time(nullptr); + + std::string str{"127.0.0.3;test_description;1;|"}; + + testing::internal::CaptureStdout(); + cmd_process_service_check_result(CMD_PROCESS_SERVICE_CHECK_RESULT, now, + const_cast(str.c_str())); + checks::checker::instance().reap(); + + std::string const& 
out{testing::internal::GetCapturedStdout()}; + + ASSERT_NE(out.find("PASSIVE SERVICE CHECK"), std::string::npos); +} + +TEST_F(ServiceExternalCommand, AddServiceComment) { + configuration::applier::host hst_aply; + configuration::applier::service svc_aply; + configuration::applier::command cmd_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(1); + + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(3); + + cmd.set_command_line("/usr/bin/echo 1"); + cmd_aply.add_object(cmd); + + hst.set_check_command("cmd"); + svc.set_check_command("cmd"); + + hst_aply.add_object(hst); + + // We fake here the expand_object on configuration::service + svc.set_host_id(1); + + svc_aply.add_object(svc); + + hst_aply.expand_objects(pb_config); + svc_aply.expand_objects(pb_config); + + configuration::error_cnt err; + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + std::string cmd_com1{ + "test_host;test_description;1;user;this is a first comment"}; + std::string cmd_com2{ + "test_host;test_description;1;user;this is a second comment"}; + std::string cmd_com3{ + "test_host;test_description;1;user;this is a third comment"}; + std::string cmd_com4{ + "test_host;test_description;1;user;this is a fourth comment"}; + std::string cmd_del{"1"}; + std::string cmd_del_last{"5"}; + std::string cmd_del_all{"test_host;test_description"}; + + set_time(20000); + time_t now = time(nullptr); + + cmd_add_comment(CMD_ADD_SVC_COMMENT, now, + const_cast(cmd_com1.c_str())); + ASSERT_EQ(comment::comments.size(), 1u); + cmd_add_comment(CMD_ADD_SVC_COMMENT, now, + const_cast(cmd_com2.c_str())); + ASSERT_EQ(comment::comments.size(), 2u); + 
cmd_add_comment(CMD_ADD_SVC_COMMENT, now, + const_cast(cmd_com3.c_str())); + ASSERT_EQ(comment::comments.size(), 3u); + cmd_add_comment(CMD_ADD_SVC_COMMENT, now, + const_cast(cmd_com4.c_str())); + ASSERT_EQ(comment::comments.size(), 4u); + cmd_delete_comment(CMD_ADD_SVC_COMMENT, const_cast(cmd_del.c_str())); + ASSERT_EQ(comment::comments.size(), 3u); + cmd_delete_all_comments(CMD_DEL_ALL_SVC_COMMENTS, + const_cast(cmd_del_all.c_str())); + ASSERT_EQ(comment::comments.size(), 0u); +} diff --git a/engine/tests/helper.cc b/engine/tests/helper.cc index 6a9782c5bda..0a2abb48f4c 100644 --- a/engine/tests/helper.cc +++ b/engine/tests/helper.cc @@ -31,9 +31,14 @@ using namespace com::centreon::engine; using com::centreon::common::log_v2::log_v2; using log_v2_config = com::centreon::common::log_v2::config; +#ifdef LEGACY_CONF extern configuration::state* config; +#else +extern configuration::State pb_config; +#endif -void init_config_state(void) { +#ifdef LEGACY_CONF +void init_config_state() { if (config == nullptr) config = new configuration::state; @@ -51,10 +56,36 @@ void init_config_state(void) { checks::checker::init(true); } +#else +void init_config_state() { + /* Cleanup */ + pb_config.Clear(); + + configuration::state_helper cfg_hlp(&pb_config); + pb_config.set_log_file_line(true); + pb_config.set_log_file(""); + + log_v2_config log_conf("engine-tests", + log_v2_config::logger_type::LOGGER_STDOUT, + pb_config.log_flush_period(), pb_config.log_pid(), + pb_config.log_file_line()); + + log_v2::instance().apply(log_conf); + + // Hack to instanciate the logger. 
+ configuration::applier::logging::instance().apply(pb_config); + + checks::checker::init(true); +} +#endif void deinit_config_state(void) { +#ifdef LEGACY_CONF delete config; config = nullptr; +#else + pb_config.Clear(); +#endif configuration::applier::state::instance().clear(); checks::checker::deinit(); diff --git a/engine/tests/helper.hh b/engine/tests/helper.hh index 208531351ab..efc07c6aa11 100644 --- a/engine/tests/helper.hh +++ b/engine/tests/helper.hh @@ -19,9 +19,16 @@ #ifndef CENTREON_ENGINE_TESTS_HELPER_HH_ #define CENTREON_ENGINE_TESTS_HELPER_HH_ -#include +#include "com/centreon/engine/globals.hh" +#ifdef LEGACY_CONF +#include "common/engine_legacy_conf/state.hh" +#endif +#ifdef LEGACY_CONF extern com::centreon::engine::configuration::state* config; +#else +extern com::centreon::engine::configuration::State pb_config; +#endif void init_config_state(void); void deinit_config_state(void); diff --git a/engine/tests/loop/loop.cc b/engine/tests/loop/loop.cc index 6b455818173..8a2a531952e 100644 --- a/engine/tests/loop/loop.cc +++ b/engine/tests/loop/loop.cc @@ -18,9 +18,7 @@ */ #include "com/centreon/engine/events/loop.hh" - #include - #include "../test_engine.hh" #include "../timeperiod/utils.hh" #include "com/centreon/engine/checks/checker.hh" @@ -29,8 +27,10 @@ #include "com/centreon/engine/configuration/applier/service.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/serviceescalation.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" +#endif #include "helper.hh" using namespace com::centreon; @@ -42,19 +42,35 @@ class LoopTest : public TestEngine { void SetUp() override { error_cnt err; init_config_state(); + events::loop::instance().clear(); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; ct_aply.add_object(ctct); ct_aply.expand_objects(*config); +#else + 
configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("test_host", "admin")}; +#else + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; +#endif configuration::applier::host hst_aply; hst_aply.add_object(hst); +#ifdef LEGACY_CONF configuration::service svc{ new_configuration_service("test_host", "test_svc", "admin")}; +#else + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin")}; +#endif configuration::applier::service svc_aply; svc_aply.add_object(svc); diff --git a/engine/tests/macros/macro_service.cc b/engine/tests/macros/macro_service.cc index 13befa25cf2..e1eedb81e40 100644 --- a/engine/tests/macros/macro_service.cc +++ b/engine/tests/macros/macro_service.cc @@ -47,6 +47,8 @@ using namespace com::centreon; using namespace com::centreon::engine; +using namespace std::literals; + class MacroService : public TestEngine { public: void SetUp() override { @@ -113,8 +115,8 @@ TEST_F(MacroService, ServiceMacro) { std::string out; host::hosts["test_host"]->set_current_state(host::state_up); host::hosts["test_host"]->set_has_been_checked(true); - service::services[std::make_pair("test_host", "test_svc")]->set_plugin_output( - "foo bar!"); + service::services[std::make_pair("test_host"sv, "test_svc"sv)] + ->set_plugin_output("foo bar!"); process_macros_r(mac, "$SERVICEOUTPUT:test_host:test_svc$", out, 1); ASSERT_EQ(out, "foo bar!"); } @@ -391,7 +393,7 @@ TEST_F(MacroService, ServicePerfData) { nagios_macros* mac(get_global_macros()); host::hosts["test_host"]->set_current_state(host::state_up); host::hosts["test_host"]->set_has_been_checked(true); - service::services[std::make_pair("test_host", "test_svc")]->set_perf_data( + service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_perf_data( "foo"); 
process_macros_r(mac, "$SERVICEPERFDATA:test_host:test_svc$", out, 0); ASSERT_EQ(out, "foo"); @@ -441,7 +443,7 @@ TEST_F(MacroService, ServiceExecutionTime) { nagios_macros* mac(get_global_macros()); host::hosts["test_host"]->set_current_state(host::state_up); host::hosts["test_host"]->set_has_been_checked(true); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_execution_time(20.00); process_macros_r(mac, "$SERVICEEXECUTIONTIME:test_host:test_svc$", out, 1); ASSERT_EQ(out, "20.000"); @@ -491,7 +493,7 @@ TEST_F(MacroService, ServiceLatency) { nagios_macros* mac(get_global_macros()); host::hosts["test_host"]->set_current_state(host::state_up); host::hosts["test_host"]->set_has_been_checked(true); - service::services[std::make_pair("test_host", "test_svc")]->set_latency( + service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_latency( 20.00); process_macros_r(mac, "$SERVICELATENCY:test_host:test_svc$", out, 1); ASSERT_EQ(out, "20.000"); @@ -541,7 +543,7 @@ TEST_F(MacroService, ServiceDuration) { nagios_macros* mac(get_global_macros()); host::hosts["test_host"]->set_current_state(host::state_up); host::hosts["test_host"]->set_has_been_checked(true); - service::services[std::make_pair("test_host", "test_svc")]->set_latency( + service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_latency( 20.00); process_macros_r(mac, "$SERVICEDURATION:test_host:test_svc$", out, 1); ASSERT_EQ(out, "5787d 0h 53m 20s"); @@ -590,7 +592,7 @@ TEST_F(MacroService, ServiceDurationSec) { nagios_macros* mac(get_global_macros()); host::hosts["test_host"]->set_current_state(host::state_up); host::hosts["test_host"]->set_has_been_checked(true); - service::services[std::make_pair("test_host", "test_svc")]->set_latency( + service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_latency( 20.00); process_macros_r(mac, "$SERVICEDURATIONSEC:test_host:test_svc$", out, 1); ASSERT_EQ(out, 
"500000000"); @@ -815,8 +817,8 @@ TEST_F(MacroService, LastServiceOK) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")]->set_last_time_ok( - 20); + service::services[std::make_pair("test_host"sv, "test_svc"sv)] + ->set_last_time_ok(20); process_macros_r(mac, "$LASTSERVICEOK:test_host:test_svc$", out, 1); ASSERT_EQ(out, "20"); } @@ -852,7 +854,7 @@ TEST_F(MacroService, LastServiceWarning) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_warning(30); process_macros_r(mac, "$LASTSERVICEWARNING:test_host:test_svc$", out, 1); ASSERT_EQ(out, "30"); @@ -889,7 +891,7 @@ TEST_F(MacroService, LastServiceUnknown) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_unknown(40); process_macros_r(mac, "$LASTSERVICEUNKNOWN:test_host:test_svc$", out, 1); ASSERT_EQ(out, "40"); @@ -926,7 +928,7 @@ TEST_F(MacroService, LastServiceCritical) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_critical(50); process_macros_r(mac, "$LASTSERVICECRITICAL:test_host:test_svc$", out, 1); ASSERT_EQ(out, "50"); @@ -963,7 +965,7 @@ TEST_F(MacroService, ServiceCheckCommand) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_critical(50); process_macros_r(mac, "$SERVICECHECKCOMMAND:test_host:test_svc$", out, 1); ASSERT_EQ(out, "cmd"); @@ -1000,7 +1002,7 @@ TEST_F(MacroService, ServiceDisplayName) { std::string out; nagios_macros* 
mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_critical(50); process_macros_r(mac, "$SERVICEDISPLAYNAME:test_host:test_svc$", out, 1); ASSERT_EQ(out, "test_svc"); @@ -1474,7 +1476,7 @@ TEST_F(MacroService, LongServiceOutput) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LONGSERVICEOUTPUT:test_host:test_svc$", out, 1); ASSERT_EQ(out, "test_long_output"); @@ -1512,7 +1514,7 @@ TEST_F(MacroService, ServiceNotificationID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICENOTIFICATIONID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -1550,7 +1552,7 @@ TEST_F(MacroService, ServiceEventID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICEEVENTID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -1588,7 +1590,7 @@ TEST_F(MacroService, LastServiceEventID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICEEVENTID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -1631,7 +1633,7 @@ TEST_F(MacroService, ServiceGroupNames) { std::string out; nagios_macros* mac(get_global_macros()); - 
service::services[std::make_pair("test_host", "test")] + service::services[std::make_pair("test_host"sv, "test"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICEGROUPNAMES:test_host:test$", out, 1); ASSERT_EQ(out, "test_group"); @@ -1669,7 +1671,7 @@ TEST_F(MacroService, MaxServiceAttempts) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$MAXSERVICEATTEMPTS:test_host:test_svc$", out, 1); ASSERT_EQ(out, "3"); @@ -1712,7 +1714,7 @@ TEST_F(MacroService, ServiceGroupNotes) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test")] + service::services[std::make_pair("test_host"sv, "test"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICEGROUPNOTES:test_group$", out, 1); ASSERT_EQ(out, "test_notes"); @@ -1911,7 +1913,7 @@ TEST_F(MacroService, ServiceTimeZone) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICETIMEZONE:test_host:test_svc$", out, 1); ASSERT_EQ(out, "test_time"); @@ -1949,7 +1951,7 @@ TEST_F(MacroService, LastServiceState) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICESTATE:test_host:test_svc$", out, 1); ASSERT_EQ(out, "OK"); @@ -1987,7 +1989,7 @@ TEST_F(MacroService, LastServiceStateId) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + 
service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICESTATEID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -2095,7 +2097,7 @@ TEST_F(MacroService, LastServiceProblemID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICEPROBLEMID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); diff --git a/engine/tests/macros/pbmacro.cc b/engine/tests/macros/pbmacro.cc new file mode 100644 index 00000000000..bd4435349eb --- /dev/null +++ b/engine/tests/macros/pbmacro.cc @@ -0,0 +1,1005 @@ +/* + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include "../helper.hh" +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostgroup.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/serviceescalation.hh" +#include "com/centreon/engine/configuration/applier/servicegroup.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/hostescalation.hh" +#include "com/centreon/engine/macros.hh" +#include "com/centreon/engine/macros/grab_host.hh" +#include "com/centreon/engine/macros/process.hh" +#include "com/centreon/engine/timeperiod.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/message_helper.hh" +#include "common/engine_conf/parser.hh" +#include "common/engine_conf/service_helper.hh" +#include "common/engine_conf/state.pb.h" +#include "common/engine_conf/timeperiod_helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class Macro : public TestEngine { + public: + void SetUp() override { + init_config_state(); + _tp = _creator.new_timeperiod(); + for (int i(0); i < 7; ++i) + _creator.new_timerange(0, 0, 24, 0, i); + _now = strtotimet("2016-11-24 08:00:00"); + set_time(_now); + } + + void TearDown() override { + _host.reset(); + _host2.reset(); + _host3.reset(); + _svc.reset(); 
+ deinit_config_state(); + } + + protected: + std::shared_ptr _host, _host2, _host3; + std::shared_ptr _svc; + timeperiod_creator _creator; + time_t _now; + timeperiod* _tp; +}; + +// Given host configuration without host_id +// Then the applier add_object throws an exception. +TEST_F(Macro, PbPollerName) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "poller_name=poller-test" << std::endl; + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$POLLERNAME$", out, 0); + ASSERT_EQ(out, "poller-test"); +} + +TEST_F(Macro, PbPollerId) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "poller_id=42" << std::endl; + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$POLLERID$", out, 0); + ASSERT_EQ(out, "42"); +} + +TEST_F(Macro, PbLongDateTime) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$LONGDATETIME:test_host$", out, 0); + ASSERT_EQ(out, "Tue Nov 5 01:53:20 CET 1985"); +} + +TEST_F(Macro, PbShortDateTime) { + 
configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$SHORTDATETIME:test_host$", out, 0); + ASSERT_EQ(out, "11-05-1985 01:53:20"); +} + +TEST_F(Macro, PbDate) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$DATE:test_host$", out, 0); + ASSERT_EQ(out, "11-05-1985"); +} + +TEST_F(Macro, PbTime) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TIME:test_host$", out, 0); + ASSERT_EQ(out, "01:53:20"); +} + +TEST_F(Macro, PbTimeT) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + 
process_macros_r(mac, "$TIMET:test_host$", out, 0); + ASSERT_EQ(out, "500000000"); +} + +TEST_F(Macro, PbContactName) { + configuration::applier::host hst_aply; + configuration::applier::contact cnt_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Contact cnt; + configuration::contact_helper cnt_hlp(&cnt); + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt_aply.add_object(cnt); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + fill_string_group(hst.mutable_contacts(), "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTNAME:user$", out, 1); + ASSERT_EQ(out, "user"); +} + +TEST_F(Macro, PbContactAlias) { + configuration::applier::host hst_aply; + configuration::applier::contact cnt_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Contact cnt; + configuration::contact_helper cnt_hlp(&cnt); + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt_aply.add_object(cnt); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + fill_string_group(hst.mutable_contacts(), "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTALIAS:user$", out, 1); + ASSERT_EQ(out, "user"); +} + +TEST_F(Macro, PbContactEmail) { + configuration::applier::host hst_aply; + configuration::applier::contact cnt_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Contact cnt; + 
configuration::contact_helper cnt_hlp(&cnt); + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt_aply.add_object(cnt); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + fill_string_group(hst.mutable_contacts(), "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTEMAIL:user$", out, 1); + ASSERT_EQ(out, "contact@centreon.com"); +} + +TEST_F(Macro, PbContactPager) { + configuration::applier::host hst_aply; + configuration::applier::contact cnt_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + configuration::Contact cnt; + configuration::contact_helper cnt_hlp(&cnt); + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt_aply.add_object(cnt); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + fill_string_group(hst.mutable_contacts(), "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTPAGER:user$", out, 1); + ASSERT_EQ(out, "0473729383"); +} + +TEST_F(Macro, PbAdminEmail) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "admin_email=contactadmin@centreon.com" << std::endl; + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + 
process_macros_r(mac, "$ADMINEMAIL:test_host$", out, 1); + ASSERT_EQ(out, "contactadmin@centreon.com"); +} + +TEST_F(Macro, PbAdminPager) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "admin_pager=04737293866" << std::endl; + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$ADMINPAGER:test_host$", out, 1); + ASSERT_EQ(out, "04737293866"); +} + +TEST_F(Macro, PbMainConfigFile) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$MAINCONFIGFILE:test_host$", out, 1); + ASSERT_EQ(out, "/tmp/test-config.cfg"); +} + +TEST_F(Macro, PbStatusDataFile) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "status_file=/usr/local/var/status.dat" << std::endl; + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$STATUSDATAFILE:test_host$", out, 1); + ASSERT_EQ(out, "/usr/local/var/status.dat"); +} + +TEST_F(Macro, PbRetentionDataFile) { + configuration::parser parser; + 
configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "state_retention_file=/var/log/centreon-engine/retention.dat" + << std::endl; + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$RETENTIONDATAFILE:test_host$", out, 1); + ASSERT_EQ(out, "/var/log/centreon-engine/retention.dat"); +} + +TEST_F(Macro, PbTempFile) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TEMPFILE:test_host$", out, 1); + ASSERT_EQ(out, "/tmp/centengine.tmp"); +} + +TEST_F(Macro, PbLogFile) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_file=/tmp/centengine.log" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$LOGFILE:test_host$", out, 1); + ASSERT_EQ(out, "/tmp/centengine.log"); +} + +TEST_F(Macro, PbCommandFile) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "command_file=/usr/local/var/rw/centengine.cmd" << std::endl; + ofs << 
"log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$COMMANDFILE:test_host$", out, 1); + ASSERT_EQ(out, "/usr/local/var/rw/centengine.cmd"); +} + +TEST_F(Macro, PbTempPath) { + configuration::parser parser; + configuration::State st; + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "log_file=" << std::endl; + ofs.close(); + + configuration::error_cnt err; + parser.parse("/tmp/test-config.cfg", &st, err); + configuration::applier::state::instance().apply(st, err); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TEMPPATH$", out, 0); + ASSERT_EQ(out, "/tmp"); +} + +TEST_F(Macro, PbContactGroupName) { + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + fill_pb_configuration_contact(&ctct_hlp, "test_contact", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + configuration::applier::contactgroup cg_aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); + cg_aply.add_object(cg); + cg_aply.expand_objects(pb_config); + cg_aply.resolve_object(cg, err); + + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTGROUPNAME:test_contact$", out, 1); + ASSERT_EQ(out, "test_cg"); +} + +TEST_F(Macro, PbContactGroupAlias) { + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + 
fill_pb_configuration_contact(&ctct_hlp, "test_contact", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + configuration::applier::contactgroup cg_aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); + cg_aply.add_object(cg); + cg_aply.expand_objects(pb_config); + cg_aply.resolve_object(cg, err); + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTGROUPALIAS:test_cg$", out, 1); + ASSERT_EQ(out, "test_cg"); +} + +TEST_F(Macro, PbContactGroupMembers) { + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + fill_pb_configuration_contact(&ctct_hlp, "test_contact", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + configuration::applier::contactgroup cg_aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); + cg_aply.add_object(cg); + cg_aply.expand_objects(pb_config); + cg_aply.resolve_object(cg, err); + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTGROUPMEMBERS:test_cg$", out, 1); + ASSERT_EQ(out, "test_contact"); +} + +TEST_F(Macro, PbContactGroupNames) { + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + fill_pb_configuration_contact(&ctct_hlp, "test_contact", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + + 
configuration::applier::contactgroup cg_aply; + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); + cg_aply.add_object(cg); + cg_aply.expand_objects(pb_config); + cg_aply.resolve_object(cg, err); + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTGROUPNAMES:test_contact$", out, 1); + ASSERT_EQ(out, "test_cg"); +} + +TEST_F(Macro, PbNotificationRecipients) { + init_macros(); + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + fill_pb_configuration_contact(&ctct_hlp, "admin", true); + ct_aply.add_object(ctct); + configuration::Contact ctct1; + configuration::contact_helper ctct1_hlp(&ctct1); + fill_pb_configuration_contact(&ctct1_hlp, "admin1", false, "c,r"); + ct_aply.add_object(ctct1); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + ct_aply.resolve_object(ctct1, err); + configuration::Contact ctct2; + configuration::contact_helper ctct2_hlp(&ctct2); + fill_pb_configuration_contact(&ctct2_hlp, "test_contact", false); + ct_aply.add_object(ctct2); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct2, err); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + fill_pb_configuration_host(&hst_hlp, "test_host", "admin"); + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + fill_pb_configuration_service(&svc_hlp, "test_host", "test_svc", + "admin,admin1"); + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host3 = hm.begin()->second; + 
_host3->set_current_state(engine::host::state_up); + _host3->set_state_type(checkable::hard); + _host3->set_acknowledgement(AckType::NONE); + _host3->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + nagios_macros* mac(get_global_macros()); + + ASSERT_EQ(_svc->notify(notifier::reason_normal, "test_contact", + "test_comment", notifier::notification_option_forced), + OK); + + std::string out; + process_macros_r(mac, "$NOTIFICATIONRECIPIENTS:test_host:test_svc$", out, 1); + ASSERT_TRUE(out == "admin,admin1" || out == "admin1,admin"); +} + +TEST_F(Macro, PbNotificationAuthor) { + init_macros(); + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + fill_pb_configuration_contact(&ctct_hlp, "admin", true); + ct_aply.add_object(ctct); + configuration::Contact ctct1; + configuration::contact_helper ctct1_hlp(&ctct1); + fill_pb_configuration_contact(&ctct1_hlp, "admin1", false, "c,r"); + ct_aply.add_object(ctct1); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + ct_aply.resolve_object(ctct1, err); + configuration::Contact ctct2; + configuration::contact_helper ctct2_hlp(&ctct2); + fill_pb_configuration_contact(&ctct2_hlp, "test_contact", false); + ct_aply.add_object(ctct2); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct2, err); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + fill_pb_configuration_host(&hst_hlp, "test_host", "admin"); + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + fill_pb_configuration_service(&svc_hlp, "test_host", "test_svc", + 
"admin,admin1"); + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host3 = hm.begin()->second; + _host3->set_current_state(engine::host::state_up); + _host3->set_state_type(checkable::hard); + _host3->set_acknowledgement(AckType::NONE); + _host3->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + nagios_macros* mac(get_global_macros()); + + ASSERT_EQ(_svc->notify(notifier::reason_normal, "test_contact", + "test_comment", notifier::notification_option_forced), + OK); + + std::string out; + process_macros_r(mac, "$NOTIFICATIONAUTHOR$", out, 1); + ASSERT_EQ(out, "test_contact"); +} + +TEST_F(Macro, PbNotificationAuthorName) { + init_macros(); + configuration::applier::contact ct_aply; + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + fill_pb_configuration_contact(&ctct_hlp, "admin", true); + ct_aply.add_object(ctct); + configuration::Contact ctct1; + configuration::contact_helper ctct1_hlp(&ctct1); + fill_pb_configuration_contact(&ctct1_hlp, "admin1", false, "c,r"); + ct_aply.add_object(ctct1); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + ct_aply.resolve_object(ctct1, err); + configuration::Contact ctct2; + configuration::contact_helper ctct2_hlp(&ctct2); + fill_pb_configuration_contact(&ctct2_hlp, "test_contact", false); + ct_aply.add_object(ctct2); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct2, err); + + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + fill_pb_configuration_host(&hst_hlp, "test_host", "admin"); + configuration::applier::host 
hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + fill_pb_configuration_service(&svc_hlp, "test_host", "test_svc", + "admin,admin1"); + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host3 = hm.begin()->second; + _host3->set_current_state(engine::host::state_up); + _host3->set_state_type(checkable::hard); + _host3->set_acknowledgement(AckType::NONE); + _host3->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + nagios_macros* mac(get_global_macros()); + + ASSERT_EQ(_svc->notify(notifier::reason_normal, "test_contact", + "test_comment", notifier::notification_option_forced), + OK); + std::string out; + process_macros_r(mac, "$NOTIFICATIONAUTHORNAME:test_host:test_svc$", out, 1); + ASSERT_EQ(out, "test_contact"); +} + +TEST_F(Macro, PbNotificationAuthorAlias) { + init_macros(); + configuration::applier::contact ct_aply; + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + configuration::Contact ctct1{ + new_pb_configuration_contact("admin1", false, "c,r")}; + ct_aply.add_object(ctct1); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + ct_aply.resolve_object(ctct1, err); + configuration::Contact ctct2{ + new_pb_configuration_contact("test_contact", false)}; + ct_aply.add_object(ctct2); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct2, err); + + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + 
+ configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin,admin1")}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host3 = hm.begin()->second; + _host3->set_current_state(engine::host::state_up); + _host3->set_state_type(checkable::hard); + _host3->set_acknowledgement(AckType::NONE); + _host3->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + nagios_macros* mac(get_global_macros()); + + ASSERT_EQ(_svc->notify(notifier::reason_normal, "test_contact", + "test_comment", notifier::notification_option_forced), + OK); + std::string out; + process_macros_r(mac, "$NOTIFICATIONAUTHORALIAS:test_host:test_svc$", out, 1); + ASSERT_EQ(out, "test_contact"); +} + +TEST_F(Macro, PbNotificationComment) { + init_macros(); + configuration::applier::contact ct_aply; + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + configuration::Contact ctct1{ + new_pb_configuration_contact("admin1", false, "c,r")}; + ct_aply.add_object(ctct1); + ct_aply.expand_objects(pb_config); + configuration::error_cnt err; + ct_aply.resolve_object(ctct, err); + ct_aply.resolve_object(ctct1, err); + configuration::Contact ctct2{ + new_pb_configuration_contact("test_contact", false)}; + ct_aply.add_object(ctct2); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct2, err); + + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", 
"admin,admin1")}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + + host_map const& hm{engine::host::hosts}; + _host3 = hm.begin()->second; + _host3->set_current_state(engine::host::state_up); + _host3->set_state_type(checkable::hard); + _host3->set_acknowledgement(AckType::NONE); + _host3->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + nagios_macros* mac(get_global_macros()); + + ASSERT_EQ(_svc->notify(notifier::reason_normal, "test_contact", + "test_comment", notifier::notification_option_forced), + OK); + + std::string out; + process_macros_r(mac, "$NOTIFICATIONCOMMENT$", out, 1); + ASSERT_EQ(out, "test_comment"); +} + +TEST_F(Macro, PbIsValidTime) { + configuration::applier::timeperiod time_aply; + configuration::Timeperiod time; + configuration::timeperiod_helper time_hlp(&time); + + time.set_timeperiod_name("test"); + time.set_alias("test"); + time_aply.add_object(time); + + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$ISVALIDTIME:test$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(Macro, PbNextValidTime) { + configuration::applier::timeperiod time_aply; + configuration::Timeperiod time; + configuration::timeperiod_helper time_hlp(&time); + + time.set_alias("test"); + time.set_timeperiod_name("test"); + time_hlp.hook("monday", "23:00-24:00"); + time_hlp.hook("tuesday", "23:00-24:00"); + time_hlp.hook("wednesday", "23:00-24:00"); + time_hlp.hook("thursday", "23:00-24:00"); + time_hlp.hook("friday", "23:00-24:00"); + time_hlp.hook("saterday", "23:00-24:00"); + time_hlp.hook("sunday", "23:00-24:00"); + 
time_aply.add_object(time); + + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$NEXTVALIDTIME:test$", out, 1); + ASSERT_EQ(out, "23:00:00"); +} + +TEST_F(Macro, PbContactTimeZone) { + configuration::applier::contact cnt_aply; + configuration::Contact cnt; + cnt.set_contact_name("user"); + cnt.set_email("contact@centreon.com"); + cnt.set_pager("0473729383"); + cnt.set_timezone("time_test"); + cnt_aply.add_object(cnt); + + init_macros(); + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$CONTACTTIMEZONE:user$", out, 1); + ASSERT_EQ(out, "time_test"); +} diff --git a/engine/tests/macros/pbmacro_hostname.cc b/engine/tests/macros/pbmacro_hostname.cc new file mode 100644 index 00000000000..fa5c34b01c2 --- /dev/null +++ b/engine/tests/macros/pbmacro_hostname.cc @@ -0,0 +1,1648 @@ +/** + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include "com/centreon/engine/globals.hh" +#ifdef LEGACY_CONF +#include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/hostgroup_helper.hh" +#endif + +#include "../helper.hh" +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostgroup.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/serviceescalation.hh" +#include "com/centreon/engine/configuration/applier/servicegroup.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/hostescalation.hh" +#include "com/centreon/engine/macros.hh" +#include "com/centreon/engine/macros/grab_host.hh" +#include "com/centreon/engine/macros/process.hh" +#include "com/centreon/engine/timeperiod.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class MacroHostname : public TestEngine { + public: + void SetUp() override { + init_config_state(); + _tp = _creator.new_timeperiod(); + for (int i(0); i < 7; ++i) + _creator.new_timerange(0, 0, 24, 0, i); + _now = strtotimet("2016-11-24 08:00:00"); + set_time(_now); + } + + void TearDown() override { + _host.reset(); + _host2.reset(); + _host3.reset(); + _svc.reset(); + deinit_config_state(); + } + + protected: + std::shared_ptr _host, _host2, _host3; + std::shared_ptr _svc; + timeperiod_creator _creator; 
+ time_t _now; + timeperiod* _tp; +}; + +TEST_F(MacroHostname, HostProblemId) { + configuration::applier::host hst_aply, hst_aply2; + configuration::Host hst, hst2; + configuration::host_helper hst_hlp(&hst), hst2_hlp(&hst2); + next_problem_id = 1; + + set_time(50000); + // first host + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + // second host + hst2.set_host_name("test_host2"); + hst2.set_host_id(13); + hst2.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_NO_THROW(hst_aply2.add_object(hst2)); + ASSERT_EQ(2u, host::hosts.size()); + + init_macros(); + _host = host::hosts.find("test_host")->second; + _host->set_current_state(engine::host::state_up); + _host->set_last_hard_state(engine::host::state_up); + _host->set_last_hard_state_change(50000); + _host->set_state_type(checkable::hard); + + _host2 = host::hosts.find("test_host2")->second; + _host2->set_current_state(engine::host::state_up); + _host2->set_last_hard_state(engine::host::state_up); + _host2->set_last_hard_state_change(50000); + _host2->set_state_type(checkable::hard); + + auto fn = [](std::shared_ptr hst, std::string firstcheck, + std::string secondcheck) -> void { + std::string out; + nagios_macros* mac(get_global_macros()); + + for (int i = 0; i < 3; i++) { + // When i == 0, the state_down is soft => no notification + // When i == 1, the state_down is soft => no notification + // When i == 2, the state_down is hard down => notification + set_time(50500 + i * 500); + hst->set_last_state(hst->get_current_state()); + if (notifier::hard == hst->get_state_type()) + hst->set_last_hard_state(hst->get_current_state()); + hst->process_check_result_3x(engine::host::state_down, "The host is down", + CHECK_OPTION_NONE, 0, true, 0); + } + + process_macros_r(mac, fmt::format("$HOSTPROBLEMID:{}$", hst->name()), out, + 0); + ASSERT_EQ(out, firstcheck); + + for (int i = 0; i < 2; i++) { + // When i == 0, the state_up is hard (return to up) => 
Recovery + // notification When i == 1, the state_up is still here (no change) => no + // notification + set_time(52500 + i * 500); + hst->set_last_state(hst->get_current_state()); + if (notifier::hard == hst->get_state_type()) + hst->set_last_hard_state(hst->get_current_state()); + hst->process_check_result_3x(engine::host::state_up, "The host is up", + CHECK_OPTION_NONE, 0, true, 0); + } + + process_macros_r(mac, "$HOSTPROBLEMID:test_host$", out, 0); + ASSERT_EQ(out, secondcheck); + }; + + fn(_host, "1", "0"); + fn(_host2, "2", "0"); + fn(_host, "3", "0"); +} + +// Given host configuration without host_id +// Then the applier add_object throws an exception. +TEST_F(MacroHostname, TotalHostOk) { + configuration::applier::host hst_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + init_macros(); + + nagios_macros* mac(get_global_macros()); + std::string out; + host::hosts["test_host"]->set_current_state(host::state_up); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSUP$", out, 1); + ASSERT_EQ(out, "1"); +} + +// Given host configuration without host_id +// Then the applier add_object throws an exception. 
+TEST_F(MacroHostname, TotalHostServicesCritical) { + configuration::applier::host hst_aply; + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + init_macros(); + + nagios_macros* mac(get_global_macros()); + std::string out; + host::hosts["test_host"]->set_current_state(host::state_up); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSERVICESCRITICAL:test_host$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostName) { + init_macros(); + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + std::string out; + nagios_macros* mac(get_global_macros()); + + process_macros_r(mac, "$HOSTNAME:test_host$", out, 0); + ASSERT_EQ(out, "test_host"); +} + +TEST_F(MacroHostname, HostAlias) { + init_macros(); + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + std::string out; + nagios_macros* mac(get_global_macros()); + + process_macros_r(mac, "$HOSTALIAS:test_host$", out, 0); + ASSERT_EQ(out, "test_host"); +} + +TEST_F(MacroHostname, HostAddress) { + init_macros(); + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + 
ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + std::string out; + nagios_macros* mac(get_global_macros()); + + process_macros_r(mac, "$HOSTADDRESS:test_host$", out, 0); + ASSERT_EQ(out, "127.0.0.1"); +} + +TEST_F(MacroHostname, LastHostCheck) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$LASTHOSTCHECK:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, LastHostStateChange) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$LASTHOSTSTATECHANGE:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostOutput) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_plugin_output("foo bar!"); + process_macros_r(mac, "$HOSTOUTPUT:test_host$", out, 0); + ASSERT_EQ(out, "foo bar!"); +} + +TEST_F(MacroHostname, HostPerfData) { + configuration::applier::host hst_aply; 
+ configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_perf_data("test"); + process_macros_r(mac, "$HOSTPERFDATA:test_host$", out, 0); + ASSERT_EQ(out, "test"); +} + +TEST_F(MacroHostname, HostState) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + hst_hlp.hook("contacts", "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_current_state(host::state_up); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTSTATE:test_host$", out, 1); + ASSERT_EQ(out, "UP"); +} + +TEST_F(MacroHostname, HostStateID) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + hst_hlp.hook("contacts", "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_current_state(host::state_down); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTSTATEID:test_host$", out, 1); + ASSERT_EQ(out, "1"); +} + +TEST_F(MacroHostname, HostAttempt) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + 
hst.set_host_id(12); + hst.set_address("127.0.0.1"); + hst_hlp.hook("contacts", "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_current_state(host::state_up); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTATTEMPT:test_host$", out, 1); + ASSERT_EQ(out, "1"); +} + +TEST_F(MacroHostname, HostExecutionTime) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + hst_hlp.hook("contacts", "user"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + init_macros(); + + int now{500000000}; + set_time(now); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_execution_time(10.0); + process_macros_r(mac, "$HOSTEXECUTIONTIME:test_host$", out, 1); + ASSERT_EQ(out, "10.000"); +} + +TEST_F(MacroHostname, HostLatency) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_latency(100); + process_macros_r(mac, "$HOSTLATENCY:test_host$", out, 1); + ASSERT_EQ(out, "100.000"); +} + +TEST_F(MacroHostname, HostDuration) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, 
host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTDURATION:test_host$", out, 1); + ASSERT_EQ(out, "5787d 0h 53m 20s"); +} + +TEST_F(MacroHostname, HostDurationSec) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTDURATIONSEC:test_host$", out, 1); + ASSERT_EQ(out, "500000000"); +} + +TEST_F(MacroHostname, HostDownTime) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTDOWNTIME:test_host$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostStateType) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTSTATETYPE:test_host$", out, 1); + ASSERT_EQ(out, "HARD"); +} + +TEST_F(MacroHostname, HostPercentChange) { + configuration::applier::host hst_aply; + configuration::Host hst; + 
configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_host_id(12); + hst.set_address("127.0.0.1"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTPERCENTCHANGE:test_host$", out, 1); + ASSERT_EQ(out, "0.00"); +} + +TEST_F(MacroHostname, HostGroupName) { + configuration::error_cnt err; + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + hst_a.set_host_name("a"); + hst_a.set_host_id(1); + hst_a.set_address("127.0.0.1"); + + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + hst_c.set_host_name("c"); + hst_c.set_host_id(2); + hst_c.set_address("127.0.0.1"); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTGROUPNAME:a$", out, 1); + ASSERT_EQ(out, "temphg"); +} + +TEST_F(MacroHostname, HostGroupAlias) { + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + 
configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_address("127.0.0.1"); + hst_a.set_host_id(1); + + hst_c.set_host_name("c"); + hst_c.set_address("127.0.0.1"); + hst_c.set_host_id(2); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg.set_alias("temphgal"); + hg_hlp.hook("members", "a,c"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + configuration::error_cnt err; + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTGROUPALIAS:temphg$", out, 1); + ASSERT_EQ(out, "temphgal"); +} + +TEST_F(MacroHostname, LastHostUP) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_last_time_up(30); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LASTHOSTUP:test_host$", out, 1); + ASSERT_EQ(out, "30"); +} + +TEST_F(MacroHostname, LastHostDown) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + 
+ int now{500000000}; + set_time(now); + init_macros(); + + host::hosts["test_host"]->set_last_time_down(40); + host::hosts["test_host"]->set_has_been_checked(true); + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$LASTHOSTDOWN:test_host$", out, 1); + ASSERT_EQ(out, "40"); +} + +TEST_F(MacroHostname, LastHostUnreachable) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_last_time_unreachable(50); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LASTHOSTUNREACHABLE:test_host$", out, 1); + ASSERT_EQ(out, "50"); +} + +TEST_F(MacroHostname, HostCheckCommand) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + + cmd.set_command_line("echo 'output| metric=12;50;75'"); + hst.set_check_command("cmd"); + configuration::applier::command cmd_aply; + cmd_aply.add_object(cmd); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTCHECKCOMMAND:test_host$", out, 1); + ASSERT_EQ(out, "cmd"); +} + +TEST_F(MacroHostname, HostDisplayName) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + 
hst.set_address("127.0.0.1"); + hst.set_host_id(12); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTDISPLAYNAME:test_host$", out, 1); + ASSERT_EQ(out, "test_host"); +} + +TEST_F(MacroHostname, HostActionUrl) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_action_url("test_action_url"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTACTIONURL:test_host$", out, 1); + ASSERT_EQ(out, "test_action_url"); +} + +TEST_F(MacroHostname, HostNotesUrl) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes_url("test_notes_url"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTNOTESURL:test_host$", out, 1); + ASSERT_EQ(out, "test_notes_url"); +} + +TEST_F(MacroHostname, HostNotes) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* 
mac(get_global_macros()); + process_macros_r(mac, "$HOSTNOTES:test_host$", out, 1); + ASSERT_EQ(out, "test_notes"); +} + +TEST_F(MacroHostname, TotalHostsDown) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_current_state(host::state_down); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSDOWN:test_host$", out, 1); + ASSERT_EQ(out, "1"); +} + +TEST_F(MacroHostname, TotalHostsUnreachable) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_current_state(host::state_unreachable); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSUNREACHABLE:test_host$", out, 1); + ASSERT_EQ(out, "1"); +} + +TEST_F(MacroHostname, TotalHostsDownUnhandled) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* 
mac(get_global_macros()); + process_macros_r(mac, "$TOTALHOSTSDOWNUNHANDLED:test_host$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, TotalHostsunreachableunhandled) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TOTALHOSTSUNREACHABLEUNHANDLED:test_host$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, TotalHostProblems) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TOTALHOSTPROBLEMS:test_host$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, TotalHostProblemsUnhandled) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TOTALHOSTPROBLEMSUNHANDLED:test_host$", out, 1); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostCheckType) { + configuration::applier::host hst_aply; + 
configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_current_state(host::state_unreachable); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTCHECKTYPE:test_host$", out, 0); + ASSERT_EQ(out, "ACTIVE"); +} + +TEST_F(MacroHostname, LongHostOutput) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LONGHOSTOUTPUT:test_host$", out, 0); + ASSERT_EQ(out, "test_long_output"); +} + +TEST_F(MacroHostname, HostNotificationNumber) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + 
host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTNOTIFICATIONNUMBER:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostNotificationID) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTNOTIFICATIONID:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostEventID) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTEVENTID:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, LastHostEventID) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, 
host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LASTHOSTEVENTID:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostGroupNames) { + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_address("127.0.0.1"); + hst_a.set_host_id(1); + + hst_c.set_host_name("c"); + hst_c.set_address("127.0.0.1"); + hst_c.set_host_id(2); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + configuration::error_cnt err; + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTGROUPNAMES:a$", out, 0); + ASSERT_EQ(out, "temphg"); +} + +TEST_F(MacroHostname, MaxHostAttempts) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + 
ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$MAXHOSTATTEMPTS:test_host$", out, 0); + ASSERT_EQ(out, "3"); +} + +TEST_F(MacroHostname, TotalHostServices) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSERVICES:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, TotalHostServicesOK) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSERVICESOK:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, TotalHostServicesWarning) { + configuration::applier::host hst_aply; + 
configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSERVICESWARNING:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, TotalHostServicesUnknown) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$TOTALHOSTSERVICESUNKNOWN:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostGroupNotes) { + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_address("127.0.0.1"); + hst_a.set_host_id(1); + + hst_c.set_host_name("c"); + hst_c.set_address("127.0.0.1"); + hst_c.set_host_id(2); + + hst_aply.add_object(hst_a); + 
hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + hg.set_notes("test_note"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + configuration::error_cnt err; + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTGROUPNOTES:temphg$", out, 0); + ASSERT_EQ(out, "test_note"); +} + +TEST_F(MacroHostname, HostGroupNotesUrl) { + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_helper(&hst_a); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_address("127.0.0.1"); + hst_a.set_host_id(1); + + hst_c.set_host_name("c"); + hst_c.set_address("127.0.0.1"); + hst_c.set_host_id(2); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + hg.set_notes_url("test_note_url"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + configuration::error_cnt err; + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + 
process_macros_r(mac, "$HOSTGROUPNOTESURL:temphg$", out, 0); + ASSERT_EQ(out, "test_note_url"); +} + +TEST_F(MacroHostname, HostGroupActionUrl) { + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_address("127.0.0.1"); + hst_a.set_host_id(1); + + hst_c.set_host_name("c"); + hst_c.set_address("127.0.0.1"); + hst_c.set_host_id(2); + + hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + hg.set_action_url("test_action_url"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + configuration::error_cnt err; + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTGROUPACTIONURL:temphg$", out, 0); + ASSERT_EQ(out, "test_action_url"); +} + +TEST_F(MacroHostname, HostGroupMembers) { + configuration::applier::hostgroup hg_aply; + configuration::applier::host hst_aply; + configuration::Hostgroup hg; + configuration::hostgroup_helper hg_hlp(&hg); + configuration::Host hst_a; + configuration::host_helper hst_a_hlp(&hst_a); + configuration::Host hst_c; + configuration::host_helper hst_c_hlp(&hst_c); + + hst_a.set_host_name("a"); + hst_a.set_host_id(1); + hst_a.set_address("127.0.0.1"); + + hst_c.set_host_name("c"); + hst_c.set_address("127.0.0.1"); + hst_c.set_host_id(2); + + 
hst_aply.add_object(hst_a); + hst_aply.add_object(hst_c); + + hg.set_hostgroup_name("temphg"); + hg_hlp.hook("members", "a,c"); + hg.set_action_url("test_action_url"); + ASSERT_NO_THROW(hg_aply.add_object(hg)); + + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hst_aply.expand_objects(pb_config)); + ASSERT_NO_THROW(hg_aply.expand_objects(pb_config)); + + configuration::error_cnt err; + ASSERT_NO_THROW(hst_aply.resolve_object(hst_a, err)); + ASSERT_NO_THROW(hst_aply.resolve_object(hst_c, err)); + ASSERT_NO_THROW(hg_aply.resolve_object(hg, err)); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTGROUPMEMBERS:temphg$", out, 0); + ASSERT_TRUE(out == "c,a" || out == "a,c"); +} + +TEST_F(MacroHostname, LastHostProblemId) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LASTHOSTPROBLEMID:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, LastHostState) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); 
+ + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LASTHOSTSTATE:test_host$", out, 0); + ASSERT_EQ(out, "UP"); +} + +TEST_F(MacroHostname, LastHostStateID) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$LASTHOSTSTATEID:test_host$", out, 0); + ASSERT_EQ(out, "0"); +} + +TEST_F(MacroHostname, HostParents) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst_hlp.hook("parents", "test_parent"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTPARENTS:test_host$", out, 0); + ASSERT_EQ(out, "test_parent"); +} + +TEST_F(MacroHostname, HostChildren) { + configuration::applier::host hst_aply; + configuration::applier::command cmd_aply; + configuration::Host hst_child; + configuration::host_helper hst_child_hlp(&hst_child); + 
configuration::Host hst_parent; + configuration::host_helper hst_parent_hlp(&hst_parent); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("base_centreon_ping"); + + cmd.set_command_line( + "$USER1$/check_icmp -H $HOSTADDRESS$ -n $_HOSTPACKETNUMBER$ -w " + "$_HOSTWARNING$ -c $_HOSTCRITICAL$"); + cmd_aply.add_object(cmd); + + hst_child.set_host_name("child_host"); + hst_child.set_address("127.0.0.1"); + hst_child_hlp.hook("parents", "parent_host"); + hst_child.set_host_id(1); + hst_child_hlp.insert_customvariable("_PACKETNUMBER", "42"); + hst_child_hlp.insert_customvariable("_WARNING", "200,20%"); + hst_child_hlp.insert_customvariable("_CRITICAL", "400,50%"); + hst_child.set_check_command("base_centreon_ping"); + hst_aply.add_object(hst_child); + + hst_parent.set_host_name("parent_host"); + hst_parent.set_address("127.0.0.1"); + hst_parent.set_host_id(2); + hst_parent_hlp.insert_customvariable("_PACKETNUMBER", "42"); + hst_parent_hlp.insert_customvariable("_WARNING", "200,20%"); + hst_parent_hlp.insert_customvariable("_CRITICAL", "400,50%"); + hst_parent.set_check_command("base_centreon_ping"); + hst_aply.add_object(hst_parent); + + ASSERT_EQ(engine::host::hosts.size(), 2u); + + configuration::error_cnt err; + hst_aply.expand_objects(pb_config); + hst_aply.resolve_object(hst_child, err); + hst_aply.resolve_object(hst_parent, err); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$HOSTCHILDREN:parent_host$", out, 0); + ASSERT_EQ(out, "child_host"); +} + +TEST_F(MacroHostname, HostID) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_notes("test_notes"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, 
host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTID:test_host$", out, 0); + ASSERT_EQ(out, "12"); +} + +TEST_F(MacroHostname, HostTimeZone) { + configuration::applier::host hst_aply; + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "user"); + hst.set_timezone("test_timezone"); + ASSERT_NO_THROW(hst_aply.add_object(hst)); + ASSERT_EQ(1u, host::hosts.size()); + + int now{500000000}; + set_time(now); + init_macros(); + + std::string out; + nagios_macros* mac(get_global_macros()); + host::hosts["test_host"]->set_long_plugin_output("test_long_output"); + host::hosts["test_host"]->set_has_been_checked(true); + process_macros_r(mac, "$HOSTTIMEZONE:test_host$", out, 0); + ASSERT_EQ(out, "test_timezone"); +} diff --git a/engine/tests/macros/pbmacro_service.cc b/engine/tests/macros/pbmacro_service.cc new file mode 100644 index 00000000000..4209dae5163 --- /dev/null +++ b/engine/tests/macros/pbmacro_service.cc @@ -0,0 +1,2099 @@ +/** + * Copyright 2019 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + * + */ + +#include +#include +#include "../helper.hh" +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/hostgroup.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/serviceescalation.hh" +#include "com/centreon/engine/configuration/applier/servicegroup.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/hostescalation.hh" +#include "com/centreon/engine/macros.hh" +#include "com/centreon/engine/macros/grab_host.hh" +#include "com/centreon/engine/macros/process.hh" +#include "com/centreon/engine/timeperiod.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; + +class MacroService : public TestEngine { + public: + void SetUp() override { + init_config_state(); + _tp = _creator.new_timeperiod(); + for (int i(0); i < 7; ++i) + _creator.new_timerange(0, 0, 24, 0, i); + _now = strtotimet("2016-11-24 08:00:00"); + set_time(_now); + } + + void TearDown() override { + _host.reset(); + _host2.reset(); + _host3.reset(); + _svc.reset(); + deinit_config_state(); + } + + protected: + std::shared_ptr _host, _host2, _host3; + std::shared_ptr _svc; + timeperiod_creator _creator; + time_t _now; + timeperiod* _tp; +}; + +// Given host configuration without host_id +// Then the applier add_object throws an exception. 
+TEST_F(MacroService, TotalServicesOkZero) { + std::string out; + nagios_macros* mac(get_global_macros()); + process_macros_r(mac, "$TOTALSERVICESOK$", out, 0); + ASSERT_EQ(out, "0"); +} + +//TEST_F(MacroService, ServiceMacro) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// nagios_macros* mac(get_global_macros()); +// std::string out; +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// service::services[std::make_pair("test_host", "test_svc")]->set_plugin_output( +// "foo bar!"); +// process_macros_r(mac, "$SERVICEOUTPUT:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "foo bar!"); +//} +// +//TEST_F(MacroService, ServiceDesc) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// 
ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// +// process_macros_r(mac, "$SERVICEDESC:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "test_svc"); +//} +// +//TEST_F(MacroService, ServiceState) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// +// process_macros_r(mac, 
"$SERVICESTATE:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "OK"); +//} +// +//TEST_F(MacroService, ServiceStateID) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// +// process_macros_r(mac, "$SERVICESTATEID:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, ServiceAttempt) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// 
configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// +// process_macros_r(mac, "$SERVICEATTEMPT:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "1"); +//} +// +//TEST_F(MacroService, ServiceisVolatile) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// +// process_macros_r(mac, "$SERVICEISVOLATILE:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, LastServiceCheck) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// 
ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// process_macros_r(mac, "$LASTSERVICECHECK:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, LastServiceStateChange) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// 
svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// process_macros_r(mac, "$LASTSERVICESTATECHANGE:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, ServicePerfData) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// service::services[std::make_pair("test_host", "test_svc")]->set_perf_data( +// "foo"); +// process_macros_r(mac, "$SERVICEPERFDATA:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "foo"); +//} +// 
+//TEST_F(MacroService, ServiceExecutionTime) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::applier::contact cnt_aply; +// configuration::service svc; +// configuration::host hst; +// configuration::contact cnt; +// ASSERT_TRUE(cnt.parse("contact_name", "user")); +// ASSERT_TRUE(cnt.parse("email", "contact@centreon.com")); +// ASSERT_TRUE(cnt.parse("pager", "0473729383")); +// ASSERT_TRUE(cnt.parse("host_notification_period", "24x7")); +// ASSERT_TRUE(cnt.parse("service_notification_period", "24x7")); +// cnt_aply.add_object(cnt); +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// // ASSERT_TRUE(hst.parse("contact_name", "testeeeeee")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_execution_time(20.00); +// process_macros_r(mac, "$SERVICEEXECUTIONTIME:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "20.000"); +//} 
+// +//TEST_F(MacroService, ServiceLatency) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::applier::contact cnt_aply; +// configuration::service svc; +// configuration::host hst; +// configuration::contact cnt; +// ASSERT_TRUE(cnt.parse("contact_name", "user")); +// ASSERT_TRUE(cnt.parse("email", "contact@centreon.com")); +// ASSERT_TRUE(cnt.parse("pager", "0473729383")); +// ASSERT_TRUE(cnt.parse("host_notification_period", "24x7")); +// ASSERT_TRUE(cnt.parse("service_notification_period", "24x7")); +// cnt_aply.add_object(cnt); +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// // ASSERT_TRUE(hst.parse("contact_name", "testeeeeee")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// service::services[std::make_pair("test_host", "test_svc")]->set_latency( +// 20.00); +// process_macros_r(mac, "$SERVICELATENCY:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "20.000"); +//} +// 
+//TEST_F(MacroService, ServiceDuration) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::applier::contact cnt_aply; +// configuration::service svc; +// configuration::host hst; +// configuration::contact cnt; +// ASSERT_TRUE(cnt.parse("contact_name", "user")); +// ASSERT_TRUE(cnt.parse("email", "contact@centreon.com")); +// ASSERT_TRUE(cnt.parse("pager", "0473729383")); +// ASSERT_TRUE(cnt.parse("host_notification_period", "24x7")); +// ASSERT_TRUE(cnt.parse("service_notification_period", "24x7")); +// cnt_aply.add_object(cnt); +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// // ASSERT_TRUE(hst.parse("contact_name", "testeeeeee")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// service::services[std::make_pair("test_host", "test_svc")]->set_latency( +// 20.00); +// process_macros_r(mac, "$SERVICEDURATION:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "5787d 0h 53m 20s"); +//} +// 
+//TEST_F(MacroService, ServiceDurationSec) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::applier::contact cnt_aply; +// configuration::service svc; +// configuration::host hst; +// configuration::contact cnt; +// ASSERT_TRUE(cnt.parse("contact_name", "user")); +// ASSERT_TRUE(cnt.parse("email", "contact@centreon.com")); +// ASSERT_TRUE(cnt.parse("pager", "0473729383")); +// ASSERT_TRUE(cnt.parse("host_notification_period", "24x7")); +// ASSERT_TRUE(cnt.parse("service_notification_period", "24x7")); +// cnt_aply.add_object(cnt); +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// // We fake the expand_object +// svc.set_host_id(12); +// +// int now{500000000}; +// set_time(now); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// host::hosts["test_host"]->set_current_state(host::state_up); +// host::hosts["test_host"]->set_has_been_checked(true); +// service::services[std::make_pair("test_host", "test_svc")]->set_latency( +// 20.00); +// process_macros_r(mac, "$SERVICEDURATIONSEC:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "500000000"); +//} +// +//TEST_F(MacroService, ServiceDownTime) { +// 
configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEDOWNTIME:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, ServiceStateType) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// 
cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICESTATETYPE:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "HARD"); +//} +// +//TEST_F(MacroService, ServicePercentChange) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEPERCENTCHANGE:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0.00"); +//} +// +//TEST_F(MacroService, ServiceGroupName) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// configuration::applier::command aply_cmd; +// 
configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", "18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", "test_group")); +// grp.parse("members", "test_host,test"); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEGROUPNAME:test_host:test$", out, 1); +// ASSERT_EQ(out, "test_group"); +//} +// +//TEST_F(MacroService, ServiceGroupAlias) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// configuration::applier::command aply_cmd; +// configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", 
"18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", "test_group")); +// grp.parse("members", "test_host,test"); +// grp.parse("alias", "test_group_alias"); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEGROUPALIAS:test_group$", out, 1); +// ASSERT_EQ(out, "test_group_alias"); +//} +// +//TEST_F(MacroService, LastServiceOK) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// 
service::services[std::make_pair("test_host", "test_svc")]->set_last_time_ok( +// 20); +// process_macros_r(mac, "$LASTSERVICEOK:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "20"); +//} +// +//TEST_F(MacroService, LastServiceWarning) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_last_time_warning(30); +// process_macros_r(mac, "$LASTSERVICEWARNING:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "30"); +//} +// +//TEST_F(MacroService, LastServiceUnknown) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// 
ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_last_time_unknown(40); +// process_macros_r(mac, "$LASTSERVICEUNKNOWN:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "40"); +//} +// +//TEST_F(MacroService, LastServiceCritical) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); 
+// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_last_time_critical(50); +// process_macros_r(mac, "$LASTSERVICECRITICAL:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "50"); +//} +// +//TEST_F(MacroService, ServiceCheckCommand) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_last_time_critical(50); +// process_macros_r(mac, "$SERVICECHECKCOMMAND:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "cmd"); +//} +// +//TEST_F(MacroService, ServiceDisplayName) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// 
ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_last_time_critical(50); +// process_macros_r(mac, "$SERVICEDISPLAYNAME:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "test_svc"); +//} +// +//TEST_F(MacroService, ServiceNotesUrl) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("notes_url", "http://192.168.0.172/centreon/main.php")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 
'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICENOTESURL:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "http://192.168.0.172/centreon/main.php"); +//} +// +//TEST_F(MacroService, ServiceNotes) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("notes", "test_notes")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICENOTES:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "test_notes"); +//} +// +//TEST_F(MacroService, ServiceActionUrl) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service 
svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEACTIONURL:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "test_action_url"); +//} +// +//TEST_F(MacroService, TotalServicesWarning) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command 
cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICESWARNING:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServicesCritical) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICESCRITICAL:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServicesUnknown) { +// configuration::applier::host hst_aply; +// configuration::applier::service 
svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICESUNKNOWN:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServicesWarningUnhandled) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); 
+// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICESWARNINGUNHANDLED:test_host:test_svc$", +// out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServicesCriticalUnhandled) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICESCRITICALUNHANDLED:test_host:test_svc$", +// out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServicesUnknownUnhandled) { +// 
configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICESUNKNOWNUNHANDLED:test_host:test_svc$", +// out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServiceProblems) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// 
ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICEPROBLEMS:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, TotalServiceProblemsUnhandled) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$TOTALSERVICEPROBLEMSUNHANDLED:test_host:test_svc$", +// out, 1); +// ASSERT_EQ(out, "0"); +//} 
+// +//TEST_F(MacroService, ServiceCheckType) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICECHECKTYPE:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "ACTIVE"); +//} +// +//TEST_F(MacroService, LongServiceOutput) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// 
ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$LONGSERVICEOUTPUT:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "test_long_output"); +//} +// +//TEST_F(MacroService, ServiceNotificationID) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; 
+// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$SERVICENOTIFICATIONID:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, ServiceEventID) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$SERVICEEVENTID:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, LastServiceEventID) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// 
ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$LASTSERVICEEVENTID:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, ServiceGroupNames) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// configuration::applier::command aply_cmd; +// configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", "18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// 
aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", "test_group")); +// grp.parse("members", "test_host,test"); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$SERVICEGROUPNAMES:test_host:test$", out, 1); +// ASSERT_EQ(out, "test_group"); +//} +// +//TEST_F(MacroService, MaxServiceAttempts) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); 
+// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$MAXSERVICEATTEMPTS:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "3"); +//} +// +//TEST_F(MacroService, ServiceGroupNotes) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// configuration::applier::command aply_cmd; +// configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", "18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", "test_group")); +// grp.parse("members", "test_host,test"); +// ASSERT_TRUE(grp.parse("notes", "test_notes")); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$SERVICEGROUPNOTES:test_group$", out, 1); +// ASSERT_EQ(out, "test_notes"); +//} +// +//TEST_F(MacroService, ServiceGroupNotesUrl) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// 
configuration::applier::command aply_cmd; +// configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", "18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", "test_group")); +// grp.parse("members", "test_host,test"); +// ASSERT_TRUE(grp.parse("notes_url", "test_notes_url")); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEGROUPNOTESURL:test_group$", out, 1); +// ASSERT_EQ(out, "test_notes_url"); +//} +// +//TEST_F(MacroService, ServiceGroupActionUrl) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// configuration::applier::command aply_cmd; +// configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// 
ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", "18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", "test_group")); +// grp.parse("members", "test_host,test"); +// ASSERT_TRUE(grp.parse("action_url", "test_notes_url")); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEGROUPACTIONURL:test_group$", out, 1); +// ASSERT_EQ(out, "test_notes_url"); +//} +// +//TEST_F(MacroService, ServiceGroupMembers) { +// configuration::applier::host aply_hst; +// configuration::applier::service aply_svc; +// configuration::applier::command aply_cmd; +// configuration::applier::servicegroup aply_grp; +// configuration::servicegroup grp("test_group"); +// configuration::host hst; +// configuration::command cmd("cmd"); +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// aply_hst.add_object(hst); +// configuration::service svc; +// ASSERT_TRUE(svc.parse("service_description", "test")); +// ASSERT_TRUE(svc.parse("hosts", "test_host")); +// ASSERT_TRUE(svc.parse("service_id", "18")); +// cmd.parse("command_line", "echo 1"); +// svc.parse("check_command", "cmd"); +// aply_cmd.add_object(cmd); +// +// // We fake here the expand_object on configuration::service +// svc.set_host_id(12); +// +// configuration::error_cnt err; +// aply_svc.add_object(svc); +// ASSERT_TRUE(svc.parse("servicegroups", 
"test_group")); +// grp.parse("members", "test_host,test"); +// ASSERT_TRUE(grp.parse("action_url", "test_notes_url")); +// aply_grp.add_object(grp); +// aply_grp.expand_objects(*config); +// ASSERT_NO_THROW(aply_grp.resolve_object(grp, err)); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEGROUPMEMBERS:test_group$", out, 1); +// ASSERT_EQ(out, "test_host,test"); +//} +// +//TEST_F(MacroService, ServiceID) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("service_id", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEID:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "13"); +//} +// +//TEST_F(MacroService, ServiceTimeZone) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// 
configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// svc.parse("timezone", "test_time"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$SERVICETIMEZONE:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "test_time"); +//} +// +//TEST_F(MacroService, LastServiceState) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// 
ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$LASTSERVICESTATE:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "OK"); +//} +// +//TEST_F(MacroService, LastServiceStateId) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* 
mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$LASTSERVICESTATEID:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} +// +//TEST_F(MacroService, ServiceProblemID) { +// init_macros(); +// +// configuration::error_cnt err; +// configuration::applier::contact ct_aply; +// configuration::contact ctct{new_configuration_contact("admin", true)}; +// ct_aply.add_object(ctct); +// configuration::contact ctct1{ +// new_configuration_contact("admin1", false, "c,r")}; +// ct_aply.add_object(ctct1); +// ct_aply.expand_objects(*config); +// ct_aply.resolve_object(ctct, err); +// ct_aply.resolve_object(ctct1, err); +// +// configuration::host hst{new_configuration_host("test_host", "admin")}; +// configuration::applier::host hst_aply; +// hst_aply.add_object(hst); +// +// configuration::service svc{ +// new_configuration_service("test_host", "test_svc", "admin,admin1")}; +// configuration::applier::service svc_aply; +// svc_aply.add_object(svc); +// +// hst_aply.resolve_object(hst, err); +// svc_aply.resolve_object(svc, err); +// +// host_map const& hm{engine::host::hosts}; +// _host3 = hm.begin()->second; +// _host3->set_current_state(engine::host::state_up); +// _host3->set_state_type(checkable::hard); +// _host3->set_acknowledgement(AckType::NONE); +// _host3->set_notify_on(static_cast(-1)); +// +// std::string out; +// service_map const& sm{engine::service::services}; +// _svc = sm.begin()->second; +// _svc->set_current_state(engine::service::state_ok); +// _svc->set_state_type(checkable::hard); +// _svc->set_acknowledgement(AckType::NORMAL); +// _svc->set_notify_on(static_cast(-1)); +// +// set_time(50000); +// _svc->set_current_state(engine::service::state_ok); +// _svc->set_last_hard_state(engine::service::state_ok); +// _svc->set_last_hard_state_change(50000); +// _svc->set_state_type(checkable::hard); +// 
_svc->set_first_notification_delay(3); +// +// for (int i = 1; i < 4; i++) { +// // When i == 0, the state_down is soft => no notification +// // When i == 1, the state_down is soft => no notification +// // When i == 2, the state_down is hard down => notification +// set_time(50000 + i * 60); +// _svc->set_last_state(_svc->get_current_state()); +// if (notifier::hard == _svc->get_state_type()) +// _svc->set_last_hard_state(_svc->get_current_state()); +// std::ostringstream oss; +// std::time_t now{std::time(nullptr)}; +// oss << '[' << now << ']' +// << " PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " +// "critical"; +// std::string cmd{oss.str()}; +// process_external_command(cmd.c_str()); +// checks::checker::instance().reap(); +// } +// nagios_macros* mac(get_global_macros()); +// process_macros_r(mac, "$SERVICEPROBLEMID:test_host:test_svc$", out, 0); +// ASSERT_EQ(out, "4"); +//} +// +//TEST_F(MacroService, LastServiceProblemID) { +// configuration::applier::host hst_aply; +// configuration::applier::service svc_aply; +// configuration::service svc; +// configuration::host hst; +// +// ASSERT_TRUE(hst.parse("host_name", "test_host")); +// ASSERT_TRUE(hst.parse("address", "127.0.0.1")); +// ASSERT_TRUE(hst.parse("_HOST_ID", "12")); +// ASSERT_TRUE(hst.parse("contacts", "user")); +// ASSERT_NO_THROW(hst_aply.add_object(hst)); +// ASSERT_EQ(1u, host::hosts.size()); +// ASSERT_TRUE(svc.parse("description", "test_svc")); +// ASSERT_TRUE(svc.parse("host_name", "test_host")); +// ASSERT_TRUE(svc.parse("_HOST_ID", "12")); +// ASSERT_TRUE(svc.parse("_SERVICE_ID", "13")); +// ASSERT_TRUE(svc.parse("action_url", "test_action_url")); +// svc.set_host_id(12); +// +// configuration::command cmd("cmd"); +// cmd.parse("command_line", "echo 'output| metric=12;50;75'"); +// svc.parse("check_command", "cmd"); +// configuration::applier::command cmd_aply; +// cmd_aply.add_object(cmd); +// ASSERT_NO_THROW(svc_aply.add_object(svc)); +// ASSERT_EQ(1u, 
service::services.size()); +// init_macros(); +// int now{500000000}; +// set_time(now); +// +// std::string out; +// nagios_macros* mac(get_global_macros()); +// service::services[std::make_pair("test_host", "test_svc")] +// ->set_long_plugin_output("test_long_output"); +// process_macros_r(mac, "$LASTSERVICEPROBLEMID:test_host:test_svc$", out, 1); +// ASSERT_EQ(out, "0"); +//} diff --git a/engine/tests/notifications/host_downtime_notification.cc b/engine/tests/notifications/host_downtime_notification.cc index b0d5ab373dd..08308cd33a1 100644 --- a/engine/tests/notifications/host_downtime_notification.cc +++ b/engine/tests/notifications/host_downtime_notification.cc @@ -31,7 +31,11 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/hostescalation.hh" #include "com/centreon/engine/timeperiod.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" +#else +#include "common/engine_conf/timeperiod_helper.hh" +#endif #include "helper.hh" using namespace com::centreon; @@ -46,12 +50,22 @@ class HostDowntimeNotification : public TestEngine { init_config_state(); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; ct_aply.add_object(ctct); ct_aply.expand_objects(*config); +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("test_host", "admin")}; +#else + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; +#endif configuration::applier::host aply; aply.add_object(hst); aply.resolve_object(hst, err); @@ -84,10 +98,33 @@ TEST_F(HostDowntimeNotification, SimpleHostDowntime) { */ set_time(43000); _host->set_last_hard_state_change(43000); +#ifdef LEGACY_CONF std::unique_ptr tperiod{ new engine::timeperiod("tperiod", "alias")}; for 
(size_t i = 0; i < tperiod->days.size(); ++i) tperiod->days[i].emplace_back(0, 86400); +#else + configuration::Timeperiod tp; + configuration::timeperiod_helper tp_hlp(&tp); + tp.set_timeperiod_name("tperiod"); + tp.set_alias("alias"); +#define add_day(day) \ + { \ + auto* d = tp.mutable_timeranges()->add_##day(); \ + d->set_range_start(0); \ + d->set_range_end(86400); \ + } + + add_day(sunday); + add_day(monday); + add_day(tuesday); + add_day(wednesday); + add_day(thursday); + add_day(friday); + add_day(saturday); + + std::unique_ptr tperiod{new engine::timeperiod(tp)}; +#endif std::unique_ptr host_escalation{ new engine::hostescalation("host_name", 0, 1, 1.0, "tperiod", 7, 12345)}; @@ -133,10 +170,26 @@ TEST_F(HostDowntimeNotification, contact_map::iterator it{engine::contact::contacts.find("admin")}; engine::contact* ctct{it->second.get()}; ctct->set_notify_on(notifier::host_notification, notifier::none); +#ifdef LEGACY_CONF std::unique_ptr tperiod{ new engine::timeperiod("tperiod", "alias")}; for (size_t i = 0; i < tperiod->days.size(); ++i) tperiod->days[i].emplace_back(0, 86400); +#else + configuration::Timeperiod tp; + configuration::timeperiod_helper tp_hlp(&tp); + tp.set_timeperiod_name("tperiod"); + tp.set_alias("alias"); + add_day(sunday); + add_day(monday); + add_day(tuesday); + add_day(wednesday); + add_day(thursday); + add_day(friday); + add_day(saturday); + + std::unique_ptr tperiod{new engine::timeperiod(tp)}; +#endif std::unique_ptr host_escalation{ new engine::hostescalation("host_name", 0, 1, 1.0, "tperiod", 7, 12345)}; diff --git a/engine/tests/notifications/host_flapping_notification.cc b/engine/tests/notifications/host_flapping_notification.cc index b9b98014e8c..4fa9584d6a1 100644 --- a/engine/tests/notifications/host_flapping_notification.cc +++ b/engine/tests/notifications/host_flapping_notification.cc @@ -29,8 +29,13 @@ #include "com/centreon/engine/host.hh" #include "com/centreon/engine/hostescalation.hh" #include 
"com/centreon/engine/timezone_manager.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/host_helper.hh" +#endif #include "helper.hh" using namespace com::centreon; @@ -45,17 +50,32 @@ class HostFlappingNotification : public TestEngine { init_config_state(); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; ct_aply.add_object(ctct); ct_aply.expand_objects(*config); +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::host hst_aply; +#ifdef LEGACY_CONF configuration::host hst; hst.parse("host_name", "test_host"); hst.parse("address", "127.0.0.1"); hst.parse("_HOST_ID", "12"); hst.parse("contacts", "admin"); +#else + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst_hlp.hook("contacts", "admin"); +#endif hst_aply.add_object(hst); hst_aply.resolve_object(hst, err); host_map const& hm{engine::host::hosts}; @@ -65,12 +85,22 @@ class HostFlappingNotification : public TestEngine { _host->set_acknowledgement(AckType::NONE); _host->set_notify_on(static_cast(-1)); +#ifdef LEGACY_CONF configuration::host hst_child; hst_child.parse("host_name", "child_host"); hst_child.parse("parents", "test_host"); hst_child.parse("address", "127.0.0.1"); hst_child.parse("_HOST_ID", "13"); hst_child.parse("contacts", "admin"); +#else + configuration::Host hst_child; + configuration::host_helper hst_child_hlp(&hst_child); + hst_child.set_host_name("child_host"); + hst_child.set_address("127.0.0.1"); + hst_child_hlp.hook("parents", "test_host"); + hst_child.set_host_id(13); + 
hst_child_hlp.hook("contacts", "admin"); +#endif hst_aply.add_object(hst_child); hst_aply.resolve_object(hst_child, err); @@ -104,12 +134,9 @@ TEST_F(HostFlappingNotification, SimpleHostFlapping) { * If we call time(), it is not the glibc time() function that will be called. */ set_time(43000); - // FIXME DBR: should not we find a better solution than fixing this each time? _host->set_last_hard_state_change(43000); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (size_t i = 0; i < tperiod->days.size(); ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr host_escalation = std::make_unique("host_name", 0, 1, 1.0, @@ -157,10 +184,33 @@ TEST_F(HostFlappingNotification, SimpleHostFlappingStartTwoTimes) { */ set_time(43000); _host->set_notification_interval(2); +#ifdef LEGACY_CONF std::unique_ptr tperiod{ new engine::timeperiod("tperiod", "alias")}; for (uint32_t i = 0; i < tperiod->days.size(); ++i) tperiod->days[i].emplace_back(0, 86400); +#else + configuration::Timeperiod tp; + configuration::timeperiod_helper tp_hlp(&tp); + tp.set_timeperiod_name("tperiod"); + tp.set_alias("alias"); +#define add_day(day) \ + { \ + auto* d = tp.mutable_timeranges()->add_##day(); \ + d->set_range_start(0); \ + d->set_range_end(86400); \ + } + + add_day(sunday); + add_day(monday); + add_day(tuesday); + add_day(wednesday); + add_day(thursday); + add_day(friday); + add_day(saturday); + + std::unique_ptr tperiod{new engine::timeperiod(tp)}; +#endif std::unique_ptr host_escalation{ new engine::hostescalation("host_name", 0, 1, 1.0, "tperiod", 7, 12345)}; @@ -196,10 +246,33 @@ TEST_F(HostFlappingNotification, SimpleHostFlappingStopTwoTimes) { */ set_time(43000); _host->set_notification_interval(2); +#ifdef LEGACY_CONF std::unique_ptr tperiod{ new engine::timeperiod("tperiod", "alias")}; for (uint32_t i = 0; i < tperiod->days.size(); ++i) tperiod->days[i].emplace_back(0, 86400); +#else + 
configuration::Timeperiod tp; + configuration::timeperiod_helper tp_hlp(&tp); + tp.set_timeperiod_name("tperiod"); + tp.set_alias("alias"); +#define add_day(day) \ + { \ + auto* d = tp.mutable_timeranges()->add_##day(); \ + d->set_range_start(0); \ + d->set_range_end(86400); \ + } + + add_day(sunday); + add_day(monday); + add_day(tuesday); + add_day(wednesday); + add_day(thursday); + add_day(friday); + add_day(saturday); + + std::unique_ptr tperiod{new engine::timeperiod(tp)}; +#endif std::unique_ptr host_escalation{ new engine::hostescalation("host_name", 0, 1, 1.0, "tperiod", 7, 12345)}; @@ -229,7 +302,11 @@ TEST_F(HostFlappingNotification, SimpleHostFlappingStopTwoTimes) { } TEST_F(HostFlappingNotification, CheckFlapping) { +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _host->set_flap_detection_enabled(true); _host->add_flap_detection_on(engine::host::up); _host->add_flap_detection_on(engine::host::down); @@ -290,7 +367,11 @@ TEST_F(HostFlappingNotification, CheckFlapping) { } TEST_F(HostFlappingNotification, CheckFlappingWithHostParentDown) { +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _host->set_current_state(engine::host::state_down); _host->set_last_hard_state(engine::host::state_down); _host->set_state_type(checkable::hard); diff --git a/engine/tests/notifications/host_normal_notification.cc b/engine/tests/notifications/host_normal_notification.cc index 661c41d943a..47755ca3236 100644 --- a/engine/tests/notifications/host_normal_notification.cc +++ b/engine/tests/notifications/host_normal_notification.cc @@ -38,9 +38,12 @@ #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/retention/dump.hh" #include "com/centreon/engine/timezone_manager.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/hostescalation.hh" #include 
"common/engine_legacy_conf/state.hh" +#else +#endif using namespace com::centreon; using namespace com::centreon::engine; @@ -58,12 +61,26 @@ class HostNotification : public TestEngine { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("test_host", "admin")}; +#else + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; +#endif configuration::applier::host hst_aply; hst_aply.add_object(hst); hst_aply.resolve_object(hst, err); @@ -91,7 +108,7 @@ TEST_F(HostNotification, SimpleNormalHostNotification) { */ set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; for (int i = 0; i < 7; ++i) tperiod->days[i].emplace_back(0, 86400); @@ -115,10 +132,14 @@ TEST_F(HostNotification, SimpleNormalHostNotificationNotificationsdisabled) { /* We are using a local time() function defined in tests/timeperiod/utils.cc. * If we call time(), it is not the glibc time() function that will be called. 
*/ +#ifdef LEGACY_CONF config->enable_notifications(false); +#else + pb_config.set_enable_notifications(false); +#endif set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; for (int i = 0; i < 7; ++i) tperiod->days[i].emplace_back(0, 86400); @@ -140,7 +161,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationNotifierNotifdisabled) { */ set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; for (int i = 0; i < 7; ++i) tperiod->days[i].emplace_back(0, 86400); @@ -159,7 +180,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationNotifierNotifdisabled) { TEST_F(HostNotification, SimpleNormalHostNotificationOutsideTimeperiod) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -179,9 +200,13 @@ TEST_F(HostNotification, SimpleNormalHostNotificationOutsideTimeperiod) { TEST_F(HostNotification, SimpleNormalHostNotificationForcedWithNotificationDisabled) { +#ifdef LEGACY_CONF config->enable_notifications(false); +#else + pb_config.set_enable_notifications(false); +#endif std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -201,7 +226,7 @@ TEST_F(HostNotification, TEST_F(HostNotification, SimpleNormalHostNotificationForcedNotification) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -221,7 +246,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationForcedNotification) { TEST_F(HostNotification, SimpleNormalHostNotificationWithDowntime) { std::unique_ptr 
tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _host->set_scheduled_downtime_depth(30); @@ -242,7 +267,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationWithDowntime) { TEST_F(HostNotification, SimpleNormalHostNotificationWithFlapping) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _host->set_is_flapping(true); @@ -263,7 +288,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationWithFlapping) { TEST_F(HostNotification, SimpleNormalHostNotificationWithSoftState) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _host->set_state_type(checkable::soft); @@ -285,7 +310,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationWithSoftState) { TEST_F(HostNotification, SimpleNormalHostNotificationWithHardStateAcknowledged) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -306,7 +331,7 @@ TEST_F(HostNotification, TEST_F(HostNotification, SimpleNormalHostNotificationAfterPreviousTooSoon) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -329,7 +354,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationAfterPreviousTooSoon) { TEST_F(HostNotification, SimpleNormalHostNotificationAfterPreviousWithNullInterval) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -353,7 +378,7 @@ TEST_F(HostNotification, TEST_F(HostNotification, 
SimpleNormalHostNotificationOnStateNotNotified) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -377,7 +402,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationOnStateNotNotified) { TEST_F(HostNotification, SimpleNormalHostNotificationOnStateBeforeFirstNotifDelay) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -403,7 +428,7 @@ TEST_F(HostNotification, TEST_F(HostNotification, SimpleNormalHostNotificationOnStateAfterFirstNotifDelay) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_host->get_next_notification_id()}; @@ -431,7 +456,7 @@ TEST_F(HostNotification, SimpleNormalHostNotificationNotifierDelayTooShort) { */ set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; for (uint32_t i = 0; i < tperiod->days.size(); ++i) tperiod->days[i].emplace_back(0, 86400); @@ -560,23 +585,51 @@ TEST_F(HostNotification, CheckFirstNotificationDelay) { TEST_F(HostNotification, HostEscalation) { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + 
configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::hostescalation he_aply; +#ifdef LEGACY_CONF configuration::hostescalation he{ new_configuration_hostescalation("test_host", "test_cg")}; +#else + configuration::Hostescalation he{ + new_pb_configuration_hostescalation("test_host", "test_cg")}; +#endif he_aply.add_object(he); +#ifdef LEGACY_CONF he_aply.expand_objects(*config); +#else + he_aply.expand_objects(pb_config); +#endif he_aply.resolve_object(he, err); int now{50000}; @@ -686,28 +739,61 @@ TEST_F(HostNotification, HostEscalation) { TEST_F(HostNotification, HostDependency) { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::host h_aply; +#ifdef LEGACY_CONF configuration::host h{new_configuration_host("dep_host", "admin", 15)}; +#else + configuration::Host 
h{new_pb_configuration_host("dep_host", "admin", 15)}; +#endif h_aply.add_object(h); +#ifdef LEGACY_CONF h_aply.expand_objects(*config); +#else + h_aply.expand_objects(pb_config); +#endif h_aply.resolve_object(h, err); configuration::applier::hostdependency hd_aply; +#ifdef LEGACY_CONF configuration::hostdependency hd{ new_configuration_hostdependency("test_host", "dep_host")}; hd_aply.expand_objects(*config); +#else + configuration::Hostdependency hd{ + new_pb_configuration_hostdependency("test_host", "dep_host")}; + hd_aply.expand_objects(pb_config); +#endif hd_aply.add_object(hd); hd_aply.resolve_object(hd, err); @@ -828,23 +914,49 @@ TEST_F(HostNotification, HostDependency) { TEST_F(HostNotification, HostEscalationOneTime) { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; ct_aply.add_object(ctct); ct_aply.expand_objects(*config); +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", false)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::hostescalation he_aply; +#ifdef LEGACY_CONF configuration::hostescalation he{ new_configuration_hostescalation("test_host", "test_cg", 1, 0)}; +#else + configuration::Hostescalation he{ + new_pb_configuration_hostescalation("test_host", "test_cg", 1, 0)}; +#endif he_aply.add_object(he); +#ifdef LEGACY_CONF 
he_aply.expand_objects(*config); +#else + he_aply.expand_objects(pb_config); +#endif he_aply.resolve_object(he, err); int now{50000}; @@ -924,23 +1036,51 @@ TEST_F(HostNotification, HostEscalationOneTime) { TEST_F(HostNotification, HostEscalationOneTimeNotifInter0) { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::hostescalation he_aply; +#ifdef LEGACY_CONF configuration::hostescalation he{ new_configuration_hostescalation("test_host", "test_cg", 1, 0, 0)}; +#else + configuration::Hostescalation he{ + new_pb_configuration_hostescalation("test_host", "test_cg", 1, 0, 0)}; +#endif he_aply.add_object(he); +#ifdef LEGACY_CONF he_aply.expand_objects(*config); +#else + he_aply.expand_objects(pb_config); +#endif he_aply.resolve_object(he, err); int now{50000}; @@ -1020,23 +1160,51 @@ TEST_F(HostNotification, HostEscalationOneTimeNotifInter0) { TEST_F(HostNotification, HostEscalationRetention) { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + 
new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::hostescalation he_aply; +#ifdef LEGACY_CONF configuration::hostescalation he{ new_configuration_hostescalation("test_host", "test_cg", 1, 0, 0)}; +#else + configuration::Hostescalation he{ + new_pb_configuration_hostescalation("test_host", "test_cg", 1, 0, 0)}; +#endif he_aply.add_object(he); +#ifdef LEGACY_CONF he_aply.expand_objects(*config); +#else + he_aply.expand_objects(pb_config); +#endif he_aply.resolve_object(he, err); int now{50000}; diff --git a/engine/tests/notifications/host_recovery_notification.cc b/engine/tests/notifications/host_recovery_notification.cc index eb9663bc564..ef6fd962e82 100644 --- a/engine/tests/notifications/host_recovery_notification.cc +++ b/engine/tests/notifications/host_recovery_notification.cc @@ -27,14 +27,19 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/host.hh" #include "com/centreon/engine/hostescalation.hh" +#include "test_engine.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" +#else +#include "common/engine_conf/host_helper.hh" +#endif using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; using namespace com::centreon::engine::configuration::applier; -class 
HostRecovery : public ::testing::Test { +class HostRecovery : public TestEngine { public: void SetUp() override { init_config_state(); @@ -42,10 +47,18 @@ class HostRecovery : public ::testing::Test { // other unload function... :-( configuration::applier::host hst_aply; +#ifdef LEGACY_CONF configuration::host hst; hst.parse("host_name", "test_host"); hst.parse("address", "127.0.0.1"); hst.parse("_HOST_ID", "12"); +#else + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); +#endif hst_aply.add_object(hst); host_map const& hm{engine::host::hosts}; _host = hm.begin()->second; @@ -56,7 +69,8 @@ class HostRecovery : public ::testing::Test { _host->set_notify_on(static_cast(-1)); _current_time = 43200; set_time(_current_time); - _tperiod.reset(new engine::timeperiod("tperiod", "alias")); + + _tperiod = new_timeperiod_with_timeranges("tperiod", "alias"); for (size_t i = 0; i < _tperiod->days.size(); ++i) _tperiod->days[i].emplace_back(0, 86400); diff --git a/engine/tests/notifications/service_downtime_notification_test.cc b/engine/tests/notifications/service_downtime_notification_test.cc index 5c90fe68c7a..4eb2a88ef4b 100644 --- a/engine/tests/notifications/service_downtime_notification_test.cc +++ b/engine/tests/notifications/service_downtime_notification_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Centreon (https://www.centreon.com/) + * Copyright 2019-2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -37,8 +37,10 @@ #include "com/centreon/engine/configuration/applier/serviceescalation.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/serviceescalation.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" +#endif #include "helper.hh" using namespace com::centreon; @@ -53,17 +55,34 @@ class ServiceDowntimeNotification : public TestEngine { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("test_host", "admin")}; +#else + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; +#endif configuration::applier::host hst_aply; hst_aply.add_object(hst); +#ifdef LEGACY_CONF configuration::service svc{ new_configuration_service("test_host", "test_svc", "admin")}; +#else + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin")}; +#endif configuration::applier::service svc_aply; svc_aply.add_object(svc); diff --git a/engine/tests/notifications/service_flapping_notification.cc b/engine/tests/notifications/service_flapping_notification.cc index 0a3d97838d4..39160b52ddb 100644 --- a/engine/tests/notifications/service_flapping_notification.cc +++ b/engine/tests/notifications/service_flapping_notification.cc @@ -32,8 +32,10 @@ #include "com/centreon/engine/serviceescalation.hh" #include "com/centreon/engine/timezone_manager.hh" #include "com/centreon/process_manager.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" +#endif #include "helper.hh" 
using namespace com::centreon; @@ -49,32 +51,66 @@ class ServiceFlappingNotification : public TestEngine { init_config_state(); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::command cmd_aply; +#ifdef LEGACY_CONF configuration::command cmd("cmd"); cmd.parse("command_line", "echo 1"); +#else + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 1"); +#endif cmd_aply.add_object(cmd); configuration::applier::host hst_aply; +#ifdef LEGACY_CONF configuration::host hst; hst.parse("host_name", "test_host"); hst.parse("address", "127.0.0.1"); hst.parse("_HOST_ID", "12"); hst.parse("check_command", "cmd"); +#else + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name("test_host"); + hst.set_address("127.0.0.1"); + hst.set_host_id(12); + hst.set_check_command("cmd"); +#endif hst_aply.add_object(hst); hst_aply.resolve_object(hst, err); configuration::applier::service svc_aply; +#ifdef LEGACY_CONF configuration::service svc; svc.parse("host", "test_host"); svc.parse("service_description", "test_description"); svc.parse("_SERVICE_ID", "12"); svc.parse("check_command", "cmd"); svc.parse("contacts", "admin"); +#else + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_host_name("test_host"); + svc.set_service_description("test_description"); + svc.set_service_id(12); + svc.set_check_command("cmd"); + svc_hlp.hook("contacts", "admin"); +#endif // We fake here the expand_object on configuration::service svc.set_host_id(12); @@ -125,9 +161,7 @@ 
TEST_F(ServiceFlappingNotification, SimpleServiceFlapping) { // FIXME DBR: should not we find a better solution than fixing this each time? _service->set_last_hard_state_change(43000); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (size_t i = 0; i < tperiod->days.size(); ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("host_name", "test_description", 0, 1, 1.0, @@ -178,9 +212,7 @@ TEST_F(ServiceFlappingNotification, SimpleServiceFlappingStartTwoTimes) { set_time(43000); _service->set_notification_interval(2); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (uint32_t i = 0; i < tperiod->days.size(); ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("host_name", "test_description", 0, 1, 1.0, @@ -218,9 +250,7 @@ TEST_F(ServiceFlappingNotification, SimpleServiceFlappingStopTwoTimes) { set_time(43000); _service->set_notification_interval(2); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (uint32_t i = 0; i < tperiod->days.size(); ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("host_name", "test_description", 0, 1, 1.0, @@ -251,7 +281,11 @@ TEST_F(ServiceFlappingNotification, SimpleServiceFlappingStopTwoTimes) { } TEST_F(ServiceFlappingNotification, CheckFlapping) { +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _service->set_flap_detection_enabled(true); _service->add_flap_detection_on(engine::service::ok); _service->add_flap_detection_on(engine::service::down); @@ -343,7 +377,11 @@ TEST_F(ServiceFlappingNotification, CheckFlapping) { } 
TEST_F(ServiceFlappingNotification, CheckFlappingWithVolatile) { +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _service->set_flap_detection_enabled(true); _service->set_is_volatile(true); _service->add_flap_detection_on(engine::service::ok); @@ -445,7 +483,11 @@ TEST_F(ServiceFlappingNotification, CheckFlappingWithVolatile) { TEST_F(ServiceFlappingNotification, CheckFlappingWithHostDown) { _host->set_current_state(engine::host::state_down); _host->set_state_type(checkable::hard); +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _service->set_flap_detection_enabled(true); _service->add_flap_detection_on(engine::service::ok); _service->add_flap_detection_on(engine::service::down); @@ -534,7 +576,11 @@ TEST_F(ServiceFlappingNotification, CheckFlappingWithHostDown) { } TEST_F(ServiceFlappingNotification, CheckFlappingWithSoftState) { +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _service->set_flap_detection_enabled(true); _service->add_flap_detection_on(engine::service::ok); _service->add_flap_detection_on(engine::service::down); @@ -623,7 +669,11 @@ TEST_F(ServiceFlappingNotification, CheckFlappingWithSoftState) { } TEST_F(ServiceFlappingNotification, RetentionFlappingNotification) { +#ifdef LEGACY_CONF config->enable_flap_detection(true); +#else + pb_config.set_enable_flap_detection(true); +#endif _service->set_flap_detection_enabled(true); _service->add_flap_detection_on(engine::service::ok); _service->add_flap_detection_on(engine::service::down); diff --git a/engine/tests/notifications/service_normal_notification.cc b/engine/tests/notifications/service_normal_notification.cc index a0fe779eb99..33cf5f1e6b8 100644 --- a/engine/tests/notifications/service_normal_notification.cc +++ b/engine/tests/notifications/service_normal_notification.cc @@ -37,8 +37,10 @@ 
#include "com/centreon/engine/configuration/applier/serviceescalation.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/serviceescalation.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" +#endif #include "helper.hh" using namespace com::centreon; @@ -53,21 +55,37 @@ class ServiceNotification : public TestEngine { error_cnt err; configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; - ct_aply.add_object(ctct); configuration::contact ctct1{ new_configuration_contact("admin1", false, "c,r")}; +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; + configuration::Contact ctct1{ + new_pb_configuration_contact("admin1", false, "c,r")}; +#endif + ct_aply.add_object(ctct); ct_aply.add_object(ctct1); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); ct_aply.resolve_object(ctct1, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("test_host", "admin")}; + configuration::service svc{ + new_configuration_service("test_host", "test_svc", "admin,admin1")}; +#else + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin,admin1")}; +#endif configuration::applier::host hst_aply; hst_aply.add_object(hst); - configuration::service svc{ - new_configuration_service("test_host", "test_svc", "admin,admin1")}; configuration::applier::service svc_aply; svc_aply.add_object(svc); @@ -107,9 +125,7 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotification) { ASSERT_EQ(_host->services.size(), 1u); set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); + 
new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -133,12 +149,14 @@ TEST_F(ServiceNotification, /* We are using a local time() function defined in tests/timeperiod/utils.cc. * If we call time(), it is not the glibc time() function that will be called. */ +#ifdef LEGACY_CONF config->enable_notifications(false); +#else + pb_config.set_enable_notifications(false); +#endif set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -160,9 +178,7 @@ TEST_F(ServiceNotification, */ set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -180,7 +196,7 @@ TEST_F(ServiceNotification, TEST_F(ServiceNotification, SimpleNormalServiceNotificationOutsideTimeperiod) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; @@ -201,9 +217,13 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotificationOutsideTimeperiod) { TEST_F(ServiceNotification, SimpleNormalServiceNotificationForcedWithNotificationDisabled) { +#ifdef LEGACY_CONF config->enable_notifications(false); +#else + pb_config.set_enable_notifications(false); +#endif std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; @@ 
-224,7 +244,7 @@ TEST_F(ServiceNotification, TEST_F(ServiceNotification, SimpleNormalServiceNotificationForcedNotification) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; @@ -245,13 +265,11 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotificationForcedNotification) { TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithDowntime) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _svc->set_scheduled_downtime_depth(30); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -267,13 +285,11 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithDowntime) { TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithFlapping) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _svc->set_is_flapping(true); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -289,13 +305,11 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithFlapping) { TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithSoftState) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _svc->set_state_type(checkable::soft); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new 
engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -312,12 +326,10 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithSoftState) { TEST_F(ServiceNotification, SimpleNormalServiceNotificationWithHardStateAcknowledged) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -335,12 +347,10 @@ TEST_F(ServiceNotification, TEST_F(ServiceNotification, SimpleNormalServiceNotificationAfterPreviousTooSoon) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -359,12 +369,10 @@ TEST_F(ServiceNotification, TEST_F(ServiceNotification, SimpleNormalServiceNotificationAfterPreviousWithNullInterval) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -384,12 +392,10 @@ TEST_F(ServiceNotification, TEST_F(ServiceNotification, SimpleNormalServiceNotificationOnStateNotNotified) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; 
++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -409,12 +415,10 @@ TEST_F(ServiceNotification, SimpleNormalServiceNotificationOnStateNotNotified) { TEST_F(ServiceNotification, SimpleNormalServiceNotificationOnStateBeforeFirstNotifDelay) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -436,12 +440,10 @@ TEST_F(ServiceNotification, TEST_F(ServiceNotification, SimpleNormalServiceNotificationOnStateAfterFirstNotifDelay) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, @@ -466,9 +468,7 @@ TEST_F(ServiceNotification, */ set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (uint32_t i = 0; i < tperiod->days.size(); ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -757,24 +757,50 @@ TEST_F(ServiceNotification, NormalRecoveryTwoTimes) { TEST_F(ServiceNotification, ServiceEscalationCG) { init_macros(); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", 
false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif error_cnt err; ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg; + configuration::contactgroup_helper cg_hlp(&cg); + fill_pb_configuration_contactgroup(&cg_hlp, "test_cg", "test_contact"); +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; se_aply.add_object(se); se_aply.expand_objects(*config); +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; + se_aply.add_object(se); + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); int now{50000}; @@ -933,9 +959,7 @@ TEST_F(ServiceNotification, WarnCritServiceNotification) { ASSERT_EQ(_host->services.size(), 1u); set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -970,9 +994,7 @@ TEST_F(ServiceNotification, SimpleNormalVolatileServiceNotification) { ASSERT_EQ(_host->services.size(), 1u); set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new 
engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -1005,7 +1027,11 @@ TEST_F(ServiceNotification, SimpleNormalVolatileServiceNotification) { id = _svc->get_next_notification_id(); _svc->set_notification_period_ptr(tperiod.get()); _svc->set_notifications_enabled(true); +#ifdef LEGACY_CONF config->enable_notifications(false); +#else + pb_config.set_enable_notifications(false); +#endif ASSERT_EQ(_svc->notify(notifier::reason_normal, "", "", notifier::notification_option_none), OK); @@ -1019,9 +1045,7 @@ TEST_F(ServiceNotification, RecoveryNotifEvenIfServiceAcknowledged) { ASSERT_EQ(_host->services.size(), 1u); set_time(43200); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); + new_timeperiod_with_timeranges("tperiod", "alias")}; std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, @@ -1074,14 +1098,12 @@ TEST_F(ServiceNotification, RecoveryNotifEvenIfServiceAcknowledged) { TEST_F(ServiceNotification, SimpleVolatileServiceNotificationWithDowntime) { std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; set_time(20000); _svc->set_scheduled_downtime_depth(30); _svc->set_is_volatile(true); uint64_t id{_svc->get_next_notification_id()}; - for (int i = 0; i < 7; ++i) - tperiod->days[i].emplace_back(0, 86400); std::unique_ptr service_escalation{ new engine::serviceescalation("test_host", "test_svc", 0, 1, 1.0, "", 7, diff --git a/engine/tests/notifications/service_timeperiod_notification.cc b/engine/tests/notifications/service_timeperiod_notification.cc index ce4ffa184c9..0c893e8eb3c 100644 --- a/engine/tests/notifications/service_timeperiod_notification.cc +++ b/engine/tests/notifications/service_timeperiod_notification.cc @@ -39,9 +39,12 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/serviceescalation.hh" 
#include "com/centreon/engine/timeperiod.hh" +#include "gtest/gtest.h" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" #include "common/engine_legacy_conf/state.hh" +#endif #include "helper.hh" using namespace com::centreon; @@ -49,8 +52,6 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; using namespace com::centreon::engine::configuration::applier; -// extern configuration::state* config; - class ServiceTimePeriodNotification : public TestEngine { public: void SetUp() override { @@ -58,21 +59,43 @@ class ServiceTimePeriodNotification : public TestEngine { init_config_state(); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF configuration::contact ctct1{ new_configuration_contact("admin1", false, "c,r")}; +#else + configuration::Contact ctct1{ + new_pb_configuration_contact("admin1", false, "c,r")}; +#endif ct_aply.add_object(ctct1); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); ct_aply.resolve_object(ctct1, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("test_host", "admin")}; +#else + configuration::Host hst{new_pb_configuration_host("test_host", "admin")}; +#endif configuration::applier::host hst_aply; hst_aply.add_object(hst); +#ifdef LEGACY_CONF configuration::service svc{ new_configuration_service("test_host", "test_svc", "admin,admin1")}; +#else + configuration::Service svc{ + new_pb_configuration_service("test_host", "test_svc", "admin,admin1")}; +#endif configuration::applier::service svc_aply; svc_aply.add_object(svc); @@ -124,28 +147,55 @@ TEST_F(ServiceTimePeriodNotification, NoTimePeriodOk) { error_cnt err; 
init_macros(); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; @@ -229,24 +279,43 @@ TEST_F(ServiceTimePeriodNotification, NoTimePeriodKo) { error_cnt err; init_macros(); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + 
new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se; se.parse("first_notification", "1"); se.parse("last_notification", "1"); @@ -255,8 +324,23 @@ TEST_F(ServiceTimePeriodNotification, NoTimePeriodKo) { se.parse("host_name", "test_host"); se.parse("service_description", "test_svc"); se.parse("contact_groups", "test_cg"); +#else + configuration::Serviceescalation se; + configuration::serviceescalation_helper se_hlp(&se); + se.set_first_notification(1); + se.set_last_notification(1); + se.set_notification_interval(0); + se_hlp.hook("escalation_options", "w,u,c,r"); + se_hlp.hook("host_name", "test_host"); + se_hlp.hook("service_description", "test_svc"); + se_hlp.hook("contact_groups", "test_cg"); +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); for (int i = 0; i < 7; ++i) { timerange_list list_time; @@ -345,28 +429,55 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodOut) { error_cnt err; init_macros(); std::unique_ptr tperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact 
ctct{new_configuration_contact("test_contact", false)}; +#else + configuration::Contact ctct{ + new_pb_configuration_contact("test_contact", false)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; @@ -450,9 +561,10 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserOut) { error_cnt err; init_macros(); std::unique_ptr tiperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); +#ifdef LEGACY_CONF configuration::timeperiod tperiod; tperiod.parse("timeperiod_name", "24x9"); tperiod.parse("alias", "24x9"); @@ -463,10 +575,24 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserOut) { tperiod.parse("friday", "00:00-09:00"); tperiod.parse("saterday", "00:00-09:00"); tperiod.parse("sunday", "00:00-09:00"); +#else + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + 
tperiod.set_timeperiod_name("24x9"); + tperiod.set_alias("24x9"); + tperiod_hlp.hook("monday", "00:00-09:00"); + tperiod_hlp.hook("tuesday", "00:00-09:00"); + tperiod_hlp.hook("wednesday", "00:00-09:00"); + tperiod_hlp.hook("thursday", "00:00-09:00"); + tperiod_hlp.hook("friday", "00:00-09:00"); + tperiod_hlp.hook("saterday", "00:00-09:00"); + tperiod_hlp.hook("sunday", "00:00-09:00"); +#endif configuration::applier::timeperiod aplyr; aplyr.add_object(tperiod); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct; ctct.parse("contact_name", "test_contact"); ctct.parse("service_notification_period", "24x9"); @@ -476,23 +602,57 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserOut) { ctct.parse("service_notification_options", "a"); ctct.parse("host_notifications_enabled", "1"); ctct.parse("service_notifications_enabled", "1"); +#else + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test_contact"); + ctct.set_service_notification_period("24x9"); + ctct_hlp.hook("host_notification_commands", "cmd"); + ctct_hlp.hook("service_notification_commands", "cmd"); + ctct_hlp.hook("host_notification_options", "d,r,f,s"); + ctct_hlp.hook("service_notification_options", "a"); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); 
configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; @@ -575,9 +735,10 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserIn) { error_cnt err; init_macros(); std::unique_ptr tiperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); +#ifdef LEGACY_CONF configuration::timeperiod tperiod; tperiod.parse("timeperiod_name", "24x9"); tperiod.parse("alias", "24x9"); @@ -588,10 +749,24 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserIn) { tperiod.parse("friday", "09:00-20:00"); tperiod.parse("saterday", "09:00-20:00"); tperiod.parse("sunday", "09:00-20:00"); +#else + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + tperiod.set_timeperiod_name("24x9"); + tperiod.set_alias("24x9"); + tperiod_hlp.hook("monday", "09:00-20:00"); + tperiod_hlp.hook("tuesday", "09:00-20:00"); + tperiod_hlp.hook("wednesday", "09:00-20:00"); + tperiod_hlp.hook("thursday", "09:00-20:00"); + tperiod_hlp.hook("friday", "09:00-20:00"); + tperiod_hlp.hook("saterday", "09:00-20:00"); + tperiod_hlp.hook("sunday", "09:00-20:00"); +#endif configuration::applier::timeperiod aplyr; aplyr.add_object(tperiod); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct; ctct.parse("contact_name", "test_contact"); ctct.parse("service_notification_period", "24x9"); @@ -601,23 +776,57 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserIn) { 
ctct.parse("service_notification_options", "a"); ctct.parse("host_notifications_enabled", "1"); ctct.parse("service_notifications_enabled", "1"); +#else + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test_contact"); + ctct.set_service_notification_period("24x9"); + ctct_hlp.hook("host_notification_commands", "cmd"); + ctct_hlp.hook("service_notification_commands", "cmd"); + ctct_hlp.hook("host_notification_options", "d,r,f,s"); + ctct_hlp.hook("service_notification_options", "a"); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; @@ -699,9 +908,10 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserIn) { TEST_F(ServiceTimePeriodNotification, TimePeriodUserAll) { init_macros(); std::unique_ptr tiperiod{ - new engine::timeperiod("tperiod", "alias")}; + 
new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); +#ifdef LEGACY_CONF configuration::timeperiod tperiod; tperiod.parse("timeperiod_name", "24x9"); tperiod.parse("alias", "24x9"); @@ -712,10 +922,24 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserAll) { tperiod.parse("friday", "00:00-24:00"); tperiod.parse("saterday", "00:00-24:00"); tperiod.parse("sunday", "00:00-24:00"); +#else + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + tperiod.set_timeperiod_name("24x9"); + tperiod.set_alias("24x9"); + tperiod_hlp.hook("monday", "00:00-24:00"); + tperiod_hlp.hook("tuesday", "00:00-24:00"); + tperiod_hlp.hook("wednesday", "00:00-24:00"); + tperiod_hlp.hook("thursday", "00:00-24:00"); + tperiod_hlp.hook("friday", "00:00-24:00"); + tperiod_hlp.hook("saterday", "00:00-24:00"); + tperiod_hlp.hook("sunday", "00:00-24:00"); +#endif configuration::applier::timeperiod aplyr; aplyr.add_object(tperiod); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct; ctct.parse("contact_name", "test_contact"); ctct.parse("service_notification_period", "24x9"); @@ -725,24 +949,58 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserAll) { ctct.parse("service_notification_options", "a"); ctct.parse("host_notifications_enabled", "1"); ctct.parse("service_notifications_enabled", "1"); +#else + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test_contact"); + ctct.set_service_notification_period("24x9"); + ctct_hlp.hook("host_notification_commands", "cmd"); + ctct_hlp.hook("service_notification_commands", "cmd"); + ctct_hlp.hook("host_notification_options", "d,r,f,s"); + ctct_hlp.hook("service_notification_options", "a"); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); +#endif error_cnt err; ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + 
ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; @@ -824,16 +1082,24 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserAll) { TEST_F(ServiceTimePeriodNotification, TimePeriodUserNone) { init_macros(); std::unique_ptr tiperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); +#ifdef LEGACY_CONF configuration::timeperiod tperiod; tperiod.parse("timeperiod_name", "24x9"); tperiod.parse("alias", "24x9"); +#else + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + tperiod.set_timeperiod_name("24x9"); + tperiod.set_alias("24x9"); +#endif configuration::applier::timeperiod aplyr; aplyr.add_object(tperiod); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct; ctct.parse("contact_name", "test_contact"); ctct.parse("service_notification_period", "24x9"); @@ -843,24 +1109,58 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserNone) { 
ctct.parse("service_notification_options", "a"); ctct.parse("host_notifications_enabled", "1"); ctct.parse("service_notifications_enabled", "1"); +#else + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test_contact"); + ctct.set_service_notification_period("24x9"); + ctct_hlp.hook("host_notification_commands", "cmd"); + ctct_hlp.hook("service_notification_commands", "cmd"); + ctct_hlp.hook("host_notification_options", "d,r,f,s"); + ctct_hlp.hook("service_notification_options", "a"); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); +#endif error_cnt err; ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; @@ -942,16 +1242,24 @@ TEST_F(ServiceTimePeriodNotification, TimePeriodUserNone) { TEST_F(ServiceTimePeriodNotification, NoTimePeriodUser) { init_macros(); std::unique_ptr tiperiod{ - new 
engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; int now{20000}; set_time(now); +#ifdef LEGACY_CONF configuration::timeperiod tperiod; tperiod.parse("timeperiod_name", "24x9"); tperiod.parse("alias", "24x9"); +#else + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + tperiod.set_timeperiod_name("24x9"); + tperiod.set_alias("24x9"); +#endif configuration::applier::timeperiod aplyr; aplyr.add_object(tperiod); configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct; ctct.parse("contact_name", "test_contact"); ctct.parse("host_notification_commands", "cmd"); @@ -960,24 +1268,57 @@ TEST_F(ServiceTimePeriodNotification, NoTimePeriodUser) { ctct.parse("service_notification_options", "a"); ctct.parse("host_notifications_enabled", "1"); ctct.parse("service_notifications_enabled", "1"); +#else + configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name("test_contact"); + ctct_hlp.hook("host_notification_commands", "cmd"); + ctct_hlp.hook("service_notification_commands", "cmd"); + ctct_hlp.hook("host_notification_options", "d,r,f,s"); + ctct_hlp.hook("service_notification_options", "a"); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); +#endif error_cnt err; ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); configuration::applier::contactgroup cg_aply; +#ifdef LEGACY_CONF configuration::contactgroup cg{ new_configuration_contactgroup("test_cg", "test_contact")}; +#else + configuration::Contactgroup cg{ + new_pb_configuration_contactgroup("test_cg", "test_contact")}; +#endif cg_aply.add_object(cg); +#ifdef LEGACY_CONF cg_aply.expand_objects(*config); +#else + cg_aply.expand_objects(pb_config); +#endif cg_aply.resolve_object(cg, err); 
configuration::applier::serviceescalation se_aply; +#ifdef LEGACY_CONF configuration::serviceescalation se{ new_configuration_serviceescalation("test_host", "test_svc", "test_cg")}; +#else + configuration::Serviceescalation se{new_pb_configuration_serviceescalation( + "test_host", "test_svc", "test_cg")}; +#endif se_aply.add_object(se); +#ifdef LEGACY_CONF se_aply.expand_objects(*config); +#else + se_aply.expand_objects(pb_config); +#endif se_aply.resolve_object(se, err); // uint64_t id{_svc->get_next_notification_id()}; diff --git a/engine/tests/opentelemetry/agent_check_result_builder_test.cc b/engine/tests/opentelemetry/agent_check_result_builder_test.cc index 1f8f0438830..101ae10c651 100644 --- a/engine/tests/opentelemetry/agent_check_result_builder_test.cc +++ b/engine/tests/opentelemetry/agent_check_result_builder_test.cc @@ -36,8 +36,6 @@ #include "opentelemetry/proto/common/v1/common.pb.h" #include "opentelemetry/proto/metrics/v1/metrics.pb.h" -#include "com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh" - #include "com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh" #include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh" @@ -363,45 +361,11 @@ static const char* agent_exemple = R"( class otl_agent_check_result_builder_test : public TestEngine { protected: - std::shared_ptr _builder_config; - data_point_fifo_container _fifos; + absl::flat_hash_map + _received; public: otl_agent_check_result_builder_test() { - if (service::services.find({"test_host", "test_svc_builder_2"}) == - service::services.end()) { - init_config_state(); - config->contacts().clear(); - configuration::error_cnt err; - - configuration::applier::contact ct_aply; - configuration::contact ctct{new_configuration_contact("admin", true)}; - ct_aply.add_object(ctct); - ct_aply.expand_objects(*config); - ct_aply.resolve_object(ctct, err); - - configuration::host hst{ - new_configuration_host("test_host", "admin", 
457)}; - configuration::applier::host hst_aply; - hst_aply.add_object(hst); - - configuration::service svc{new_configuration_service( - "test_host", "test_svc_builder", "admin", 458)}; - configuration::applier::service svc_aply; - svc_aply.add_object(svc); - configuration::service svc2{new_configuration_service( - "test_host", "test_svc_builder_2", "admin", 459)}; - svc_aply.add_object(svc2); - - hst_aply.resolve_object(hst, err); - svc_aply.resolve_object(svc, err); - svc_aply.resolve_object(svc2, err); - } - - _builder_config = - otl_check_result_builder::create_check_result_builder_config( - "--processor=centreon_agent"); - metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); @@ -411,35 +375,29 @@ class otl_agent_check_result_builder_test : public TestEngine { otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { std::string service_name; - for (const auto attrib : data_pt.get_resource().attributes()) { + for (const auto& attrib : data_pt.get_resource().attributes()) { if (attrib.key() == "service.name") { service_name = attrib.value().string_value(); break; } } - _fifos.add_data_point("test_host", service_name, - data_pt.get_metric().name(), data_pt); + _received[service_name][data_pt.get_metric().name()].insert(data_pt); }); } }; TEST_F(otl_agent_check_result_builder_test, test_svc_builder) { auto check_result_builder = otl_check_result_builder::create( - "", _builder_config, 1789, *host::hosts.find("test_host")->second, - service::services.find({"test_host", "test_svc_builder"})->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); + "--processor=centreon_agent", spdlog::default_logger()); - commands::result res; - bool success = - check_result_builder->sync_build_result_from_metrics(_fifos, res); + check_result res; + bool success = check_result_builder->build_result_from_metrics( + 
_received["test_svc_builder"], res); ASSERT_TRUE(success); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.command_id, 1789); - ASSERT_EQ(res.start_time.to_useconds(), 1718345061381922153 / 1000); - ASSERT_EQ(res.end_time.to_useconds(), 1718345061381922153 / 1000); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_start_time().tv_sec, 1718345061381922153 / 1000000000); + ASSERT_EQ(res.get_finish_time().tv_sec, 1718345061381922153 / 1000000000); auto compare_to_excepted = [](const std::string& to_cmp) -> bool { return to_cmp == @@ -450,26 +408,21 @@ TEST_F(otl_agent_check_result_builder_test, test_svc_builder) { "metric=12;0:50;0:75;;"; }; - ASSERT_PRED1(compare_to_excepted, res.output); + ASSERT_PRED1(compare_to_excepted, res.get_output()); } TEST_F(otl_agent_check_result_builder_test, test_svc_builder_2) { auto check_result_builder = otl_check_result_builder::create( - "", _builder_config, 1789, *host::hosts.find("test_host")->second, - service::services.find({"test_host", "test_svc_builder_2"})->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); + "--processor=centreon_agent", spdlog::default_logger()); - commands::result res; - bool success = - check_result_builder->sync_build_result_from_metrics(_fifos, res); + check_result res; + bool success = check_result_builder->build_result_from_metrics( + _received["test_svc_builder_2"], res); ASSERT_TRUE(success); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.command_id, 1789); - ASSERT_EQ(res.start_time.to_useconds(), 1718345061713456225 / 1000); - ASSERT_EQ(res.end_time.to_useconds(), 1718345061713456225 / 1000); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_start_time().tv_sec, 1718345061713456225 / 1000000000); + ASSERT_EQ(res.get_finish_time().tv_sec, 1718345061713456225 / 1000000000); auto 
compare_to_excepted = [](const std::string& to_cmp) -> bool { return to_cmp == @@ -480,5 +433,5 @@ TEST_F(otl_agent_check_result_builder_test, test_svc_builder_2) { "metric=12;@0:50;@~:75;;"; }; - ASSERT_PRED1(compare_to_excepted, res.output); -} \ No newline at end of file + ASSERT_PRED1(compare_to_excepted, res.get_output()); +} diff --git a/engine/tests/opentelemetry/agent_reverse_client_test.cc b/engine/tests/opentelemetry/agent_reverse_client_test.cc new file mode 100644 index 00000000000..79c1e166682 --- /dev/null +++ b/engine/tests/opentelemetry/agent_reverse_client_test.cc @@ -0,0 +1,153 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include + +#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" +#include "opentelemetry/proto/common/v1/common.pb.h" +#include "opentelemetry/proto/metrics/v1/metrics.pb.h" + +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh" + +using namespace com::centreon::engine::modules::opentelemetry; +using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; + +extern std::shared_ptr g_io_context; + +struct fake_connector : public to_agent_connector { + using config_to_fake = absl::btree_map, + grpc_config_compare>; + + fake_connector(const grpc_config::pointer& conf, + const std::shared_ptr& io_context, + const centreon_agent::agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger) + : to_agent_connector(conf, io_context, agent_conf, handler, logger) {} + + void start() override { + all_fake.emplace(std::static_pointer_cast(get_conf()), + shared_from_this()); + } + + static std::shared_ptr load( + const grpc_config::pointer& conf, + const std::shared_ptr& io_context, + const centreon_agent::agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger) { + std::shared_ptr ret = std::make_shared( + conf, io_context, agent_conf, handler, logger); + ret->start(); + return ret; + } + + static config_to_fake all_fake; + + void shutdown() override { + all_fake.erase(std::static_pointer_cast(get_conf())); + } +}; + +fake_connector::config_to_fake fake_connector::all_fake; + +class my_agent_reverse_client : public agent_reverse_client { + public: + my_agent_reverse_client( + const std::shared_ptr& io_context, + const metric_handler& handler, + const std::shared_ptr& logger) + : agent_reverse_client(io_context, handler, logger) {} + + 
agent_reverse_client::config_to_client::iterator + _create_new_client_connection( + const grpc_config::pointer& agent_endpoint, + const agent_config::pointer& agent_conf) override { + return _agents + .try_emplace(agent_endpoint, + fake_connector::load(agent_endpoint, _io_context, + agent_conf, _metric_handler, _logger)) + .first; + } + + void _shutdown_connection(config_to_client::const_iterator to_delete) { + to_delete->second->shutdown(); + } +}; + +TEST(agent_reverse_client, update_config) { + my_agent_reverse_client to_test( + g_io_context, [](const metric_request_ptr&) {}, spdlog::default_logger()); + + ASSERT_TRUE(fake_connector::all_fake.empty()); + + auto agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 1); + ASSERT_EQ(fake_connector::all_fake.begin()->first, + *agent_conf->get_agent_grpc_reverse_conf().begin()); + agent_conf = std::make_shared(1, 100, 1, 10); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 0); + + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false), + std::make_shared("host1:port3", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 2); + auto first_conn = fake_connector::all_fake.begin()->second; + auto second_conn = (++fake_connector::all_fake.begin())->second; + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false), + std::make_shared("host1:port2", false), + std::make_shared("host1:port3", false)})); + + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 3); + ASSERT_EQ(fake_connector::all_fake.begin()->second, first_conn); + ASSERT_EQ((++(++fake_connector::all_fake.begin()))->second, second_conn); + second_conn = 
(++fake_connector::all_fake.begin())->second; + auto third_conn = (++(++fake_connector::all_fake.begin()))->second; + + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false), + std::make_shared("host1:port3", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 2); + ASSERT_EQ(fake_connector::all_fake.begin()->second, first_conn); + ASSERT_EQ((++fake_connector::all_fake.begin())->second, third_conn); + + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port3", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 1); + ASSERT_EQ(fake_connector::all_fake.begin()->second, third_conn); +} \ No newline at end of file diff --git a/engine/tests/opentelemetry/agent_to_engine_test.cc b/engine/tests/opentelemetry/agent_to_engine_test.cc new file mode 100644 index 00000000000..91679611c36 --- /dev/null +++ b/engine/tests/opentelemetry/agent_to_engine_test.cc @@ -0,0 +1,327 @@ +/** + * Copyright 2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ + +#include +#include + +#include +#include + +#include + +#include "opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.h" +#include "opentelemetry/proto/metrics/v1/metrics.pb.h" + +#include "com/centreon/engine/contact.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/service.hh" + +#include "com/centreon/engine/command_manager.hh" +#include "com/centreon/engine/configuration/applier/connector.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" + +#include "com/centreon/agent/streaming_client.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_fmt.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_server.hh" + +#include "../test_engine.hh" +#include "helper.hh" + +using namespace com::centreon::engine; +using namespace com::centreon::agent; +// using namespace com::centreon::engine::configuration; +// using namespace com::centreon::engine::configuration::applier; +using namespace com::centreon::engine::modules::opentelemetry; +using namespace ::opentelemetry::proto::collector::metrics::v1; + +class agent_to_engine_test : public TestEngine { + protected: + std::shared_ptr _server; + + // agent code is mono-thread so it runs on his own io_context run by only one + // thread + std::shared_ptr _agent_io_context; + + asio::executor_work_guard _worker; + std::thread _agent_io_ctx_thread; + + public: + agent_to_engine_test() + : _agent_io_context(std::make_shared()), + _worker{asio::make_work_guard(*_agent_io_context)}, + _agent_io_ctx_thread([this] { _agent_io_context->run(); }) {} + + ~agent_to_engine_test() { + _agent_io_context->stop(); + _agent_io_ctx_thread.join(); + } + + void SetUp() override { + spdlog::default_logger()->set_level(spdlog::level::trace); + ::fmt::formatter< ::opentelemetry::proto::collector::metrics::v1:: + 
ExportMetricsServiceRequest>::json_grpc_format = true; + timeperiod::timeperiods.clear(); + contact::contacts.clear(); + host::hosts.clear(); + host::hosts_by_id.clear(); + service::services.clear(); + service::services_by_id.clear(); + + init_config_state(); + + configuration::applier::connector conn_aply; + configuration::connector cnn("agent"); + cnn.parse("connector_line", + "opentelemetry " + "--processor=nagios_telegraf --extractor=attributes " + "--host_path=resource_metrics.scope_metrics.data.data_points." + "attributes.host " + "--service_path=resource_metrics.scope_metrics.data.data_points." + "attributes.service"); + conn_aply.add_object(cnn); + configuration::error_cnt err; + + configuration::applier::contact ct_aply; + configuration::contact ctct{new_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(*config); + ct_aply.resolve_object(ctct, err); + + configuration::host hst = + new_configuration_host("test_host", "admin", 1, "agent"); + + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::service svc{new_configuration_service( + "test_host", "test_svc", "admin", 1, "agent")}; + configuration::service svc2{new_configuration_service( + "test_host", "test_svc_2", "admin", 2, "agent")}; + configuration::service svc_no_otel{ + new_configuration_service("test_host", "test_svc_2", "admin", 3)}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + svc_aply.add_object(svc2); + svc_aply.add_object(svc_no_otel); + + hst_aply.resolve_object(hst, err); + svc_aply.resolve_object(svc, err); + svc_aply.resolve_object(svc2, err); + svc_aply.resolve_object(svc_no_otel, err); + } + + void TearDown() override { + if (_server) { + _server->shutdown(std::chrono::seconds(15)); + _server.reset(); + } + deinit_config_state(); + } + + template + void start_server(const grpc_config::pointer& listen_endpoint, + const centreon_agent::agent_config::pointer& agent_conf, + const 
metric_handler_type& handler) { + _server = otl_server::load(_agent_io_context, listen_endpoint, agent_conf, + handler, spdlog::default_logger()); + } +}; + +bool compare_to_expected_host_metric( + const opentelemetry::proto::metrics::v1::ResourceMetrics& metric) { + bool host_found = false, serv_found = false; + for (const auto& attrib : metric.resource().attributes()) { + if (attrib.key() == "host.name") { + if (attrib.value().string_value() != "test_host") { + return false; + } + host_found = true; + } + if (attrib.key() == "service.name") { + if (!attrib.value().string_value().empty()) { + return false; + } + serv_found = true; + } + } + if (!host_found || !serv_found) { + return false; + } + const auto& scope_metric = metric.scope_metrics(); + if (scope_metric.size() != 1) + return false; + const auto& metrics = scope_metric.begin()->metrics(); + if (metrics.empty()) + return false; + const auto& status_metric = *metrics.begin(); + if (status_metric.name() != "status") + return false; + if (!status_metric.has_gauge()) + return false; + if (status_metric.gauge().data_points().empty()) + return false; + return status_metric.gauge().data_points().begin()->as_int() == 0; +} + +bool test_exemplars( + const google::protobuf::RepeatedPtrField< + ::opentelemetry::proto::metrics::v1::Exemplar>& examplars, + const std::map& expected) { + std::set matches; + + for (const auto& ex : examplars) { + if (ex.filtered_attributes().empty()) + continue; + auto search = expected.find(ex.filtered_attributes().begin()->key()); + if (search == expected.end()) + return false; + + if (search->second != ex.as_double()) + return false; + matches.insert(search->first); + } + return matches.size() == expected.size(); +} + +bool compare_to_expected_serv_metric( + const opentelemetry::proto::metrics::v1::ResourceMetrics& metric, + const std::string_view& serv_name) { + bool host_found = false, serv_found = false; + for (const auto& attrib : metric.resource().attributes()) { + if 
(attrib.key() == "host.name") { + if (attrib.value().string_value() != "test_host") { + return false; + } + host_found = true; + } + if (attrib.key() == "service.name") { + if (attrib.value().string_value() != serv_name) { + return false; + } + serv_found = true; + } + } + if (!host_found || !serv_found) { + return false; + } + const auto& scope_metric = metric.scope_metrics(); + if (scope_metric.size() != 1) + return false; + const auto& metrics = scope_metric.begin()->metrics(); + if (metrics.empty()) + return false; + + for (const auto& met : metrics) { + if (!met.has_gauge()) + return false; + if (met.name() == "metric") { + if (met.gauge().data_points().empty()) + return false; + if (met.gauge().data_points().begin()->as_double() != 12) + return false; + if (!test_exemplars(met.gauge().data_points().begin()->exemplars(), + {{"crit_gt", 75.0}, + {"crit_lt", 0.0}, + {"warn_gt", 50.0}, + {"warn_lt", 0.0}})) + return false; + } else if (met.name() == "metric2") { + if (met.gauge().data_points().empty()) + return false; + if (met.gauge().data_points().begin()->as_double() != 30) + return false; + if (!test_exemplars(met.gauge().data_points().begin()->exemplars(), + {{"crit_gt", 80.0}, + {"crit_lt", 75.0}, + {"warn_gt", 75.0}, + {"warn_lt", 50.0}, + {"min", 0.0}, + {"max", 100.0}})) + return false; + + } else if (met.name() == "status") { + if (met.gauge().data_points().begin()->as_int() != 0) + return false; + } else + return false; + } + + return true; +} + +TEST_F(agent_to_engine_test, server_send_conf_to_agent_and_receive_metrics) { + grpc_config::pointer listen_endpoint = + std::make_shared("127.0.0.1:4623", false); + + absl::Mutex mut; + std::vector received; + std::vector + resource_metrics; + + auto agent_conf = std::make_shared(1, 10, 1, 5); + + start_server(listen_endpoint, agent_conf, + [&](const metric_request_ptr& metric) { + absl::MutexLock l(&mut); + received.push_back(metric); + for (const opentelemetry::proto::metrics::v1::ResourceMetrics& + 
res_metric : metric->resource_metrics()) { + resource_metrics.push_back(&res_metric); + } + }); + + auto agent_client = + streaming_client::load(_agent_io_context, spdlog::default_logger(), + listen_endpoint, "test_host"); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + command_manager::instance().execute(); + + auto metric_received = [&]() { return resource_metrics.size() >= 3; }; + + mut.LockWhen(absl::Condition(&metric_received)); + mut.Unlock(); + + agent_client->shutdown(); + + _server->shutdown(std::chrono::seconds(15)); + + bool host_metric_found = true; + bool serv_1_found = false; + bool serv_2_found = false; + + for (const opentelemetry::proto::metrics::v1::ResourceMetrics* to_compare : + resource_metrics) { + if (compare_to_expected_serv_metric(*to_compare, "test_svc")) { + serv_1_found = true; + } else if (compare_to_expected_serv_metric(*to_compare, "test_svc_2")) { + serv_2_found = true; + } else if (compare_to_expected_host_metric(*to_compare)) { + host_metric_found = true; + } else { + SPDLOG_ERROR("bad resource metric: {}", to_compare->DebugString()); + ASSERT_TRUE(false); + } + } + ASSERT_TRUE(host_metric_found); + ASSERT_TRUE(serv_1_found); + ASSERT_TRUE(serv_2_found); +} \ No newline at end of file diff --git a/engine/tests/opentelemetry/open_telemetry_test.cc b/engine/tests/opentelemetry/open_telemetry_test.cc index 469de553274..76aa3e5045d 100644 --- a/engine/tests/opentelemetry/open_telemetry_test.cc +++ b/engine/tests/opentelemetry/open_telemetry_test.cc @@ -35,16 +35,21 @@ #include #include "com/centreon/common/http/http_server.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/command_manager.hh" #include "com/centreon/engine/configuration/applier/contact.hh" #include "com/centreon/engine/configuration/applier/host.hh" #include "com/centreon/engine/configuration/applier/service.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include 
"common/engine_legacy_conf/service.hh" +#endif #include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" #include "opentelemetry/proto/common/v1/common.pb.h" #include "opentelemetry/proto/metrics/v1/metrics.pb.h" +#include "com/centreon/engine/commands/otel_connector.hh" #include "com/centreon/engine/modules/opentelemetry/open_telemetry.hh" #include "helper.hh" @@ -57,36 +62,6 @@ extern const char* telegraf_example; extern std::shared_ptr g_io_context; -class open_telemetry - : public com::centreon::engine::modules::opentelemetry::open_telemetry { - protected: - void _create_otl_server( - const grpc_config::pointer& server_conf, - const centreon_agent::agent_config::pointer&) override {} - - public: - open_telemetry(const std::string_view config_file_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) - : com::centreon::engine::modules::opentelemetry::open_telemetry( - config_file_path, - io_context, - logger) {} - - void on_metric(const metric_request_ptr& metric) { _on_metric(metric); } - void shutdown() { _shutdown(); } - static std::shared_ptr load( - const std::string_view& config_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) { - std::shared_ptr ret = - std::make_shared(config_path, io_context, logger); - ret->_reload(); - ret->_start_second_timer(); - return ret; - } -}; - class open_telemetry_test : public TestEngine { public: commands::otel::host_serv_list::pointer _host_serv_list; @@ -105,7 +80,7 @@ open_telemetry_test::open_telemetry_test() void open_telemetry_test::SetUpTestSuite() { std::ofstream conf_file("/tmp/otel_conf.json"); conf_file << R"({ - "server": { + "otel_server": { "host": "127.0.0.1", "port": 4317 } @@ -118,27 +93,90 @@ void open_telemetry_test::SetUpTestSuite() { void open_telemetry_test::SetUp() { configuration::error_cnt err; init_config_state(); +#ifdef LEGACY_CONF config->contacts().clear(); +#else + pb_config.mutable_contacts()->Clear(); +#endif 
configuration::applier::contact ct_aply; +#ifdef LEGACY_CONF configuration::contact ctct{new_configuration_contact("admin", true)}; +#else + configuration::Contact ctct{new_pb_configuration_contact("admin", true)}; +#endif ct_aply.add_object(ctct); +#ifdef LEGACY_CONF ct_aply.expand_objects(*config); +#else + ct_aply.expand_objects(pb_config); +#endif ct_aply.resolve_object(ctct, err); +#ifdef LEGACY_CONF configuration::host hst{new_configuration_host("localhost", "admin")}; +#else + configuration::Host hst{new_pb_configuration_host("localhost", "admin")}; +#endif configuration::applier::host hst_aply; hst_aply.add_object(hst); +#ifdef LEGACY_CONF configuration::service svc{ new_configuration_service("localhost", "check_icmp", "admin")}; +#else + configuration::Service svc{ + new_pb_configuration_service("localhost", "check_icmp", "admin")}; +#endif configuration::applier::service svc_aply; svc_aply.add_object(svc); hst_aply.resolve_object(hst, err); svc_aply.resolve_object(svc, err); - data_point_fifo::update_fifo_limit(std::numeric_limits::max(), 10); } void open_telemetry_test::TearDown() { deinit_config_state(); } + +TEST_F(open_telemetry_test, data_available) { + auto instance = open_telemetry::load("/tmp/otel_conf.json", g_io_context, + spdlog::default_logger()); + + std::shared_ptr conn = + commands::otel_connector::create( + "otel_conn", + "--processor=nagios_telegraf --extractor=attributes " + "--host_path=resource_metrics.scope_metrics.data.data_points." + "attributes." + "host " + "--service_path=resource_metrics.scope_metrics.data.data_points." 
+ "attributes.service", + nullptr); + conn->register_host_serv("localhost", "check_icmp"); + + metric_request_ptr request = + std::make_shared<::opentelemetry::proto::collector::metrics::v1:: + ExportMetricsServiceRequest>(); + ::google::protobuf::util::JsonStringToMessage(telegraf_example, + request.get()); + instance->on_metric(request); + command_manager::instance().execute(); + + bool checked = false; + checks::checker::instance().inspect_reap_partial( + [&checked](const std::deque& queue) { + ASSERT_FALSE(queue.empty()); + check_result::pointer res = *queue.rbegin(); + ASSERT_EQ(res->get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res->get_finish_time().tv_sec, 1707744430); + ASSERT_TRUE(res->get_exited_ok()); + ASSERT_EQ(res->get_return_code(), 0); + ASSERT_EQ( + res->get_output(), + "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " + "rtmin=0.008ms;;;;"); + checked = true; + }); + + ASSERT_TRUE(checked); +} diff --git a/engine/tests/opentelemetry/opentelemetry_test.cc b/engine/tests/opentelemetry/opentelemetry_test.cc deleted file mode 100644 index 19d385c9214..00000000000 --- a/engine/tests/opentelemetry/opentelemetry_test.cc +++ /dev/null @@ -1,263 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * This file is part of Centreon Engine. - * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include "com/centreon/common/http/http_server.hh" -#include "com/centreon/engine/configuration/applier/contact.hh" -#include "com/centreon/engine/configuration/applier/host.hh" -#include "com/centreon/engine/configuration/applier/service.hh" -#include "common/engine_legacy_conf/host.hh" -#include "common/engine_legacy_conf/service.hh" - -#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" -#include "opentelemetry/proto/common/v1/common.pb.h" -#include "opentelemetry/proto/metrics/v1/metrics.pb.h" - -#include "com/centreon/engine/modules/opentelemetry/opentelemetry.hh" - -#include "helper.hh" -#include "test_engine.hh" - -using namespace com::centreon::engine::modules::opentelemetry; -using namespace com::centreon::engine; - -extern const char* telegraf_example; - -extern std::shared_ptr g_io_context; - -class open_telemetry - : public com::centreon::engine::modules::opentelemetry::open_telemetry { - protected: - void _create_otl_server(const grpc_config::pointer& server_conf) override {} - - public: - open_telemetry(const std::string_view config_file_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) - : com::centreon::engine::modules::opentelemetry::open_telemetry( - config_file_path, - io_context, - logger) {} - - void on_metric(const metric_request_ptr& metric) { _on_metric(metric); } - void shutdown() { _shutdown(); } - static std::shared_ptr load( - const std::string_view& config_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) { - std::shared_ptr ret = - std::make_shared(config_path, io_context, logger); - ret->_reload(); - ret->_start_second_timer(); - return ret; - } -}; - -class open_telemetry_test : public TestEngine { - public: - commands::otel::host_serv_list::pointer _host_serv_list; - - open_telemetry_test(); - static void 
SetUpTestSuite(); - void SetUp() override; - void TearDown() override; -}; - -open_telemetry_test::open_telemetry_test() - : _host_serv_list(std::make_shared()) { - _host_serv_list->register_host_serv("localhost", "check_icmp"); -} - -void open_telemetry_test::SetUpTestSuite() { - std::ofstream conf_file("/tmp/otel_conf.json"); - conf_file << R"({ - "otel_server": { - "host": "127.0.0.1", - "port": 4317 - } -} -)"; - conf_file.close(); - // spdlog::default_logger()->set_level(spdlog::level::trace); -} - -void open_telemetry_test::SetUp() { - init_config_state(); - config->contacts().clear(); - configuration::applier::contact ct_aply; - configuration::contact ctct{new_configuration_contact("admin", true)}; - ct_aply.add_object(ctct); - ct_aply.expand_objects(*config); - ct_aply.resolve_object(ctct); - - configuration::host hst{new_configuration_host("localhost", "admin")}; - configuration::applier::host hst_aply; - hst_aply.add_object(hst); - - configuration::service svc{ - new_configuration_service("localhost", "check_icmp", "admin")}; - configuration::applier::service svc_aply; - svc_aply.add_object(svc); - - hst_aply.resolve_object(hst); - svc_aply.resolve_object(svc); - data_point_fifo::update_fifo_limit(std::numeric_limits::max(), 10); -} - -void open_telemetry_test::TearDown() { - deinit_config_state(); -} - -TEST_F(open_telemetry_test, data_available) { - auto instance = ::open_telemetry::load("/tmp/otel_conf.json", g_io_context, - spdlog::default_logger()); - - instance->create_extractor( - "--extractor=attributes " - "--host_path=resource_metrics.scope_metrics.data.data_points.attributes." - "host " - "--service_path=resource_metrics.scope_metrics.data.data_points." 
- "attributes.service", - _host_serv_list); - - metric_request_ptr request = - std::make_shared<::opentelemetry::proto::collector::metrics::v1:: - ExportMetricsServiceRequest>(); - ::google::protobuf::util::JsonStringToMessage(telegraf_example, - request.get()); - instance->on_metric(request); - // data are now available - commands::result res; - nagios_macros macros; - macros.host_ptr = host::hosts.begin()->second.get(); - macros.service_ptr = service::services.begin()->second.get(); - ASSERT_TRUE(instance->check("nagios_telegraf", - instance->create_check_result_builder_config( - "--processor=nagios_telegraf"), - 1, macros, 1, res, - [](const commands::result&) {})); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, - "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " - "rtmin=0.008ms;;;;"); -} - -TEST_F(open_telemetry_test, timeout) { - auto instance = ::open_telemetry::load("/tmp/otel_conf.json", g_io_context, - spdlog::default_logger()); - - instance->create_extractor( - "--extractor=attributes " - "--host_path=resource_metrics.scope_metrics.data.data_points.attributes." - "host " - "--service_path=resource_metrics.scope_metrics.data.data_points." 
- "attributes.service", - _host_serv_list); - - commands::result res; - res.exit_status = com::centreon::process::normal; - nagios_macros macros; - macros.host_ptr = host::hosts.begin()->second.get(); - macros.service_ptr = service::services.begin()->second.get(); - std::condition_variable cv; - std::mutex cv_m; - ASSERT_FALSE(instance->check("nagios_telegraf", - instance->create_check_result_builder_config( - "--processor=nagios_telegraf"), - 1, macros, 1, res, - [&res, &cv](const commands::result& async_res) { - res = async_res; - cv.notify_one(); - })); - - std::unique_lock l(cv_m); - ASSERT_EQ(cv.wait_for(l, std::chrono::seconds(3)), - std::cv_status::no_timeout); - ASSERT_EQ(res.exit_status, com::centreon::process::timeout); -} - -TEST_F(open_telemetry_test, wait_for_data) { - auto instance = ::open_telemetry::load("/tmp/otel_conf.json", g_io_context, - spdlog::default_logger()); - - static const std::string otl_conf = - "--processor=nagios_telegraf " - "--extractor=attributes " - "--host_path=resource_metrics.scope_metrics.data.data_points.attributes." - "host " - "--service_path=resource_metrics.scope_metrics.data.data_points." 
- "attributes.service"; - - instance->create_extractor(otl_conf, _host_serv_list); - - commands::result res; - res.exit_status = com::centreon::process::normal; - nagios_macros macros; - macros.host_ptr = host::hosts.begin()->second.get(); - macros.service_ptr = service::services.begin()->second.get(); - std::mutex cv_m; - std::condition_variable cv; - bool data_available = instance->check( - "nagios_telegraf", instance->create_check_result_builder_config(otl_conf), - 1, macros, 1, res, [&res, &cv](const commands::result& async_res) { - res = async_res; - cv.notify_one(); - }); - ASSERT_FALSE(data_available); - - metric_request_ptr request = - std::make_shared<::opentelemetry::proto::collector::metrics::v1:: - ExportMetricsServiceRequest>(); - ::google::protobuf::util::JsonStringToMessage(telegraf_example, - request.get()); - std::thread t([instance, request]() { instance->on_metric(request); }); - - std::unique_lock l(cv_m); - ASSERT_EQ(cv.wait_for(l, std::chrono::seconds(1)), - std::cv_status::no_timeout); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, - "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " - "rtmin=0.008ms;;;;"); - t.join(); -} diff --git a/engine/tests/opentelemetry/otl_converter_test.cc b/engine/tests/opentelemetry/otl_converter_test.cc index d8f4265f499..17037a4ce88 100644 --- a/engine/tests/opentelemetry/otl_converter_test.cc +++ b/engine/tests/opentelemetry/otl_converter_test.cc @@ -1,22 +1,21 @@ /** - * Copyright 2024 Centreon + * Copyright 2011-2024 Centreon * - * This file is part of Centreon Engine. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. + * http://www.apache.org/licenses/LICENSE-2.0 * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . 
*/ - #include #include #include @@ -29,14 +28,15 @@ #include "com/centreon/engine/configuration/applier/contact.hh" #include "com/centreon/engine/configuration/applier/host.hh" #include "com/centreon/engine/configuration/applier/service.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/host.hh" #include "common/engine_legacy_conf/service.hh" +#endif #include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" #include "opentelemetry/proto/common/v1/common.pb.h" #include "opentelemetry/proto/metrics/v1/metrics.pb.h" -#include "com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh" #include "com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh" #include "com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh" @@ -46,55 +46,13 @@ using namespace com::centreon::engine::modules::opentelemetry; using namespace com::centreon::engine; -class otl_converter_test : public TestEngine { - public: - void SetUp() override; - void TearDown() override; -}; - -void otl_converter_test::SetUp() { - configuration::error_cnt err; - init_config_state(); - timeperiod::timeperiods.clear(); - contact::contacts.clear(); - host::hosts.clear(); - host::hosts_by_id.clear(); - service::services.clear(); - service::services_by_id.clear(); - config->contacts().clear(); - configuration::applier::contact ct_aply; - configuration::contact ctct{new_configuration_contact("admin", true)}; - ct_aply.add_object(ctct); - ct_aply.expand_objects(*config); - ct_aply.resolve_object(ctct, err); - - configuration::host hst{new_configuration_host("localhost", "admin")}; - configuration::applier::host hst_aply; - hst_aply.add_object(hst); - - configuration::service svc{ - new_configuration_service("localhost", "check_icmp", "admin")}; - configuration::applier::service svc_aply; - svc_aply.add_object(svc); - - hst_aply.resolve_object(hst, err); - svc_aply.resolve_object(svc, err); - 
data_point_fifo::update_fifo_limit(std::numeric_limits::max(), 10); -} - -void otl_converter_test::TearDown() { - deinit_config_state(); -} +class otl_converter_test : public TestEngine {}; -TEST_F(otl_converter_test, empty_fifo) { - data_point_fifo_container empty; - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_FALSE(conv.sync_build_result_from_metrics(empty, res)); +TEST_F(otl_converter_test, empty_metrics) { + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + metric_to_datapoints empty; + check_result res; + ASSERT_FALSE(conv.build_result_from_metrics(empty, res)); } const char* telegraf_example = R"( @@ -581,38 +539,30 @@ const char* telegraf_example = R"( )"; TEST_F(otl_converter_test, nagios_telegraf) { - data_point_fifo_container received; metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); ::google::protobuf::util::JsonStringToMessage(telegraf_example, request.get()); + metric_to_datapoints received; otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { - received.add_data_point("localhost", "check_icmp", - data_pt.get_metric().name(), data_pt); + received[data_pt.get_metric().name()].insert(data_pt); }); - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_TRUE(conv.sync_build_result_from_metrics(received, res)); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - 
ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + check_result res; + ASSERT_TRUE(conv.build_result_from_metrics(received, res)); + ASSERT_EQ(res.get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_finish_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_output(), "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " "rtmin=0.008ms;;;;"); } TEST_F(otl_converter_test, nagios_telegraf_le_ge) { - data_point_fifo_container received; metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); @@ -624,32 +574,25 @@ TEST_F(otl_converter_test, nagios_telegraf_le_ge) { ::google::protobuf::util::JsonStringToMessage(example, request.get()); + metric_to_datapoints received; otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { - received.add_data_point("localhost", "check_icmp", - data_pt.get_metric().name(), data_pt); + received[data_pt.get_metric().name()].insert(data_pt); }); - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_TRUE(conv.sync_build_result_from_metrics(received, res)); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + check_result res; + ASSERT_TRUE(conv.build_result_from_metrics(received, res)); + ASSERT_EQ(res.get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_finish_time().tv_sec, 1707744430); + 
ASSERT_EQ(res.get_return_code(), 0); ASSERT_EQ( - res.output, + res.get_output(), "OK|pl=0%;0:40;@0:80;; rta=0.022ms;0:200;@0:500;0; rtmax=0.071ms;;;; " "rtmin=0.008ms;;;;"); } TEST_F(otl_converter_test, nagios_telegraf_max) { - data_point_fifo_container received; metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); @@ -658,25 +601,19 @@ TEST_F(otl_converter_test, nagios_telegraf_max) { ::google::protobuf::util::JsonStringToMessage(example, request.get()); + metric_to_datapoints received; otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { - received.add_data_point("localhost", "check_icmp", - data_pt.get_metric().name(), data_pt); + received[data_pt.get_metric().name()].insert(data_pt); }); - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_TRUE(conv.sync_build_result_from_metrics(received, res)); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + check_result res; + ASSERT_TRUE(conv.build_result_from_metrics(received, res)); + ASSERT_EQ(res.get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_finish_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_output(), "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;;0 rtmax=0.071ms;;;; " "rtmin=0.008ms;;;;"); } diff --git a/engine/tests/pb_service_check.cc b/engine/tests/pb_service_check.cc new file mode 100644 index 00000000000..175676304c8 --- /dev/null +++ 
b/engine/tests/pb_service_check.cc @@ -0,0 +1,599 @@ +/* + * Copyright 2020-2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include + +#include "../test_engine.hh" +#include "../timeperiod/utils.hh" +#include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/contactgroup.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/applier/servicedependency.hh" +#include "com/centreon/engine/configuration/applier/serviceescalation.hh" +#include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/configuration/host.hh" +#include "com/centreon/engine/configuration/service.hh" +#include "com/centreon/engine/configuration/state.hh" +#include "com/centreon/engine/serviceescalation.hh" +#include "com/centreon/engine/timezone_manager.hh" +#include "helper.hh" + +using namespace com::centreon; +using namespace com::centreon::engine; +using namespace com::centreon::engine::configuration; +using namespace 
com::centreon::engine::configuration::applier; + +extern configuration::State pb_config; + +class PbServiceCheck : public TestEngine { + public: + void SetUp() override { + init_config_state(); + + pb_config.clear_contacts(); + configuration::applier::contact ct_aply; + configuration::Contact ctct = new_pb_configuration_contact("admin", true); + ct_aply.add_object(ctct); + ct_aply.expand_objects(pb_config); + ct_aply.resolve_object(ctct); + + configuration::Host hst = new_pb_configuration_host("test_host", "admin"); + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::Service svc = + new_pb_configuration_service("test_host", "test_svc", "admin"); + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + + hst_aply.resolve_object(hst); + svc_aply.resolve_object(svc); + + host_map const& hm{engine::host::hosts}; + _host = hm.begin()->second; + _host->set_current_state(engine::host::state_up); + _host->set_state_type(checkable::hard); + _host->set_acknowledgement(AckType::NONE); + _host->set_notify_on(static_cast(-1)); + + service_map const& sm{engine::service::services}; + _svc = sm.begin()->second; + _svc->set_current_state(engine::service::state_ok); + _svc->set_state_type(checkable::hard); + _svc->set_acknowledgement(AckType::NONE); + _svc->set_notify_on(static_cast(-1)); + + // This is to not be bothered by host checks during service checks + pb_config.set_host_check_timeout(10000); + } + + void TearDown() override { + _host.reset(); + _svc.reset(); + deinit_config_state(); + } + + protected: + std::shared_ptr _host; + std::shared_ptr _svc; +}; + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 1 | OK | HARD | No | + * | 1 | 1 | CRTCL | SOFT | Yes | + * | 2 | 2 | WARN | SOFT | Yes | + * | 3 | 3 | CRTCL | HARD | Yes | + * | 4 | 3 | WARN | HARD | Yes | + 
* | 5 | 3 | WARN | HARD | No | + * | 6 | 1 | OK | HARD | Yes | + * | 7 | 1 | OK | HARD | No | + * | 8 | 1 | UNKNWN| SOFT | Yes | + * | 9 | 2 | OK | SOFT | Yes | + * | 10 | 1 | OK | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, SimpleCheck) { + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + + set_time(50500); + + std::time_t now{std::time(nullptr)}; + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(51000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(51500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(52000); 
+ + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(52500); + + time_t previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(53000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(53500); + + previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(54000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;4;service unknown", + 
now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_svc->get_last_hard_state_change(), now - 1000); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(54500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(55000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); +} + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 1 | OK | HARD | No | + * | 1 | 1 | CRTCL | SOFT | Yes | + * | 2 | 2 | CRTCL | SOFT | No | + * | 3 | 3 | CRTCL | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, OkCritical) { + set_time(55000); + + time_t now = std::time(nullptr); + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); 
+ ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(55500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56000); + + time_t previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(56500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); +} + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 2 | OK | SOFT | No | + * | 1 | 1 | CRTCL | SOFT | Yes | + * | 2 | 2 | CRTCL | SOFT | No | + * | 3 | 3 | CRTCL | HARD | No | + * ------------------------------------------------------ + */ 
+TEST_F(PbServiceCheck, OkSoft_Critical) { + set_time(55000); + + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_state_change(55000); + _svc->set_current_attempt(2); + _svc->set_state_type(checkable::soft); + _svc->set_accept_passive_checks(true); + + set_time(55500); + + time_t now = std::time(nullptr); + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56000); + + time_t previous = now; + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::soft); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 2); + + set_time(56500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); +} + +/* The following test comes from this array (inherited from Nagios behaviour): + * + * | Time | Check # | State | State type | State change | + * ------------------------------------------------------ + * | 0 | 1 | OK | HARD | No | + * | 1 | 2 | OK | HARD | No | + * | 2 | 3 | WARN | HARD | Yes | + 
* | 3 | 4 | CRTCL | HARD | Yes | + * | 4 | 5 | CRTCL | HARD | Yes | + * | 5 | 6 | CRTCL | HARD | Yes | + * | 6 | 7 | CRTCL | HARD | No | + * | 7 | 8 | CRTCL | HARD | No | + * ------------------------------------------------------ + */ +TEST_F(PbServiceCheck, OkCriticalStalking) { + set_time(55000); + + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_state_change(55000); + _svc->set_current_attempt(2); + _svc->set_state_type(checkable::soft); + _svc->set_accept_passive_checks(true); + _svc->set_stalk_on(static_cast(-1)); + + set_time(55500); + testing::internal::CaptureStdout(); + time_t now = std::time(nullptr); + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;RAID array " + "optimal", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56000); + time_t previous = now; + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;RAID array " + "optimal", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 1); + + set_time(56500); + for (int i = 0; i < 3; i++) { + // When i == 0, the state_critical is soft => no notification + // When i == 1, the state_critical is soft => no notification + // When i == 2, the state_critical is hard down => notification + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;RAID array " + "degraded (1 drive bad, 1 hot spare rebuilding)", + now); + 
process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + } + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(57000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "degraded (2 drives bad, 1 host spare online, 1 hot spare rebuilding)", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(57500); + previous = now; + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "degraded (3 drives bad, 2 hot spares online)", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(58000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "failed", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(58500); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "failed", 
+ now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + set_time(59000); + + now = std::time(nullptr); + cmd = fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;RAID array " + "failed", + now); + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + + ASSERT_EQ(_svc->get_state_type(), checkable::hard); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_hard_state_change(), previous); + ASSERT_EQ(_svc->get_current_attempt(), 3); + + std::string out{testing::internal::GetCapturedStdout()}; + std::cout << out << std::endl; + ASSERT_NE( + out.find( + "SERVICE ALERT: test_host;test_svc;OK;HARD;1;RAID array optimal"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;WARNING;HARD;3;RAID " + "array degraded (1 drive bad, 1 hot spare rebuilding)"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;CRITICAL;HARD;3;RAID " + "array degraded (2 drives bad, 1 host spare online, 1 hot " + "spare rebuilding)"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;CRITICAL;HARD;3;RAID " + "array degraded (3 drives bad, 2 hot spares online"), + std::string::npos); + ASSERT_NE(out.find("SERVICE ALERT: test_host;test_svc;CRITICAL;HARD;3;RAID " + "array failed"), + std::string::npos); +} + +TEST_F(PbServiceCheck, CheckRemoveCheck) { + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + + set_time(50500); + std::time_t 
now{std::time(nullptr)}; + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service critical", + now)}; + process_external_command(cmd.c_str()); + + /* We simulate a reload that destroyed the service */ + engine::service::services.clear(); + engine::service::services_by_id.clear(); + _svc.reset(); + + checks::checker::instance().reap(); +} + +TEST_F(PbServiceCheck, CheckUpdateMultilineOutput) { + set_time(50000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + + set_time(50500); + std::time_t now{std::time(nullptr)}; + std::string cmd{fmt::format( + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical\\nline2\\nline3\\nline4\\nline5|res;2;5;5\\n", + now)}; + process_external_command(cmd.c_str()); + checks::checker::instance().reap(); + ASSERT_EQ(_svc->get_plugin_output(), "service critical"); + ASSERT_EQ(_svc->get_long_plugin_output(), "line2\\nline3\\nline4\\nline5"); + ASSERT_EQ(_svc->get_perf_data(), "res;2;5;5"); +} diff --git a/engine/tests/test_engine.cc b/engine/tests/test_engine.cc index 2528e488033..79d6f2fba5e 100644 --- a/engine/tests/test_engine.cc +++ b/engine/tests/test_engine.cc @@ -19,11 +19,6 @@ #include "test_engine.hh" -#include "com/centreon/engine/commands/commands.hh" -#include "com/centreon/engine/configuration/applier/command.hh" -#include "com/centreon/engine/configuration/applier/timeperiod.hh" -#include "common/engine_legacy_conf/state.hh" - using namespace com::centreon::engine; using namespace com::centreon::engine::downtimes; @@ -31,6 +26,7 @@ using namespace com::centreon::engine::downtimes; * to know if the host is already declared when creating a new service. 
*/ static absl::flat_hash_map conf_hosts; +#ifdef LEGACY_CONF configuration::contact TestEngine::new_configuration_contact( const std::string& name, bool full, @@ -74,7 +70,116 @@ configuration::contact TestEngine::new_configuration_contact( ctct.parse("service_notifications_enabled", "1"); return ctct; } +#else +configuration::Contact TestEngine::new_pb_configuration_contact( + const std::string& name, + bool full, + const std::string& notif) const { + if (full) { + // Add command. + { + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("true"); + configuration::applier::command aplyr; + aplyr.add_object(cmd); + } + // Add timeperiod. + { + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + tperiod.set_timeperiod_name("24x7"); + tperiod.set_alias("24x7"); + auto* r = tperiod.mutable_timeranges()->add_monday(); + r->set_range_start(0); + r->set_range_end(86400); + r = tperiod.mutable_timeranges()->add_tuesday(); + r->set_range_start(0); + r->set_range_end(86400); + r = tperiod.mutable_timeranges()->add_wednesday(); + r->set_range_start(0); + r->set_range_end(86400); + r = tperiod.mutable_timeranges()->add_thursday(); + r->set_range_start(0); + r->set_range_end(86400); + r = tperiod.mutable_timeranges()->add_friday(); + r->set_range_start(0); + r->set_range_end(86400); + r = tperiod.mutable_timeranges()->add_saturday(); + r->set_range_start(0); + r->set_range_end(86400); + r = tperiod.mutable_timeranges()->add_sunday(); + r->set_range_start(0); + r->set_range_end(86400); + configuration::applier::timeperiod aplyr; + aplyr.add_object(tperiod); + } + } + // Valid contact configuration + // (will generate 0 warnings or 0 errors). 
+ configuration::Contact ctct; + configuration::contact_helper ctct_hlp(&ctct); + ctct.set_contact_name(name); + ctct.set_host_notification_period("24x7"); + ctct.set_service_notification_period("24x7"); + fill_string_group(ctct.mutable_host_notification_commands(), "cmd"); + fill_string_group(ctct.mutable_service_notification_commands(), "cmd"); + ctct_hlp.hook("host_notification_options", "d,r,f,s"); + ctct_hlp.hook("service_notification_options", notif); + ctct.set_host_notifications_enabled(true); + ctct.set_service_notifications_enabled(true); + return ctct; +} +void TestEngine::fill_pb_configuration_contact( + configuration::contact_helper* ctct_hlp, + const std::string& name, + bool full, + const std::string& notif) const { + auto* ctct = static_cast(ctct_hlp->mut_obj()); + if (full) { + // Add command. + { + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("true"); + configuration::applier::command aplyr; + aplyr.add_object(cmd); + } + // Add timeperiod. + { + configuration::Timeperiod tperiod; + configuration::timeperiod_helper tperiod_hlp(&tperiod); + tperiod.set_timeperiod_name("24x7"); + tperiod.set_alias("24x7"); + tperiod_hlp.hook("monday", "00:00-24:00"); + tperiod_hlp.hook("tuesday", "00:00-24:00"); + tperiod_hlp.hook("wednesday", "00:00-24:00"); + tperiod_hlp.hook("thursday", "00:00-24:00"); + tperiod_hlp.hook("friday", "00:00-24:00"); + tperiod_hlp.hook("saterday", "00:00-24:00"); + tperiod_hlp.hook("sunday", "00:00-24:00"); + configuration::applier::timeperiod aplyr; + aplyr.add_object(tperiod); + } + } + // Valid contact configuration + // (will generate 0 warnings or 0 errors). 
+ ctct->set_contact_name(name); + ctct->set_host_notification_period("24x7"); + ctct->set_service_notification_period("24x7"); + fill_string_group(ctct->mutable_host_notification_commands(), "cmd"); + fill_string_group(ctct->mutable_service_notification_commands(), "cmd"); + ctct_hlp->hook("host_notification_options", "d,r,f,s"); + ctct_hlp->hook("service_notification_options", notif); + ctct->set_host_notifications_enabled(true); + ctct->set_service_notifications_enabled(true); +} +#endif + +#ifdef LEGACY_CONF configuration::contactgroup TestEngine::new_configuration_contactgroup( const std::string& name, const std::string& contactname) { @@ -84,7 +189,29 @@ configuration::contactgroup TestEngine::new_configuration_contactgroup( cg.parse("members", contactname.c_str()); return cg; } +#else +configuration::Contactgroup TestEngine::new_pb_configuration_contactgroup( + const std::string& name, + const std::string& contactname) { + configuration::Contactgroup retval; + configuration::contactgroup_helper retval_hlp(&retval); + fill_pb_configuration_contactgroup(&retval_hlp, name, contactname); + return retval; +} +void TestEngine::fill_pb_configuration_contactgroup( + configuration::contactgroup_helper* cg_hlp, + const std::string& name, + const std::string& contactname) { + configuration::Contactgroup* cg = + static_cast(cg_hlp->mut_obj()); + cg->set_contactgroup_name(name); + cg->set_alias(name); + fill_string_group(cg->mutable_members(), contactname); +} +#endif + +#ifdef LEGACY_CONF configuration::serviceescalation TestEngine::new_configuration_serviceescalation( const std::string& hostname, @@ -100,7 +227,26 @@ TestEngine::new_configuration_serviceescalation( se.parse("contact_groups", contactgroup.c_str()); return se; } +#else +configuration::Serviceescalation +TestEngine::new_pb_configuration_serviceescalation( + const std::string& hostname, + const std::string& svc_desc, + const std::string& contactgroup) { + configuration::Serviceescalation se; + 
configuration::serviceescalation_helper se_hlp(&se); + se.set_first_notification(2); + se.set_last_notification(11); + se.set_notification_interval(9); + se_hlp.hook("escalation_options", "w,u,c,r"); + se_hlp.hook("host_name", hostname); + se_hlp.hook("service_description", svc_desc); + se_hlp.hook("contact_groups", contactgroup); + return se; +} +#endif +#ifdef LEGACY_CONF configuration::hostdependency TestEngine::new_configuration_hostdependency( const std::string& hostname, const std::string& dep_hostname) { @@ -112,7 +258,31 @@ configuration::hostdependency TestEngine::new_configuration_hostdependency( hd.dependency_type(configuration::hostdependency::notification_dependency); return hd; } +#else +/** + * @brief Create a new host dependency protobuf configuration. + * + * @param hostname The master host name we work with. + * @param dep_hostname The dependent host. + * + * @return the new configuration as a configuration::Hostdependency. + */ +configuration::Hostdependency TestEngine::new_pb_configuration_hostdependency( + const std::string& hostname, + const std::string& dep_hostname) { + configuration::Hostdependency hd; + configuration::hostdependency_helper hd_hlp(&hd); + EXPECT_TRUE(hd_hlp.hook("master_host", hostname)); + EXPECT_TRUE(hd_hlp.hook("dependent_host", dep_hostname)); + EXPECT_TRUE(hd_hlp.hook("notification_failure_options", "u,d")); + hd.set_inherits_parent(true); + hd.set_dependency_type( + configuration::DependencyKind::notification_dependency); + return hd; +} +#endif +#ifdef LEGACY_CONF configuration::servicedependency TestEngine::new_configuration_servicedependency( const std::string& hostname, @@ -128,7 +298,9 @@ TestEngine::new_configuration_servicedependency( sd.dependency_type(configuration::servicedependency::notification_dependency); return sd; } +#endif +#ifdef LEGACY_CONF configuration::host TestEngine::new_configuration_host( const std::string& hostname, const std::string& contacts, @@ -152,7 +324,53 @@ configuration::host 
TestEngine::new_configuration_host( conf_hosts[hostname] = hst_id; return hst; } +#else +configuration::Host TestEngine::new_pb_configuration_host( + const std::string& hostname, + const std::string& contacts, + uint64_t hst_id) { + configuration::Host hst; + configuration::host_helper hst_hlp(&hst); + hst.set_host_name(hostname); + hst.set_address("127.0.0.1"); + hst.set_host_id(hst_id); + fill_string_group(hst.mutable_contacts(), contacts); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("hcmd"); + cmd.set_command_line("echo 0"); + hst.set_check_command("hcmd"); + configuration::applier::command cmd_aply; + cmd_aply.add_object(cmd); + conf_hosts[hostname] = hst_id; + return hst; +} + +void TestEngine::fill_pb_configuration_host(configuration::host_helper* hst_hlp, + const std::string& hostname, + const std::string& contacts, + uint64_t hst_id) { + auto* hst = static_cast(hst_hlp->mut_obj()); + hst->set_host_name(hostname); + hst->set_address("127.0.0.1"); + hst->set_host_id(hst_id); + fill_string_group(hst->mutable_contacts(), contacts); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("hcmd"); + cmd.set_command_line("echo 0"); + hst->set_check_command("hcmd"); + configuration::applier::command cmd_aply; + cmd_aply.add_object(cmd); + + conf_hosts[hostname] = hst_id; +} +#endif + +#ifdef LEGACY_CONF configuration::hostescalation TestEngine::new_configuration_hostescalation( const std::string& hostname, const std::string& contactgroup, @@ -168,7 +386,26 @@ configuration::hostescalation TestEngine::new_configuration_hostescalation( he.parse("contact_groups", contactgroup.c_str()); return he; } +#else +configuration::Hostescalation TestEngine::new_pb_configuration_hostescalation( + const std::string& hostname, + const std::string& contactgroup, + uint32_t first_notif, + uint32_t last_notif, + uint32_t interval_notif) { + configuration::Hostescalation he; + 
configuration::hostescalation_helper he_hlp(&he); + he.set_first_notification(first_notif); + he.set_last_notification(last_notif); + he.set_notification_interval(interval_notif); + he_hlp.hook("escalation_options", "d,u,r"); + he_hlp.hook("host_name", hostname); + he_hlp.hook("contact_groups", contactgroup); + return he; +} +#endif +#ifdef LEGACY_CONF configuration::service TestEngine::new_configuration_service( const std::string& hostname, const std::string& description, @@ -205,7 +442,95 @@ configuration::service TestEngine::new_configuration_service( return svc; } +#else +configuration::Service TestEngine::new_pb_configuration_service( + const std::string& hostname, + const std::string& description, + const std::string& contacts, + uint64_t svc_id) { + configuration::Service svc; + configuration::service_helper svc_hlp(&svc); + svc.set_host_name(hostname); + svc.set_service_description(description); + auto it = conf_hosts.find(hostname); + if (it != conf_hosts.end()) + svc.set_host_id(it->second); + else + svc.set_host_id(12); + svc.set_service_id(svc_id); + fill_string_group(svc.mutable_contacts(), contacts); + + // We fake here the expand_object on configuration::service + if (it != conf_hosts.end()) + svc.set_host_id(it->second); + else + svc.set_host_id(12); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 'output| metric=$ARG1$;50;75'"); + svc.set_check_command("cmd!12"); + configuration::applier::command cmd_aply; + cmd_aply.add_object(cmd); + + return svc; +} +configuration::Anomalydetection +TestEngine::new_pb_configuration_anomalydetection( + const std::string& hostname, + const std::string& description, + const std::string& contacts, + uint64_t svc_id, + uint64_t dependent_svc_id, + const std::string& thresholds_file) { + configuration::Anomalydetection ad; + configuration::anomalydetection_helper ad_hlp(&ad); + ad.set_host_name(hostname); + 
ad.set_service_description(description); + ad.set_dependent_service_id(dependent_svc_id); + ad.set_host_id(12); + ad.set_service_id(svc_id); + fill_string_group(ad.mutable_contacts(), contacts); + ad.set_metric_name("metric"); + ad.set_internal_id(1234); + ad.set_thresholds_file(thresholds_file); + + // We fake here the expand_object on configuration::service + ad.set_host_id(12); + + return ad; +} +void TestEngine::fill_pb_configuration_service( + configuration::service_helper* svc_hlp, + const std::string& hostname, + const std::string& description, + const std::string& contacts, + uint64_t svc_id) { + auto* svc = static_cast(svc_hlp->mut_obj()); + svc->set_host_name(hostname); + svc->set_service_description(description); + auto it = conf_hosts.find(hostname); + // We fake here the expand_object on configuration::service + if (it != conf_hosts.end()) + svc->set_host_id(it->second); + else + svc->set_host_id(12); + svc->set_service_id(svc_id); + fill_string_group(svc->mutable_contacts(), contacts); + + configuration::Command cmd; + configuration::command_helper cmd_hlp(&cmd); + cmd.set_command_name("cmd"); + cmd.set_command_line("echo 'output| metric=$ARG1$;50;75'"); + svc->set_check_command("cmd!12"); + configuration::applier::command cmd_aply; + cmd_aply.add_object(cmd); +} +#endif + +#ifdef LEGACY_CONF configuration::anomalydetection TestEngine::new_configuration_anomalydetection( const std::string& hostname, const std::string& description, @@ -229,3 +554,36 @@ configuration::anomalydetection TestEngine::new_configuration_anomalydetection( return ad; } + +#endif + +std::unique_ptr TestEngine::new_timeperiod_with_timeranges( + const std::string& name, + const std::string& alias) { +#ifdef LEGACY_CONF + auto tperiod = std::make_unique(name, alias); + for (size_t i = 0; i < tperiod->days.size(); ++i) + tperiod->days[i].emplace_back(0, 86400); +#else + configuration::Timeperiod tp; + configuration::timeperiod_helper tp_hlp(&tp); + tp.set_timeperiod_name(name); + 
tp.set_alias(alias); +#define add_day(day) \ + { \ + auto* d = tp.mutable_timeranges()->add_##day(); \ + d->set_range_start(0); \ + d->set_range_end(86400); \ + } + add_day(sunday); + add_day(monday); + add_day(tuesday); + add_day(wednesday); + add_day(thursday); + add_day(friday); + add_day(saturday); + + auto tperiod = std::make_unique(tp); +#endif + return tperiod; +} diff --git a/engine/tests/test_engine.hh b/engine/tests/test_engine.hh index 3271cb8873c..e2444d6a076 100644 --- a/engine/tests/test_engine.hh +++ b/engine/tests/test_engine.hh @@ -23,6 +23,11 @@ #include +#include "com/centreon/engine/commands/commands.hh" +#include "com/centreon/engine/configuration/applier/command.hh" +#include "com/centreon/engine/configuration/applier/timeperiod.hh" +#include "com/centreon/engine/timeperiod.hh" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/anomalydetection.hh" #include "common/engine_legacy_conf/contact.hh" #include "common/engine_legacy_conf/contactgroup.hh" @@ -32,11 +37,23 @@ #include "common/engine_legacy_conf/service.hh" #include "common/engine_legacy_conf/servicedependency.hh" #include "common/engine_legacy_conf/serviceescalation.hh" +#include "common/engine_legacy_conf/state.hh" +#else +#include "common/engine_conf/anomalydetection_helper.hh" +#include "common/engine_conf/contact_helper.hh" +#include "common/engine_conf/host_helper.hh" +#include "common/engine_conf/hostdependency_helper.hh" +#include "common/engine_conf/hostescalation_helper.hh" +#include "common/engine_conf/service_helper.hh" +#include "common/engine_conf/serviceescalation_helper.hh" +#include "common/engine_conf/timeperiod_helper.hh" +#endif using namespace com::centreon::engine; class TestEngine : public ::testing::Test { public: +#ifdef LEGACY_CONF configuration::contact new_configuration_contact( std::string const& name, bool full, @@ -80,6 +97,63 @@ class TestEngine : public ::testing::Test { configuration::contactgroup new_configuration_contactgroup( std::string const& 
name, std::string const& contactname); +#else + void fill_pb_configuration_contact(configuration::contact_helper* ctct_hlp, + std::string const& name, + bool full, + const std::string& notif = "a") const; + configuration::Contact new_pb_configuration_contact( + const std::string& name, + bool full, + const std::string& notif = "a") const; + configuration::Hostdependency new_pb_configuration_hostdependency( + const std::string& hostname, + const std::string& dep_hostname); + void fill_pb_configuration_host(configuration::host_helper* hst_hlp, + std::string const& hostname, + std::string const& contacts, + uint64_t hst_id = 12); + configuration::Host new_pb_configuration_host(const std::string& hostname, + const std::string& contacts, + uint64_t hst_id = 12); + configuration::Contactgroup new_pb_configuration_contactgroup( + const std::string& name, + const std::string& contactname); + void fill_pb_configuration_contactgroup( + configuration::contactgroup_helper* ctct_hlp, + const std::string& name, + const std::string& contactname); + configuration::Service new_pb_configuration_service( + const std::string& hostname, + const std::string& description, + const std::string& contacts, + uint64_t svc_id = 13); + configuration::Anomalydetection new_pb_configuration_anomalydetection( + const std::string& hostname, + const std::string& description, + const std::string& contacts, + uint64_t svc_id = 14, + uint64_t dependent_svc_id = 13, + const std::string& thresholds_file = "/tmp/thresholds_file"); + void fill_pb_configuration_service(configuration::service_helper* svc_hlp, + std::string const& hostname, + std::string const& description, + std::string const& contacts, + uint64_t svc_id = 13); + configuration::Serviceescalation new_pb_configuration_serviceescalation( + std::string const& hostname, + std::string const& svc_desc, + std::string const& contactgroup); + configuration::Hostescalation new_pb_configuration_hostescalation( + std::string const& hostname, + std::string 
const& contactgroup, + uint32_t first_notif = 2, + uint32_t last_notif = 11, + uint32_t interval_notif = 9); +#endif + std::unique_ptr new_timeperiod_with_timeranges( + const std::string& name, + const std::string& alias); }; #endif /* !TEST_ENGINE_HH */ diff --git a/engine/tests/timeperiod/get_next_valid_time/calendar_date.cc b/engine/tests/timeperiod/get_next_valid_time/calendar_date.cc index 09101857ea1..ad3989a1afd 100644 --- a/engine/tests/timeperiod/get_next_valid_time/calendar_date.cc +++ b/engine/tests/timeperiod/get_next_valid_time/calendar_date.cc @@ -1,33 +1,34 @@ /** -* Copyright 2016 Centreon -* -* This file is part of Centreon Engine. -* -* Centreon Engine is free software: you can redistribute it and/or -* modify it under the terms of the GNU General Public License version 2 -* as published by the Free Software Foundation. -* -* Centreon Engine is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* General Public License for more details. -* -* You should have received a copy of the GNU General Public License -* along with Centreon Engine. If not, see -* . -*/ + * Copyright 2016 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ #include #include #include "com/centreon/clib.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/timeperiod.hh" +#include "test_engine.hh" #include "tests/timeperiod/utils.hh" using namespace com::centreon; using namespace com::centreon::engine; -class GetNextValidTimeCalendarDateTest : public ::testing::Test { +class GetNextValidTimeCalendarDateTest : public TestEngine { public: void default_data_set() { _creator.new_timeperiod(); @@ -90,7 +91,7 @@ TEST_F(GetNextValidTimeCalendarDateTest, WithinCalendarDate) { // Then the next valid time is now TEST_F(GetNextValidTimeCalendarDateTest, AfterCalendarDates) { std::unique_ptr tiperiod{ - new engine::timeperiod("tperiod", "alias")}; + new_timeperiod_with_timeranges("tperiod", "alias")}; for (int i = 0; i < 7; ++i) { timerange_list list_time; diff --git a/engine/tests/timeperiod/get_next_valid_time/exceptions_test.cc b/engine/tests/timeperiod/get_next_valid_time/exceptions_test.cc index bdf0f7add12..c83f8dd9a16 100644 --- a/engine/tests/timeperiod/get_next_valid_time/exceptions_test.cc +++ b/engine/tests/timeperiod/get_next_valid_time/exceptions_test.cc @@ -22,7 +22,12 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/string.hh" #include "com/centreon/engine/timeperiod.hh" +#include "gtest/gtest.h" +#ifdef LEGACY_CONF #include "common/engine_legacy_conf/timeperiod.hh" +#else +#include "common/engine_conf/timeperiod_helper.hh" +#endif #include "helper.hh" @@ -35,6 +40,13 @@ struct test_param { std::string expected; // YYYY-MM-DD HH:MM:SS format }; +std::string PrintToString(const test_param& data) { + std::stringstream ss; + ss << "name: " << data.name << " ; now: " << data.now + << " ; prefered: " << data.prefered << " ; expected: " << data.expected; + return ss.str(); +} + class timeperiod_exception : public ::testing::TestWithParam { protected: static configuration::applier::timeperiod _applier; @@ -73,8 +85,15 @@ void 
timeperiod_exception::parse_timeperiods_cfg_file( bool wait_time_period_begin = true; +#ifdef LEGACY_CONF std::unique_ptr conf( std::make_unique()); +#else + std::unique_ptr conf( + std::make_unique()); + std::unique_ptr conf_hlp = + std::make_unique(conf.get()); +#endif while (!f.eof()) { std::getline(f, line); @@ -89,10 +108,42 @@ void timeperiod_exception::parse_timeperiods_cfg_file( if (line[0] == '}') { wait_time_period_begin = true; _applier.add_object(*conf); +#ifdef LEGACY_CONF conf = std::make_unique(); +#else + conf = std::make_unique(); + conf_hlp = + std::make_unique(conf.get()); +#endif continue; } + if (line.substr(0, 9) == "\tmonday 3") { + std::cout << "monday 3..." << std::endl; + } +#ifdef LEGACY_CONF conf->parse(string::trim(line)); +#else + std::string_view line_view = absl::StripAsciiWhitespace(line); + if (line_view[0] == '#') + continue; + std::vector v = + absl::StrSplit(line_view, absl::MaxSplits(absl::ByAnyChar(" \t"), 1), + absl::SkipWhitespace()); + if (v.size() != 2) + abort(); + + std::string_view key = absl::StripAsciiWhitespace(v[0]); + std::string_view value = absl::StripAsciiWhitespace(v[1]); + bool retval = false; + /* particular cases with hook */ + retval = conf_hlp->hook(key, value); + if (!retval) + retval = conf_hlp->set(key, value); + if (!retval) { + std::cout << "Unable to parse <<" << line << ">>" << std::endl; + abort(); + } +#endif } } } diff --git a/engine/tests/timeperiod/utils.cc b/engine/tests/timeperiod/utils.cc index ffd10cd5b91..bd77c4f7b85 100644 --- a/engine/tests/timeperiod/utils.cc +++ b/engine/tests/timeperiod/utils.cc @@ -1,22 +1,21 @@ /** - * Copyright 2016 Centreon + * Copyright 2016-2024 Centreon * - * This file is part of Centreon Engine. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. + * http://www.apache.org/licenses/LICENSE-2.0 * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . */ - #include #include #include @@ -26,6 +25,9 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/timerange.hh" #include "tests/timeperiod/utils.hh" +#ifndef LEGACY_CONF +#include "common/engine_conf/timeperiod_helper.hh" +#endif using namespace com::centreon::engine; // Global time. @@ -56,6 +58,7 @@ std::shared_ptr timeperiod_creator::get_timeperiods_shared() { return (*_timeperiods.begin()); } +#ifdef LEGACY_CONF /** * Create a new timeperiod. * @@ -64,8 +67,24 @@ std::shared_ptr timeperiod_creator::get_timeperiods_shared() { timeperiod* timeperiod_creator::new_timeperiod() { std::shared_ptr tp{new timeperiod("test", "test")}; _timeperiods.push_front(tp); - return (tp.get()); + return tp.get(); } +#else +/** + * Create a new timeperiod. + * + * @return The newly created timeperiod. 
+ */ +timeperiod* timeperiod_creator::new_timeperiod() { + configuration::Timeperiod conf_tp; + configuration::timeperiod_helper tp_hlp(&conf_tp); + conf_tp.set_timeperiod_name("test"); + conf_tp.set_alias("test"); + std::shared_ptr tp = std::make_shared(conf_tp); + _timeperiods.push_front(tp); + return tp.get(); +} +#endif /** * Create a new exclusion on the timeperiod. @@ -104,10 +123,17 @@ daterange* timeperiod_creator::new_calendar_date(int start_year, if (!target) target = _timeperiods.begin()->get(); +#ifdef LEGACY_CONF target->exceptions[daterange::calendar_date].emplace_back( daterange::calendar_date, start_year, start_month, start_day, 0, 0, end_year, end_month, end_day, 0, 0, 0, std::list()); +#else + target->exceptions[daterange::calendar_date].emplace_back( + daterange::calendar_date, start_year, start_month, start_day, 0, 0, + end_year, end_month, end_day, 0, 0, 0, + google::protobuf::RepeatedPtrField()); +#endif return &*target->exceptions[daterange::calendar_date].rbegin(); } @@ -130,9 +156,16 @@ daterange* timeperiod_creator::new_specific_month_date(int start_month, if (!target) target = _timeperiods.begin()->get(); +#ifdef LEGACY_CONF target->exceptions[daterange::month_date].emplace_back( daterange::month_date, 0, start_month, start_day, 0, 0, 0, end_month, end_day, 0, 0, 0, std::list()); +#else + target->exceptions[daterange::month_date].emplace_back( + daterange::month_date, 0, start_month, start_day, 0, 0, 0, end_month, + end_day, 0, 0, 0, + google::protobuf::RepeatedPtrField()); +#endif return &*target->exceptions[daterange::month_date].rbegin(); } @@ -154,9 +187,15 @@ daterange* timeperiod_creator::new_generic_month_date(int start_day, std::shared_ptr dr{new daterange( daterange::month_day, 0, 0, start_day, 0, 0, 0, 0, end_day, 0, 0, 0, {})}; +#ifdef LEGACY_CONF target->exceptions[daterange::month_day].emplace_back( daterange::month_day, 0, 0, start_day, 0, 0, 0, 0, end_day, 0, 0, 0, std::list()); +#else + 
target->exceptions[daterange::month_day].emplace_back( + daterange::month_day, 0, 0, start_day, 0, 0, 0, 0, end_day, 0, 0, 0, + google::protobuf::RepeatedPtrField()); +#endif return &*target->exceptions[daterange::month_day].rbegin(); } @@ -184,10 +223,17 @@ daterange* timeperiod_creator::new_offset_weekday_of_specific_month( if (!target) target = _timeperiods.begin()->get(); +#ifdef LEGACY_CONF target->exceptions[daterange::month_week_day].emplace_back( daterange::month_week_day, 0, start_month, 0, start_wday, start_offset, 0, end_month, 0, end_wday, end_offset, 0, std::list()); +#else + target->exceptions[daterange::month_week_day].emplace_back( + daterange::month_week_day, 0, start_month, 0, start_wday, start_offset, 0, + end_month, 0, end_wday, end_offset, 0, + google::protobuf::RepeatedPtrField()); +#endif return &*target->exceptions[daterange::month_week_day].rbegin(); } @@ -211,9 +257,16 @@ daterange* timeperiod_creator::new_offset_weekday_of_generic_month( if (!target) target = _timeperiods.begin()->get(); +#ifdef LEGACY_CONF target->exceptions[daterange::week_day].emplace_back( daterange::week_day, 0, 0, 0, start_wday, start_offset, 0, 0, 0, end_wday, end_offset, 0, std::list()); +#else + target->exceptions[daterange::week_day].emplace_back( + daterange::week_day, 0, 0, 0, start_wday, start_offset, 0, 0, 0, end_wday, + end_offset, 0, + google::protobuf::RepeatedPtrField()); +#endif return &*target->exceptions[daterange::week_day].rbegin(); } @@ -315,7 +368,12 @@ extern "C" time_t time(time_t* t) __THROW { return (gl_now); } +#ifdef LEGACY_GETTIMEOFDAY extern "C" int gettimeofday(struct timeval* tv, struct timezone*) __THROW { +#else +extern "C" int gettimeofday(struct timeval* tv, void*) __THROW { +#endif + // extern "C" int gettimeofday(struct timeval* tv, struct timezone*) __THROW { if (tv) { tv->tv_sec = gl_now; tv->tv_usec = 0; diff --git a/engine/tests/timeperiod/utils.hh b/engine/tests/timeperiod/utils.hh index 964e3ec0d5d..da07b395dbb 100644 --- 
a/engine/tests/timeperiod/utils.hh +++ b/engine/tests/timeperiod/utils.hh @@ -1,21 +1,21 @@ -/* -** Copyright 2016 Centreon -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 2016-2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ #ifndef TESTS_TIMEPERIOD_UTILS_HH #define TESTS_TIMEPERIOD_UTILS_HH diff --git a/gorgone/.gitignore b/gorgone/.gitignore new file mode 100644 index 00000000000..33e72b73fd3 --- /dev/null +++ b/gorgone/.gitignore @@ -0,0 +1,5 @@ +## source script + +# temporary folder +log + diff --git a/gorgone/.veracode-exclusions b/gorgone/.veracode-exclusions new file mode 100644 index 00000000000..e69de29bb2d diff --git a/gorgone/.version b/gorgone/.version new file mode 100644 index 00000000000..be51a9cda2e --- /dev/null +++ b/gorgone/.version @@ -0,0 +1 @@ +MINOR=0 diff --git a/gorgone/LICENSE.txt b/gorgone/LICENSE.txt new file mode 100644 index 00000000000..dfbec9227fe --- /dev/null +++ b/gorgone/LICENSE.txt @@ -0,0 +1,190 @@ + Copyright 2020 - Centreon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/gorgone/README.md b/gorgone/README.md new file mode 100644 index 00000000000..28fbfbfea19 --- /dev/null +++ b/gorgone/README.md @@ -0,0 +1,30 @@ +# Centreon Gorgone + +Centreon Gorgone and his "gorgoned" daemon is a lightweight, distributed, modular tasks handler. + +It provides a set of actions like: + +* Execute commands +* Send files/directories, +* Schedule cron-like tasks, +* Push or execute tasks through SSH. + +The daemon can be installed on Centreon environments like Centreon Central, Remote and Poller servers. + +It uses ZeroMQ library. 
+ +To install it follow the [Getting started](docs/getting_started.md) documentation. + +To understand the main principles of Gorgone protocol, follow the [guide](docs/guide.md). + +## Modules + +The Centreon Gorgone project encloses several built-in modules. + +See the full list [here](docs/modules.md). + +## API + +The HTTP server module exposes a RestAPI. + +See how to use it [here](docs/api.md). diff --git a/gorgone/TODO b/gorgone/TODO new file mode 100644 index 00000000000..da16b751a13 --- /dev/null +++ b/gorgone/TODO @@ -0,0 +1,2 @@ +- gorgone-newtest: don't use centcore.cmd. use ssh system. +- Add redis backend to store logs (we could disable synclog in redis mode) diff --git a/gorgone/config/gorgoned-central-ssh.yml b/gorgone/config/gorgoned-central-ssh.yml new file mode 100644 index 00000000000..144c3f47562 --- /dev/null +++ b/gorgone/config/gorgoned-central-ssh.yml @@ -0,0 +1,68 @@ +name: gorgoned-central-ssh +description: Configuration example in a SSH environment for Central server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + timeout: 50 + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: true + address: 0.0.0.0 + port: 8443 + ssl: true + ssl_cert_file: /etc/pki/tls/certs/server-cert.pem + ssl_key_file: /etc/pki/tls/server-key.pem + auth: + enabled: true + user: admin + password: password + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - 
^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: nodes + package: gorgone::modules::centreon::nodes::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-central-zmq.yml b/gorgone/config/gorgoned-central-zmq.yml new file mode 100644 index 00000000000..a7a0c1d12e0 --- /dev/null +++ b/gorgone/config/gorgoned-central-zmq.yml @@ -0,0 +1,93 @@ +name: gorgoned-central-zmq +description: Configuration example in a full ZMQ environment for Central server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + id: 1 + privkey: keys/central/privkey.pem + # can be: always, first (default), strict + fingerprint_mode: first + fingerprint_mgr: + package: 
gorgone::class::fingerprint::backend::sql + # if unset, it uses global configuration + #gorgone_db_type: + #gorgone_db_name: + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: true + address: 0.0.0.0 + port: 8443 + ssl: true + ssl_cert_file: /etc/pki/tls/certs/server-cert.pem + ssl_key_file: /etc/pki/tls/server-key.pem + auth: + enabled: true + user: admin + password: password + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + - 10.30.2.0/16 + + - name: cron + package: gorgone::modules::core::cron::hooks + enable: true + cron: + - id: echo_date + timespec: "* * * * *" + action: COMMAND + parameters: + command: "date >> /tmp/date.log" + timeout: 10 + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: register + package: gorgone::modules::core::register::hooks + enable: true + config_file: config/registernodes-central.yml + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + 
cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-poller.yml b/gorgone/config/gorgoned-poller.yml new file mode 100644 index 00000000000..735e864311d --- /dev/null +++ b/gorgone/config/gorgoned-poller.yml @@ -0,0 +1,34 @@ +name: gorgoned-poller +description: Configuration example in a full ZMQ environment for Poller server +configuration: + gorgone: + gorgonecore: + id: 2 + external_com_type: tcp + external_com_path: "*:5556" + privkey: keys/poller/privkey.pem + authorized_clients: + - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git 
a/gorgone/config/gorgoned-remote-ssh.yml b/gorgone/config/gorgoned-remote-ssh.yml new file mode 100644 index 00000000000..fea645f45af --- /dev/null +++ b/gorgone/config/gorgoned-remote-ssh.yml @@ -0,0 +1,55 @@ +name: gorgoned-remote-ssh +description: Configuration example in a SSH environment for Remote server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + timeout: 50 + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: nodes + package: gorgone::modules::centreon::nodes::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: 
"/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-remote-zmq.yml b/gorgone/config/gorgoned-remote-zmq.yml new file mode 100644 index 00000000000..2eb9872d8f0 --- /dev/null +++ b/gorgone/config/gorgoned-remote-zmq.yml @@ -0,0 +1,61 @@ +name: gorgoned-remote-zmq +description: Configuration example in a full ZMQ environment for Remote server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + id: 4 + external_com_type: tcp + external_com_path: "*:5556" + privkey: keys/central/privkey.pem + authorized_clients: + - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: 
gorgone::modules::core::proxy::hooks + enable: true + + - name: register + package: gorgone::modules::core::register::hooks + enable: true + config_file: config/registernodes-remote.yml + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/logrotate/gorgoned b/gorgone/config/logrotate/gorgoned new file mode 100644 index 00000000000..e6f56b7475f --- /dev/null +++ b/gorgone/config/logrotate/gorgoned @@ -0,0 +1,10 @@ +/var/log/centreon-gorgone/gorgoned.log { + copytruncate + weekly + rotate 52 + compress + delaycompress + notifempty + missingok + su root root +} diff --git a/gorgone/config/registernodes-central.yml b/gorgone/config/registernodes-central.yml new file mode 100644 index 00000000000..5c40cd531b4 --- /dev/null +++ b/gorgone/config/registernodes-central.yml @@ -0,0 +1,9 @@ +nodes: + - id: 4 + type: push_zmq + address: 10.30.2.135 + port: 5556 + prevail: 1 + nodes: + - id: 2 + pathscore: 1 diff --git a/gorgone/config/registernodes-remote.yml b/gorgone/config/registernodes-remote.yml new file mode 100644 index 00000000000..41a0e672033 --- /dev/null +++ b/gorgone/config/registernodes-remote.yml @@ -0,0 +1,5 @@ +nodes: + - id: 2 + type: push_zmq + address: 10.30.2.90 + port: 5556 diff --git a/gorgone/config/systemd/gorgoned-sysconfig b/gorgone/config/systemd/gorgoned-sysconfig new file mode 100644 index 00000000000..3ee7e99a48a --- /dev/null +++ b/gorgone/config/systemd/gorgoned-sysconfig @@ -0,0 +1,4 @@ +# Configuration file for Centreon Gorgone. 
+ +# OPTIONS for the daemon launch +OPTIONS="--config=/etc/centreon-gorgone/config.yaml --logfile=/var/log/centreon-gorgone/gorgoned.log --severity=error" diff --git a/gorgone/config/systemd/gorgoned.deb.service b/gorgone/config/systemd/gorgoned.deb.service new file mode 100644 index 00000000000..46aef41c175 --- /dev/null +++ b/gorgone/config/systemd/gorgoned.deb.service @@ -0,0 +1,33 @@ +## +## Copyright 2019-2020 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +[Unit] +Description=Centreon Gorgone +PartOf=centreon.service +After=centreon.service +ReloadPropagatedFrom=centreon.service + +[Service] +EnvironmentFile=/etc/default/gorgoned +ExecStart=/usr/bin/perl /usr/bin/gorgoned $OPTIONS +Type=simple +User=centreon-gorgone + +[Install] +WantedBy=multi-user.target +WantedBy=centreon.service diff --git a/gorgone/config/systemd/gorgoned.rpm.service b/gorgone/config/systemd/gorgoned.rpm.service new file mode 100644 index 00000000000..aec4c1efede --- /dev/null +++ b/gorgone/config/systemd/gorgoned.rpm.service @@ -0,0 +1,33 @@ +## +## Copyright 2019-2020 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +[Unit] +Description=Centreon Gorgone +PartOf=centreon.service +After=centreon.service +ReloadPropagatedFrom=centreon.service + +[Service] +EnvironmentFile=/etc/sysconfig/gorgoned +ExecStart=/usr/bin/perl /usr/bin/gorgoned $OPTIONS +Type=simple +User=centreon-gorgone + +[Install] +WantedBy=multi-user.target +WantedBy=centreon.service diff --git a/gorgone/contrib/gorgone_audit.pl b/gorgone/contrib/gorgone_audit.pl new file mode 100644 index 00000000000..f6d86fa3fbd --- /dev/null +++ b/gorgone/contrib/gorgone_audit.pl @@ -0,0 +1,636 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::gorgone_audit->new()->run(); + +package gorgone::script::gorgone_audit; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new('gorgone_audit', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + $self->add_options( + 'url:s' => \$self->{url}, + 'markdown:s' => \$self->{markdown} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{markdown} = 'audit.md' if (defined($self->{markdown}) && $self->{markdown} eq ''); + $self->{http} = 
gorgone::class::http::http->new(logger => $self->{logger}); +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub schedule_audit { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/audit/schedule', + query_form_post => '{}', + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($code) { + $self->{logger}->writeLogError("http request error"); + exit(1); + } + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub get_audit_log { + my ($self) = @_; + + my $progress = 0; + while (1) { + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($code) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . 
$self->{http}->get_message() . "']"); + exit(1); + } + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + if ($_->{code} == 500 && $progress < $data->{complete}) { + $self->{logger}->writeLogInfo("audit completed: $data->{complete}\%"); + $progress = $data->{complete}; + } elsif ($_->{code} == 1) { + $self->{logger}->writeLogError("audit execution: $data->{message}"); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->{audit} = $data->{audit}; + $stop = 1; + } + } + + last if ($stop == 1); + sleep(10); + } + + if (defined($self->{audit})) { + $self->{logger}->writeLogInfo("audit result: " . JSON::XS->new->encode($self->{audit})); + if (defined($self->{markdown})) { + $self->md_output(); + } + } +} + +sub md_node_system_cpu { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $cpu = <<"END_CPU"; + + Cpu + +END_CPU + + if ($options{entry}->{status_code} != 0) { + my $message = '_**Error:** cannot get informations ' . $options{entry}->{status_message}; + $cpu .= <<"END_CPU"; + + $message + +END_CPU + return $cpu; + } + + my $used = sprintf( + '%s/%s/%s/%s (1m/5m/15m/60m)', + defined($options{entry}->{avg_used_1min}) && $options{entry}->{avg_used_1min} =~ /\d/ ? $options{entry}->{avg_used_1min} . '%' : '-', + defined($options{entry}->{avg_used_5min}) && $options{entry}->{avg_used_5min} =~ /\d/ ? $options{entry}->{avg_used_5min} . '%' : '-', + defined($options{entry}->{avg_used_15min}) && $options{entry}->{avg_used_15min} =~ /\d/ ? $options{entry}->{avg_used_15min} . 
'%' : '-', + defined($options{entry}->{avg_used_60min}) && $options{entry}->{avg_used_60min} =~ /\d/ ? $options{entry}->{avg_used_60min} . '%' : '-' + ); + my $iowait = sprintf( + '%s/%s/%s/%s (1m/5m/15m/60m)', + defined($options{entry}->{avg_iowait_1min}) && $options{entry}->{avg_iowait_1min} =~ /\d/ ? $options{entry}->{avg_iowait_1min} . '%' : '-', + defined($options{entry}->{avg_iowait_5min}) && $options{entry}->{avg_iowait_5min} =~ /\d/ ? $options{entry}->{avg_iowait_5min} . '%' : '-', + defined($options{entry}->{avg_iowait_15min}) && $options{entry}->{avg_iowait_15min} =~ /\d/ ? $options{entry}->{avg_iowait_15min} . '%' : '-', + defined($options{entry}->{avg_iowait_60min}) && $options{entry}->{avg_iowait_60min} =~ /\d/ ? $options{entry}->{avg_iowait_60min} . '%' : '-' + ); + $cpu .= <<"END_CPU"; + + number of cores + $options{entry}->{num_cpu} + + + used + $used + + + iowait + $iowait + +END_CPU + + return $cpu; +} + +sub md_node_system_load { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $load = <<"END_LOAD"; + + Load + +END_LOAD + + if ($options{entry}->{status_code} != 0) { + my $message = '_**Error:** cannot get informations ' . $options{entry}->{status_message}; + $load .= <<"END_LOAD"; + + $message + +END_LOAD + return $load; + } + + $load .= <<"END_LOAD"; + + load average + $options{entry}->{load1m}/$options{entry}->{load5m}/$options{entry}->{load15m} (1m/5m/15m) + +END_LOAD + return $load; +} + +sub md_node_system_memory { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $memory = <<"END_MEMORY"; + + Memory + +END_MEMORY + + if ($options{entry}->{status_code} != 0) { + my $message = '_**Error:** cannot get informations ' . 
$options{entry}->{status_message}; + $memory .= <<"END_MEMORY"; + + $message + +END_MEMORY + return $memory; + } + + $memory .= <<"END_MEMORY"; + + memory total + $options{entry}->{ram_total_human} + + + memory available + $options{entry}->{ram_available_human} + + + swap total + $options{entry}->{swap_total_human} + + + swap free + $options{entry}->{swap_free_human} + +END_MEMORY + return $memory; +} + +sub md_node_system_disk { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $disk = "#### Filesystems\n\n"; + if ($options{entry}->{status_code} != 0) { + $disk .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $disk; + } + + $disk .= <<"END_DISK"; +| Filesystem | Type | Size | Used | Avail | Inodes | Mounted | +| :---------- | :---- | :----- | :--- | :----- | :------ | :------ | +END_DISK + + foreach my $mount (sort keys %{$options{entry}->{partitions}}) { + my $values = $options{entry}->{partitions}->{$mount}; + $disk .= <<"END_DISK"; +| $values->{filesystem} | $values->{type} | $values->{space_size_human} | $values->{space_used_human} | $values->{space_free_human} | $values->{inodes_used_percent} | $values->{mount} | +END_DISK + } + + $disk .= "\n"; + return $disk; +} + +sub md_node_system_diskio { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $diskio = "#### Disks I/O\n\n"; + if ($options{entry}->{status_code} != 0) { + $diskio .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $diskio; + } + + $diskio .= <<"END_DISKIO"; +| Device | Read IOPs | Write IOPs | Read Time | Write Time | +| :---------- | :--------- | :----------- | :-------- | :---------- | +END_DISKIO + + foreach my $dev (sort keys %{$options{entry}->{partitions}}) { + my $values = $options{entry}->{partitions}->{$dev}; + $diskio .= "| $dev | " . 
+ sprintf( + '%s/%s/%s/%s', + defined($values->{read_iops_1min_human}) && $values->{read_iops_1min_human} =~ /\d/ ? $values->{read_iops_1min_human} : '-', + defined($values->{read_iops_5min_human}) && $values->{read_iops_5min_human} =~ /\d/ ? $values->{read_iops_5min_human} : '-', + defined($values->{read_iops_15min_human}) && $values->{read_iops_15min_human} =~ /\d/ ? $values->{read_iops_15min_human} : '-', + defined($values->{read_iops_60min_human}) && $values->{read_iops_60min_human} =~ /\d/ ? $values->{read_iops_60min_human} : '-', + ) . '| ' . + sprintf( + '%s/%s/%s/%s', + defined($values->{write_iops_1min_human}) && $values->{write_iops_1min_human} =~ /\d/ ? $values->{write_iops_1min_human} : '-', + defined($values->{write_iops_5min_human}) && $values->{write_iops_5min_human} =~ /\d/ ? $values->{write_iops_5min_human} : '-', + defined($values->{write_iops_15min_human}) && $values->{write_iops_15min_human} =~ /\d/ ? $values->{write_iops_15min_human} : '-', + defined($values->{write_iops_60min_human}) && $values->{write_iops_60min_human} =~ /\d/ ? $values->{write_iops_60min_human} : '-', + ) . '| ' . + sprintf( + '%s/%s/%s/%s', + defined($values->{read_time_1min_ms}) && $values->{read_time_1min_ms} =~ /\d/ ? $values->{read_time_1min_ms} . 'ms' : '-', + defined($values->{read_time_5min_ms}) && $values->{read_time_5min_ms} =~ /\d/ ? $values->{read_time_5min_ms} . 'ms' : '-', + defined($values->{read_time_15min_ms}) && $values->{read_time_15min_ms} =~ /\d/ ? $values->{read_time_15min_ms} . 'ms' : '-', + defined($values->{read_time_60min_ms}) && $values->{read_time_60min_ms} =~ /\d/ ? $values->{read_time_60min_ms} . 'ms' : '-' + ) . '| ' . + sprintf( + '%s/%s/%s/%s', + defined($values->{write_time_1min_ms}) && $values->{write_time_1min_ms} =~ /\d/ ? $values->{write_time_1min_ms} . 'ms' : '-', + defined($values->{write_time_5min_ms}) && $values->{write_time_5min_ms} =~ /\d/ ? $values->{write_time_5min_ms} . 
'ms' : '-', + defined($values->{write_time_15min_ms}) && $values->{write_time_15min_ms} =~ /\d/ ? $values->{write_time_15min_ms} . 'ms' : '-', + defined($values->{write_time_60min_ms}) && $values->{write_time_60min_ms} =~ /\d/ ? $values->{write_time_60min_ms} . 'ms' : '-' + ) . "|\n"; + } + + $diskio .= "\n"; + return $diskio; +} + +sub md_node_centreon_packages { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $packages = "#### Packages\n\n"; + if ($options{entry}->{status_code} != 0) { + $packages .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $packages; + } + + $packages .= <<"END_PACKAGES"; +| Name | Version | +| :---- | :---- | +END_PACKAGES + + foreach my $entry (sort { $a->[0] cmp $b->[0] } @{$options{entry}->{list}}) { + $packages .= <<"END_PACKAGES"; +| $entry->[0] | $entry->[1] | +END_PACKAGES + } + + $packages .= "\n"; + return $packages; +} + +sub md_node_centreon_realtime { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $realtime = "#### Realtime\n\n"; + if ($options{entry}->{status_code} != 0) { + $realtime .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $realtime; + } + + $realtime .= <<"END_REALTIME"; +number of hosts: $options{entry}->{hosts_count} \\ +number of services: $options{entry}->{services_count} \\ +number of hostgroups: $options{entry}->{hostgroups_count} \\ +number of servicegroups: $options{entry}->{servicegroups_count} \\ +number of acl: $options{entry}->{acl_count} + +END_REALTIME + + return $realtime; +} + +sub md_node_centreon_rrd { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $rrd = "#### Rrd\n\n"; + if ($options{entry}->{status_code} != 0) { + $rrd .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . 
"\n\n"; + return $rrd; + } + + $rrd .= <<"END_RRD"; +number of metrics rrd: $options{entry}->{rrd_metrics_count} \\ +number of metrics rrd outdated: $options{entry}->{rrd_metrics_outdated} \\ +size of metrics rrd: $options{entry}->{rrd_metrics_human} \\ +number of status rrd: $options{entry}->{rrd_status_count} \\ +number of status rrd outdated: $options{entry}->{rrd_status_outdated} \\ +size of metrics rrd: $options{entry}->{rrd_status_human} + +END_RRD + + return $rrd; +} + +sub md_node_centreon_database { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $db = "#### Database\n\n"; + if ($options{entry}->{status_code} != 0) { + $db .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $db; + } + + $db .= <<"END_DATABASE"; +Total databases space used: $options{entry}->{space_used_human} \\ +Total databases space free: $options{entry}->{space_free_human} + +END_DATABASE + + $db .= <<"END_DATABASE"; +| Database | Used | Free | +| :-------- | :--- | :--- | +END_DATABASE + + foreach my $dbname (sort keys %{$options{entry}->{databases}}) { + $db .= sprintf( + '| %s | %s | %s |' . "\n", + $dbname, + $options{entry}->{databases}->{$dbname}->{space_used_human}, + $options{entry}->{databases}->{$dbname}->{space_free_human} + ); + } + + $db .= <<"END_DATABASE"; + +| Table | Engine | Used | Free | Frag | +| :-------- | :----- | :--- | :--- | :--- | +END_DATABASE + + foreach my $dbname (sort keys %{$options{entry}->{databases}}) { + foreach my $table (sort keys %{$options{entry}->{databases}->{$dbname}->{tables}}) { + $db .= sprintf( + '| %s | %s | %s | %s | %.2f%% |' . "\n", + $dbname . '.' . 
$table, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{engine}, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{space_used_human}, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{space_free_human}, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{frag} + ); + } + } + + $db .= "\n"; + return $db; +} + +sub md_node_centreon_pluginpacks { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $pp = "#### Plugin-Packs\n\n"; + if ($options{entry}->{status_code} != 0) { + $pp .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $pp; + } + + $pp .= <<"END_PP"; +| Pack installed | Version | +| :-------------- | :------ | +END_PP + + foreach my $entry (sort { $a->{slug} cmp $b->{slug} } @{$options{entry}->{installed}}) { + $pp .= <<"END_PP"; +| $entry->{slug} | $entry->{version} | +END_PP + } + + $pp .= "\n"; + return $pp; +} + +sub md_node_system { + my ($self, %options) = @_; + + my $os = defined($options{node}->{metrics}->{'system::os'}) ? $options{node}->{metrics}->{'system::os'}->{os}->{value} : '-'; + my $kernel = defined($options{node}->{metrics}->{'system::os'}) ? $options{node}->{metrics}->{'system::os'}->{kernel}->{value} : '-'; + + my $cpu = $self->md_node_system_cpu(entry => $options{node}->{metrics}->{'system::cpu'}); + my $load = $self->md_node_system_load(entry => $options{node}->{metrics}->{'system::load'}); + my $memory = $self->md_node_system_memory(entry => $options{node}->{metrics}->{'system::memory'}); + my $disks = $self->md_node_system_disk(entry => $options{node}->{metrics}->{'system::disk'}); + my $disks_io = $self->md_node_system_diskio(entry => $options{node}->{metrics}->{'system::diskio'}); + + $self->{md_content} .= "### System + +#### Overall + +os: $os \\ +kernel: $kernel + + +${cpu}${load}${memory} +
+ +${disks}${disks_io}"; + +} + +sub md_node_centreon { + my ($self, %options) = @_; + + my $realtime = $self->md_node_centreon_realtime(entry => $options{node}->{metrics}->{'centreon::realtime'}); + my $rrd = $self->md_node_centreon_rrd(entry => $options{node}->{metrics}->{'centreon::rrd'}); + my $database = $self->md_node_centreon_database(entry => $options{node}->{metrics}->{'centreon::database'}); + my $packages = $self->md_node_centreon_packages(entry => $options{node}->{metrics}->{'centreon::packages'}); + my $pp = $self->md_node_centreon_pluginpacks(entry => $options{node}->{metrics}->{'centreon::pluginpacks'}); + + $self->{md_content} .= "### Centreon + +${realtime}${rrd}${database}${packages}${pp}"; + +} + +sub md_node { + my ($self, %options) = @_; + + $self->{md_content} .= "## " . $options{node}->{name} . "\n\n"; + if ($options{node}->{status_code} != 0) { + $self->{md_content} .= '_**Error:** cannot get informations ' . $options{node}->{status_message} . "\n\n"; + return ; + } + + $self->md_node_system(%options); + $self->md_node_centreon(%options); +} + +sub md_output { + my ($self) = @_; + + if (!open(FH, '>', $self->{markdown})) { + $self->{logger}->writeLogError("cannot open file '" . $self->{markdown} . "': $!"); + exit(1); + } + $self->{md_content} = "# Audit\n\n"; + + foreach my $node_id (sort { $self->{audit}->{nodes}->{$a}->{name} cmp $self->{audit}->{nodes}->{$b}->{name} } keys %{$self->{audit}->{nodes}}) { + $self->md_node(node => $self->{audit}->{nodes}->{$node_id}); + } + + print FH $self->{md_content}; + close FH; +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->schedule_audit(); + $self->get_audit_log(); +} + +__END__ + +=head1 NAME + +gorgone_audit.pl - script to execute and get audit + +=head1 SYNOPSIS + +gorgone_audit.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--markdown> + +Markdown output format (default: 'audit.md'). 
+ +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. + +=back + +=head1 DESCRIPTION + +B + +=cut + diff --git a/gorgone/contrib/gorgone_config_init.pl b/gorgone/contrib/gorgone_config_init.pl new file mode 100644 index 00000000000..b5702888331 --- /dev/null +++ b/gorgone/contrib/gorgone_config_init.pl @@ -0,0 +1,228 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::gorgone_config_init->new()->run(); + +package gorgone::script::gorgone_config_init; + +use strict; +use warnings; +use gorgone::standard::misc; + +use base qw(gorgone::class::script); + +use vars qw($centreon_config); + +sub new { + my $class = shift; + my $self = $class->SUPER::new("gorgone_config_init", + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + $self->add_options( + 'centcore-config:s' => \$self->{centcore_config}, + 'gorgone-config:s' => \$self->{gorgone_config}, + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{centcore_config} = '/etc/centreon/conf.pm' if (!defined($self->{centcore_config}) || $self->{centcore_config} eq ''); + $self->{gorgone_config} = '/etc/centreon-gorgone/config.yaml' if (!defined($self->{gorgone_config}) || + $self->{gorgone_config} eq ''); +} + +sub read_centcore_config { + my ($self) = @_; + + unless (my $return = do $self->{centcore_config}) { + $self->{logger}->writeLogError("couldn't parse $self->{centcore_config}: $@") if $@; + $self->{logger}->writeLogError("couldn't do $self->{centcore_config}: $!") unless defined $return; + $self->{logger}->writeLogError("couldn't run $self->{centcore_config}") unless $return; + exit(1); + } + + if (!defined($centreon_config->{VarLib})) { + $self->{logger}->writeLogError("config file doesn't look like a centcore config file"); 
+ exit(1); + } + + $centreon_config->{VarLib} =~ s/\/$//; + if ($centreon_config->{db_host} =~ /^(.*?):(\d+)$/) { + $centreon_config->{db_host} = $1; + $centreon_config->{db_port} = $2; + } +} + +sub write_gorgone_config { + my ($self) = @_; + + my $fh; + if (!open($fh, '>', $self->{gorgone_config})) { + $self->{logger}->writeLogError("couldn't open file '$self->{gorgone_config}': $!"); + exit(1); + } + + my $db_port = ''; + if (defined($centreon_config->{db_port})) { + $db_port = ';port=' . $centreon_config->{db_port}; + } + + my $content = <<"END_FILE"; +name: gorgoned +description: Configuration init by gorgone_config_init +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=$centreon_config->{db_host}${db_port};dbname=$centreon_config->{centreon_db}" + username: "$centreon_config->{db_user}" + password: "$centreon_config->{db_passwd}" + db_realtime: + dsn: "mysql:host=$centreon_config->{db_host}${db_port};dbname=$centreon_config->{centstorage_db}" + username: "$centreon_config->{db_user}" + password: "$centreon_config->{db_passwd}" + gorgone: + gorgonecore: + hostname: + id: + privkey: /var/lib/centreon-gorgone/.keys/rsakey.priv.pem + pubkey: /var/lib/centreon-gorgone/.keys/rsakey.pub.pem + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: true + address: 0.0.0.0 + port: 8085 + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + + - name: cron + package: gorgone::modules::core::cron::hooks + enable: true + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - 
^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: nodes + package: gorgone::modules::centreon::nodes::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "$centreon_config->{VarLib}/centcore.cmd" + cache_dir: "$centreon_config->{CacheDir}" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "$centreon_config->{CacheDir}/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: statistics + package: "gorgone::modules::centreon::statistics::hooks" + enable: true + broker_cache_dir: "/var/cache/centreon/broker-stats/" + cron: + - id: broker_stats + timespec: "*/5 * * * *" + action: BROKERSTATS + parameters: + timeout: 10 + - id: engine_stats + timespec: "*/5 * * * *" + action: ENGINESTATS + parameters: + timeout: 10 +END_FILE + + print $fh $content; + close($fh); +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->read_centcore_config(); + $self->write_gorgone_config(); + + $self->{logger}->writeLogInfo("file '$self->{gorgone_config}' created success"); +} + +__END__ + +=head1 NAME + +gorgone_config_init.pl - script to create gorgone config to replace centcore + +=head1 SYNOPSIS + +gorgone_config_init.pl [options] + +=head1 OPTIONS + +=over 8 + +=item 
B<--centcore-config> + +Specify the path to the centcore configuration file (default: '/etc/centreon/conf.pm'). + +=item B<--gorgone-config> + +Specify the gorgone config file created (default: '/etc/centreon-gorgone/config.yaml'). + +=item B<--severity> + +Set the script log severity (default: 'error'). + +=item B<--help> + +Print a brief help message and exits. + +=back + +=head1 DESCRIPTION + +B + +=cut + diff --git a/gorgone/contrib/gorgone_install_plugins.pl b/gorgone/contrib/gorgone_install_plugins.pl new file mode 100644 index 00000000000..970d25f55f4 --- /dev/null +++ b/gorgone/contrib/gorgone_install_plugins.pl @@ -0,0 +1,70 @@ +#!/usr/bin/perl +# +# Copyright 2022 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +my $plugins = []; +my $type; +if ($ARGV[0] !~ /^--type=(deb|rpm)$/) { + print "need to set option --type=[deb|rpm]\n"; + exit(1); +} +$type = $1; + +for (my $i = 1; $i < scalar(@ARGV); $i++) { + if ($ARGV[$i] =~ /^centreon-plugin-([A-Za-z\-_=0-9]+)$/) { + push @$plugins, $ARGV[$i]; + } +} + +if (scalar(@$plugins) <= 0) { + print "nothing to install\n"; + exit(0); +} + +my $command; +if ($type eq 'rpm') { + $command = 'yum -y install'; + foreach (@$plugins) { + $command .= " '" . $_ . 
"-*'" + } +} elsif ($type eq 'deb') { + $command = 'apt-get -y install'; + foreach (@$plugins) { + $command .= " '" . $_ . "-*'" + } +} +$command .= ' 2>&1'; + +my $output = `$command`; +if ($? == -1) { + print "failed to execute: $!\n"; + exit(1); +} elsif ($? & 127) { + printf "child died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + exit(1); +} + +my $exit = $? >> 8; +print "succeeded command (code: $exit): " . $output; +exit(0); diff --git a/gorgone/contrib/gorgone_key_generation.pl b/gorgone/contrib/gorgone_key_generation.pl new file mode 100644 index 00000000000..a7ec62bbad3 --- /dev/null +++ b/gorgone/contrib/gorgone_key_generation.pl @@ -0,0 +1,41 @@ +#!/usr/bin/perl +use strict; +use warnings FATAL => 'all'; +use Try::Tiny; +use Crypt::PK::RSA; +use File::Basename qw( fileparse ); + +# generate key if there is none. +# Gorgone can generate it's own key, but as we need the thumbprint in the configuration we need to generate them before launching gorgone. +# this script only create key if the files don't exists, and silently finish if the files already exists. 
+ +my ($privkey, $pubkey); + +my $priv_dest = '/var/lib/centreon-gorgone/.keys/rsakey.priv.pem'; +my $pub_dest = '/var/lib/centreon-gorgone/.keys/rsakey.pub.pem'; +$ARGV[0] and $priv_dest = $ARGV[0]; +$ARGV[1] and $pub_dest = $ARGV[1]; +if (-f $priv_dest or -f $pub_dest){ + print("files already exist, no overriding is done.\n"); + exit 0; +} +try { + my $pkrsa = Crypt::PK::RSA->new(); + $pkrsa->generate_key(256, 65537); + $pubkey = $pkrsa->export_key_pem('public_x509'); + $privkey = $pkrsa->export_key_pem('private'); +} catch { + die("Cannot generate server keys: $_\n"); +}; + +my ( $priv_key_name, $priv_folder_name ) = fileparse $priv_dest; +`mkdir -p $priv_folder_name`; +open(my $priv_fh, '>', $priv_dest) or die("failed opening $priv_dest : $!"); +print $priv_fh $privkey; +print "private key saved to file.\n"; + +my ( $pub_key_name, $pub_folder_name ) = fileparse $pub_dest; +`mkdir -p $pub_folder_name`; +open(my $pub_fh, '>', $pub_dest) or die("failed opening $pub_dest : $!"); +print $pub_fh $pubkey; +print "pub key saved to file.\n"; diff --git a/gorgone/contrib/gorgone_key_thumbprint.pl b/gorgone/contrib/gorgone_key_thumbprint.pl new file mode 100644 index 00000000000..bf7b9fdd5d0 --- /dev/null +++ b/gorgone/contrib/gorgone_key_thumbprint.pl @@ -0,0 +1,116 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::gorgone_key_thumbprint->new()->run(); + +package gorgone::script::gorgone_key_thumbprint; + +use strict; +use warnings; +use gorgone::standard::misc; +use Crypt::PK::RSA; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new("gorgone_key_thumbprint", + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + $self->add_options( + 'key-path:s' => \$self->{key_path}, + ); + return $self; +} + +sub init { + my $self = shift; + 
$self->SUPER::init(); + + $self->{key_path} = '/etc/pki/gorgone/pubkey.pem' if (!defined($self->{key_path}) || $self->{key_path} eq ''); +} + +sub read_key { + my ($self, $key_path) = @_; + + my $fh; + if (!open($fh, '<', $key_path)) { + $self->{logger}->writeLogError("Couldn't open file '$key_path': $!"); + exit(1); + } + my $content = do { local $/; <$fh> }; + close($fh); + + return $content; +} + +sub get_key_thumbprint { + my ($self, $key_string) = @_; + + my $kh; + $key_string =~ s/\\n/\n/g; + eval { + $kh = Crypt::PK::RSA->new(\$key_string); + }; + if ($@) { + $self->{logger}->writeLogError("Cannot load key: $@"); + return -1; + } + + return $kh->export_key_jwk_thumbprint('SHA256'); +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + my $key = $self->read_key($self->{key_path}); + my $thumbprint = $self->get_key_thumbprint($key); + + $self->{logger}->writeLogInfo("File '$self->{key_path}' JWK thumbprint: " . $thumbprint); +} + +__END__ + +=head1 NAME + +gorgone_key_thumbprint.pl - script to get the JWK thumbprint of a RSA key. + +=head1 SYNOPSIS + +gorgone_key_thumbprint.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--key-path> + +Specify the path to the RSA key (default: '/etc/pki/gorgone/pubkey.pem'). + +=item B<--severity> + +Set the script log severity (default: 'error'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + +=head1 DESCRIPTION + +B + +=cut + diff --git a/gorgone/contrib/mbi/centreonBIETL b/gorgone/contrib/mbi/centreonBIETL new file mode 100644 index 00000000000..4e666a0f926 --- /dev/null +++ b/gorgone/contrib/mbi/centreonBIETL @@ -0,0 +1,382 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::centreonBIETL->new()->run(); + +package gorgone::script::centreonBIETL; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'centreonBIETL', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{create_tables} = 0; + $self->{moptions}->{ignore_databin} = 0; + $self->{moptions}->{centreon_only} = 0; + $self->{moptions}->{nopurge} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'status' => \$self->{status}, + 'r' => \$self->{moptions}->{rebuild}, + 'd' => \$self->{moptions}->{daily}, + 'I' => \$self->{moptions}->{import}, + 'D' => \$self->{moptions}->{dimensions}, + 'E' => \$self->{moptions}->{event}, + 'P' => \$self->{moptions}->{perfdata}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'c' => \$self->{moptions}->{create_tables}, + 'i' => \$self->{moptions}->{ignore_databin}, + 'C' => \$self->{moptions}->{centreon_only}, + 'p' => \$self->{moptions}->{nopurge} + ); + return $self; +} + +sub init { + my $self = shift; + 
$self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + + return if (defined($self->{status})); + + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } + + if ($self->{moptions}->{create_tables} == 0 && + $self->{moptions}->{import} == 0 && + $self->{moptions}->{dimensions} == 0 && + $self->{moptions}->{event} == 0 && + $self->{moptions}->{perfdata} == 0) { + $self->{moptions}->{import} = 1; + $self->{moptions}->{dimensions} = 1; + $self->{moptions}->{event} = 1; + $self->{moptions}->{perfdata} = 1; + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub get_etl_status { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/status', + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + my $token = $decoded->{token}; + my $log_id; + my $result; + + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . 
$token, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 1) { + $self->{logger}->writeLogError('cannot get etl status'); + exit(1); + } elsif ($_->{code} == 2) { + $result = $data; + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } + + print "ETL status: $result->{statusStr}\n"; + if ($result->{statusStr} ne 'ready') { + print "planning: $result->{planningStr}\n"; + foreach ('import', 'dimensions', 'event', 'perfdata') { + next if (!defined($result->{sections}->{$_})); + + print " $_ status: $result->{sections}->{$_}->{statusStr}"; + if (defined($result->{sections}->{$_}->{steps_total})) { + print " ($result->{sections}->{$_}->{steps_executed}/$result->{sections}->{$_}->{steps_total})"; + } + print "\n"; + } + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + + if (defined($self->{status})) { + $self->get_etl_status(); + } else { + $self->run_etl(); + $self->get_etl_log(); + } +} + +__END__ + +=head1 NAME + +centreonBIETL - script to execute mbi etl + +=head1 SYNOPSIS + +centreonBIETL [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). 
+
+=item B<--severity>
+
+Set the script log severity (default: 'info').
+
+=item B<--help>
+
+Print a brief help message and exits.
+
+Execution modes
+
+    -c  Create the reporting database model
+    -d  Daily execution to calculate statistics on yesterday
+    -r  Rebuild mode to calculate statistics on a historical period. Can be used with:
+       Extra arguments for options -d and -r (if none of the following is specified, these ones are selected by default: -IDEP):
+        -I  Extract data from the monitoring server
+           Extra arguments for option -I:
+            -C  Extract only the Centreon configuration database. Works with option -I.
+            -i  Ignore perfdata extraction from monitoring server
+            -o  Extract only perfdata from monitoring server
+
+        -D  Calculate dimensions
+        -E  Calculate event and availability statistics
+        -P  Calculate perfdata statistics
+       Common options for -rIDEP:
+        -s  Start date in format YYYY-MM-DD.
+            By default, the program uses the data retention period from Centreon BI configuration
+        -e  End date in format YYYY-MM-DD.
+            By default, the program uses the data retention period from Centreon BI configuration
+        -p  Do not empty statistic tables, delete only entries for the processed period.
+            Does not work on raw data tables, only on Centreon BI statistics tables.
+ +=back + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/dimensionBuilder.pl b/gorgone/contrib/mbi/dimensionBuilder.pl new file mode 100644 index 00000000000..1e81760852d --- /dev/null +++ b/gorgone/contrib/mbi/dimensionBuilder.pl @@ -0,0 +1,237 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::dimensionsBuilder->new()->run(); + +package gorgone::script::dimensionsBuilder; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'dimensionsBuilder', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 1; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{centile} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 'centile' => \$self->{moptions}->{centile}, + 'p|no-purge' => \$self->{moptions}->{nopurge} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my 
$decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . 
$self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +dimensionsBuilder.pl - script to compute dimensions + +=head1 SYNOPSIS + +dimensionsBuilder.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + + Rebuild options: + [-r|--rebuild] : Rebuild dimensions + [--no-purge] : Do not delete previous dimensions while rebuilding + [--centile] : import only centile dimensions without deleting other dimensions + Daily run options: + [-d|--daily] + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/eventStatisticsBuilder.pl b/gorgone/contrib/mbi/eventStatisticsBuilder.pl new file mode 100644 index 00000000000..6f993f5a6e3 --- /dev/null +++ b/gorgone/contrib/mbi/eventStatisticsBuilder.pl @@ -0,0 +1,249 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::eventStatisticsBuilder->new()->run(); + +package gorgone::script::eventStatisticsBuilder; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'eventStatisticsBuilder', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 1; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{host_only} = 0; + $self->{moptions}->{service_only} = 0; + $self->{moptions}->{availability_only} = 0; + $self->{moptions}->{events_only} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'host-only' => \$self->{moptions}->{host_only}, + 'service-only' => 
\$self->{moptions}->{service_only}, + 'availability-only' => \$self->{moptions}->{availability_only}, + 'events-only' => \$self->{moptions}->{events_only}, + 'no-purge' => \$self->{moptions}->{nopurge} + ); + + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +eventStatisticsBuilder.pl - script to calculate events and availbility statistics + +=head1 SYNOPSIS + +eventStatisticsBuilder.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + + Rebuild options: + [-s|--start] [-e|--end] [-r|--rebuild] [--no-purge] + Daily run options: + [-d|--daily] + Other options:\n"; + --host-only Process only host events and availability statistics + --service-only Process only service events and availability statistics + --availability-only Build only availability statistics + --events-only Build only event statistics + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/importData.pl b/gorgone/contrib/mbi/importData.pl new file mode 100644 index 00000000000..82e429c4abe --- /dev/null +++ b/gorgone/contrib/mbi/importData.pl @@ -0,0 +1,250 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::importData->new()->run(); + +package gorgone::script::importData; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'importData', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 1; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{create_tables} = 0; + $self->{moptions}->{databin_only} = 0; + $self->{moptions}->{ignore_databin} = 0; + $self->{moptions}->{centreon_only} = 0; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{bam_only} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => 
\$self->{moptions}->{end}, + 'c|create-tables' => \$self->{moptions}->{create_tables}, + 'databin-only' => \$self->{moptions}->{databin_only}, + 'i|ignore-databin' => \$self->{moptions}->{ignore_databin}, + 'C|centreon-only' => \$self->{moptions}->{centreon_only}, + 'p|no-purge' => \$self->{moptions}->{nopurge}, + 'bam-only' => \$self->{moptions}->{bam_only} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +importData.pl - script to execute import centreon datas + +=head1 SYNOPSIS + +importData.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. + +=back + + First run + [-c|--create-tables] + Rebuild options: + [-r|--rebuild] [--databin-only] [--centreon-only] [--ignore-databin] [--bam-only] + [-s|--start] [-e|--end] Not mandatory : if you don't use these options, the retention parameters will be taken into account + [--no-purge] Only use this mode with rebuild mode to import missing data. 
+ This command may create duplicate entries if executed on a non appropriate period + Daily run options: + [-d|--daily] + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/perfdataStatisticsBuilder.pl b/gorgone/contrib/mbi/perfdataStatisticsBuilder.pl new file mode 100644 index 00000000000..da32dd6fd6f --- /dev/null +++ b/gorgone/contrib/mbi/perfdataStatisticsBuilder.pl @@ -0,0 +1,241 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::perfdataStatisticsBuilder->new()->run(); + +package gorgone::script::perfdataStatisticsBuilder; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'perfdataStatisticsBuilder', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 1; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{month_only} = 0; + $self->{moptions}->{centile_only} = 0; + $self->{moptions}->{no_centile} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'month-only' => \$self->{moptions}->{month_only}, + 'centile-only' => \$self->{moptions}->{centile_only}, + 'no-centile' => \$self->{moptions}->{no_centile}, + 'no-purge' => \$self->{moptions}->{nopurge} + ); + return $self; +} + +sub init 
{ + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +perfdataStatisticsBuilder.pl - script to calculate perfdata statistics + +=head1 SYNOPSIS + +perfdataStatisticsBuilder.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + + Rebuild options: + [-r | --rebuild] [-s|--start] [-e|--end] [--no-purge] [--month-only] [--centile-only] [--no-centile] + Daily run options: + [-d | --daily] + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mojolicious_client.pl b/gorgone/contrib/mojolicious_client.pl new file mode 100644 index 00000000000..79c349ee3ce --- /dev/null +++ b/gorgone/contrib/mojolicious_client.pl @@ -0,0 +1,34 @@ +use strict; +use warnings; +use Mojo::UserAgent; + +my $ua = Mojo::UserAgent->new(); +# ws or wss +$ua->websocket( + 'ws://127.0.0.1:8086/' => sub { + my ($ua, $tx) = @_; + + print "error: ", $tx->res->error->{message}, "\n" if $tx->res->error; + print 'WebSocket handshake failed!\n' and return unless $tx->is_websocket; + + $tx->on( + finish => sub { + my ($tx, $code, $reason) = @_; + print "WebSocket closed with status $code.\n"; + } + ); + $tx->on( + message => sub { + my ($tx, $msg) = @_; + print "WebSocket message: $msg\n"; + } + ); + + $tx->send({json => { username => 'admin', password => 'plop' } }); + $tx->send({json => { method => 'POST', uri => '/core/action/command', userdata => 'command1', data => [ { command => 'ls' } ] } }); + } +); +$ua->inactivity_timeout(120); +Mojo::IOLoop->start() unless (Mojo::IOLoop->is_running); + +exit(0); diff --git a/gorgone/contrib/mojolicious_server.pl b/gorgone/contrib/mojolicious_server.pl new file mode 100644 index 00000000000..3f0c60d8026 --- /dev/null +++ b/gorgone/contrib/mojolicious_server.pl @@ -0,0 +1,67 @@ +use strict; +use warnings; +use Mojolicious::Lite; +use Mojo::Server::Daemon; +use IO::Socket::SSL; +use DateTime; + +sub sigalrm_handler +{ + printf (STDOUT "Timeout: Timeout Error Occured.\n"); + alarm(10); +} +$SIG{ALRM} = \&sigalrm_handler; + + +plugin 'basic_auth_plus'; + +my $clients = {}; + +IO::Socket::SSL::set_defaults(SSL_passwd_cb => sub { return 'secret' } ); + +websocket '/echo' => sub { + my $self = shift; + + print sprintf("Client connected: %s\n", $self->tx->connection); + my 
$ws_id = sprintf "%s", $self->tx->connection; + $clients->{$ws_id} = $self->tx; + + $self->on(message => sub { + my ($self, $msg) = @_; + + my $dt = DateTime->now( time_zone => 'Asia/Tokyo'); + + for (keys %$clients) { + $clients->{$_}->send({json => { + hms => $dt->hms, + text => $msg, + }}); + } + }); + + $self->on(finish => sub { + my ($self, $code, $reason) = @_; + + print "Client disconnected: $code\n"; + delete $clients->{ $self->tx->connection }; + }); +}; + +get '/' => sub { + my $self = shift; + + $self->render(json => { message => 'ok' }) + if $self->basic_auth( + "Realm Name" => { + username => 'username', + password => 'password' + } + ); +}; + +my $daemon = Mojo::Server::Daemon->new( + app => app, + listen => ["https://*:3000?reuse=1&cert=/etc/pki/tls/certs/localhost.crt&key=/etc/pki/tls/private/localhost.key"] +); +alarm(10); +$daemon->run(); diff --git a/gorgone/contrib/test-client.pl b/gorgone/contrib/test-client.pl new file mode 100644 index 00000000000..c8de55eaf20 --- /dev/null +++ b/gorgone/contrib/test-client.pl @@ -0,0 +1,187 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +use ZMQ::LibZMQ4; +use ZMQ::Constants qw(:all); +use JSON::XS; +use UUID; +use Data::Dumper; +use Sys::Hostname; +use gorgone::class::clientzmq; +use gorgone::standard::library; + +my ($client, $client2); +my $identities_token = {}; +my $stopped = {}; +my $results = {}; + +sub get_command_result { + my ($current_retries, $retries) = (0, 4); + $stopped->{$client2->{identity}} = '^(1|2)$'; + $client2->send_message( + action => 'COMMAND', data => { content => { command => 'ls /' } }, target => 100, + json_encode => 1 + ); + while (1) { + my $poll = []; + + $client2->ping(poll => $poll); + my $rev = zmq_poll($poll, 15000); + + if (defined($results->{$client2->{identity}})) { + print "The result: " . Data::Dumper::Dumper($results->{$client2->{identity}}); + last; + } + + if (!defined($rev) || $rev == 0) { + $current_retries++; + last if ($current_retries >= $retries); + + if (defined($identities_token->{$client2->{identity}})) { + # We ask a sync + print "==== send logs ===\n"; + $client2->send_message(action => 'GETLOG', target => 150, json_encode => 1); + $client2->send_message(action => 'GETLOG', token => $identities_token->{$client2->{identity}}, data => { token => $identities_token->{$client2->{identity}} }, + json_encode => 1); + } + } + + } +} + +sub read_response_result { + my (%options) = @_; + + $options{data} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)$/m; + $identities_token->{$options{identity}} = $1; + + my $data; + eval { + $data = JSON::XS->new->utf8->decode($2); + }; + if ($@) { + return undef; + } + + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + if (defined($data->{data}->{result})) { + foreach my $key (keys %{$data->{data}->{result}}) { + if ($data->{data}->{result}->{$key}->{code} =~ /$stopped->{$options{identity}}/) { + $results->{$options{identity}} = $data->{data}->{result}; + last; + } + } + } + } +} + +sub read_response { + my (%options) = @_; + + print "==== PLOP = " . $options{data} . 
"===\n"; +} + +my ($symkey, $status, $hostname, $ciphertext); + +my $uuid; +#$uuid = 'toto'; +UUID::generate($uuid); + +#$client = gorgone::class::clientzmq->new( +# identity => 'toto', +# cipher => 'Cipher::AES', +# vector => '0123456789012345', +# server_pubkey => 'keys/central/pubkey.crt', +# client_pubkey => 'keys/poller/pubkey.crt', +# client_privkey => 'keys/poller/privkey.pem', +# target_type => 'tcp', +# target_path => '127.0.0.1:5555', +# ping => 60, +#); +#$client->init(callback => \&read_response); +$client2 = gorgone::class::clientzmq->new( + identity => 'tata', + cipher => 'Cipher::AES', + vector => '0123456789012345', + server_pubkey => 'keys/central/pubkey.crt', + client_pubkey => 'keys/poller/pubkey.crt', + client_privkey => 'keys/poller/privkey.pem', + target_type => 'tcp', + target_path => '127.0.0.1:5555' +); +$client2->init(callback => \&read_response_result); + +#$client->send_message( +# action => 'SCOMRESYNC', +# data => { container_id => 'toto' }, +# json_encode => 1 +#); +#$client->send_message(action => 'PUTLOG', data => { code => 120, etime => time(), token => 'plopplop', data => { 'nawak' => 'nawak2' } }, +# json_encode => 1); +#$client2->send_message(action => 'RELOADCRON', data => { }, +# json_encode => 1); + +# We send a request to a poller +#$client2->send_message(action => 'ENGINECOMMAND', data => { command => '[1417705150] ENABLE_HOST_CHECK;host1', engine_pipe => '/var/lib/centreon-engine/rw/centengine.cmd' }, target => 120, +# json_encode => 1); + +#$client2->send_message(action => 'COMMAND', data => { content => { command => 'ls' } }, target => 150, +# json_encode => 1); +#$client2->send_message(action => 'CONSTATUS'); +$client2->send_message( + action => 'LOADMODULE', + data => { content => { name => 'engine', package => 'gorgone::modules::centreon::engine::hooks', enable => 'true', command_file => 'plop' } }, + json_encode => 1 +); + +# It will transform +#$client2->send_message(action => 'GETLOG', data => { cmd => 'ls' }, 
target => 120, +# json_encode => 1); +#$client2->send_message(action => 'GETLOG', data => {}, target => 140, +# json_encode => 1); + +get_command_result(); + +#while (1) { +# my $poll = []; + +# $client->ping(poll => $poll); +# $client2->ping(poll => $poll); +# zmq_poll($poll, 5000); +#} + +while (1) { + #my $poll = [$client->get_poll(), $client2->get_poll()]; + my $poll = [$client2->get_poll()]; + +# $client->ping(poll => $poll); +# $client2->ping(poll => $poll); + zmq_poll($poll, 5000); +} + +$client->close(); +$client2->close(); +exit(0); + +#zmq_close($requester); + diff --git a/gorgone/docs/api.md b/gorgone/docs/api.md new file mode 100644 index 00000000000..d98a8ea86a8 --- /dev/null +++ b/gorgone/docs/api.md @@ -0,0 +1,408 @@ +# API + +Centreon Gorgone provides a RestAPI through its HTTP server module. + +## Internal endpoints + +### Get Nodes Connection Status + +| Endpoint | Method | +|:--------------------|:-------| +| /internal/constatus | `GET` | + +#### Headers + +| Header | Value | +|:-------|:-----------------| +| Accept | application/json | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/internal/constatus" \ + --header "Accept: application/json" +``` + +#### Response example + +```json +{ + "action": "constatus", + "data": { + "2": { + "last_ping_sent": 1579684258, + "type": "push_zmq", + "nodes": {}, + "last_ping_recv": 1579684258 + } + }, + "message": "ok" +} +``` + +### Get Public Key Thumbprint + +| Endpoint | Method | +|:---------------------|:-------| +| /internal/thumbprint | `GET` | + +#### Headers + +| Header | Value | +|:-------|:-----------------| +| Accept | application/json | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/internal/thumbprint" \ + --header "Accept: application/json" +``` + +#### Response example + +```json +{ + "action": "getthumbprint", + "data": { + "thumbprint": "cS4B3lZq96qcP4FTMhVMuwAhztqRBQERKyhnEitnTFM" + }, + "message": "ok" +} +``` + +### Get Runtime 
Informations And Statistics + +| Endpoint | Method | +|:----------------------|:-------| +| /internal/information | `GET` | + +#### Headers + +| Header | Value | +|:-------|:-----------------| +| Accept | application/json | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/internal/information" \ + --header "Accept: application/json" +``` + +#### Response example + +```json +{ + "action": "information", + "data": { + "modules": { + "httpserver": "gorgone::modules::core::httpserver::hooks", + "dbcleaner": "gorgone::modules::core::dbcleaner::hooks", + "cron": "gorgone::modules::core::cron::hooks", + "engine": "gorgone::modules::centreon::engine::hooks", + "action": "gorgone::modules::core::action::hooks", + "statistics": "gorgone::modules::centreon::statistics::hooks", + "nodes": "gorgone::modules::centreon::nodes::hooks", + "legacycmd": "gorgone::modules::centreon::legacycmd::hooks" + }, + "api_endpoints": { + "GET_/centreon/statistics/broker": "BROKERSTATS", + "GET_/internal/thumbprint": "GETTHUMBPRINT", + "GET_/core/cron/definitions": "GETCRON", + "GET_/internal/information": "INFORMATION", + "POST_/core/cron/definitions": "ADDCRON", + "POST_/core/action/command": "COMMAND", + "POST_/centreon/engine/command": "ENGINECOMMAND", + "POST_/core/proxy/remotecopy": "REMOTECOPY", + "PATCH_/core/cron/definitions": "UPDATECRON", + "DELETE_/core/cron/definitions": "DELETECRON", + "GET_/internal/constatus": "CONSTATUS" + }, + "counters": { + "external": { + "total": 0 + }, + "total": 183, + "internal": { + "legacycmdready": 1, + "statisticsready": 1, + "addcron": 1, + "cronready": 1, + "centreonnodesready": 1, + "httpserverready": 1, + "command": 51, + "putlog": 75, + "dbcleanerready": 1, + "information": 1, + "brokerstats": 8, + "total": 183, + "setcoreid": 2, + "getlog": 37, + "engineready": 1, + "actionready": 1 + }, + "proxy": { + "total": 0 + } + } + }, + "message": "ok" +} +``` + +## Modules endpoints + +The available endpoints depend on which 
modules are loaded. + +Endpoints are basically built from: + +* API root, +* optional target node, local if not present ( `/nodes/:nodeid/` ) +* Module's namespace, +* Module's name, +* Action + +#### Example + +```bash +curl --request POST "https://hostname:8443/api/core/action/command" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "[ + { + \"command\": \"echo 'Test command'\" + } +]" +``` + +Find more informations directly from modules documentations [here](./modules.md). + +As Centreon Gorgone is asynchronous, those endpoints will return a token corresponding to the action. + +#### Example + +```json +{ + "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1" +} +``` + +That being said, it is possible to make Gorgone work synchronously by providing two parameters. + +First one is `log_wait` with a numeric value in microseconds: this value defines the amount of time the API will wait before trying to retrieve log results. + +Second one is `sync_wait` with a numeric value in microseconds: this value defines the amount of time the API will wait after asking for logs synchronisation if a remote node is involved. + +Note: the `sync_wait` parameter is induced if you ask for a log directly specifying a node, by using the log endpoint, and the default value is 10000 microseconds (10 milliseconds). 
+ +#### Examples + +##### Launch a command locally and wait for the result + +Using the `/core/action/command` endpoint with `log_wait` parameter set to 100000: + +```bash +curl --request POST "https://hostname:8443/api/core/action/command?log_wait=100000" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "[ + { + \"command\": \"echo 'Test command'\" + } +]" +``` + +This call will ask for the API to execute an action and will give a result after 100ms that can be: + +* Logs, like the log endpoint could provide, +* A no_log error with a token to retrieve the logs later. + +Note: there is no need for logs synchronisation when dealing with local actions. + +##### Launch a command remotely and wait for the result + +Using the `/nodes/:id/core/action/command` endpoint with `log_wait` parameter set to 100000: + +```bash +curl --request POST "https://hostname:8443/api/nodes/2/core/action/command?log_wait=100000&sync_wait=200000" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "[ + { + \"command\": \"echo 'Test command'\" + } +]" +``` + +This call will ask for the API to execute an action on the node with ID 2, will then wait for 100ms before getting a result, but will wait for an extra 200ms for logs synchronisation before giving a result, that can be: + +* Logs, like the log endpoint could provide, +* A no_log error with a token to retrieve the logs later. + +## Log endpoint + +To retrieve the logs, a specific endpoint can be called as follows. 
+ +| Endpoint | Method | +|:------------------------------|:-------| +| /api/nodes/:nodeid/log/:token | `GET` | + +#### Headers + +| Header | Value | +|:-------|:------------------| +| Accept | application/json | + +#### Path variables + +| Variable | Description | +|:---------|:---------------------------| +| token | Token of the action | +| nodeid | node id to search log into | + +#### Examples + +```bash +curl --request GET "https://hostname:8443/api/log/3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1" \ + --header "Accept: application/json" +``` + +```bash +curl --request GET "https://hostname:8443/api/nodes/2/log/3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1" \ + --header "Accept: application/json" +``` + +This second example will force logs synchronisation before looking for results to retrieve. Default wait time is 10ms and can be changed by providing `sync_wait` parameter. 
+ +#### Response example + +```json +{ + "data": [ + { + "ctime": 1576083003, + "etime": 1576083003, + "id": "15638", + "instant": 0, + "data": "{\"message\":\"commands processing has started\",\"request_content\":[{\"timeout\":10,\"command\":\"echo 'Test command'\"}]}", + "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1", + "code": 0 + }, + { + "ctime": 1576083003, + "etime": 1576083003, + "id": "15639", + "instant": 0, + "data": "{\"metadata\":null,\"message\":\"command has started\",\"command\":\"echo 'Test command'\"}", + "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1", + "code": 0 + }, + { + "ctime": 1576083003, + "etime": 1576083003, + "id": "15640", + "instant": 0, + "data": "{\"metadata\":null,\"metrics\":{\"duration\":0,\"start\":1576083003,\"end\":1576083003},\"message\":\"command has finished\",\"command\":\"echo 'Test command'\",\"result\":{\"exit_code\":0,\"stdout\":\"Test command\"}}", + "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1", + "code": 2 + }, + { + "ctime": 1576083003, + "etime": 1576083003, + "id": "15641", + "instant": 0, + "data": "{\"message\":\"commands processing has finished\"}", + "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1", + "code": 2 + } + ], + "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1", + "message": "Logs found" +} +``` + +## Errors + +### Unauthorized + +```json +{ + "error": "http_error_401", + "message": "unauthorized" +} +``` + +### Forbidden + +```json +{ + "error": "http_error_403", + "message": "forbidden" +} +``` + +### Unknown endpoint + +```json +{ + "error": 
"endpoint_unknown", + "message": "endpoint not implemented" +} +``` + +### Unknown method + +```json +{ + "error": "method_unknown", + "message": "Method not implemented" +} +``` + +### No logs for provided token + +```json +{ + "error": "no_log", + "message": "No log found for token", + "data": [], + "token": "" +} +``` + +### JSON decoding error for request + +```json +{ + "error": "decode_error", + "message": "Cannot decode response" +} +``` + +### JSON encoding error for response + +```json +{ + "error": "encode_error", + "message": "Cannot encode response" +} +``` + +### No results for internal actions + +```json +{ + "error": "no_result", + "message": "No result found for action " +} +``` + +### No token found when using wait parameter + +```json +{ + "error": "no_token", + "message": "Cannot retrieve token from ack" +} +``` diff --git a/gorgone/docs/api/centreon-logo.png b/gorgone/docs/api/centreon-logo.png new file mode 100755 index 00000000000..5458fb678d4 Binary files /dev/null and b/gorgone/docs/api/centreon-logo.png differ diff --git a/gorgone/docs/api/gorgone-openapi.yaml b/gorgone/docs/api/gorgone-openapi.yaml new file mode 100644 index 00000000000..a7e6a203ce5 --- /dev/null +++ b/gorgone/docs/api/gorgone-openapi.yaml @@ -0,0 +1,1044 @@ +openapi: 3.0.1 +info: + title: Centreon Gorgone RestAPI + description: | + # Information + Centreon Gorgone and his "gorgoned" daemon is a lightweight, distributed, modular tasks handler. + + It provides a set of actions like: + + - Execute commands + - Send files/directories, + - Schedule cron-like tasks, + - Push or execute tasks through SSH. + + The daemon can be installed on Centreon environments like Centreon Central, Remote and Poller servers. + + It uses ZeroMQ library. 
+ x-logo: + url: ./centreon-logo.png + contact: + url: 'https://www.centreon.com' + license: + name: Apache 2.0 + url: 'http://www.apache.org/licenses/LICENSE-2.0.html' + version: "1.0" +externalDocs: + description: You can contact us on our community Slack + url: 'https://centreon.slack.com/messages/CCRGLQSE5' +servers: + - url: '{protocol}://{server}:{port}/api' + description: "Local Gorgone instance" + variables: + protocol: + enum: + - http + - https + default: http + description: "HTTP schema" + server: + default: localhost + description: "IP address or hostname of Gorgone instance" + port: + default: '8085' + description: "Port used by HTTP server" + - url: '{protocol}://{server}:{port}/api/nodes/{id}' + description: "Remote Gorgone instance" + variables: + protocol: + enum: + - http + - https + default: http + description: "HTTP schema" + server: + default: localhost + description: "IP address or hostname of Gorgone instance" + port: + default: '8085' + description: "Port used by HTTP server" + id: + default: '1' + description: "ID of the remote Gorgone node" +tags: + - name: Internal + description: "Internal events." + - name: Logs + description: "Logs management." + - name: Cron + description: "Module aiming to reproduce a cron-like scheduler that can send events to other Gorgone modules." + - name: Action + description: "Module aiming to execute actions on the server running the Gorgone daemon or remotly using SSH." + - name: Engine + description: "Module aiming to provide a bridge to communicate with Centreon Engine daemon." + - name: Statistics + description: "Module aiming to deal with statistics collection of Centreon Engine and Broker." + - name: Autodiscovery + description: "Module aiming to extend Centreon Autodiscovery server functionalities." 
+security: + - Basic Authentication: [] +paths: + /internal/constatus: + get: + tags: + - Internal + summary: "Get nodes connection status" + description: "Get the connection status of all nodes managed by the Gorgone daemon." + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/NodesStatus' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /internal/information: + get: + tags: + - Internal + summary: "Get runtime informations and statistics" + description: "Get informations and statistics about loaded modules, available endpoints and number of events computed at runtime." + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/Information' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /internal/thumbprint: + get: + tags: + - Internal + summary: "Get public key thumbprint" + description: "Get the thumbprint of the public key of the Gorgone daemon." + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/Thumbprint' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /internal/logger: + post: + tags: + - Internal + summary: "Set logger severity level" + description: "Set the logger severity level for all modules." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SeverityLevel' + responses: + '204': + description: OK + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /log/{token}: + get: + tags: + - Logs + summary: "Retrieve event's logs" + description: "Retrieve the event's logs based on event's token." 
+ parameters: + - $ref: '#/components/parameters/Token' + - $ref: '#/components/parameters/Code' + - $ref: '#/components/parameters/Limit' + - $ref: '#/components/parameters/Ctime' + - $ref: '#/components/parameters/Etime' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/cron/definitions: + get: + tags: + - Cron + summary: "List definitions" + description: "List all cron definitions." + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + post: + tags: + - Cron + summary: "Add definitions" + description: "Add one or multiple cron definitions to runtime." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CronDefinitions' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/cron/definitions/{definition_id}: + get: + tags: + - Cron + summary: "Get a definition" + description: "List cron definition identified by id." 
+ parameters: + - $ref: '#/components/parameters/DefinitionId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + patch: + tags: + - Cron + summary: "Update a definition" + description: "Update a cron definition." + parameters: + - $ref: '#/components/parameters/DefinitionId' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CronDefinition' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + delete: + tags: + - Cron + summary: "Delete a definition" + description: "Delete a cron definition." + parameters: + - $ref: '#/components/parameters/DefinitionId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/cron/definitions/{definition_id}/status: + get: + tags: + - Cron + summary: "Get a definition status" + description: "Get a definition execution status." 
+ parameters: + - $ref: '#/components/parameters/DefinitionId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/action/command: + post: + tags: + - Action + summary: "Execute one or several command lines" + description: "Execute a command or a set of commands on server running Gorgone." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ActionCommands' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/engine/command: + post: + tags: + - Engine + summary: "Send one or several external commands" + description: | + Send an external command or a set of external commands to a running Centreon Engine instance using command file pipe. + This method needs the commands to be preformatted as Nagios external commands format. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EngineCommands' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/statistics/broker: + get: + tags: + - Statistics + summary: "Launch Broker statistics collection" + description: "Launch Broker statistics collection and store the result on disk." + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/statistics/broker/{monitoring_server_id}: + get: + tags: + - Statistics + summary: "Launch Broker statistics collection of a specific monitoring server" + description: "Launch Broker statistics collection and store the result on disk." + parameters: + - $ref: '#/components/parameters/MonitoringServerId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/autodiscovery/hosts: + post: + tags: + - Autodiscovery + summary: "Add a host discovery job" + description: "Add one Centreon Autodiscovery job to discover hosts." 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/HostDiscoveryJob' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/autodiscovery/hosts/{job_id}/schedule: + get: + tags: + - Autodiscovery + summary: "Launch a host discovery job" + description: "Launch a host discovery job identified by id (even if in cron mode)." + parameters: + - $ref: '#/components/parameters/HostDiscoveryId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/autodiscovery/hosts/{token}: + delete: + tags: + - Autodiscovery + summary: "Delete a host discovery job" + description: "Delete one Centreon Autodiscovery scheduled job." 
+ parameters: + - $ref: '#/components/parameters/HostDiscoveryToken' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' +components: + securitySchemes: + Basic Authentication: + type: http + scheme: basic + parameters: + Token: + in: path + name: token + required: true + description: "Token of the event" + schema: + type: string + example: "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165" + Code: + in: query + name: code + required: false + description: "Only retrieve logs with defined code" + schema: + type: integer + enum: [0, 1, 2] + example: 2 + Limit: + in: query + name: limit + required: false + description: "Only retrieve the last x logs" + schema: + type: integer + minimum: 1 + example: 1 + Ctime: + in: query + name: ctime + required: false + description: "Only retrieve logs with a creation time equal or superior to a timestamp" + schema: + type: integer + format: int64 + example: 1577726040 + Etime: + in: query + name: etime + required: false + description: "Only retrieve logs of an event time superior to a timestamp" + schema: + type: integer + format: int64 + example: 1577726040 + DefinitionId: + in: path + name: definition_id + required: true + description: "ID of the definition" + schema: + type: string + example: "broker_stats" + MonitoringServerId: + in: path + name: monitoring_server_id + required: true + description: "ID of the monitoring server" + schema: + type: integer + example: 2 + HostDiscoveryId: + in: path + name: job_id + required: true + description: "ID of the job" + schema: + type: integer + example: 2 + HostDiscoveryToken: + in: path + name: token + required: 
true + description: "Token of the scheduled job" + schema: + type: string + example: "discovery_14_6b7d1bb8" + responses: + NotFound: + description: "The specified resource was not found" + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + Unauthorized: + description: "Unauthorized" + headers: + WWW-Authenticate: + schema: + type: string + content: + application/json: + schema: + type: object + properties: + error: + type: string + description: "Short error description" + example: "http_error_401" + message: + type: string + description: "Message explaining the error" + example: "unauthorized" + required: + - error + - message + Forbidden: + description: "Forbidden" + content: + application/json: + schema: + type: object + properties: + error: + type: string + description: "Short error description" + example: "http_error_403" + message: + type: string + description: "Message explaining the error" + example: "forbidden" + required: + - error + - message + UnknownEndpoint: + description: "Unknown endpoint" + content: + application/json: + schema: + type: object + properties: + error: + type: string + description: "Short error description" + example: "method_unknown" + message: + type: string + description: "Message explaining the error" + example: "Method not implemented" + required: + - error + - message + UnknownMethod: + description: "Unknown method" + content: + application/json: + schema: + type: object + properties: + error: + type: string + description: "Short error description" + example: "endpoint_unknown" + message: + type: string + description: "Message explaining the error" + example: "endpoint not implemented" + required: + - error + - message + schemas: + Error: + type: object + properties: + error: + type: string + description: "Short error description" + message: + type: string + description: "Message explaining the error" + required: + - error + - message + Token: + type: object + properties: + token: + type: string + 
format: byte + description: "Token related to the event's result" + example: "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165" + Logs: + type: object + properties: + message: + type: string + description: "Additionnal message" + example: "Logs found" + token: + type: string + format: byte + description: "Token related to the event's result" + example: "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7" + data: + type: array + description: "Results array containing all logs related to token" + items: + $ref: '#/components/schemas/Log' + Log: + type: object + properties: + ctime: + type: string + format: timestamp + description: "Time when the server has stored the log in its database" + example: 1577727699 + etime: + type: string + format: timestamp + description: "Time when the event has occured" + example: 1577727699 + id: + type: integer + description: "ID of the event" + example: 101483 + instant: + type: integer + example: 0 + data: + type: object + description: "Data stored for this event" + token: + type: string + format: byte + description: "Token related to the event" + example: "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7" + code: + type: integer + description: "Returned code of the event" + example: 2 + NoLogs: + type: object + properties: + error: + type: string + description: "Short error description" + example: "no_log" + message: + type: string + description: "Message explaining the error" + example: "No log found for token" + token: + type: string + description: "Token related to the event's result" + example: "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7" + data: + type: array + description: "Empty array" + items: + type: 
object + NodesStatus: + type: object + properties: + action: + type: string + description: "Event sent to retrieve data" + example: "constatus" + message: + type: string + description: "Response message" + example: "ok" + data: + type: object + properties: + id: + $ref: '#/components/schemas/NodeStatus' + NodeStatus: + type: object + properties: + last_ping_sent: + type: string + format: timestamp + description: "Last ping sent timestamp" + example: 1577726040 + type: + type: string + enum: [push_zmq, pull_zmq, ssh] + description: "Communication type" + example: "push_zmq" + nodes: + type: object + description: "Nodes managed by this Gorgone daemon" + last_ping_recv: + type: string + format: timestamp + description: "Last ping received timestamp" + example: 1577726040 + Information: + type: object + properties: + action: + type: string + description: "Event sent to retrieve data" + example: "information" + message: + type: string + description: "Response message" + example: "ok" + data: + type: object + properties: + modules: + $ref: '#/components/schemas/Modules' + api_endpoints: + $ref: '#/components/schemas/ApiEndpoints' + counters: + $ref: '#/components/schemas/Counters' + Modules: + type: object + description: "List of loaded modules" + additionalProperties: + type: string + example: + httpserver: "gorgone::modules::core::httpserver::hooks" + dbcleaner: "gorgone::modules::core::dbcleaner::hooks" + cron: "gorgone::modules::core::cron::hooks" + engine: "gorgone::modules::centreon::engine::hooks" + action: "gorgone::modules::core::action::hooks" + statistics: "gorgone::modules::centreon::statistics::hooks" + nodes: "gorgone::modules::centreon::nodes::hooks" + legacycmd: "gorgone::modules::centreon::legacycmd::hooks" + proxy: "gorgone::modules::core::proxy::hooks" + ApiEndpoints: + type: object + description: "List of available endpoints" + additionalProperties: + type: string + example: + POST_/internal/logger: "BCASTLOGGER" + GET_/centreon/statistics/broker: 
"BROKERSTATS" + GET_/internal/thumbprint: "GETTHUMBPRINT" + GET_/core/cron/definitions: "GETCRON" + GET_/internal/information: "INFORMATION" + POST_/core/cron/definitions: "ADDCRON" + POST_/core/action/command: "COMMAND" + POST_/core/proxy/remotecopy: "REMOTECOPY" + POST_/centreon/engine/command: "ENGINECOMMAND" + PATCH_/core/cron/definitions: "UPDATECRON" + DELETE_/core/cron/definitions: "DELETECRON" + GET_/internal/constatus: "CONSTATUS" + Counters: + type: object + description: "List of metric counters" + properties: + total: + type: integer + description: "Total number of events processed since startup" + example: 40210 + external: + type: object + description: "Number of external events since startup" + additionalProperties: + type: string + example: + total: 0 + internal: + type: object + description: "Number of internal events since startup" + additionalProperties: + type: string + example: + legacycmdready: 1 + setlogs: 7841 + enginecommand: 20 + registernodes: 443 + pong: 3397 + proxyready: 5 + statisticsready: 1 + addcron: 1 + cronready: 1 + getthumbprint: 2 + centreonnodesready: 1 + httpserverready: 1 + command: 4446 + putlog: 9809 + dbcleanerready: 1 + information: 6 + brokerstats: 4446 + constatus: 1 + total: 40210 + setcoreid: 443 + getlog: 8893 + engineready: 1 + unregisternodes: 443 + actionready: 1 + proxy: + type: object + description: "Number of events passed through proxy since startup" + additionalProperties: + type: string + example: + enginecommand: 10 + getlog: 4446 + total: 8902 + command: 4446 + Thumbprint: + type: object + properties: + action: + type: string + description: "Event sent to retrieve data" + example: "getthumbprint" + message: + type: string + description: "Response message" + example: "ok" + data: + type: object + properties: + thumbprint: + type: string + description: "Thumbprint of the public key" + example: + "cS4B3lZq96qcP4FTMhVMuwAhztqRBQERKyhnEitnTFM" + SeverityLevel: + type: object + properties: + severity: + type: 
string + description: "Severity level to be defined for all loaded modules" + enum: + - info + - error + - debug + CronDefinitions: + type: array + items: + $ref: '#/components/schemas/CronDefinition' + CronDefinition: + type: object + properties: + timespec: + type: string + description: "Cron-like time specification" + id: + type: string + description: "Unique identifier of the cron definition" + action: + type: string + description: "Action/event to call at job execution" + parameters: + type: object + description: "Parameters needed by the called action/event" + keep_token: + type: boolean + description: "Boolean to define whether or not the ID of the definition will be used as token for the command" + required: + - timespec + - id + - action + - parameters + ActionCommands: + type: array + items: + $ref: '#/components/schemas/ActionCommand' + ActionCommand: + type: object + properties: + command: + type: string + description: "Command to execute" + example: "echo data > /tmp/date.log" + timeout: + type: integer + description: "Time in seconds before a command is considered timed out" + example: 5 + default: 30 + continue_on_error: + type: boolean + description: "Behaviour in case of execution issue" + example: true + default: false + required: + - command + EngineCommands: + type: object + properties: + command_file: + type: string + description: "Path to the Centreon Engine command file pipe" + example: "/var/lib/centreon-engine/rw/centengine.cmd" + command: + type: array + items: + type: string + description: "External command" + example: "[653284380] SCHEDULE_SVC_CHECK;host1;service1;653284380" + HostDiscoveryJob: + type: object + properties: + job_id: + type: integer + description: "ID of the Host Discovery job" + example: 14 + target: + type: integer + description: "Identifier of the target on which to execute the command" + example: 2 + command_line: + type: string + description: "Command line to execute to perform the discovery" + example: "perl 
/usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'" + timeout: + type: integer + description: "Time in seconds before the command is considered timed out" + example: 300 + execution: + type: object + description: "Execution mode of this job ('0': execute immediately, '1': schedule with cron)" + properties: + mode: + type: integer + description: "Execution mode ('0': immediate, '1': scheduled)" + example: 0 + parameters: + type: object + description: "Parameters needed by execution mode" + properties: + cron_definition: + type: string + description: "Cron definition" + example: "*/10 * * * *" + post_execution: + type: object + description: "Post-execution settings" + properties: + commands: + type: array + description: "Array of commands (content depends on command)" + items: + type: object + description: "Command" + properties: + action: + type: string + description: "Action to perform" + example: COMMAND + command_line: + type: string + description: "Command line to execute" + example: "/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host --job-id=14" + required: + - job_id + - target + - command_line + - execution + \ No newline at end of file diff --git a/gorgone/docs/api/index.html b/gorgone/docs/api/index.html new file mode 100644 index 00000000000..e2f378ac27d --- /dev/null +++ b/gorgone/docs/api/index.html @@ -0,0 +1,504 @@ + + + + + + Centreon Gorgone RestAPI + + + + + + + + + +

Information

Centreon Gorgone and its "gorgoned" daemon is a lightweight, distributed, modular task handler.

+

It provides a set of actions like:

+
    +
  • Execute commands
  • +
  • Send files/directories,
  • +
  • Schedule cron-like tasks,
  • +
  • Push or execute tasks through SSH.
  • +
+

The daemon can be installed on Centreon environments like Centreon Central, Remote and Poller servers.

+

It uses ZeroMQ library.

+

Authentication

Basic Authentication

Security Scheme Type HTTP
HTTP Authorization Scheme basic

Internal

Internal events.

+

Get nodes connection status

Get the connection status of all nodes managed by the Gorgone daemon.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/internal/constatus

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/constatus

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/constatus

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "action": "constatus",
  • "message": "ok",
  • "data":
    {
    }
}

Get runtime information and statistics

Get information and statistics about loaded modules, available endpoints and the number of events computed at runtime.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/internal/information

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/information

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/information

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "action": "information",
  • "message": "ok",
  • "data":
    {
    }
}

Get public key thumbprint

Get the thumbprint of the public key of the Gorgone daemon.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/internal/thumbprint

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/thumbprint

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/thumbprint

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "action": "getthumbprint",
  • "message": "ok",
  • "data":
    {
    }
}

Set logger severity level

Set the logger severity level for all modules.

+
Authorizations:
Request Body schema: application/json
severity
string
Enum: "info" "error" "debug"

Severity level to be defined for all loaded modules

+

Responses

204

OK

+
401

Unauthorized

+
403

Forbidden

+
post/internal/logger

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/logger

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/logger

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "severity": "info"
}

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "error": "http_error_401",
  • "message": "unauthorized"
}

Logs

Logs management.

+

Retrieve event's logs

Retrieve the event's logs based on event's token.

+
Authorizations:
path Parameters
token
required
string
Example: 1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165

Token of the event

+
query Parameters
code
integer
Enum: 0 1 2
Example: code=2

Only retrieve logs with defined code

+
limit
integer >= 1
Example: limit=1

Only retrieve the last x logs

+
ctime
integer <int64>
Example: ctime=1577726040

Only retrieve logs with a creation time equal or superior to a timestamp

+
etime
integer <int64>
Example: etime=1577726040

Only retrieve logs of an event time superior to a timestamp

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/log/{token}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/log/{token}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/log/{token}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "message": "Logs found",
  • "token": "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7",
  • "data":
    [
    ]
}

Cron

Module aiming to reproduce a cron-like scheduler that can send events to other Gorgone modules.

+

List definitions

List all cron definitions.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/core/cron/definitions

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Add definitions

Add one or multiple cron definitions to runtime.

+
Authorizations:
Request Body schema: application/json
Array
timespec
required
string

Cron-like time specification

+
id
required
string

Unique identifier of the cron definition

+
action
required
string

Action/event to call at job execution

+
parameters
required
object

Parameters needed by the called action/event

+
keep_token
boolean

Boolean to define whether or not the ID of the definition will be used as token for the command

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/core/cron/definitions

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions

Request samples

Content type
application/json
Copy
Expand all Collapse all
[
  • {
    }
]

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Get a definition

List cron definition identified by id.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/core/cron/definitions/{definition_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Update a definition

Update a cron definition.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+
Request Body schema: application/json
timespec
required
string

Cron-like time specification

+
id
required
string

Unique identifier of the cron definition

+
action
required
string

Action/event to call at job execution

+
parameters
required
object

Parameters needed by the called action/event

+
keep_token
boolean

Boolean to define whether or not the ID of the definition will be used as token for the command

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
patch/core/cron/definitions/{definition_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "timespec": "string",
  • "id": "string",
  • "action": "string",
  • "parameters": { },
  • "keep_token": true
}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Delete a definition

Delete a cron definition.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
delete/core/cron/definitions/{definition_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Get a definition status

Get a definition execution status.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/core/cron/definitions/{definition_id}/status

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}/status

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}/status

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Action

Module aiming to execute actions on the server running the Gorgone daemon or remotely using SSH.

+

Execute one or several command lines

Execute a command or a set of commands on server running Gorgone.

+
Authorizations:
Request Body schema: application/json
Array
command
required
string

Command to execute

+
timeout
integer
Default: 30

Time in seconds before a command is considered timed out

+
continue_on_error
boolean
Default: false

Behaviour in case of execution issue

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/core/action/command

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/action/command

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/action/command

Request samples

Content type
application/json
Copy
Expand all Collapse all
[
  • {
    }
]

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Engine

Module aiming to provide a bridge to communicate with Centreon Engine daemon.

+

Send one or several external commands

Send an external command or a set of external commands to a running Centreon Engine instance using command file pipe. +This method needs the commands to be preformatted as Nagios external commands format.

+
Authorizations:
Request Body schema: application/json
command_file
string

Path to the Centreon Engine command file pipe

+
command
Array of strings

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/centreon/engine/command

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/engine/command

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/engine/command

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "command_file": "/var/lib/centreon-engine/rw/centengine.cmd",
  • "command":
    [
    ]
}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Statistics

Module aiming to deal with statistics collection of Centreon Engine and Broker.

+

Launch Broker statistics collection

Launch Broker statistics collection and store the result on disk.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/centreon/statistics/broker

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/statistics/broker

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/statistics/broker

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Launch Broker statistics collection of a specific monitoring server

Launch Broker statistics collection and store the result on disk.

+
Authorizations:
path Parameters
monitoring_server_id
required
integer
Example: 2

ID of the monitoring server

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/centreon/statistics/broker/{monitoring_server_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/statistics/broker/{monitoring_server_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/statistics/broker/{monitoring_server_id}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Autodiscovery

Module aiming to extend Centreon Autodiscovery server functionalities.

+

Add a host discovery job

Add one Centreon Autodiscovery job to discover hosts.

+
Authorizations:
Request Body schema: application/json
job_id
required
integer

ID of the Host Discovery job

+
target
required
integer

Identifier of the target on which to execute the command

+
command_line
required
string

Command line to execute to perform the discovery

+
timeout
integer

Time in seconds before the command is considered timed out

+
execution
required
object

Execution mode of this job ('0': execute immediately, '1': schedule with cron)

+
post_execution
object

Post-execution settings

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/centreon/autodiscovery/hosts

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/autodiscovery/hosts

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/autodiscovery/hosts

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "job_id": 14,
  • "target": 2,
  • "command_line": "perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'",
  • "timeout": 300,
  • "execution":
    {
    },
  • "post_execution":
    {
    }
}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Launch a host discovery job

Launch a host discovery job identified by id (even if in cron mode).

+
Authorizations:
path Parameters
job_id
required
integer
Example: 2

ID of the job

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/centreon/autodiscovery/hosts/{job_id}/schedule

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/autodiscovery/hosts/{job_id}/schedule

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/autodiscovery/hosts/{job_id}/schedule

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Delete a host discovery job

Delete one Centreon Autodiscovery scheduled job.

+
Authorizations:
path Parameters
token
required
string
Example: discovery_14_6b7d1bb8

Token of the scheduled job

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
delete/centreon/autodiscovery/hosts/{token}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/autodiscovery/hosts/{token}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/autodiscovery/hosts/{token}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}
+ + + + \ No newline at end of file diff --git a/gorgone/docs/client_server_zmq.md b/gorgone/docs/client_server_zmq.md new file mode 100644 index 00000000000..079e12087e1 --- /dev/null +++ b/gorgone/docs/client_server_zmq.md @@ -0,0 +1,90 @@ +# Client/Server ZMQ communication + +When using ZMQ protocol, all communications are encrypted using symmetric-key encryption based on public/private keys from both client and server. + +In a Centreon context, the **client** is the Gorgone daemon running on the **Centreon Central**, the **servers** are the daemon running on **Pollers**. + +## Generate private and public keys + +On both client and server, generate RSA private and public keys using *centreon* user. + +```bash +$ mkdir -p /var/spool/centreon/.gorgone/ +$ chmod 700 /var/spool/centreon/.gorgone +$ openssl genrsa -out /var/spool/centreon/.gorgone/privkey.pem 4092 +Generating RSA private key, 4092 bit long modulus +...................................++ +...........................................................................................................................................................................++ +e is 65537 (0x10001) +$ openssl rsa -in /var/spool/centreon/.gorgone/privkey.pem -out /var/spool/centreon/.gorgone/pubkey.pem -pubout -outform PEM +writing RSA key +$ chmod 644 /var/spool/centreon/.gorgone/pubkey.pem +$ chmod 600 /var/spool/centreon/.gorgone/privkey.pem +``` + +Copy the server public key onto the client in a specific directory (for example */var/spool/centreon/.gorgone/*) + +## Get the string-formatted JWK thumbprint + +On the client, execute the following command: + +```bash +$ perl /usr/local/bin/gorgone_key_thumbprint.pl --key-path='/var/spool/centreon/.gorgone/pubkey.pem' +2019-09-30 11:00:00 - INFO - File '/var/spool/centreon/.gorgone/pubkey.pem' JWK thumbprint: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 +``` + +## Set the configurations + +*Make the IDs match Centreon Pollers ID to benefit from [legacy 
cmd](../docs/modules/core/legacycmd.md) module's actions.* + +#### Server + +In the */etc/centreon/config.d/20-gorgoned.yaml* configuration file, add the following directives under the +*gorgonecore* +section: + +```yaml +gorgone: + gorgonecore: + id: 1 + privkey: /var/spool/centreon/.gorgone/privkey.pem + pubkey: /var/spool/centreon/.gorgone/pubkey.pem +``` + +Add the [register](../docs/modules/core/register.md) module and define the path to the dedicated configuration file. + +```yaml +modules: + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + config_file: /etc/centreon/gorgone-targets.yml +``` + +Create the file */etc/centreon/gorgone-targets.yml* and fill it with the following configuration: + +```yaml +nodes: + - id: 2 + type: push_zmq + address: 10.1.2.3 + port: 5556 +``` + +#### Client + +In the */etc/centreon/config.d/20-gorgoned.yaml* configuration file, add the following directives: + +```yaml +gorgone: + gorgonecore: + id: 2 + external_com_type: tcp + external_com_path: "*:5556" + privkey: /var/spool/centreon/.gorgone/privkey.pem + pubkey: /var/spool/centreon/.gorgone/pubkey.pem + authorized_clients: + - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 +``` + +The *authorized_clients* entry allows to define the client public key thumbprint retrieved earlier. 
diff --git a/gorgone/docs/configuration.md b/gorgone/docs/configuration.md new file mode 100644 index 00000000000..0789429e9b7 --- /dev/null +++ b/gorgone/docs/configuration.md @@ -0,0 +1,105 @@ +# Configuration + +| Directive | Description | +| :------------ | :---------------------------------------------------------------------- | +| name | Name of the configuration | +| description | Short string to decribe the configuration | +| configuration | First configuration entry point | +| centreon | Entry point to set Centreon configuration | +| database | Entry point to set Centreon databases data source names and credentials | +| gorgonecore | Entry point to set Gorgone main configuration | +| modules | Table to load and configuration Gorgone modules | + +## *database* + +Usefull in a Centreon Central installation to access Centreon databases. + +| Directive | Description | +| :-------- | :------------------------------- | +| dsn | Data source name of the database | +| username | Username to access the database | +| password | Username's password | + +#### Example + +```yaml +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon +``` + +## *gorgonecore* + +| Directive | Description | Default value | +| :-------------------- | :---------------------------------------------------------------------- | :--------------------------------------------- | +| internal_com_type | Type of the internal ZMQ socket | `ipc` | +| internal_com_path | Path to the internal ZMQ socket | `/tmp/gorgone/routing.ipc` | +| internal_com_crypt | Crypt internal communication | `true` | +| internal_com_cipher | Internal communication cipher | `AES` | +| internal_com_padding | Internal communication padding | `1` (mean: PKCS5) | +| internal_com_keysize | Internal communication key size | `32` 
(bytes) | +| internal_com_rotation | Internal communication time before key rotation | `1440` (minutes) | +| external_com_type | Type of the external ZMQ socket | `tcp` | +| external_com_path | Path to the external ZMQ socket | `*:5555` | +| external_com_cipher | Cipher used for encryption | `AES` | +| external_com_keysize | Size in bytes of the symmetric encryption key | `32` | +| external_com_padding | External communication padding | `1` (mean: PKCS5) | +| external_com_rotation | External communication time before key rotation | `1440` (minutes) | +| timeout | Time in seconds before killing child processes when stopping Gorgone | `50` | +| gorgone_db_type | Type of the Gorgone database | `SQLite` | +| gorgone_db_name | Path and name of the database | `dbname=/var/lib/centreon-gorgone/history.sdb` | +| gorgone_db_host | Hostname/IP address of the server hosting the database | | +| gorgone_db_port | Port of the database listener | | +| gorgone_db_user | Username to access the database | | +| gorgone_db_password | Username's password | | +| hostname | Hostname of the server running Gorgone | Result of *hostname* system function. | +| id | Identifier of server running Gorgone | None. Must be unique over all Gorgone daemons. 
| +| privkey | Path to the Gorgone core private key | `keys/rsakey.priv.pem` | +| pubkey | Path to the Gorgone core public key | `keys/rsakey.pub.pem` | +| fingerprint_mode | Validation mode of zmq nodes to connect (can be: always, first, strict) | `first` | +| fingerprint_mgr | Hash of the definition class to store fingerprints | | +| authorized_clients | Table of string-formated JWK thumbprints of clients public key | | +| proxy_name | Name of the proxy module definition | `proxy` (loaded internally) | + +#### Example + +```yaml +configuration: + gorgone: + gorgonecore: + internal_com_type: ipc + internal_com_path: /tmp/gorgone/routing.ipc + external_com_type: tcp + external_com_path: "*:5555" + timeout: 50 + gorgone_db_type: SQLite + gorgone_db_name: dbname=/var/lib/centreon-gorgone/history.sdb + gorgone_db_host: + gorgone_db_port: + gorgone_db_user: + gorgone_db_password: + hostname: + id: + privkey: keys/central/privkey.pem + cipher: "Cipher::AES" + keysize: 32 + vector: 0123456789012345 + fingerprint_mode: first + fingerprint_mgr: + package: gorgone::class::fingerprint::backend::sql + authorized_clients: + - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 + proxy_name: proxy +``` + +## *modules* + +See the *configuration* titles of the modules documentations listed [here](../docs/modules.md). 
diff --git a/gorgone/docs/getting_started.md b/gorgone/docs/getting_started.md new file mode 100644 index 00000000000..5611a2464b5 --- /dev/null +++ b/gorgone/docs/getting_started.md @@ -0,0 +1,201 @@ +# Getting started + +## Installation + +### From package + +Using Centreon standard yum repositories, execute the following command to install Gorgone: + +```bash +yum install centreon-gorgone +``` + +### From sources centos 7 + +Using Github project, execute the following command to retrieve Gorgone source code: + +```bash +git clone https://github.com/centreon/centreon-gorgone +``` + +The daemon uses the following Perl modules: + +* Repository 'centreon-stable': + * ZMQ::LibZMQ4 + * UUID + * Digest::MD5::File +* Repository 'centos base': + * JSON::PP + * JSON::XS + * YAML + * DBD::SQLite + * DBD::mysql + * Crypt::CBC + * HTTP::Daemon + * HTTP::Status + * MIME::Base64 + * NetAddr::IP +* Repository 'epel': + * HTTP::Daemon::SSL + * Schedule::Cron +* From offline packages: + * Hash::Merge + * YAML::XS + * Crypt::Cipher::AES (module CryptX) + * Crypt::PK::RSA (module CryptX) + * Crypt::PRNG (module CryptX) + +Execute the following commands to install them all: + +```bash +yum install 'perl(JSON::PP)' 'perl(Digest::MD5::File)' 'perl(NetAddr::IP)' 'perl(Schedule::Cron)' 'perl(Crypt::CBC)' 'perl(ZMQ::LibZMQ4)' 'perl(JSON::XS)' 'perl(YAML)' 'perl(DBD::SQLite)' 'perl(DBD::mysql)' 'perl(UUID)' 'perl(HTTP::Daemon)' 'perl(HTTP::Daemon::SSL)' 'perl(HTTP::Status)' 'perl(MIME::Base64)' +yum install packaging/packages/perl-CryptX-0.064-1.el7.x86_64.rpm packaging/packages/perl-YAML-LibYAML-0.80-1.el7.x86_64.rpm packaging/packages/perl-Hash-Merge-0.300-1.el7.noarch.rpm packaging/packages/perl-Clone-Choose-0.010-1.el7.noarch.rpm +``` + +### From sources centos 8 + +Using Github project, execute the following command to retrieve Gorgone source code: + +```bash +git clone https://github.com/centreon/centreon-gorgone +``` + +The daemon uses the following Perl modules: + +* Repository 
'centos base': + * JSON::PP + * YAML + * DBD::SQLite + * DBD::mysql + * HTTP::Status + * MIME::Base64 + * NetAddr::IP +* Repository 'epel': + * Crypt::CBC + * HTTP::Daemon::SSL + * Schedule::Cron + * Hash::Merge +* From offline packages: + * ZMQ::LibZMQ4 + * UUID + * Digest::MD5::File + * JSON::XS + * HTTP::Daemon + * YAML::XS + * Crypt::Cipher::AES (module CryptX) + * Crypt::PK::RSA (module CryptX) + * Crypt::PRNG (module CryptX) + +Execute the following commands to install them all: + +```bash +dnf install packaging/packages/*.el8*.rpm +dnf install 'perl(Hash::Merge)' 'perl(JSON::PP)' 'perl(NetAddr::IP)' 'perl(Schedule::Cron)' 'perl(Crypt::CBC)' 'perl(YAML)' 'perl(DBD::SQLite)' 'perl(DBD::mysql)' 'perl(HTTP::Daemon::SSL)' 'perl(HTTP::Status)' 'perl(MIME::Base64)' +``` + +## Configuration + +You can retrieve `centcore` configuration, i.e. database hostname and credentials in */etc/centreon/conf.pm*, and build a minimal configuration by applying the [migration procedure](../docs/migration.md). + +All directives are available [here](../docs/configuration.md). + +## Create the database + +Gorgone uses a SQLite database to store all events messages. + +If it does not exist, the daemon will automatically create it in the path set by the `gorgone_db_name` configuration directive. 
However, you can manually create it with the database schema:
+Make sure the daemon is running: + +```bash +$ systemctl status gorgoned +● gorgoned.service - Centreon Gorgone + Loaded: loaded (/etc/systemd/system/gorgoned.service; disabled; vendor preset: disabled) + Active: active (running) since Mon 2019-09-30 09:36:19 CEST; 2min 29s ago + Main PID: 5168 (perl) + CGroup: /system.slice/gorgoned.service + ├─5168 /usr/bin/perl /usr/bin/gorgoned --config=/etc/centreon-gorgone/config.yaml --logfile=/var/log/centreon/gorgoned.log --severity=info + ├─5175 gorgone-dbcleaner + ├─5182 gorgone-action + ├─5187 gorgone-nodes + ├─5190 gorgone-legacycmd + ├─5203 gorgone-proxy + ├─5204 gorgone-proxy + ├─5205 gorgone-proxy + ├─5206 gorgone-proxy + └─5207 gorgone-proxy + +Sep 30 09:36:19 localhost systemd[1]: Started Centreon Gorgone. +``` + +If you are using the sources, execute the following command: + +```bash +perl gorgoned --config=config/config.yaml --severity=info +``` + +## Full-ZMQ setup + +To use Gorgone distributed on multiple servers using ZMQ, follow the example given [here](../docs/client_server_zmq.md). diff --git a/gorgone/docs/guide.md b/gorgone/docs/guide.md new file mode 100644 index 00000000000..a60df55deab --- /dev/null +++ b/gorgone/docs/guide.md @@ -0,0 +1,268 @@ +# Gorgone protocol + +"gorgone-core" (main mandatory module) can have 2 interfaces: + +* Internal: uncrypted dialog (used by internal modules. Commonly in ipc) +* External: crypted dialog (used by third-party clients. Commonly in tcp) + +## Handshake scenario + +Third-party clients have to use the ZeroMQ library and the following process: + +1. Client: need to create an uniq identity (will be used in "zmq_setsockopt" and "ZMQ_IDENTITY") +2. Client -> Server: ask the server pubkey + + ```text + [GETPUBKEY] + ``` + +3. Server -> Client: send back the pubkey + + ```text + [PUBKEY] [base64 encoding pubkey] + ``` + +4. 
Client -> Server: send the following message with HELO crypted with the public key of the server (and base64 encoding) and provides client pubkey (base64 encoding): + + ```text + [HOSTNAME] [CLIENTPUBKEY] [HELO] + ``` + +5. Server -> Client: uncrypt the client message: + + * If uncrypted message result is not "HELO", server refuses the connection and send it back: + + ```text + [ACK] [] { "code": 1, "data": { "message": "handshake issue" } } + ``` + + * If uncrypted message result is "HELO", server accepts the connection if the clientpubkey is authorized. It creates a symmetric key and send the following message crypted with client pubkey: + + ```text + [KEY] { "hostname": "xxxx", "key": "ab0182xxxx", "iv": "ab0182xxx", "cipher": "AES", "padding": 1 } + ``` + +4. Client: uncrypts the server message with its private key. +5. Client and Server uses the symmetric key+base64 encoding to dialog. + +The server keeps sessions for 24 hours since the last message of the client. + +Otherwise, it purges the identity/symmetric-key of the client. + +If a third-party client with the same identity try to open a new session, the server deletes the old identity/symmetric-key. + +Be sure to have the same parameters to crypt/uncrypt with the symmetric key. Commonly: 'AES' cipher, keysize of 32 bytes, vector '0123456789012345'. + +## Client request + +After a successful handshake, client requests use the following syntax: + +```text +[ACTION] [TOKEN] [TARGET] DATA +``` + +* ACTION: the request, for example 'COMMAND' or 'ENGINECOMMAND'. It depends of the target server capabilites, +* TOKEN: can be used to create some "sessions". If empty, the server creates an uniq token for each requests, +* TARGET: which "gorgoned" must execute the request. With the following option, you can execute a command on a specific server through another. The poller ID is needed. If empty, the server (which is connected with the client) is the target. +* DATA: JSON stream. It depends on the request. 
+ +For each client requests, the server get an immediate response: + +```text +[ACK] [TOKEN] { "code": "x", "data": { "message": "xxxxx" } } +``` + +* TOKEN: a uniq ID to follow the request, +* DATA: a JSON stream + + * 0 : OK + * 1 : NOK + +There are some exceptions for 'CONSTATUS' and 'GETLOG' requests. + +## Core requests + +### CONSTATUS + +The following request gives you a table with the last ping response of "gorgoned" nodes connected to the server. +The command is useful to know if some pollers are disconnected. + +The client request: + +```text +[CONSTATUS] [] [] +``` + +The server response: + +```text +[CONSTATUS] [token_id] DATA +``` + +An example of the JSON stream: + +```json +{ + "code": 1, + "data": { + "action": "constatus", + "mesage": "ok", + "data": { + "last_ping_sent": "xxxx", + "last_ping_recv": "xxxx", + "nodes": { + "1": "xxx", + "2": "xxx" + } + } + } +} +``` + +'last_ping' and 'entries' values are unix timestamp in seconds. + +The 'last_ping' value is the date when the daemon have launched a PING broadcast to the poller connected. + +The 'entries' values are the last time the poller have responded to the PING broadcast. + +### GETLOG + +The following request gives you the capability to follow your requests. "gorgone" protocol is asynchronous. + +An example: when you request a command execution, the server gives you a direct response and a token. This token can be used to know what happened to your command. + +The client request: + +```text +[GETLOG] [TOKEN] [TARGET] { "code": "xx", "ctime": "xx", "etime": "xx", "token": "xx", "id": "xx" } +``` + +At least one of the 5 values must be defined: + +* code: get logs if code = value +* token: get logs if token = value +* ctime: get logs if creation time in seconds >= value +* etime: get logs if event time in seconds >= value +* id: get logs if id > value + +The 'etime' value gives the time when the event has occured. 
+ +The 'ctime' value gives the time when the server has stored the log in its database. + +The server response: + +```text +[ACK] [token_id] DATA +``` + +An example of the json stream: + +```json +{ + "code": 1, + "data": { + "action": "getlog", + "message": "ok", + "result": [ + { + "id": 10, + "token": "xxxx", + "code": 1, + "etime": 1419252684, + "ctime": 1419252686, + "data": "xxxx", + }, + { + "id": 100, + "token": "xxxx", + "code": 1, + "etime": 1419252688, + "ctime": 1419252690, + "data": "xxxx", + } + ] + } +} +``` + +Each 'gorgoned' nodes store its logs. But every minute (by default), the Central server gets the new logs of its connected nodes and stores it. + +A client can force a synchronization with the following request: + +```text +[GETLOG] [] [target_id] +``` + +The client have to set the target ID (it can be the Poller ID). + +### PUTLOG + +The request shouldn't be used by third-party program. It's commonly used by the internal modules. + +The client request: + +```text +[PUTLOG] [TOKEN] [TARGET] { "code": xxx, "etime": "xxx", "token": "xxxx", "data": { some_datas } } +``` + +### REGISTERNODES + +The request shouldn't be used by third-party program. It's commonly used by the internal modules. + +The client request (no carriage returns. only for reading): + +```text +[REGISTERNODES] [TOKEN] [TARGET] { "nodes": [ + { "id": 20, "type": "pull" }, + { "id": 100, "type": "push_ssh", "address": "10.0.0.1", "ssh_port": 22 }, + { + "id": 150, "type": "push_zmq", "address": "10.3.2.1", + "nodes": [ { "id": 400, { "id": 455 } ] + } + ] +} +``` + +## Common codes + +Common code responses for all module requests: + +* 0: action proceed +* 1: action finished OK +* 2: action finished KO + +Modules can have extra codes. + +# FAQ + +## Which modules should I enable ? + +A Central with gorgoned should have the following modules: + +* action, +* proxy, +* cron, +* httpserver. 
+ +A Poller with gorgoned should have the following modules: + +* action, +* pull (if the connection to the Central should be opened by the Poller). + +## I want to create a client. How should I proceed ? + +First, you must choose a language which can use ZeroMQ library and have some knowledge about ZeroMQ. + +I recommend the following scenario: + +* Create a ZMQ_DEALER, +* Manage the handshake with the server (see :ref:`handshake-scenario`), +* Do a request: + * If you don't need to get the result: close the connection, + * If you need to get the result: + 1. Get the token, + 2. If you have used a target, force a synchronization with 'GETLOG' (without token), + 3. Do a 'GETLOG' request with the token to get the result, + 4. Repeat actions 2 and 3 if you don't have a result yet (you should stop after X retries). + +You can inspire from the code of '[test-client.pl](../contrib/test-client.pl)'. diff --git a/gorgone/docs/migration.md b/gorgone/docs/migration.md new file mode 100644 index 00000000000..ce115818a1b --- /dev/null +++ b/gorgone/docs/migration.md @@ -0,0 +1,107 @@ +# Migrate from Centreon *centcore* + +To build a configuration file based on */etc/centreon/conf.pm*, execute the following command line. 
+ +If using package: + +```bash +$ perl /usr/local/bin/gorgone_config_init.pl +2019-09-30 11:00:00 - INFO - file '/etc/centreon-gorgone/config.yaml' created success +``` + +If using sources: + +```bash +$ perl ./contrib/gorgone_config_init.pl +2019-09-30 11:00:00 - INFO - file '/etc/centreon-gorgone/config.yaml' created success +``` + +As a result the following configuration file will be created at */etc/centreon-gorgone/config.yaml*: + +```yaml +name: config.yaml +description: Configuration init by gorgone_config_init +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;port=3306;dbname=centreon" + username: "centreon" + password: "centreon" + db_realtime: + dsn: "mysql:host=localhost;port=3306;dbname=centreon_storage" + username: "centreon" + password: "centreon" + gorgone: + gorgonecore: + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: false + address: 0.0.0.0 + port: 8085 + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - 
^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: cron + package: gorgone::modules::core::cron::hooks + enable: false + cron: !include cron.d/*.yaml + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/lib/centreon/remote-data/" + + - name: engine + package: "gorgone::modules::centreon::engine::hooks" + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: pollers + package: gorgone::modules::centreon::pollers::hooks + enable: true + + - name: broker + package: "gorgone::modules::centreon::broker::hooks" + enable: true + cache_dir: "/var/cache/centreon//broker-stats/" + cron: + - id: broker_stats + timespec: "*/2 * * * *" + action: BROKERSTATS + parameters: + timeout: 10 +``` diff --git a/gorgone/docs/modules.md b/gorgone/docs/modules.md new file mode 100644 index 00000000000..a24e4495499 --- /dev/null +++ b/gorgone/docs/modules.md @@ -0,0 +1,105 @@ +# Modules + +List of the available modules: + +* Core + * [Action](../docs/modules/core/action.md) + * [Cron](../docs/modules/core/cron.md) + * [DB Cleaner](../docs/modules/core/dbcleaner.md) + * [HTTP Server](../docs/modules/core/httpserver.md) + * [Proxy](../docs/modules/core/proxy.md) + * [Pull](../docs/modules/core/pull.md) + * [Register](../docs/modules/core/register.md) +* Centreon + * [Autodiscovery](../docs/modules/centreon/autodiscovery.md) + * [Broker](../docs/modules/centreon/statistics.md) + * [Engine](../docs/modules/centreon/engine.md) + * [Legacy Cmd](../docs/modules/centreon/legacycmd.md) + * [Nodes](../docs/modules/centreon/nodes.md) +* Plugins + 
* [Newtest](../docs/modules/plugins/newtest.md) + * [Scom](../docs/modules/plugins/scom.md) + +# Module implementation + +Each module should have a hook.pm and a class.pm file with some mandatory functions implemented. + + +## hook.pm + +Mainly used for creating the module process(es) +and route events to it each time a new message is received by gorgone. + +### const EVENTS [] + +Array defining all events this module can process. Optionally add API endpoint for events. + +### const NAME + +### const NAMESPACE + +### gently() + +Called by gorgone-core when stopping the module. + +### register() + +### init() + +Called by library::loadmodule to initialize the module, it should create a child process as it's not done by gorgone-core. + +### routing() + +### kill() + +### check() + +### broadcast() + +### create_child() + +Not strictly required, but present every time, used to instantiate a new child process by the init() function.\ +Inside the child process, a class.pm object is created and the class->run method is started. + +## class.pm + +This class must inherit the module.pm package. + + +This object is most of the time a singleton (maybe every time). + + +It will be created by hook.pm when starting the module. +This is the workhorse that will process all events. + +It seems like none of these methods will be called by gorgone-core, so naming is not required to follow this convention. + +(Please keep the code base consistent if you make a new module). + + +### new() + +Class constructor + +### run() + +Will be called by hook.pm. This method should wait for events and dispatch them accordingly. + + +Uses the EV library to wait for new things to do, either by waiting on the ZMQ file descriptor (fd) + +or with a periodic timer.\ +Generally waits for new data on ZMQ socket with EV::io(), and call event() when there is. + +### event() + +Reads data from ZMQ socket, and acts on it, generally by launching an action_* method to process the event. 
+ +module.pm parent class has an event() method, so it's not mandatory to implement it. + +### action_*() + +Method called by event() when a ZMQ message is found. + +Method name is in the `action_eventname` form where eventname is the name of the event in lowercase, as defined by the constant in hook.pm + diff --git a/gorgone/docs/modules/centreon/autodiscovery.md b/gorgone/docs/modules/centreon/autodiscovery.md new file mode 100644 index 00000000000..95a886f8f7b --- /dev/null +++ b/gorgone/docs/modules/centreon/autodiscovery.md @@ -0,0 +1,343 @@ +# Autodiscovery + +## Description + +This module aims to extend Centreon Autodiscovery server functionalities. + +## Configuration + +| Directive | Description | Default value | +|:----------------|:-----------------------------------------------------------------------|:--------------| +| global\_timeout | Time in seconds before a discovery command is considered timed out | `300` | +| check\_interval | Time in seconds defining frequency at which results will be search for | `15` | + +#### Example + +```yaml +name: autodiscovery +package: "gorgone::modules::centreon::autodiscovery::hooks" +enable: true +global_timeout: 60 +check_interval: 10 +``` + +## Events + +| Event | Description | +|:-------------------------|:------------------------------------------------| +| AUTODISCOVERYREADY | Internal event to notify the core | +| HOSTDISCOVERYLISTENER | Internal event to get host discovery results | +| SERVICEDISCOVERYLISTENER | Internal event to get service discovery results | +| ADDHOSTDISCOVERYJOB | Add a host discovery job | +| DELETEHOSTDISCOVERYJOB | Delete a host discovery job | +| LAUNCHHOSTDISCOVERY | Execute a host discovery job | +| LAUNCHSERVICEDISCOVERY | Execute a service discovery job | + +## API + +### Add a host discovery job + +| Endpoint | Method | +|:------------------------------|:-------| +| /centreon/autodiscovery/hosts | `POST` | + +#### Headers + +| Header | Value | 
+|:-------------|:-----------------| +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +|:----------------|:-----------------------------------------------------------| +| job\_id | ID of the Host Discovery job | +| target | Identifier of the target on which to execute the command | +| command_line | Command line to execute to perform the discovery | +| timeout | Time in seconds before the command is considered timed out | +| execution | Execution settings | +| post\_execution | Post-execution settings | + +With the following keys for the `execution` entry: + +| Key | Value | +|:-----------|:------------------------------------------------| +| mode | Execution mode ('0': immediate, '1': scheduled) | +| parameters | Parameters needed by execution mode | + +With the following keys for the `post_execution` entry: + +| Key | Value | +|:---------|:---------------------------------| +| commands | Array of commands to be executed | + +```json +{ + "job_id": "", + "target": "", + "command_line": "", + "timeout": "", + "execution": { + "mode": "", + "parameters": "", + }, + "post_execution": { + "commands": "", + } +} +``` + +#### Examples + +##### Execute immediately without post-execution commands + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"job_id\": 14, + \"target\": 3, + \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\", + \"timeout\": 300, + \"execution\": { + \"mode\": 0, + \"parameters\": {} + }, + \"post_execution\": {} +}" +``` + +##### Execute immediately with post-execution commands + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \ + --header "Accept: 
application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"job_id\": 14, + \"target\": 3, + \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\", + \"timeout\": 300, + \"execution\": { + \"mode\": 0, + \"parameters\": {} + }, + \"post_execution\": { + \"commands\": [ + { + \"action\": \"COMMAND\", + \"command_line\": \"/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host --job-id=14\" + } + ] + } +}" +``` + +##### Schedule execution without post-execution commands + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"job_id\": 14, + \"target\": 3, + \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\", + \"timeout\": 300, + \"execution\": { + \"mode\": 1, + \"parameters\": { + \"cron_definition\": \"*/10 * * * *\" + } + }, + \"post_execution\": {} +}" +``` + +##### Schedule execution with post-execution commands + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"job_id\": 14, + \"target\": 3, + \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\", + \"timeout\": 300, + \"execution\": { + \"mode\": 1, + \"parameters\": { + \"cron_definition\": \"*/10 * * * *\" + } + }, + \"post_execution\": { + \"commands\": [ + { + \"action\": 
\"COMMAND\", + \"command_line\": \"/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host --job-id=14\" + } + ] + } +}" +``` + +### Launch a host discovery job + +| Endpoint | Method | +|:-------------------------------------------|:-------| +| /centreon/autodiscovery/hosts/:id/schedule | `GET` | + +#### Headers + +| Header | Value | +|:-------|:-----------------| +| Accept | application/json | + +#### Path variables + +| Variable | Description | +|:---------|:----------------------| +| id | Identifier of the job | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/centreon/autodiscovery/hosts/:id/schedule" \ + --header "Accept: application/json" +``` + +### Delete a host discovery job + +| Endpoint | Method | +|:-------------------------------------|:---------| +| /centreon/autodiscovery/hosts/:token | `DELETE` | + +#### Headers + +| Header | Value | +|:-------|:-----------------| +| Accept | application/json | + +#### Path variables + +| Variable | Description | +|:---------|:---------------------------| +| token | Token of the scheduled job | + +#### Example + +```bash +curl --request DELETE "https://hostname:8443/api/centreon/autodiscovery/hosts/discovery_14_6b7d1bb8" \ + --header "Accept: application/json" +``` + +### Execute a service discovery job + +| Endpoint | Method | +|:---------------------------------|:-------| +| /centreon/autodiscovery/services | `POST` | + +#### Headers + +| Header | Value | +|:-------------|:-----------------| +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +|:---------------------|:--------------------------------------------------------------------------------------------------| +| filter\_rules | Array of rules to use for discovery (empty means all) | +| force\_rule | Run disabled rules ('0': not forced, '1': forced) | +| filter\_hosts | Array of hosts against which run the discovery (empty means all) | +| 
filter\_pollers | Array of pollers for which linked hosts will be discovered against (empty means all) | +| manual | Run discovery for manual scan from web UI ('0': automatic, '1': manual) | +| dry\_run | Run discovery without configuration change ('0': changes, '1': dry run) | +| no\_generate\_config | No configuration generation (even if there is some changes) ('0': generation, '1': no generation) | + +```json +{ + "filter_rules": "", + "force_rule": "", + "filter_hosts": "", + "filter_pollers": "", + "manual": "", + "dry_run": "", + "no_generate_config": "" +} +``` + +#### Examples + +##### Execute discovery with defined rules (even if disabled) + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/services" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"filter_rules\": [ + \"OS-Linux-SNMP-Disk-Name\", + \"OS-Linux-SNMP-Traffic-Name\" + ], + \"force_rule\": 1 +}" +``` + +##### Execute discovery for defined hosts + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/services" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"filter_hosts\": [ + \"Host-1\", + \"Host-2\", + \"Host-3\" + ] +}" +``` + +##### Execute discovery for defined poller (without changes) + +```bash +curl --request POST "https://hostname:8443/api/centreon/autodiscovery/services" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"filter_pollers\": [ + \"Poller-1\" + ], + \"dry_run\": 1 +}" +``` + +### Developer manual + +This module heavily uses the gorgone-action module to work. + +Here is a diagram of how these modules interact: + +![image](./centreon-gorgone-autodiscovery-archi.jpg) + + +Dotted lines mean a ZMQ message is sent. Direct lines mean the function is called normally. + +Each column represents a Linux thread, as Gorgone is multiprocess. 
+ +For each ZMQ message, names are described in the [events section](#events) of each module, +and for putlog the second part is the 'code' used by gorgone-autodiscovery +and defined as constant in the [class.pm](../../../gorgone/modules/centreon/autodiscovery/class.pm) file. + +The gorgone-action module does not send the result directly to the calling module. It sends a putlog message instead, processed by core. + +Core keeps track of every module waiting for a particular event (use library.pm::addlistener to show interest in an event) +and dispatch another message to the waiting module. + + +gorgone-core also stores the log in a local sqlite database. diff --git a/gorgone/docs/modules/centreon/centreon-gorgone-autodiscovery-archi.jpg b/gorgone/docs/modules/centreon/centreon-gorgone-autodiscovery-archi.jpg new file mode 100644 index 00000000000..4ee3f7e322d Binary files /dev/null and b/gorgone/docs/modules/centreon/centreon-gorgone-autodiscovery-archi.jpg differ diff --git a/gorgone/docs/modules/centreon/engine.md b/gorgone/docs/modules/centreon/engine.md new file mode 100644 index 00000000000..4c8c561d5bc --- /dev/null +++ b/gorgone/docs/modules/centreon/engine.md @@ -0,0 +1,72 @@ +# Engine + +## Description + +This module aims to provide a bridge to communicate with Centreon Engine daemon. 
+ +## Configuration + +| Directive | Description | Default value | +| :----------- | :-------------------------------------------- | :------------------------------------------- | +| command_file | Path to the Centreon Engine command file pipe | `/var/lib/centreon-engine/rw/centengine.cmd` | + +#### Example + +```yaml +name: engine +package: "gorgone::modules::centreon::engine::hooks" +enable: true +command_file: "/var/lib/centreon-engine/rw/centengine.cmd" +``` + +## Events + +| Event | Description | +| :------------ | :--------------------------------------------------------------------------- | +| ENGINEREADY | Internal event to notify the core | +| ENGINECOMMAND | Send a Centreon external command to Centreon Engine daemon command file pipe | + +## API + +### Execute a command line + +| Endpoint | Method | +| :----------------------- | :----- | +| /centreon/engine/command | `POST` | + +#### Headers + +| Header | Value | +| :----------- | :--------------- | +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +| :----------- | :-------------------------------------------- | +| command_file | Path to the Centreon Engine command file pipe | +| commands | Array of external commands (old-style format) | + +```json +{ + "command_file": "", + "commands": [ + "" + ] +} +``` + +#### Example + +```bash +curl --request POST "https://hostname:8443/api/centreon/engine/command" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"command_file\": \"/var/lib/centreon-engine/rw/centengine.cmd\", + \"commands\": [ + \"[653284380] SCHEDULE_SVC_CHECK;host1;service1;653284380\" + ] +}" +``` diff --git a/gorgone/docs/modules/centreon/legacycmd.md b/gorgone/docs/modules/centreon/legacycmd.md new file mode 100644 index 00000000000..d7221114f75 --- /dev/null +++ b/gorgone/docs/modules/centreon/legacycmd.md @@ -0,0 +1,48 @@ +# Legacy Cmd + +## Description + +This module aims to mimick the 
behaviour of the antique *centcore* daemon. + +As for *centcore*, it reads a file (called command file) and processes every command that it knows of. + +The module relies on the following modules to process commands: + +* [Action](../core/action.md) +* [Proxy](../core/proxy.md) +* [Engine](engine.md) + +## Configuration + +| Directive | Description | Default value | +| :--------------------------- | :----------------------------------------------------------- | :---------------------------------------- | +| cmd_file | *Command file* to read commands from | `/var/lib/centreon/centcore.cmd` | +| cmd_dir | Directory where to watch for *command files* | `/var/lib/centreon/` | +| cache_dir | Directory where to process Centreon configuration files | `/var/cache/centreon/` | +| cache_dir_trap | Directory where to process Centreontrapd databases | `/etc/snmp/centreon_traps/` | +| remote_dir | Directory where to export Remote Servers configuration | `/var/cache/centreon/config/remote-data/` | +| bulk_external_cmd | Bulk external commands (DOWNTIME, ACK,...) | `50` | +| bulk_external_cmd_sequential | Order bulk external commands and other commands (Eg. RELOAD) | `1` | + +#### Example + +```yaml +name: legacycmd +package: "gorgone::modules::centreon::legacycmd::hooks" +enable: true +cmd_file: "/var/lib/centreon/centcore.cmd" +cmd_dir: "/var/lib/centreon/" +cache_dir: "/var/cache/centreon/" +cache_dir_trap: "/etc/snmp/centreon_traps/" +remote_dir: "/var/cache/centreon/config/remote-data/" +``` + +## Events + +| Event | Description | +| :------------- | :-------------------------------- | +| LEGACYCMDREADY | Internal event to notify the core | + +## API + +No API endpoints. 
diff --git a/gorgone/docs/modules/centreon/nodes.md b/gorgone/docs/modules/centreon/nodes.md new file mode 100644 index 00000000000..b7eb23bbaa0 --- /dev/null +++ b/gorgone/docs/modules/centreon/nodes.md @@ -0,0 +1,53 @@ +# Nodes + +## Description + +This module aims to automatically register Poller servers as Gorgone nodes, in opposition to the [register](../core/register.md) module. + +For now, nodes can be registered as SSH nodes or ZMQ nodes. + +## Configuration + +No specific configuration. + +#### Example + +```yaml +name: nodes +package: "gorgone::modules::centreon::nodes::hooks" +enable: true +``` + +## Events + +| Event | Description | +| :----------------- | :-------------------------------- | +| CENTREONNODESREADY | Internal event to notify the core | + +## API + +### Synchronize centreon nodes configuration + +| Endpoint | Method | +| :------------------- | :----- | +| /centreon/nodes/sync | `POST` | + +#### Headers + +| Header | Value | +| :----------- | :--------------- | +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +No parameters. + +#### Example + +```bash +curl --request POST "https://hostname:8443/api/centreon/nodes/sync" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{}" +``` diff --git a/gorgone/docs/modules/centreon/statistics.md b/gorgone/docs/modules/centreon/statistics.md new file mode 100644 index 00000000000..db28a88ec4d --- /dev/null +++ b/gorgone/docs/modules/centreon/statistics.md @@ -0,0 +1,88 @@ +# Broker + +## Description + +This module aims to manage the collection of Centreon Engine and Broker statistics. It requires the configuration of an action module on each poller and on the central. 
+ +## Configuration + +| Directive | Description | Default value | +| :--------------- | :--------------------------------------------------------------------------------------------- | :-------------------------------- | +| broker_cache_dir | Path to the Centreon Broker statistics directory (local) used to store node's broker statistics | `/var/lib/centreon/broker-stats/` | + +The configuration needs a cron definition to ensure that statistics are collected regularly. + +#### Example + +```yaml +name: statistics +package: "gorgone::modules::centreon::statistics::hooks" +enable: false +broker_cache_dir: "/var/lib/centreon/broker-stats/" +cron: + - id: broker_stats + timespec: "*/5 * * * *" + action: BROKERSTATS + parameters: + timeout: 10 + collect_localhost: false +``` + +## Events + +| Event | Description | +|:-------------------|:--------------------------------------------------| +| STATISTICSREADY | Internal event to notify the core | +| STATISTICSLISTENER | Internal Event to receive data from action module | +| BROKERSTATS | Collect Centreon Broker statistics files on node | +| ENGINESTATS | Collect Centreon engine statistics on node | + +## API + +### Collect Centreon engine statistics on every node configured + +The API returns a token to monitor the progress. Please note this token does not allow you to monitor the whole process but only the first part until an action command is sent. + +Data will be stored in the `centreon_storage.nagios_stats` table and in the rrd database. 
+ +| Endpoint | Method | +|:----------------------------| :----- | +| /centreon/statistics/engine | `GET` | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/centreon/statistics/engine" \ + --header "Accept: application/json" +``` + +### Collect Centreon Broker statistics on one or several nodes + +| Endpoint | Method | +| :------------------------------ | :----- | +| /centreon/statistics/broker | `GET` | +| /centreon/statistics/broker/:id | `GET` | + +#### Headers + +| Header | Value | +| :----- | :--------------- | +| Accept | application/json | + +#### Path variables + +| Variable | Description | +| :------- | :--------------------- | +| id | Identifier of the node | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/centreon/statistics/broker" \ + --header "Accept: application/json" +``` + +```bash +curl --request GET "https://hostname:8443/api/centreon/statistics/broker/2" \ + --header "Accept: application/json" +``` diff --git a/gorgone/docs/modules/core/action.md b/gorgone/docs/modules/core/action.md new file mode 100644 index 00000000000..db21bbe971c --- /dev/null +++ b/gorgone/docs/modules/core/action.md @@ -0,0 +1,98 @@ +# Action + +## Description + +This module aims to execute actions on the server running the Gorgone daemon or remotely using SSH. 
+ +## Configuration + +| Directive | Description | Default value | +|:-----------------|:---------------------------------------------------------------|:--------------| +| command_timeout | Time in seconds before a command is considered timed out | `30` | +| whitelist_cmds | Boolean to enable commands whitelist | `false` | +| allowed_cmds | Regexp list of allowed commands | | +| paranoid_plugins | Block centengine restart/reload if plugin dependencies missing | `false` | + +#### Example + +```yaml +name: action +package: "gorgone::modules::core::action::hooks" +enable: true +command_timeout: 30 +whitelist_cmds: true +allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ +``` + +## Events + +| Event | Description | +|:------------|:----------------------------------------------------------------------------------------| +| ACTIONREADY | Internal event to notify the core | +| PROCESSCOPY | Process file or archive received from another daemon | +| COMMAND | Execute a shell command on the server running the daemon or on another server using SSH | + +## API + +### Execute a command line + +| Endpoint | Method | +|:---------------------|:-------| +| /core/action/command | `POST` | + +#### 
Headers + +| Header | Value | +|:-------------|:-----------------| +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +|:------------------|:---------------------------------------------------------| +| command | Command to execute | +| timeout | Time in seconds before a command is considered timed out | +| continue_on_error | Behaviour in case of execution issue | + +```json +[ + { + "command": "", + "timeout": "", + "continue_on_error": "" + } +] +``` + + +#### Example + +See a complete example of this endpoint in the [api documentation](../../api.md) + +```bash +curl --request POST "https://hostname:8443/api/core/action/command" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "[ + { + \"command\": \"echo 'Test command' >> /tmp/here.log\" + } +]" +``` +Output: +```bash +{"token":"b3f825f87d64764316d872c59e4bae69299b0003f6e5d27bbc7de4e27c50eb65fc17440baf218578343eff7f4d67f7e98ab6da40b050a2635bb735c7cec276bd"} +``` + diff --git a/gorgone/docs/modules/core/cron.md b/gorgone/docs/modules/core/cron.md new file mode 100644 index 00000000000..b4e5be4d224 --- /dev/null +++ b/gorgone/docs/modules/core/cron.md @@ -0,0 +1,229 @@ +# Cron + +## Description + +This module aims to reproduce a cron-like scheduler that can send events to other Gorgone modules. + +## Configuration + +No specific configuration is needed. 
+ +Below the configuration to add cron definitions: + +| Directive | Description | +| :--------- | :---------------------------------------------------------------------------------------------- | +| id | Unique identifier of the cron definition | +| timespec | Cron-like time specification | +| action | Action/event to call at job execution | +| parameters | Parameters needed by the called action/event | +| keep_token | Boolean to define whether or not the ID of the definition will be used as token for the command | + +#### Example + +```yaml +name: cron +package: "gorgone::modules::core::cron::hooks" +enable: true +cron: + - id: echo_date + timespec: "* * * * *" + action: COMMAND + parameters: + - command: "date >> /tmp/date.log" + timeout: 10 + keep_token: true +``` + +## Events + +| Event | Description | +| :- | :- | +| CRONREADY | Internal event to notify the core | +| GETCRON | Get one or all cron definitions | +| ADDCRON | Add one or several cron definitions | +| DELETECRON | Delete a cron definition | +| UPDATECRON | Update a cron definition | + +## API + +### Get one or all definitions configuration + +| Endpoint | Method | +| :------------------------- | :----- | +| /core/cron/definitions | `GET` | +| /core/cron/definitions/:id | `GET` | + +#### Headers + +| Header | Value | +| :----- | :--------------- | +| Accept | application/json | + +#### Path variables + +| Variable | Description | +| :------- | :-------------------------------- | +| id | Identifier of the cron definition | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/core/cron/definitions" \ + --header "Accept: application/json" +``` + +```bash +curl --request GET "https://hostname:8443/api/core/cron/definitions/echo_date" \ + --header "Accept: application/json" +``` + +### Get one definition status + +| Endpoint | Method | +| :-------------------------------- | :----- | +| /core/cron/definitions/:id/status | `GET` | + +#### Headers + +| Header | Value | +| :----- | 
:--------------- | +| Accept | application/json | + +#### Path variables + +| Variable | Description | +| :------- | :-------------------------------- | +| id | Identifier of the cron definition | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/core/cron/definitions/echo_date/status" \ + --header "Accept: application/json" +``` + +### Add one or several cron definitions + +| Endpoint | Method | +| :--------------------- | :----- | +| /core/cron/definitions | `POST` | + +#### Headers + +| Header | Value | +| :----------- | :--------------- | +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +| :--------- | :---------------------------------------------------------------------------------------------- | +| id | ID of the definition | +| timespec | Cron-like time specification | +| command | Action/event to call at job execution | +| parameters | Parameters needed by the called action/event | +| keep_token | Boolean to define whether or not the ID of the definition will be used as token for the command | + +```json +[ + { + "id": "", + "timespec": "", + "command": "", + "parameters": "", + "keep_token": "" + } +] +``` + +#### Example + +```bash +curl --request POST "https://hostname:8443/api/core/cron/definitions" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "[ + { + \"timespec\": \"*/15 * * * *\", + \"id\": \"job_123\", + \"action\": \"COMMAND\", + \"parameters\": [ + { + \"command\": \"date >> /tmp/the_date_again.log\", + \"timeout\": 5 + } + ], + \"keep_token\": true + } +]" +``` + +### Update a definition + +| Endpoint | Method | +| :------------------------- | :------ | +| /core/cron/definitions/:id | `PATCH` | + +#### Headers + +| Header | Value | +| :----------- | :--------------- | +| Accept | application/json | +| Content-Type | application/json | + +#### Path variables + +| Variable | Description | +| :------- | 
:-------------------------------- | +| id | Identifier of the cron definition | + +#### Body + +One or several keys allowed by the add endpoint. + +```json +{ + "id": "", + "timespec": "", + "command": "", + "parameters": "", + "keep_token": "" +} +``` + +#### Example + +```bash +curl --request PATCH "https://hostname:8443/api/core/cron/definitions/job_123" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data "{ + \"timespec\": \"*/2 * * * *\" +}" +``` + +### Delete a definition + +| Endpoint | Method | +| :------------------------- | :------- | +| /core/cron/definitions/:id | `DELETE` | + +#### Headers + +| Header | Value | +| :----- | :--------------- | +| Accept | application/json | + +#### Path variables + +| Variable | Description | +| :------- | :-------------------------------- | +| id | Identifier of the cron definition | + +#### Example + +```bash +curl --request DELETE "https://hostname:8443/api/core/cron/definitions/job_123" \ + --header "Accept: application/json" +``` diff --git a/gorgone/docs/modules/core/dbcleaner.md b/gorgone/docs/modules/core/dbcleaner.md new file mode 100644 index 00000000000..c80af1aad66 --- /dev/null +++ b/gorgone/docs/modules/core/dbcleaner.md @@ -0,0 +1,34 @@ +# DB Cleaner + +## Description + +This module aims to maintain the Gorgone daemon database by purging entries cyclically. + +The module is loaded by default. Adding it to the configuration will overload daemon default configuration. 
+ +## Configuration + +| Directive | Description | Default value | +| :------------------ | :----------------------------------------------------------------------- | :------------ | +| purge_sessions_time | Time in seconds before deleting sessions in the `gorgone_identity` table | `3600` | +| purge_history_time | Time in seconds before deleting history in the `gorgone_history` table | `604800` | + +#### Example + +```yaml +name: dbcleaner +package: "gorgone::modules::core::dbcleaner::hooks" +enable: true +purge_sessions_time: 3600 +purge_history_time: 604800 +``` + +## Events + +| Event | Description | +| :------------- | :-------------------------------- | +| DBCLEANERREADY | Internal event to notify the core | + +## API + +No API endpoints. diff --git a/gorgone/docs/modules/core/httpserver.md b/gorgone/docs/modules/core/httpserver.md new file mode 100644 index 00000000000..cae8e874bdd --- /dev/null +++ b/gorgone/docs/modules/core/httpserver.md @@ -0,0 +1,56 @@ +# HTTP Server + +## Description + +This module aims to provide an HTTP/S server to expose handy endpoints to talk to Gorgone. + +It relies on a core API module to serve Gorgone events and can dispatch any other piece of code. 
+ +## Configuration + +| Directive | Description | Default value | +| :------------ | :----------------------------------------------- | :------------ | +| address | IP address for the server to bind to | `0.0.0.0` | +| port | Port on which the server will listen to requests | `8080` | +| ssl | Boolean to enable SSL terminaison | `false` | +| ssl_cert_file | Path to the SSL certificate (if SSL enabled) | | +| ssl_key_file | Path to the SSL key (if SSL enabled) | | +| auth | Basic credentials to access the server | | +| allowed_hosts | Peer address to access the server | | + +#### Example + +```yaml +name: httpserver +package: "gorgone::modules::core::httpserver::hooks" +enable: true +address: 0.0.0.0 +port: 8443 +ssl: true +ssl_cert_file: /etc/pki/tls/certs/server-cert.pem +ssl_key_file: /etc/pki/tls/server-key.pem +auth: + enabled: true + user: admin + password: password +allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + - 10.30.2.0/16 +``` + +Below the configuration to add other endpoints: + +```yaml +dispatch: + - endpoint: "/mycode" + method: GET + class: "path::to::my::code" +``` + +## Events + +| Event | Description | +| :-------------- | :-------------------------------- | +| HTTPSERVERREADY | Internal event to notify the core | diff --git a/gorgone/docs/modules/core/proxy.md b/gorgone/docs/modules/core/proxy.md new file mode 100644 index 00000000000..69899491411 --- /dev/null +++ b/gorgone/docs/modules/core/proxy.md @@ -0,0 +1,116 @@ +# Proxy + +## Description + +This module aims to give the possibility to Gorgone to become distributed. + +It is not needed in a Centreon standalone configuration, but must be enabled if there is Poller or Remote servers. + +The module includes mechanisms like ping to make sure nodes are alive, synchronisation to store logs in the Central Gorgone database, etc. + +A SSH client library make routing to non-gorgoned nodes possible. 
+ +## Configuration + +| Directive | Description | Default value | +|:---------------------|:-------------------------------------------------------------------|:---------------| +| pool | Number of children to instantiate to process events | `5` | +| synchistory_time | Time in seconds between two log synchronisations | `60` | +| synchistory_timeout | Time in seconds before log synchronisation is considered timed out | `30` | +| ping | Time in seconds between two node pings | `60` | +| pong_discard_timeout | Time in seconds before a ping is considered lost | `300` | + +This part of the configuration is only used if some poller must connect with the pullwss module. + + +| Directive | Description | Default value | +|:--------------|:-----------------------------------------------------------------------------------------------|:--------------| +| httpserver | Array containing all the configuration below for a pullwss connection | no value. | +| enable | Boolean if HTTP server should be enabled | `false` | +| ssl | Should connection be made over TLS/SSL or not | `false` | +| ssl_cert_file | Path to a SSL certificate file. required if ssl: true | | +| ssl_key_file | Path to a SSL key file associated to the certificate already configured. required if ssl: true | | +| passphrase | May be an optional passphrase for the SSL key. | | +| token | Allow to authenticate node. It is required to enable the HTTP server. | | +| address | Address to listen to. It can be 0.0.0.0 to listen on all IPv4 addresses. | | +| port | TCP port to listen to. | | + + +#### Example + +```yaml +name: proxy +package: "gorgone::modules::core::proxy::hooks" +enable: false +pool: 5 +synchistory_time: 60 +synchistory_timeout: 30 +ping: 60 +pong_discard_timeout: 300 +httpserver: # this is used only if you want to configure pullwss nodes. to make it work you have to add the register module and configure a configuration file for it. 
+ enable: true + ssl: true + ssl_cert_file: /etc/centreon-gorgone/keys/public.pem + ssl_key_file: /etc/centreon-gorgone/keys/private.pem + token: secure_token + address: "0.0.0.0" +``` + +## Events + +| Event | Description | +|:----------------|:-------------------------------------------------------------------------------| +| PROXYREADY | Internal event to notify the core | +| REMOTECOPY | Copy files or directories from the server running the daemon to another server | +| SETLOGS | Internal event to insert logs into the database | +| PONG | Internal event to handle node ping response | +| REGISTERNODES | Internal event to register nodes | +| UNREGISTERNODES | Internal event to unregister nodes | +| PROXYADDNODE | Internal event to add nodes for proxying | +| PROXYDELNODE | Internal event to delete nodes from proxying | +| PROXYADDSUBNODE | Internal event to add nodes of nodes for proxying | +| PONGRESET | Internal event to deal with no pong nodes | + +## API + +### Copy files or directory to remote server + +| Endpoint | Method | +|:---------------------------|:-------| +| /api/core/proxy/remotecopy | `POST` | + +#### Headers + +| Header | Value | +|:-------------|:-----------------| +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +|:------------|:--------------------------------------------------| +| source | Path of the source file or directory | +| destination | Path of the destination file or directory | +| cache_dir | Path to the cache directory for archiving purpose | + +```json +{ + "source": "", + "destination": "", + "cache_dir": "" +} +``` + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/core/proxy/remotecopy" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data " { + \"source\": \"/var/cache/centreon/config/engine/2/\", + \"destination\": \"/etc/centreon-engine\", + \"cache_dir\": \"/var/cache/centreon\" +}" +``` diff --git 
a/gorgone/docs/modules/core/pull.md b/gorgone/docs/modules/core/pull.md new file mode 100644 index 00000000000..8ec62829d27 --- /dev/null +++ b/gorgone/docs/modules/core/pull.md @@ -0,0 +1,28 @@ +# Pull + +## Description + +This module should be used on remote nodes where the connection has to be opened from the node to the Central Gorgone. + +## Configuration + +No specific configuration. + +#### Example + +```yaml +name: pull +package: "gorgone::modules::core::pull::hooks" +enable: true +target_type: tcp +target_path: 10.30.2.203:5556 +ping: 1 +``` + +## Events + +No events. + +## API + +No API endpoints. diff --git a/gorgone/docs/modules/core/pullwss.md b/gorgone/docs/modules/core/pullwss.md new file mode 100644 index 00000000000..5bcdf3f1900 --- /dev/null +++ b/gorgone/docs/modules/core/pullwss.md @@ -0,0 +1,41 @@ +# Pullwss + +## Description + +This module should be used on remote nodes where the connection has to be HTTP/HTTPS and must be opened from the node to the Central Gorgone. + +This module requires proxy and register module to be configured on the central Gorgone. +The register Module will allow Gorgone to keep the state of every poller, and find out the connection mode. +The proxy module has to bind to a tcp port for the pullwss module to connect to. 
+ +## Configuration + +| Directive | Description | Default value | +|:----------|:--------------------------------------------------|:--------------| +| ssl | should the connection be made over TLS/SSL or not | `false` | +| address | IP address to connect to | | +| port | TCP port to connect to | | +| token | token to authenticate to the central gorgone | | +| proxy | HTTP(S) proxy to access central gorgone | | + +### Example + +```yaml +name: pullwss +package: "gorgone::modules::core::pullwss::hooks" +enable: true +ssl: true +port: 8086 +token: "1234" +address: 192.168.56.105 +``` + +## Events + +| Event | Description | +|:---------------|:--------------------------------------------------------| +| PULLWSSREADY | Internal event to notify the core this module is ready. | + +## API + +No API endpoints. diff --git a/gorgone/docs/modules/core/register.md b/gorgone/docs/modules/core/register.md new file mode 100644 index 00000000000..6cc3bac81fe --- /dev/null +++ b/gorgone/docs/modules/core/register.md @@ -0,0 +1,96 @@ +# Register + +## Description + +This module aims to provide a way to register nodes manually, in opposition to the [pollers](../centreon/pollers.md) module. + +Nodes are either servers running Gorgone daemon or simple equipment with SSH server. + +## Configuration + +There is no specific configuration in the Gorgone daemon configuration file, only a directive to set a path to a dedicated configuration file. 
+ +| Directive | Description | Default value | +|:-------------|:---------------------------------------------|:--------------| +| config\_file | Path to the configuration file listing nodes | | + +#### Example + +```yaml +name: register +package: "gorgone::modules::core::register::hooks" +enable: true +config_file: config/registernodes.yaml +``` + +Nodes are listed in a separate configuration file in a `nodes` table as below: + +##### Using ZMQ (Gorgone running on node) + +| Directive | Description | +|:----------------|:---------------------------------------------------------------------------| +| id | Unique identifier of the node (can be Poller’s ID if using prevail option) | +| type | Way for the daemon to connect to the node (push\_zmq, pull, wss) | +| address | IP address of the node | +| port | Port to connect to on the node | +| server\_pubkey | Server public key (Default: ask the server pubkey when it connects) | +| client\_pubkey | Client public key (Default: use global public key) | +| client\_privkey | Client private key (Default: use global private key) | +| cipher | Cipher used for encryption (Default: “Cipher::AES”) | +| vector | Encryption vector (Default: 0123456789012345) | +| prevail | Defines if this configuration prevails on `nodes` module configuration | +| nodes | Table to register subnodes managed by node (pathscore is not mandatory) | + +#### Example + +```yaml +nodes: + - id: 4 + type: push_zmq + address: 10.1.2.3 + port: 5556 + nodes: + - id: 2 + pathscore: 1 + - id: 20 + pathscore: 10 +``` + +##### Using SSH + +| Directive | Description | +|:-------------------------|:--------------------------------------------------------------------------------------------------| +| id | Unique identifier of the node (can be Poller’s ID if using prevail option) | +| type | Way for the daemon to connect to the node (push\_ssh) | +| address | IP address of the node | +| ssh\_port | Port to connect to on the node | +| ssh\_directory | Path to the SSH 
directory, used for files like known\_hosts and identity (private and public key) | +| ssh\_known\_hosts | Path to the known hosts file | +| ssh\_identity | Path to the identity file | +| ssh\_username | SSH username | +| ssh\_password | SSH password (if no SSH key) | +| ssh\_connect\_timeout | Time is seconds before a connection is considered timed out | +| strict\_serverkey\_check | Boolean to strictly check the node fingerprint | +| prevail | Defines if this configuration prevails on `nodes` module configuration | + +#### Example + +```yaml +nodes: + - id: 8 + type: push_ssh + address: 10.4.5.6 + ssh_port: 2222 + ssh_identity: ~/.ssh/the_rsa_key + ssh_username: user + strict_serverkey_check: false + prevail: 1 +``` + +## Events + +No events. + +## API + +No API endpoints. diff --git a/gorgone/docs/modules/plugins/newtest.md b/gorgone/docs/modules/plugins/newtest.md new file mode 100644 index 00000000000..6705c91a37c --- /dev/null +++ b/gorgone/docs/modules/plugins/newtest.md @@ -0,0 +1,137 @@ +# IP-Label Newtest + +## Description + +This module aims to retrieve Newtest services. + +It uses the Newtest webservice in order to connect and retrieve the informations of one (or more) Newtest Management Console (NMC). + +By default *newtest* starts X processes (it depends of the configuration). + +Here are the steps done by one process: + +1. Centreon configuration: get the robots and scenarios already configured, + +2. Get the list of robots and scenarios from the NMC, + +3. Create the needed configuration in Centreon with CLAPI (no disable or delete actions), + +4. Get the last status of scenarios from the NMC, + +5. Submit the result to Centreon through *centcore*. 
+ +#### Requirements + +| Dependency | Version | Repository | +| :----------------- | :----------: | :----------------- | +| perl-SOAP-Lite | 1.10 | centreon base | +| perl-TimeDate | 2.30 | redhat/centos base | + +## Configuration + +| Directive | Description | Default value | +| :- | :- | :- | +| clapi_command | Path to the CLAPI binary | `/usr/bin/centreon` | +| clapi_timeout | Time in seconds before CLAPI command execution is considered timed out | `10` | +| clapi_username | CLAPI username | | +| clapi_password | CLAPI username's password | | +| centcore_cmd | Path to centcore command file | `/var/lib/centreon/centcore.cmd` | +| clapi_action_applycfg | CLAPI action used to apply Poller configuration | | +| clapi_generate_config_timeout | Time in seconds before the configuration generation is considered timed out | `180` | +| check_containers_time | Time in seconds between two containers synchronisation | `3600` | + +#### Example + +```yaml +name: newtest +package: "gorgone::modules::plugins::newtest::hooks" +enable: false +check_containers_time: 3600 +clapi_command: /usr/bin/centreon +clapi_username: admin +clapi_password: centreon +clapi_action_applycfg: POLLERRELOAD +centcore_cmd: /var/lib/centreon/centcore.cmd +``` + +Add an entry in the *containers* table with the following attributes per NWC definition: + +| Directive | Description | +| :------------ | :---------- | +| name | Name of the NWC configuration entrie | +| resync_time | Time in seconds between two NWC/Centreon synchronisations | +| nmc_endpoint | Address of the NMC endpoint | +| username | Username to connect to NWC endpoint | +| password | Username's password | +| host_template | Host template used when the daemon creates a host in Centreon | +| host_prefix | Name used when the daemon creates and looks for a host in Centreon | +| service_template | Service template used when the daemon creates a host in Centreon | +| service_prefix | Name used when the daemon creates and looks for a service in 
Centreon | +| poller_name | Poller used when the daemon creates a host in Centreon | +| list_scenario_status | Informations to look for from the NWC endpoint | + +#### Example + +```yaml +containers: + - name: nwc_1 + resync_time: 300 + nmc_endpoint: "http://__NMC_ADDRESS__/nws/managementconsoleservice.asmx" + username: user + password: pass + host_template: generic-active-host-custom + host_prefix: Robot-%s + service_template: generic-passive-service-custom + service_prefix: Scenario-%s + poller_name: Central + list_scenario_status: '{ "search": "All", "instances": [] }' + - name: nwc_2 + resync_time: 600 + nmc_endpoint: "http://__NMC_ADDRESS__/nws/managementconsoleservice.asmx" + username: user + password: pass + host_template: generic-active-host-custom + host_prefix: Robot-%s + service_template: generic-passive-service-custom + service_prefix: Scenario-%s + poller_name: Central + list_scenario_status: '{ "search": "Robot", "instances": ["XXXX"] }' +``` + +## Events + +| Event | Description | +| :- | :- | +| NEWTESTREADY | Internal event to notify the core | +| NEWTESTRESYNC | Synchronise NWC and Centreon configuration | + +## API + +### Force synchronisation between NWC endpoints and Centreon configuration + +| Endpoint | Method | +| :- | :- | +| /plugins/newtest/resync | `GET` | + +#### Headers + +| Header | Value | +| :- | :- | +| Accept | application/json | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/plugins/newtest/resync" \ + --header "Accept: application/json" +``` + +## Troubleshooting + +It is possible to get this kind of error in logs of *newtest*: + +```bash +die: syntax error at line 1, column 0, byte 0 at /usr/lib/perl5/vendor_perl/5.8.8/i386-linux-thread-multi/XML/Parser.pm line 189 +``` + +It often means that a timeout occur. 
diff --git a/gorgone/docs/modules/plugins/scom.md b/gorgone/docs/modules/plugins/scom.md new file mode 100644 index 00000000000..a18477da000 --- /dev/null +++ b/gorgone/docs/modules/plugins/scom.md @@ -0,0 +1,90 @@ +# Microsoft SCOM + +## Description + +This module aims to retreive alerts from Microsoft SCOM and store them in Centreon DSM slots. + +## Configuration + +| Directive | Description | Default value | +| :- | :- | :- | +| dsmclient_bin | Path to the Centreon DSM client | `/usr/share/centreon/bin/`dsmclient.pl| +| centcore_cmd | Path to centcore command file | `/var/lib/centreon/centcore.cmd` | +| check_containers_time | Time in seconds between two containers synchronisation | `3600` | + +#### Example + +```yaml +name: scom +package: "gorgone::modules::plugins::scom::hooks" +enable: false +check_containers_time: 3600 +dsmclient_bin: /usr/share/centreon/bin/dsmclient.pl +centcore_cmd: /var/lib/centreon/centcore.cmd +``` + +Add an entry in the *containers* table with the following attributes per SCOM server: + +| Directive | Description | +| :------------ | :---------- | +| name | Name of the SCOM configuration entrie | +| api_version | SCOM API version | +| url | URL of the SCOM API | +| username | Username to connect to SCOM API | +| password | Username's password | +| httpauth | API authentication type | +| resync_time | Time in seconds between two SCOM/Centreon synchronisations | +| dsmhost | Name of the Centreon host to link alerts to | +| dsmslot | Name of the Centreon DSM slots to link alerts to | +| dsmmacro | Name of the Centreon DSM macro to fill | +| dsmalertmessage | Output template for Centreon DSM service when there is an alert | +| dsmrecoverymessage | Output template for Centreon DSM service when alert is recovered | +| curlopts | Options table for Curl library | + +#### Example + +```yaml +containers: + - name: SCOM_prod + api_version: 2016 + url: "http://scomserver/api/" + username: user + password: pass + httpauth: basic + resync_time: 300 
+ dsmhost: ADH3 + dsmslot: Scom-% + dsmmacro: ALARM_ID + dsmalertmessage: "%{monitoringobjectdisplayname} %{name}" + dsmrecoverymessage: slot ok + curlopts: + CURLOPT_SSL_VERIFYPEER: 0 +``` + +## Events + +| Event | Description | +| :- | :- | +| SCOMREADY | Internal event to notify the core | +| SCOMRESYNC | Synchronise SCOM and Centreon realtime | + +## API + +### Force synchronisation between SCOM endpoints and Centreon realtime + +| Endpoint | Method | +| :- | :- | +| /plugins/scom/resync | `GET` | + +#### Headers + +| Header | Value | +| :- | :- | +| Accept | application/json | + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/plugins/scom/resync" \ + --header "Accept: application/json" +``` diff --git a/gorgone/docs/poller_pull_configuration.md b/gorgone/docs/poller_pull_configuration.md new file mode 100644 index 00000000000..852c145b1c3 --- /dev/null +++ b/gorgone/docs/poller_pull_configuration.md @@ -0,0 +1,105 @@ +# Architecture + +We are showing how to configure gorgone to manage that architecture: + +```text +Central server <------- Distant Poller +``` + +In our case, we have the following configuration (need to adatp to your configuration). + +* Central server: + * address: 10.30.2.203 +* Distant Poller: + * id: 6 (configured in Centreon interface as **zmq**. You get it in the Centreon interface) + * address: 10.30.2.179 + * rsa public key thumbprint: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s + +# Distant Poller + +## Installation + +The Distant Poller is already installed and Gorgone also. 
+ +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml +name: distant-server +description: Configuration for distant server +gorgone: + gorgonecore: + id: 6 + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: pull + package: "gorgone::modules::core::pull::hooks" + enable: true + target_type: tcp + target_path: 10.30.2.203:5556 + ping: 1 +``` + +# Central server + +## Installation + +The Central server is already installed and Gorgone also. + +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml +... +gorgone: + gorgonecore: + ... + external_com_type: tcp + external_com_path: "*:5556" + authorized_clients: + - key: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s + ... 
+ modules: + ... + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + config_file: /etc/centreon-gorgone/nodes-register-override.yml + ... +``` + +We create the file **/etc/centreon-gorgone/nodes-register-override.yml**: + +```yaml +nodes: + - id: 6 + type: pull + prevail: 1 +``` diff --git a/gorgone/docs/poller_pullwss_configuration.md b/gorgone/docs/poller_pullwss_configuration.md new file mode 100644 index 00000000000..da4d1cbeca6 --- /dev/null +++ b/gorgone/docs/poller_pullwss_configuration.md @@ -0,0 +1,114 @@ +# Architecture + +We are showing how to configure gorgone to manage that architecture: + +```text + +Central server <------- Distant Poller +``` +unlike for the pull module, the communication is entirely done on the HTTP(S) websocket. +In our case, we have the following configuration (you need to adapt it to your configuration). + +* Central server: + * address: 10.30.2.203 +* Distant Poller: + * id: 6 (configured in the Centreon interface as **zmq**. You get it in the Centreon interface) + * address: 10.30.2.179 + * rsa public key thumbprint: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s + +# Distant Poller + +## Installation + +The Distant Poller is already installed with Gorgone. 
+ +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml +name: distant-server +description: Configuration for distant server +gorgone: + gorgonecore: + id: 6 + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: pullwss + package: "gorgone::modules::core::pullwss::hooks" + enable: true + ssl: true + port: 443 + token: "1234" + address: 10.30.2.203 + ping: 1 +``` + +# Central server + +## Installation + +The Central server is already installed and Gorgone too. + +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml + +... +gorgone: + ... + modules: + ... 
+ - name: proxy + package: "gorgone::modules::core::proxy::hooks" + enable: true + httpserver: + enable: true + ssl: true + ssl_cert_file: /etc/centreon-gorgone/keys/certificate.crt + ssl_key_file: /etc/centreon-gorgone/keys/private.key + token: "1234" + address: "0.0.0.0" + port: 443 + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + config_file: /etc/centreon-gorgone/nodes-register-override.yml + ... +``` + +We create the file **/etc/centreon-gorgone/nodes-register-override.yml**: + +```yaml +nodes: + - id: 6 + type: pullwss + prevail: 1 +``` diff --git a/gorgone/docs/rebound_configuration.md b/gorgone/docs/rebound_configuration.md new file mode 100644 index 00000000000..4a83f5af2ce --- /dev/null +++ b/gorgone/docs/rebound_configuration.md @@ -0,0 +1,153 @@ +# Architecture + +We are showing how to configure gorgone to manage that architecture: + +```text +Central server <------- Rebound server <------- Distant Poller +``` + +In our case, we have the following configuration (need to adatp to your configuration). + +* Central server: + * address: 10.30.2.203 +* Rebound server: + * id: 1024 (It must be unique. It's an arbitrary number) + * address: 10.30.2.67 + * rsa public key thumbprint: NmnPME43IoWpkQoam6CLnrI5hjmdq6Kq8QMUCCg-F4g +* Distant Poller: + * id: 6 (configured in Centreon interface as **zmq**. You get it in the Centreon interface) + * address: 10.30.2.179 + * rsa public key thumbprint: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s + +# Distant Poller + +## Installation + +The Distant Poller is already installed and Gorgone also. 
+ +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml +name: distant-server +description: Configuration for distant server +gorgone: + gorgonecore: + id: 6 + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: pull + package: "gorgone::modules::core::pull::hooks" + enable: true + target_type: tcp + target_path: 10.30.2.67:5556 + ping: 1 +``` + +# Rebound server + +## Installation + +We have installed a CentOS 7 server. 
We install Gorgone daemon: + +```shell +yum install http://yum.centreon.com/standard/20.04/el7/stable/noarch/RPMS/centreon-release-20.04-1.el7.centos.noarch.rpm +yum install centreon-gorgone +``` + +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml +name: rebound-server +description: Configuration for rebound-server +gorgone: + gorgonecore: + id: 1024 + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + external_com_type: tcp + external_com_path: "*:5556" + authorized_clients: + - key: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s + + modules: + - name: proxy + package: "gorgone::modules::core::proxy::hooks" + enable: true + + - name: pull + package: "gorgone::modules::core::pull::hooks" + enable: true + target_type: tcp + target_path: 10.30.2.203:5556 + ping: 1 +``` + +# Central server + +## Installation + +The Central server is already installed and Gorgone also. + +## Configuration + +We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**: + +```yaml +... +gorgone: + gorgonecore: + ... + external_com_type: tcp + external_com_path: "*:5556" + authorized_clients: + - key: NmnPME43IoWpkQoam6CLnrI5hjmdq6Kq8QMUCCg-F4g + ... + modules: + ... + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + config_file: /etc/centreon-gorgone/nodes-register-override.yml + ... 
+``` + +We create the file **/etc/centreon-gorgone/nodes-register-override.yml**: + +```yaml +nodes: + - id: 1024 + type: pull + prevail: 1 + nodes: + - id: 6 + pathscore: 1 +``` diff --git a/gorgone/docs/zmq_architecture.svg b/gorgone/docs/zmq_architecture.svg new file mode 100644 index 00000000000..e1027101a51 --- /dev/null +++ b/gorgone/docs/zmq_architecture.svg @@ -0,0 +1,713 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + DEALER + + + + DEALER + + + + DEALER + + + Interface Web + + + ROUTER + + + + + ROUTER + + + + + + DEALER + + + + ROUTER + + + + + DEALER + + + + DEALER + + gorgone-crond + + + DEALER + + + + + + DEALER + + gorgone-core + gorgone-proxy + gorgone-action + Gorgoned + Agent + + + + + + + + + + + + Flux chiffrés + gorgone-pull + Agent + + diff --git a/gorgone/gorgone/class/clientzmq.pm b/gorgone/gorgone/class/clientzmq.pm new file mode 100644 index 00000000000..9c34f5bed97 --- /dev/null +++ b/gorgone/gorgone/class/clientzmq.pm @@ -0,0 +1,464 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::class::clientzmq; + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::misc; +use Crypt::Mode::CBC; +use MIME::Base64; +use Scalar::Util; +use ZMQ::FFI qw(ZMQ_DONTWAIT); +use EV; + +my $connectors = {}; +my $callbacks = {}; +my $sockets = {}; + +sub new { + my ($class, %options) = @_; + my $connector = {}; + $connector->{context} = $options{context}; + $connector->{logger} = $options{logger}; + $connector->{identity} = $options{identity}; + $connector->{extra_identity} = gorgone::standard::library::generate_token(length => 12); + $connector->{core_loop} = $options{core_loop}; + + $connector->{verbose_last_message} = ''; + $connector->{config_core} = $options{config_core}; + + if (defined($connector->{config_core}) && defined($connector->{config_core}->{fingerprint_mgr}->{package})) { + my ($code, $class_mgr) = gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => $connector->{config_core}->{fingerprint_mgr}->{package}, + error_msg => "Cannot load module $connector->{config_core}->{fingerprint_mgr}->{package}" + ); + if ($code == 0) { + $connector->{fingerprint_mgr} = $class_mgr->new( + logger => $connector->{logger}, + config => $connector->{config_core}->{fingerprint_mgr}, + config_core => $connector->{config_core} + ); + } + } + + if (defined($options{server_pubkey}) && $options{server_pubkey} ne '') { + (undef, $connector->{server_pubkey}) = gorgone::standard::library::loadpubkey( + pubkey => $options{server_pubkey}, + logger => $options{logger} + ); + } + (undef, $connector->{client_pubkey}) = gorgone::standard::library::loadpubkey( + pubkey => $options{client_pubkey}, + logger => $options{logger} + ); + (undef, $connector->{client_privkey}) = gorgone::standard::library::loadprivkey( + privkey => $options{client_privkey}, + logger => $options{logger} + ); + $connector->{target_type} = $options{target_type}; + $connector->{target_path} = $options{target_path}; + 
$connector->{ping} = defined($options{ping}) ? $options{ping} : -1; + $connector->{ping_timeout} = defined($options{ping_timeout}) ? $options{ping_timeout} : 30; + $connector->{ping_progress} = 0; + $connector->{ping_time} = time(); + $connector->{ping_timeout_time} = time(); + + if (defined($connector->{logger}) && $connector->{logger}->is_debug()) { + $connector->{logger}->writeLogDebug('[core] JWK thumbprint = ' . $connector->{client_pubkey}->export_key_jwk_thumbprint('SHA256')); + } + + $connectors->{ $options{identity} } = $connector; + bless $connector, $class; + return $connector; +} + +sub init { + my ($self, %options) = @_; + + $self->{handshake} = 0; + delete $self->{server_pubkey}; + $sockets->{ $self->{identity} } = gorgone::standard::library::connect_com( + context => $self->{context}, + zmq_type => 'ZMQ_DEALER', + name => $self->{identity} . '-' . $self->{extra_identity}, + logger => $self->{logger}, + type => $self->{target_type}, + path => $self->{target_path}, + zmq_ipv6 => $self->{config_core}->{ipv6} + ); + $callbacks->{ $self->{identity} } = $options{callback} if (defined($options{callback})); +} + +sub cleanup { + my ($self, %options) = @_; + + delete $callbacks->{ $self->{identity} }; + delete $connectors->{ $self->{identity} }; + delete $sockets->{ $self->{identity} }; +} + +sub close { + my ($self, %options) = @_; + + $sockets->{ $self->{identity} }->close() if (defined($sockets->{ $self->{identity} })); + $self->{core_watcher}->stop() if (defined($self->{core_watcher})); + delete $self->{core_watcher}; +} + +sub get_connect_identity { + my ($self, %options) = @_; + + return $self->{identity} . '-' . 
$self->{extra_identity}; +} + +sub get_server_pubkey { + my ($self, %options) = @_; + + $sockets->{ $self->{identity} }->send('[GETPUBKEY]', ZMQ_DONTWAIT); + $self->event(identity => $self->{identity}); + + my $w1 = $self->{connect_loop}->io( + $sockets->{ $self->{identity} }->get_fd(), + EV::READ, + sub { + $self->event(identity => $self->{identity}); + } + ); + my $w2 = $self->{connect_loop}->timer( + 10, + 0, + sub {} + ); + $self->{connect_loop}->run(EV::RUN_ONCE); +} + +sub read_key_protocol { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[clientzmq] ' . $self->{identity} . ' - read key protocol: ' . $options{text}); + + return (-1, 'Wrong protocol') if ($options{text} !~ /^\[KEY\]\s+(.*)$/); + + my $data = gorgone::standard::library::json_decode(module => 'clientzmq', data => $1, logger => $self->{logger}); + return (-1, 'Wrong protocol') if (!defined($data)); + + return (-1, 'Wrong protocol') if ( + !defined($data->{hostname}) || + !defined($data->{key}) || $data->{key} eq '' || + !defined($data->{cipher}) || $data->{cipher} eq '' || + !defined($data->{iv}) || $data->{iv} eq '' || + !defined($data->{padding}) || $data->{padding} eq '' + ); + + $self->{key} = pack('H*', $data->{key}); + $self->{iv} = pack('H*', $data->{iv}); + $self->{cipher} = $data->{cipher}; + $self->{padding} = $data->{padding}; + + $self->{crypt_mode} = Crypt::Mode::CBC->new( + $self->{cipher}, + $self->{padding} + ); + + return (0, 'ok'); +} + +sub decrypt_message { + my ($self, %options) = @_; + + my $plaintext; + eval { + $plaintext = $self->{crypt_mode}->decrypt( + MIME::Base64::decode_base64($options{message}), + $self->{key}, + $self->{iv} + ); + }; + if ($@) { + $self->{logger}->writeLogError("[clientzmq] $self->{identity} - decrypt message issue: " . 
$@); + return (-1, $@); + } + return (0, $plaintext); +} + +sub client_get_secret { + my ($self, %options) = @_; + + # there is an issue + if ($options{message} =~ /^\[ACK\]/) { + return (-1, "issue: $options{message}"); + } + + my $plaintext; + eval { + my $cryptedtext = MIME::Base64::decode_base64($options{message}); + $plaintext = $self->{client_privkey}->decrypt($cryptedtext, 'v1.5'); + }; + if ($@) { + return (-1, "Decoding issue: $@"); + } + + return $self->read_key_protocol(text => $plaintext); +} + +sub check_server_pubkey { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - get_server_pubkey check [1]"); + + if ($options{message} !~ /^\s*\[PUBKEY\]\s+\[(.*?)\]/) { + $self->{logger}->writeLogError('[clientzmq] ' . $self->{identity} . ' - cannot read pubbkey response from server: ' . $options{message}) if (defined($self->{logger})); + $self->{verbose_last_message} = 'cannot read pubkey response from server'; + return 0; + } + + my ($code, $verbose_message); + my $server_pubkey_str = MIME::Base64::decode_base64($1); + ($code, $self->{server_pubkey}) = gorgone::standard::library::loadpubkey( + pubkey_str => $server_pubkey_str, + logger => $self->{logger}, + noquit => 1 + ); + + if ($code == 0) { + $self->{logger}->writeLogError('[clientzmq] ' . $self->{identity} . ' cannot load pubbkey') if (defined($self->{logger})); + $self->{verbose_last_message} = 'cannot load pubkey'; + return 0; + } + + # if not set, we are in 'always' mode + if (defined($self->{fingerprint_mgr})) { + my $thumbprint = $self->{server_pubkey}->export_key_jwk_thumbprint('SHA256'); + ($code, $verbose_message) = $self->{fingerprint_mgr}->check_fingerprint( + target => $self->{target_type} . '://' . 
$self->{target_path}, + fingerprint => $thumbprint + ); + if ($code == 0) { + $self->{logger}->writeLogError($verbose_message) if (defined($self->{logger})); + $self->{verbose_last_message} = $verbose_message; + return 0; + } + } + + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - get_server_pubkey ok [1]"); + + return 1; +} + +sub is_connected { + my ($self, %options) = @_; + + # Should be connected (not 100% sure) + if ($self->{handshake} == 2) { + return (0, $self->{ping_time}); + } + return -1; +} + +sub ping { + my ($self, %options) = @_; + my $status = 0; + + if ($self->{ping} > 0 && $self->{ping_progress} == 0 && + time() - $self->{ping_time} > $self->{ping}) { + $self->{ping_progress} = 1; + $self->{ping_timeout_time} = time(); + my $action = defined($options{action}) ? $options{action} : 'PING'; + $self->send_message(action => $action, data => $options{data}, json_encode => $options{json_encode}); + $status = 1; + } + + if ($self->{ping_progress} == 1 && + time() - $self->{ping_timeout_time} > $self->{ping_timeout}) { + $self->{logger}->writeLogError("[clientzmq] No ping response") if (defined($self->{logger})); + $self->{ping_progress} = 0; + $self->close(); + # new identity for a new handshake (for module pull) + $self->{extra_identity} = gorgone::standard::library::generate_token(length => 12); + $self->init(); + $status = 1; + } + + return $status; +} + +sub add_watcher { + my ($self, %options) = @_; + + $self->{core_watcher} = $self->{core_loop}->io( + $sockets->{ $self->{identity} }->get_fd(), + EV::READ, + sub { + $self->event(identity => $self->{identity}); + } + ); +} + +sub event { + my ($self, %options) = @_; + + $connectors->{ $options{identity} }->{ping_time} = time(); + while ($sockets->{ $options{identity} }->has_pollin()) { + my ($rv, $message) = gorgone::standard::library::zmq_dealer_read_message(socket => $sockets->{ $options{identity} }); + next if ($connectors->{ $options{identity} }->{handshake} == -1); + next if ($rv); 
+ + # We have a response. So it's ok :) + if ($connectors->{ $options{identity} }->{ping_progress} == 1) { + $connectors->{ $options{identity} }->{ping_progress} = 0; + } + + # in progress + if ($connectors->{ $options{identity} }->{handshake} == 0) { + $connectors->{ $options{identity} }->{handshake} = 1; + if ($connectors->{ $options{identity} }->check_server_pubkey(message => $message) == 0) { + $connectors->{ $options{identity} }->{handshake} = -1; + + } + } elsif ($connectors->{ $options{identity} }->{handshake} == 1) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_get_secret recv [3]"); + my ($status, $verbose, $symkey, $hostname) = $connectors->{ $options{identity} }->client_get_secret( + message => $message + ); + if ($status == -1) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_get_secret $verbose [3]"); + $connectors->{ $options{identity} }->{handshake} = -1; + $connectors->{ $options{identity} }->{verbose_last_message} = $verbose; + next; + } + $connectors->{ $options{identity} }->{handshake} = 2; + if (defined($connectors->{ $options{identity} }->{logger})) { + $connectors->{ $options{identity} }->{logger}->writeLogInfo( + "[clientzmq] $self->{identity} - Client connected successfully to '" . $connectors->{ $options{identity} }->{target_type} . + "://" . $connectors->{ $options{identity} }->{target_path} . "'" + ); + $self->add_watcher(); + } + } else { + my ($rv, $data) = $connectors->{ $options{identity} }->decrypt_message(message => $message); + + if ($rv == -1 || $data !~ /^\[([a-zA-Z0-9:\-_]+?)\]\s+/) { + $connectors->{ $options{identity} }->{handshake} = -1; + $connectors->{ $options{identity} }->{verbose_last_message} = 'decrypt issue: ' . 
$data; + next; + } + + if ($1 eq 'KEY') { + ($rv) = $connectors->{ $options{identity} }->read_key_protocol(text => $data); + } elsif (defined($callbacks->{$options{identity}})) { + $callbacks->{$options{identity}}->(identity => $options{identity}, data => $data); + } + } + } +} + +sub zmq_send_message { + my ($self, %options) = @_; + + my $message = $options{message}; + if (!defined($message)) { + $message = gorgone::standard::library::build_protocol(%options); + } + + eval { + $message = $self->{crypt_mode}->encrypt( + $message, + $self->{key}, + $self->{iv} + ); + $message = MIME::Base64::encode_base64($message, ''); + }; + if ($@) { + $self->{logger}->writeLogError("[clientzmq] encrypt message issue: " . $@); + return undef; + } + + $options{socket}->send($message, ZMQ_DONTWAIT); + $self->event(identity => $self->{identity}); +} + +sub send_message { + my ($self, %options) = @_; + + if ($self->{handshake} == 0) { + $self->{connect_loop} = EV::Loop->new(); + + if (!defined($self->{server_pubkey})) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - get_server_pubkey sent [1]"); + $self->get_server_pubkey(); + } else { + $self->{handshake} = 1; + } + } + + if ($self->{handshake} == 1) { + my ($status, $ciphertext) = gorgone::standard::library::client_helo_encrypt( + identity => $self->{identity}, + server_pubkey => $self->{server_pubkey}, + client_pubkey => $self->{client_pubkey}, + ); + if ($status == -1) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_helo crypt handshake issue [2]"); + $self->{verbose_last_message} = 'crypt handshake issue'; + return (-1, $self->{verbose_last_message}); + } + + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_helo sent [2]"); + + $self->{verbose_last_message} = 'Handshake timeout'; + $sockets->{ $self->{identity} }->send($ciphertext, ZMQ_DONTWAIT); + $self->event(identity => $self->{identity}); + + my $w1 = $self->{connect_loop}->io( + $sockets->{ 
$self->{identity} }->get_fd(), + EV::READ, + sub { + $self->event(identity => $self->{identity}); + } + ); + my $w2 = $self->{connect_loop}->timer( + 10, + 0, + sub {} + ); + $self->{connect_loop}->run(EV::RUN_ONCE); + } + + if (defined($self->{connect_loop})) { + delete $self->{connect_loop}; + } + + if ($self->{handshake} < 2) { + $self->{handshake} = 0; + return (-1, $self->{verbose_last_message}); + } + + $self->zmq_send_message( + socket => $sockets->{ $self->{identity} }, + %options + ); + + return 0; +} + +1; diff --git a/gorgone/gorgone/class/core.pm b/gorgone/gorgone/class/core.pm new file mode 100644 index 00000000000..b432de30721 --- /dev/null +++ b/gorgone/gorgone/class/core.pm @@ -0,0 +1,1326 @@ +# +# Copyright 2023 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+package gorgone::class::core;
+
+use strict;
+use warnings;
+use POSIX ":sys_wait_h";
+use MIME::Base64;
+use Crypt::Mode::CBC;
+use ZMQ::FFI qw(ZMQ_DONTWAIT ZMQ_SNDMORE);
+use EV;
+use gorgone::standard::library;
+use gorgone::standard::constants qw(:all);
+use gorgone::standard::misc;
+use gorgone::class::db;
+use gorgone::class::listener;
+use gorgone::class::frame;
+use Time::HiRes;
+use Try::Tiny;
+
+# File-scoped handle on the daemon instance (passed to init_database below);
+# NOTE(review): it is never assigned in this chunk — confirm it is set later
+# in this file.
+my ($gorgone);
+
+use base qw(gorgone::class::script);
+
+my $VERSION = '23.10.0';
+# Per-instance signal handler registry, keyed by signal name then object,
+# so the package-level class_handle_* trampolines can dispatch to methods.
+my %handlers = (TERM => {}, HUP => {}, CHLD => {}, DIE => {});
+
+# Constructor: builds the 'gorgoned' daemon object on top of
+# gorgone::class::script (no DB connections, no config parsing yet) and
+# initializes all runtime state containers and message counters.
+sub new {
+    my $class = shift;
+    my $self = $class->SUPER::new(
+        'gorgoned',
+        centreon_db_conn => 0,
+        centstorage_db_conn => 0,
+        noconfig => 1
+    );
+
+    bless $self, $class;
+
+    $self->{return_child} = {};
+    $self->{stop} = 0;
+    $self->{internal_register} = {};
+    $self->{modules_register} = {};
+    $self->{modules_events} = {};
+    $self->{modules_id} = {};
+    $self->{purge_timer} = time();
+    $self->{history_timer} = time();
+    $self->{sigterm_start_time} = undef;
+    $self->{sigterm_last_time} = undef;
+    $self->{server_privkey} = undef;
+    $self->{register_parent_nodes} = {};
+    $self->{counters} = { total => 0, internal => { total => 0 }, external => { total => 0 }, proxy => { total => 0 } };
+    # Built-in REST endpoints; modules add their own in load_module().
+    $self->{api_endpoints} = {
+        'GET_/internal/thumbprint' => 'GETTHUMBPRINT',
+        'GET_/internal/constatus' => 'CONSTATUS',
+        'GET_/internal/information' => 'INFORMATION',
+        'POST_/internal/logger' => 'BCASTLOGGER',
+    };
+
+    return $self;
+}
+
+# Return the daemon version string.
+sub get_version {
+    my ($self, %options) = @_;
+
+    return $VERSION;
+}
+
+# Load (or generate on first run) the server RSA keypair used for the
+# external ZMQ channel handshake. Sets {keys_loaded} to 1 only when both
+# keys are usable; any failure leaves it at 0 and returns early.
+sub init_server_keys {
+    my ($self, %options) = @_;
+
+    my ($code, $content_privkey, $content_pubkey);
+    $self->{logger}->writeLogInfo("[core] Initialize server keys");
+
+    $self->{keys_loaded} = 0;
+    # Vivify the config tree defensively before dereferencing it.
+    $self->{config} = { configuration => {} } if (!defined($self->{config}->{configuration}));
+    $self->{config}->{configuration} = { gorgone => {} } if (!defined($self->{config}->{configuration}->{gorgone}));
+    
$self->{config}->{configuration}->{gorgone}->{gorgonecore} = {} if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore})); + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey} = '/var/lib/centreon-gorgone/.keys/rsakey.priv.pem' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey} = '/var/lib/centreon-gorgone/.keys/rsakey.pub.pem' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey} eq ''); + + if (! -f $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey} && ! -f $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}) { + ($code, $content_privkey, $content_pubkey) = gorgone::standard::library::generate_keys(logger => $self->{logger}); + return if ($code == 0); + $code = gorgone::standard::misc::write_file( + logger => $self->{logger}, + filename => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}, + content => $content_privkey, + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Private key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}' written"); + + $code = gorgone::standard::misc::write_file( + logger => $self->{logger}, + filename => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}, + content => $content_pubkey, + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Public key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}' written"); + } + + my $rv = chmod(0600, $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}); + if ($rv == 0) { + $self->{logger}->writeLogInfo("[core] chmod private key file 
'$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}': $!"); + } + $rv = chmod(0640, $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}); + if ($rv == 0) { + $self->{logger}->writeLogInfo("[core] chmod public key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}': $!"); + } + + ($code, $self->{server_privkey}) = gorgone::standard::library::loadprivkey( + logger => $self->{logger}, + privkey => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}, + noquit => 1 + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Private key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}' loaded"); + + ($code, $self->{server_pubkey}) = gorgone::standard::library::loadpubkey( + logger => $self->{logger}, + pubkey => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}, + noquit => 1 + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Public key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}' loaded"); + + $self->{keys_loaded} = 1; +} + +sub init { + my ($self) = @_; + $self->SUPER::init(); + + # redefine to avoid out when we try modules + $SIG{__DIE__} = undef; + + ## load config + if (!defined($self->{config_file})) { + $self->{logger}->writeLogError('[core] please define config file option'); + exit(1); + } + if (! 
-f $self->{config_file}) { + $self->{logger}->writeLogError("[core] can't find config file '$self->{config_file}'"); + exit(1); + } + $self->{config} = $self->yaml_load_config( + file => $self->{config_file}, + filter => '!($ariane eq "configuration##" || $ariane =~ /^configuration##(?:gorgone|centreon)##/)' + ); + $self->init_server_keys(); + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive} = + defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive} =~ /^(0|1)$/ ? $1 : 1; + + my $time_hi = Time::HiRes::time(); + $time_hi =~ s/\.//; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type} = 'ipc' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path} = '/tmp/gorgone/routing-' . $time_hi . 
'.ipc' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path} eq ''); + + if (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} =~ /^(?:false|0)$/i) { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} = 0; + } else { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} = 1; + } + + $self->{internal_crypt} = { enabled => 0 }; + if ($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} == 1) { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher} = 'AES' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding} = 1 # PKCS5 padding + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} = 32 + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation} = 1440 # minutes + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation} *= 60; + + 
$self->{cipher} = Crypt::Mode::CBC->new( + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher}, + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding} + ); + + my ($rv, $symkey, $iv); + ($rv, $symkey) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} + ); + ($rv, $iv) = gorgone::standard::library::generate_symkey( + keysize => 16 + ); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key_ctime} = time(); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key} = $symkey; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey} = undef; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys} = {}; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_iv} = $iv; + + $self->{internal_crypt} = { + enabled => 1, + cipher => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher}, + padding => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding}, + iv => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_iv} + }; + } + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout} = + defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout} =~ /(\d+)/ ? 
$1 : 50;
+
+    # Defaults for the external (public) ZMQ channel encryption: AES-CBC,
+    # PKCS5 padding, 32-byte keys, rotation every 1440 minutes (stored in
+    # seconds after the *= 60 below).
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher} = 'AES'
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding} = 1 # PKCS5 padding
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} = 32
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation} = 1440 # minutes
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation} *= 60;
+
+    # Fingerprint checking mode: 'always', 'first' or 'strict', defaulting to
+    # 'first'. Fixed: the alternation previously read (always|firt|strict) —
+    # 'firt' is a typo — so an explicit "fingerprint_mode: first" never
+    # matched and silently fell back to the default.
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mode} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mode}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mode} =~ /^\s*(always|first|strict)\s*/i ? lc($1) : 'first';
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr} = {} if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}));
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}->{package} = 'gorgone::class::fingerprint::backend::sql'
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}->{package}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}->{package} eq '');
+
+    # NOTE(review): an identical, redundant fingerprint_mode assignment
+    # followed here; it has been removed.
+
+    # History database: SQLite file by default, schema auto-created.
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type} ne '' ? $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type} : 'SQLite';
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name} ne '' ? $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name} : 'dbname=/var/lib/centreon-gorgone/history.sdb';
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema} =~ /(\d+)/ ? 
$1 : 1; + gorgone::standard::library::init_database( + gorgone => $gorgone, + version => $self->get_version(), + type => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type}, + db => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name}, + host => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_host}, + port => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_port}, + user => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_user}, + password => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_password}, + autocreate_schema => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema}, + force => 2, + logger => $gorgone->{logger} + ); + + $self->{hostname} = $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{hostname}; + if (!defined($self->{hostname}) || $self->{hostname} eq '') { + my ($sysname, $nodename, $release, $version, $machine) = POSIX::uname(); + $self->{hostname} = $sysname; + } + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} = + (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} ne '') ? 
$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} : 'proxy'; + $self->{id} = $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{id}; + + $self->load_modules(); + + $self->set_signal_handlers(); +} + +sub init_external_informations { + my ($self) = @_; + + my ($status, $sth) = $self->{db_gorgone}->query({ + query => "SELECT `identity`, `ctime`, `mtime`, `key`, `oldkey`, `iv`, `oldiv` FROM gorgone_identity ORDER BY id DESC" + }); + if ($status == -1) { + $self->{logger}->writeLogError("[core] cannot load gorgone_identity"); + return 0; + } + + $self->{identity_infos} = {}; + while (my $row = $sth->fetchrow_arrayref()) { + next if (!defined($row->[3]) || !defined($row->[2])); + + if (!defined($self->{identity_infos}->{ $row->[0] })) { + $self->{identity_infos}->{ $row->[0] } = { + ctime => $row->[1], + mtime => $row->[2], + key => pack('H*', $row->[3]), + oldkey => defined($row->[4]) ? pack('H*', $row->[4]) : undef, + iv => pack('H*', $row->[5]), + oldiv => defined($row->[6]) ? 
pack('H*', $row->[6]) : undef
+                };
+            }
+        }
+    }
+
+    # Shared CBC cipher object used to decrypt/encrypt external traffic with
+    # the per-identity keys loaded above.
+    $self->{external_crypt_mode} = Crypt::Mode::CBC->new(
+        $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher},
+        $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}
+    );
+}
+
+# Install process signal handlers. %SIG points at the package-level
+# class_handle_* trampolines, which fan out to per-instance closures stored
+# in %handlers so methods on $self get called.
+sub set_signal_handlers {
+    my ($self) = @_;
+
+    $SIG{TERM} = \&class_handle_TERM;
+    $handlers{TERM}->{$self} = sub { $self->handle_TERM() };
+    $SIG{HUP} = \&class_handle_HUP;
+    $handlers{HUP}->{$self} = sub { $self->handle_HUP() };
+    $SIG{CHLD} = \&class_handle_CHLD;
+    $handlers{CHLD}->{$self} = sub { $self->handle_CHLD() };
+    $SIG{__DIE__} = \&class_handle_DIE;
+    $handlers{DIE}->{$self} = sub { $self->handle_DIE($_[0]) };
+}
+
+# Trampoline: dispatch SIGTERM to every registered instance handler.
+sub class_handle_TERM {
+    foreach (keys %{$handlers{TERM}}) {
+        &{$handlers{TERM}->{$_}}();
+    }
+}
+
+# Trampoline: dispatch SIGHUP to every registered instance handler.
+sub class_handle_HUP {
+    foreach (keys %{$handlers{HUP}}) {
+        &{$handlers{HUP}->{$_}}();
+    }
+}
+
+# Trampoline: dispatch SIGCHLD to every registered instance handler.
+sub class_handle_CHLD {
+    foreach (keys %{$handlers{CHLD}}) {
+        &{$handlers{CHLD}->{$_}}();
+    }
+}
+
+# Trampoline: dispatch __DIE__ (with the death message) to every handler.
+sub class_handle_DIE {
+    my ($msg) = @_;
+
+    foreach (keys %{$handlers{DIE}}) {
+        &{$handlers{DIE}->{$_}}($msg);
+    }
+}
+
+# SIGTERM: flag the main loop to shut down gracefully.
+sub handle_TERM {
+    my ($self) = @_;
+    $self->{logger}->writeLogInfo("[core] $$ Receiving order to stop...");
+
+    $self->{stop} = 1;
+}
+
+# SIGHUP: reload requested — not implemented yet.
+sub handle_HUP {
+    my $self = shift;
+    $self->{logger}->writeLogInfo("[core] $$ Receiving order to reload...");
+    # TODO
+}
+
+# SIGCHLD: reap every exited child (non-blocking) and record the reap time
+# so the main loop can clean up module state. Re-arms the handler for
+# platforms that reset %SIG after delivery.
+sub handle_CHLD {
+    my $self = shift;
+    my $child_pid;
+
+    while (($child_pid = waitpid(-1, &WNOHANG)) > 0) {
+        $self->{logger}->writeLogDebug("[core] Received SIGCLD signal (pid: $child_pid)");
+        $self->{return_child}->{$child_pid} = time();
+    }
+
+    $SIG{CHLD} = \&class_handle_CHLD;
+}
+
+# __DIE__ hook: log the message; does not exit by itself.
+sub handle_DIE {
+    my $self = shift;
+    my $msg = shift;
+
+    $self->{logger}->writeLogError("[core] Receiving DIE: $msg");
+}
+
+# Unregister a module by package name: drop its events, its register entry
+# and its id mapping.
+sub unload_module {
+    my ($self, %options) = @_;
+
+    foreach my $event (keys %{$self->{modules_events}}) {
+        if ($self->{modules_events}->{$event}->{module}->{package} eq 
$options{package}) { + delete $self->{modules_events}->{$event}; + } + } + + delete $self->{modules_register}->{ $options{package} }; + foreach (keys %{$self->{modules_id}}) { + if ($self->{modules_id}->{$_} eq $options{package}) { + delete $self->{modules_id}->{$_}; + last; + } + } + $self->{logger}->writeLogInfo("[core] Module '" . $options{package} . "' is unloaded"); +} + +sub load_module { + my ($self, %options) = @_; + + if (!defined($options{config_module}->{name}) || $options{config_module}->{name} eq '') { + $self->{logger}->writeLogError('[core] No module name'); + return 0; + } + if (!defined($options{config_module}->{package}) || $options{config_module}->{package} eq '') { + $self->{logger}->writeLogError('[core] No package name'); + return 0; + } + if (defined($self->{modules_register}->{ $options{config_module}->{package} })) { + $self->{logger}->writeLogError("[core] Package '$options{config_module}->{package}' already loaded"); + return 0; + } + + return 0 if (!defined($options{config_module}->{enable}) || $options{config_module}->{enable} eq 'false'); + $self->{logger}->writeLogInfo("[core] Module '" . $options{config_module}->{name} . "' is loading"); + + my $package = $options{config_module}->{package}; + (my $file = "$package.pm") =~ s{::}{/}g; + eval { + local $SIG{__DIE__} = 'IGNORE'; + require $file; + }; + if ($@) { + $self->{logger}->writeLogInfo("[core] Module '" . $options{config_module}->{name} . "' cannot be loaded: " . $@); + return 0; + } + $self->{modules_register}->{$package} = {}; + + foreach my $method_name (('register', 'routing', 'kill', 'kill_internal', 'gently', 'check', 'init', 'broadcast')) { + unless ($self->{modules_register}->{$package}->{$method_name} = $package->can($method_name)) { + delete $self->{modules_register}->{$package}; + $self->{logger}->writeLogError("[core] No function '$method_name' for module '" . $options{config_module}->{name} . 
"'"); + return 0; + } + } + + my ($loaded, $namespace, $name, $events) = $self->{modules_register}->{$package}->{register}->( + config => $options{config_module}, + config_core => $self->{config}->{configuration}->{gorgone}, + config_db_centreon => $self->{config}->{configuration}->{centreon}->{database}->{db_configuration}, + config_db_centstorage => $self->{config}->{configuration}->{centreon}->{database}->{db_realtime}, + logger => $self->{logger} + ); + if ($loaded == 0) { + delete $self->{modules_register}->{$package}; + $self->{logger}->writeLogError("[core] Module '" . $options{config_module}->{name} . "' cannot be loaded"); + return 0; + } + + $self->{modules_id}->{$name} = $package; + + foreach my $event (@$events) { + $self->{modules_events}->{$event->{event}} = { + module => { + namespace => $namespace, + name => $name, + package => $package + } + }; + $self->{api_endpoints}->{$event->{method} . '_/' . $namespace . '/' . $name . $event->{uri}} = $event->{event} if defined($event->{uri}); + } + + $self->{logger}->writeLogInfo("[core] Module '" . $options{config_module}->{name} . 
"' is loaded"); + return 1; +} + +sub load_modules { + my ($self) = @_; + return if (!defined($self->{config}->{configuration}->{gorgone}->{modules})); + + foreach my $module (@{$self->{config}->{configuration}->{gorgone}->{modules}}) { + $self->load_module(config_module => $module); + } + + # force to load module dbclean + $self->load_module(config_module => { name => 'dbcleaner', package => 'gorgone::modules::core::dbcleaner::hooks', enable => 'true' }); + + # Load internal functions + foreach my $method_name (('addlistener', 'putlog', 'getlog', 'kill', 'ping', + 'getthumbprint', 'constatus', 'setcoreid', 'synclogs', 'loadmodule', 'unloadmodule', 'information', 'setmodulekey')) { + unless ($self->{internal_register}->{$method_name} = gorgone::standard::library->can($method_name)) { + $self->{logger}->writeLogError("[core] No function '$method_name'"); + exit(1); + } + } +} + +sub broadcast_core_key { + my ($self, %options) = @_; + + my ($rv, $key) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} + ); + + my $message = '[BCASTCOREKEY] [] [] { "key": "' . unpack('H*', $key). 
'"}';
+    my $frame = gorgone::class::frame->new();
+    $frame->setFrame(\$message);
+
+    $self->message_run(
+        {
+            frame => $frame,
+            router_type => 'internal'
+        }
+    );
+}
+
+# Decrypt an internal-channel frame in place. Tries the sender's dedicated
+# key when one exists, otherwise the current core key then (if set) the
+# previous core key, so messages sent around a key rotation still decrypt.
+# Returns 0 on success (or when internal crypto is disabled), 1 on failure.
+sub decrypt_internal_message {
+    my ($self, %options) = @_;
+
+    if ($self->{internal_crypt}->{enabled} == 1) {
+        my $id = pack('H*', $options{identity});
+        my $keys;
+        if (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys}->{$id})) {
+            $keys = [ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys}->{$id}->{key} ];
+        } else {
+            $keys = [ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key} ];
+            push @$keys, $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey}
+                if (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey}));
+        }
+        foreach my $key (@$keys) {
+            if ($options{frame}->decrypt({ cipher => $self->{cipher}, key => $key, iv => $self->{internal_crypt}->{iv} }) == 0) {
+                return 0;
+            }
+        }
+
+        $self->{logger}->writeLogError("[core] decrypt issue ($id): " . $options{frame}->getLastError());
+        return 1;
+    }
+
+    return 0;
+}
+
+# Build and send a response frame back on the internal ROUTER socket to the
+# given identity. Response type defaults to ACK; payload is JSON-encoded
+# {code, data}.
+sub send_internal_response {
+    my ($self, %options) = @_;
+
+    my $response_type = defined($options{response_type}) ? $options{response_type} : 'ACK';
+    my $data = gorgone::standard::library::json_encode(data => { code => $options{code}, data => $options{data} });
+    # We add an empty 'target' for 'PONG'/'SYNCLOGS' so gorgone-proxy can get it.
+    # Fixed: /^PONG|SYNCLOGS$/ parsed as (^PONG)|(SYNCLOGS$) because | binds
+    # looser than the anchors; group the alternation so only the exact
+    # response types match.
+    my $message = '[' . $response_type . '] [' . (defined($options{token}) ? $options{token} : '') . '] ' . ($response_type =~ /^(?:PONG|SYNCLOGS)$/ ? '[] ' : '') . 
$data;
+
+    if ($self->{internal_crypt}->{enabled} == 1) {
+        # Fixed: 'return undef' inside a Try::Tiny catch{} only returns from
+        # the catch coderef, not from this sub — the old code fell through and
+        # sent the *plaintext* message after an encryption failure. Capture
+        # the error and bail out from the sub itself.
+        my $error;
+        try {
+            $message = $self->{cipher}->encrypt(
+                $message,
+                $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key},
+                $self->{internal_crypt}->{iv}
+            );
+        } catch {
+            $error = $_;
+        };
+        if (defined($error)) {
+            $self->{logger}->writeLogError("[core] encrypt issue: $error");
+            return undef;
+        }
+
+        $message = MIME::Base64::encode_base64($message, '');
+    }
+
+    $self->{internal_socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT | ZMQ_SNDMORE);
+    $self->{internal_socket}->send($message, ZMQ_DONTWAIT);
+}
+
+# Send a message (pre-built, or assembled from %options by build_protocol)
+# to an internal identity, encrypting with the current core key when
+# internal crypto is enabled. Returns undef on encryption failure without
+# sending anything.
+sub send_internal_message {
+    my ($self, %options) = @_;
+
+    my $message = $options{message};
+    if (!defined($message)) {
+        $message = gorgone::standard::library::build_protocol(%options);
+    }
+
+    if ($self->{internal_crypt}->{enabled} == 1) {
+        # Same Try::Tiny fix as send_internal_response: never fall through to
+        # send plaintext when encrypt() dies.
+        my $error;
+        try {
+            $message = $self->{cipher}->encrypt(
+                $message,
+                $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key},
+                $self->{internal_crypt}->{iv}
+            );
+        } catch {
+            $error = $_;
+        };
+        if (defined($error)) {
+            $self->{logger}->writeLogError("[core] encrypt issue: $error");
+            return undef;
+        }
+
+        $message = MIME::Base64::encode_base64($message, '');
+    }
+
+    $self->{internal_socket}->send($options{identity}, ZMQ_DONTWAIT | ZMQ_SNDMORE);
+    $self->{internal_socket}->send($message, ZMQ_DONTWAIT);
+}
+
+# Handle a BCAST* action: optionally adjust the logger severity
+# (BCASTLOGGER), then fan the frame out to every loaded module's broadcast
+# hook, and finally apply a core key rotation (BCASTCOREKEY).
+sub broadcast_run {
+    my ($self, %options) = @_;
+
+    my $data = $options{frame}->decodeData();
+    return if (!defined($data));
+
+    if ($options{action} eq 'BCASTLOGGER') {
+        if (defined($data->{content}->{severity}) && $data->{content}->{severity} ne '') {
+            if ($data->{content}->{severity} eq 'default') {
+                $self->{logger}->set_default_severity();
+            } else {
+                $self->{logger}->severity($data->{content}->{severity});
+            }
+        }
+    }
+
+    foreach (keys %{$self->{modules_register}}) {
+        $self->{modules_register}->{$_}->{broadcast}->(
+            gorgone => $self,
+            dbh => $self->{db_gorgone},
+            action => $options{action},
+            logger => $self->{logger},
+            frame => $options{frame},
+            token => $options{token}
+        );
+    }
+
+    
if ($options{action} eq 'BCASTCOREKEY') { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key_ctime} = time(); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey} = $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key}; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key} = pack('H*', $data->{key}); + } +} + +sub message_run { + my ($self, $options) = (shift, shift); + + if ($self->{logger}->is_debug()) { + my $frame_ref = $options->{frame}->getFrame(); + $self->{logger}->writeLogDebug('[core] Message received ' . $options->{router_type} . ' - ' . $$frame_ref); + } + if ($options->{frame}->parse({ releaseFrame => 1 }) != 0) { + return (undef, 1, { message => 'request not well formatted' }); + } + my ($action, $token, $target) = ($options->{frame}->getAction(), $options->{frame}->getToken(), $options->{frame}->getTarget()); + + # Check if not myself ;) + if (defined($target) && ($target eq '' || (defined($self->{id}) && $target eq $self->{id}))) { + $target = undef; + } + + if (!defined($token) || $token eq '') { + $token = gorgone::standard::library::generate_token(); + } + + if ($action !~ /^(?:ADDLISTENER|PUTLOG|GETLOG|KILL|PING|CONSTATUS|SETCOREID|SETMODULEKEY|SYNCLOGS|LOADMODULE|UNLOADMODULE|INFORMATION|GETTHUMBPRINT|BCAST.*)$/ && + !defined($target) && !defined($self->{modules_events}->{$action})) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { error => "unknown_action", message => "action '$action' is unknown" }, + json_encode => 1 + }); + return (undef, 1, { error => "unknown_action", message => "action '$action' is unknown" }); + } + + $self->{counters}->{ $options->{router_type} }->{lc($action)} = 0 if (!defined($self->{counters}->{ $options->{router_type} }->{lc($action)})); + $self->{counters}->{ $options->{router_type} 
}->{lc($action)}++; + $self->{counters}->{total}++; + $self->{counters}->{ $options->{router_type} }->{total}++; + + if ($self->{stop} == 1) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { message => 'gorgone is stopping/restarting. Cannot proceed request.' }, + json_encode => 1 + }); + return ($token, 1, { message => 'gorgone is stopping/restarting. Cannot proceed request.' }); + } + + # Check Routing + if (defined($target)) { + if (!defined($self->{modules_id}->{ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} }) || + !defined($self->{modules_register}->{ $self->{modules_id}->{ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} } })) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { error => "no_proxy", message => 'no proxy configured. cannot manage target.' }, + json_encode => 1 + }); + return ($token, 1, { error => "no_proxy", message => 'no proxy configured. cannot manage target.' 
}); + } + + $self->{counters}->{proxy}->{lc($action)} = 0 if (!defined($self->{counters}->{proxy}->{lc($action)})); + $self->{counters}->{proxy}->{lc($action)}++; + $self->{counters}->{proxy}->{total}++; + + $self->{modules_register}->{ $self->{modules_id}->{ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} } }->{routing}->( + gorgone => $self, + dbh => $self->{db_gorgone}, + logger => $self->{logger}, + action => $action, + token => $token, + target => $target, + frame => $options->{frame}, + hostname => $self->{hostname} + ); + return ($token, 0); + } + + if ($action =~ /^(?:ADDLISTENER|PUTLOG|GETLOG|KILL|PING|CONSTATUS|SETCOREID|SETMODULEKEY|SYNCLOGS|LOADMODULE|UNLOADMODULE|INFORMATION|GETTHUMBPRINT)$/) { + my ($code, $response, $response_type) = $self->{internal_register}->{lc($action)}->( + gorgone => $self, + gorgone_config => $self->{config}->{configuration}->{gorgone}, + identity => $options->{identity}, + router_type => $options->{router_type}, + id => $self->{id}, + frame => $options->{frame}, + token => $token, + logger => $self->{logger} + ); + + if ($action =~ /^(?:CONSTATUS|INFORMATION|GETTHUMBPRINT)$/) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => $code, + token => $token, + data => $response, + json_encode => 1 + }); + } + + return ($token, $code, $response, $response_type); + } elsif ($action =~ /^BCAST(.*)$/) { + return (undef, 1, { message => "action '$action' is not known" }) if ($1 !~ /^(?:LOGGER|COREKEY)$/); + $self->broadcast_run( + action => $action, + frame => $options->{frame}, + token => $token + ); + } else { + $self->{modules_register}->{ $self->{modules_events}->{$action}->{module}->{package} }->{routing}->( + gorgone => $self, + dbh => $self->{db_gorgone}, + logger => $self->{logger}, + action => $action, + token => $token, + target => $target, + frame => $options->{frame}, + hostname => $self->{hostname} + ); + } + + return ($token, 0); +} + +sub router_internal_event { 
+ my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($identity, $frame) = gorgone::standard::library::zmq_read_message( + socket => $self->{internal_socket}, + logger => $self->{logger} + ); + + next if (!defined($identity)); + + next if ($self->decrypt_internal_message(identity => $identity, frame => $frame)); + + my ($token, $code, $response, $response_type) = $self->message_run( + { + frame => $frame, + identity => $identity, + router_type => 'internal' + } + ); + + $self->send_internal_response( + identity => $identity, + response_type => $response_type, + data => $response, + code => $code, + token => $token + ); + } +} + +sub is_handshake_done { + my ($self, %options) = @_; + + if (defined($self->{identity_infos}->{ $options{identity} })) { + return (1, $self->{identity_infos}->{ $options{identity} }); + } + + return 0; +} + +sub check_external_rotate_keys { + my ($self, %options) = @_; + + my $time = time(); + my ($rv, $key, $iv); + foreach my $id (keys %{$self->{identity_infos}}) { + if ($self->{identity_infos}->{$id}->{mtime} < ($time - 86400)) { + $self->{logger}->writeLogDebug('[core] clean external key for ' . $id); + delete $self->{identity_infos}->{$id}; + next; + } + next if ($self->{identity_infos}->{$id}->{ctime} > ($time - $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation})); + + $self->{logger}->writeLogDebug('[core] rotate external key for ' . 
pack('H*', $id)); + + ($rv, $key) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} + ); + ($rv, $iv) = gorgone::standard::library::generate_symkey(keysize => 16); + $rv = gorgone::standard::library::update_identity_attrs( + dbh => $self->{db_gorgone}, + identity => $id, + ctime => $time, + oldkey => unpack('H*', $self->{identity_infos}->{$id}->{key}), + oldiv => unpack('H*', $self->{identity_infos}->{$id}->{iv}), + key => unpack('H*', $key), + iv => unpack('H*', $iv) + ); + next if ($rv == -1); + + my $message = gorgone::standard::library::json_encode( + data => { + hostname => $self->{hostname}, + cipher => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}, + padding => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}, + key => unpack('H*', $key), + iv => unpack('H*', $iv) + } + ); + + $self->external_core_response( + message => '[KEY] ' . 
$message, + identity => $id, + cipher_infos => { + key => $self->{identity_infos}->{$id}->{key}, + iv => $self->{identity_infos}->{$id}->{iv} + } + ); + + $self->{identity_infos}->{$id}->{ctime} = $time; + $self->{identity_infos}->{$id}->{oldkey} = $self->{identity_infos}->{$id}->{key}; + $self->{identity_infos}->{$id}->{oldiv} = $self->{identity_infos}->{$id}->{iv}; + $self->{identity_infos}->{$id}->{key} = $key; + $self->{identity_infos}->{$id}->{iv} = $iv; + } +} + +sub external_decrypt_message { + my ($self, %options) = @_; + + my $message = $options{frame}->getFrame(); + + my $crypt = MIME::Base64::decode_base64($$message); + + my $keys = [ { key => $options{cipher_infos}->{key}, iv => $options{cipher_infos}->{iv} } ]; + if (defined($options{cipher_infos}->{oldkey})) { + push @$keys, { key => $options{cipher_infos}->{oldkey}, iv => $options{cipher_infos}->{oldiv} } + } + foreach my $key (@$keys) { + my $plaintext; + try { + $plaintext = $self->{external_crypt_mode}->decrypt($crypt, $key->{key}, $key->{iv}); + }; + if (defined($plaintext) && $plaintext =~ /^\[[A-Za-z0-9_\-]+?\]/) { + $options{frame}->setFrame(\$plaintext); + return 0; + } + } + + $self->{logger}->writeLogError("[core] external decrypt issue: " . ($_ ? $_ : 'no message')); + return -1; +} + +sub external_core_response { + my ($self, %options) = @_; + + my $message = $options{message}; + if (!defined($message)) { + my $response_type = defined($options{response_type}) ? $options{response_type} : 'ACK'; + my $data = gorgone::standard::library::json_encode(data => { code => $options{code}, data => $options{data} }); + # We add 'target' for 'PONG', 'SYNCLOGS'. Like that 'gorgone-proxy can get it + $message = '[' . $response_type . '] [' . (defined($options{token}) ? $options{token} : '') . '] ' . ($response_type =~ /^PONG|SYNCLOGS$/ ? '[] ' : '') . 
$data; + } + + if (defined($options{cipher_infos})) { + try { + $message = $self->{external_crypt_mode}->encrypt( + $message, + $options{cipher_infos}->{key}, + $options{cipher_infos}->{iv} + ); + } catch { + $self->{logger}->writeLogError("[core] external_core_response encrypt issue: $_"); + return undef; + }; + + $message = MIME::Base64::encode_base64($message, ''); + } + + $self->{external_socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT|ZMQ_SNDMORE); + $self->{external_socket}->send($message, ZMQ_DONTWAIT); + $self->router_external_event(); +} + +sub external_core_key_response { + my ($self, %options) = @_; + + my $data = gorgone::standard::library::json_encode( + data => { + hostname => $self->{hostname}, + cipher => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}, + padding => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}, + key => unpack('H*', $options{key}), + iv => unpack('H*', $options{iv}) + } + ); + return -1 if (!defined($data)); + + my $crypttext; + try { + $crypttext = $options{client_pubkey}->encrypt("[KEY] " . 
$data, 'v1.5'); + } catch { + $self->{logger}->writeLogError("[core] core key response encrypt issue: $_"); + return -1; + }; + + $self->{external_socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT | ZMQ_SNDMORE); + $self->{external_socket}->send(MIME::Base64::encode_base64($crypttext, ''), ZMQ_DONTWAIT); + $self->router_external_event(); + return 0; +} + +sub handshake { + my ($self, %options) = @_; + + my ($rv, $cipher_infos); + my $first_message = $options{frame}->getFrame(); + + # Test if it asks for the pubkey + if ($$first_message =~ /^\s*\[GETPUBKEY\]/) { + gorgone::standard::library::zmq_core_pubkey_response( + socket => $self->{external_socket}, + identity => $options{identity}, + pubkey => $self->{server_pubkey} + ); + $self->router_external_event(); + return 1; + } + + ($rv, $cipher_infos) = $self->is_handshake_done(identity => $options{identity}); + + if ($rv == 1) { + my $response; + + ($rv) = $self->external_decrypt_message( + frame => $options{frame}, + cipher_infos => $cipher_infos + ); + + my $message = $options{frame}->getFrame(); + if ($rv == 0 && $$message =~ /^(?:[\[a-zA-Z-_]+?\]\s+\[.*?\]|[\[a-zA-Z-_]+?\]\s*$)/) { + $self->{identity_infos}->{ $options{identity} }->{mtime} = time(); + gorgone::standard::library::update_identity_mtime(dbh => $self->{db_gorgone}, identity => $options{identity}); + return (0, $cipher_infos); + } + + # Maybe he want to redo a handshake + $rv = 0; + } + + if ($rv == 0) { + my ($client_pubkey, $key, $iv); + + # We try to uncrypt + ($rv, $client_pubkey) = gorgone::standard::library::is_client_can_connect( + privkey => $self->{server_privkey}, + message => $$first_message, + logger => $self->{logger}, + authorized_clients => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{authorized_clients} + ); + if ($rv == -1) { + $self->external_core_response( + identity => $options{identity}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => 'handshake issue' } + ); + return -1; + } + ($rv, $key) = 
gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} + ); + ($rv, $iv) = gorgone::standard::library::generate_symkey(keysize => 16); + + if (gorgone::standard::library::add_identity(dbh => $self->{db_gorgone}, identity => $options{identity}, key => $key, iv => $iv) == -1) { + $self->external_core_response( + identity => $options{identity}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => 'handshake issue' } + ); + } + + $self->{identity_infos}->{ $options{identity} } = { + ctime => time(), + mtime => time(), + key => $key, + oldkey => undef, + iv => $iv, + oldiv => undef + }; + + $rv = $self->external_core_key_response( + identity => $options{identity}, + client_pubkey => $client_pubkey, + key => $key, + iv => $iv + ); + if ($rv == -1) { + $self->external_core_response( + identity => $options{identity}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => 'handshake issue' } + ); + } + } + + return -1; +} + +sub send_message_parent { + my (%options) = @_; + + if ($options{router_type} eq 'internal') { + $gorgone->send_internal_response( + identity => $options{identity}, + response_type => $options{response_type}, + data => $options{data}, + code => $options{code}, + token => $options{token} + ); + } + if ($options{router_type} eq 'external') { + my ($rv, $cipher_infos) = $gorgone->is_handshake_done(identity => $options{identity}); + return if ($rv == 0); + $gorgone->external_core_response( + cipher_infos => $cipher_infos, + identity => $options{identity}, + response_type => $options{response_type}, + token => $options{token}, + code => $options{code}, + data => $options{data} + ); + } +} + +sub router_external_event { + my ($self, %options) = @_; + + while ($self->{external_socket}->has_pollin()) { + my ($identity, $frame) = gorgone::standard::library::zmq_read_message( + socket => $self->{external_socket}, + logger => $self->{logger} + ); + next if 
(!defined($identity)); + + my ($rv, $cipher_infos) = $self->handshake( + identity => $identity, + frame => $frame + ); + if ($rv == 0) { + my ($token, $code, $response, $response_type) = $self->message_run( + { + frame => $frame, + identity => $identity, + router_type => 'external' + } + ); + $self->external_core_response( + identity => $identity, + cipher_infos => $cipher_infos, + response_type => $response_type, + token => $token, + code => $code, + data => $response + ); + } + } +} + +sub waiting_ready_pool { + my (%options) = @_; + + my $name = $gorgone->{modules_id}->{$gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name}}; + my $method = $name->can('is_all_proxy_ready'); + + if ($method->() > 0) { + return 1; + } + + my $iteration = 10; + while ($iteration > 0) { + my $watcher_timer = $gorgone->{loop}->timer(1, 0, \&stop_ev); + $gorgone->{loop}->run(); + $iteration--; + if ($method->() > 0) { + return 1; + } + } + + return 0; +} + +sub stop_ev { + $gorgone->{loop}->break(); + $gorgone->check_exit_modules(); +} + +sub waiting_ready { + my (%options) = @_; + + if (${$options{ready}} == 1) { + return 1; + } + + my $iteration = 10; + while ($iteration > 0) { + my $watcher_timer = $gorgone->{loop}->timer(1, 0, \&stop_ev); + $gorgone->{loop}->run(); + if (${$options{ready}} == 1) { + return 1; + } + $iteration--; + } + + return 0; +} + +sub quit { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[core] Quit main process"); + + if ($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type} eq 'ipc') { + unlink($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path}); + } + + $self->{internal_socket}->close(); + if (defined($self->{external_socket})) { + $self->{external_socket}->close(); + } + + exit(0); +} + +sub check_exit_modules { + my ($self, %options) = @_; + + my $current_time = time(); + + # check key rotate + if ($self->{internal_crypt}->{enabled} == 1 && + ($current_time - 
$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key_ctime}) > $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation}) { + $self->broadcast_core_key(); + } + if (defined($self->{external_socket})) { + $self->check_external_rotate_keys(); + } + + my $count = 0; + if (time() - $self->{cb_timer_check} > 15 || $self->{stop} == 1) { + if ($self->{stop} == 1 && (!defined($self->{sigterm_last_time}) || ($current_time - $self->{sigterm_last_time}) >= 10)) { + $self->{sigterm_start_time} = time() if (!defined($self->{sigterm_start_time})); + $self->{sigterm_last_time} = time(); + foreach my $name (keys %{$self->{modules_register}}) { + $self->{modules_register}->{$name}->{gently}->(logger => $gorgone->{logger}); + } + } + + foreach my $name (keys %{$self->{modules_register}}) { + my ($count_module, $keepalive) = $self->{modules_register}->{$name}->{check}->( + gorgone => $self, + logger => $self->{logger}, + dead_childs => $self->{return_child}, + dbh => $self->{db_gorgone}, + api_endpoints => $self->{api_endpoints} + ); + + $count += $count_module; + if ($count_module == 0 && (!defined($keepalive) || $keepalive == 0)) { + $self->unload_module(package => $name); + } + } + + $self->{cb_timer_check} = time(); + # We can clean old return_child. 
+ foreach my $pid (keys %{$self->{return_child}}) { + if (($self->{cb_timer_check} - $self->{return_child}->{$pid}) > 300) { + delete $self->{return_child}->{$pid}; + } + } + } + + if ($self->{stop} == 1) { + # No childs + if ($count == 0) { + $self->quit(); + } + + # Send KILL + if (time() - $self->{sigterm_start_time} > $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout}) { + foreach my $name (keys %{$self->{modules_register}}) { + $self->{modules_register}->{$name}->{kill_internal}->(logger => $gorgone->{logger}); + } + $self->quit(); + } + } +} + +sub periodic_exec { + $gorgone->check_exit_modules(); + $gorgone->{listener}->check(); + + $gorgone->router_internal_event(); + + if (defined($gorgone->{external_socket})) { + $gorgone->router_external_event(); + } +} + +sub run { + $gorgone = shift; + + $gorgone->SUPER::run(); + $gorgone->{logger}->redirect_output(); + + $gorgone->{logger}->writeLogInfo("[core] Gorgoned started"); + $gorgone->{logger}->writeLogInfo("[core] PID $$"); + + if (gorgone::standard::library::add_history({ + dbh => $gorgone->{db_gorgone}, + code => GORGONE_STARTED, + data => { message => 'gorgoned is starting...' }, + json_encode => 1}) == -1 + ) { + $gorgone->{logger}->writeLogInfo("[core] Cannot write in history. 
We quit!!"); + exit(1); + } + + { + local $SIG{__DIE__}; + $gorgone->{zmq_context} = ZMQ::FFI->new(); + } + + $gorgone->{internal_socket} = gorgone::standard::library::create_com( + context => $gorgone->{zmq_context}, + type => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type}, + path => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path}, + zmq_type => 'ZMQ_ROUTER', + name => 'router-internal', + zmq_router_handover => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_zmq_router_handover}, + logger => $gorgone->{logger} + ); + + if (defined($gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_type}) && $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_type} ne '') { + if ($gorgone->{keys_loaded}) { + $gorgone->init_external_informations(); + + $gorgone->{external_socket} = gorgone::standard::library::create_com( + context => $gorgone->{zmq_context}, + type => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_type}, + path => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_path}, + zmq_type => 'ZMQ_ROUTER', + zmq_router_handover => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_router_handover}, + zmq_tcp_keepalive => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive}, + zmq_ipv6 => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{ipv6}, + name => 'router-external', + logger => $gorgone->{logger} + ); + } else { + $gorgone->{logger}->writeLogError("[core] Cannot create external com: no keys loaded"); + } + } + + # init all modules + foreach my $name (keys %{$gorgone->{modules_register}}) { + $gorgone->{logger}->writeLogDebug("[core] Call init function from module '$name'"); + $gorgone->{modules_register}->{$name}->{init}->( + gorgone => $gorgone, + id => $gorgone->{id}, + logger 
=> $gorgone->{logger}, + poll => $gorgone->{poll}, + external_socket => $gorgone->{external_socket}, + internal_socket => $gorgone->{internal_socket}, + dbh => $gorgone->{db_gorgone}, + api_endpoints => $gorgone->{api_endpoints} + ); + } + + $gorgone->{listener} = gorgone::class::listener->new( + gorgone => $gorgone, + logger => $gorgone->{logger} + ); + $gorgone::standard::library::listener = $gorgone->{listener}; + + $gorgone->{logger}->writeLogInfo("[core] Server accepting clients"); + $gorgone->{cb_timer_check} = time(); + + $gorgone->{loop} = new EV::Loop(); + $gorgone->{watcher_timer} = $gorgone->{loop}->timer(5, 5, \&periodic_exec); + + $gorgone->{watcher_io_internal} = $gorgone->{loop}->io($gorgone->{internal_socket}->get_fd(), EV::READ, sub { $gorgone->router_internal_event() }); + + if (defined($gorgone->{external_socket})) { + $gorgone->{watcher_io_external} = $gorgone->{loop}->io($gorgone->{external_socket}->get_fd(), EV::READ, sub { $gorgone->router_external_event() }); + } + + $gorgone->{loop}->run(); +} + +1; + +__END__ diff --git a/gorgone/gorgone/class/db.pm b/gorgone/gorgone/class/db.pm new file mode 100644 index 00000000000..847678411fd --- /dev/null +++ b/gorgone/gorgone/class/db.pm @@ -0,0 +1,388 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::db; + +use strict; +use warnings; +use DBI; + +sub new { + my ($class, %options) = @_; + my %defaults = ( + logger => undef, + db => undef, + dsn => undef, + host => "localhost", + user => undef, + password => undef, + port => 3306, + force => 0, + type => "mysql" + ); + my $self = {%defaults, %options}; + $self->{type} = 'mysql' if (!defined($self->{type})); + + # strip double-quotes + if (defined($self->{dsn})) { + $self->{dsn} =~ s/^\s*"//; + $self->{dsn} =~ s/"\s*$//; + } + + $self->{die} = defined($options{die}) ? 1 : 0; + $self->{instance} = undef; + $self->{transaction_begin} = 0; + bless $self, $class; + return $self; +} + +# Getter/Setter DB name +sub type { + my $self = shift; + if (@_) { + $self->{type} = shift; + } + return $self->{type}; +} + +sub getInstance { + my ($self) = @_; + + return $self->{instance}; +} + +# Getter/Setter DB name +sub db { + my $self = shift; + if (@_) { + $self->{db} = shift; + } + return $self->{db}; +} + +sub sameParams { + my ($self, %options) = @_; + + my $params = ''; + if (defined($self->{dsn})) { + $params = $self->{dsn}; + } else { + $params = $self->{host} . ':' . $self->{port} . ':' . $self->{db}; + } + $params .= ':' . $self->{user} . ':' . $self->{password}; + + my $paramsNew = ''; + if (defined($options{dsn})) { + $paramsNew = $options{dsn}; + } else { + $paramsNew = $options{host} . ':' . $options{port} . ':' . $options{db}; + } + $params .= ':' . $options{user} . ':' . $options{password}; + + return ($paramsNew eq $params) ? 
1 : 0; +} + +# Getter/Setter DB host +sub host { + my $self = shift; + if (@_) { + $self->{host} = shift; + } + return $self->{host}; +} + +# Getter/Setter DB port +sub port { + my $self = shift; + if (@_) { + $self->{port} = shift; + } + return $self->{port}; +} + +# Getter/Setter DB user +sub user { + my $self = shift; + if (@_) { + $self->{user} = shift; + } + return $self->{user}; +} + +# Getter/Setter DB force +# force 2 should'nt be used with transaction +sub force { + my $self = shift; + if (@_) { + $self->{force} = shift; + } + return $self->{force}; +} + +# Getter/Setter DB password +sub password { + my $self = shift; + if (@_) { + $self->{password} = shift; + } + return $self->{password}; +} + +sub last_insert_id { + my $self = shift; + return $self->{instance}->last_insert_id(undef, undef, undef, undef); +} + +sub set_inactive_destroy { + my $self = shift; + + if (defined($self->{instance})) { + $self->{instance}->{InactiveDestroy} = 1; + } +} + +sub transaction_mode { + my ($self, $mode) = @_; + + my $status; + if (!defined($self->{instance})) { + $status = $self->connect(); + return -1 if ($status == -1); + } + + if ($mode) { + $status = $self->{instance}->begin_work(); + if (!$status) { + $self->error($self->{instance}->errstr, 'begin work'); + return -1; + } + $self->{transaction_begin} = 1; + } else { + $self->{transaction_begin} = 0; + $self->{instance}->{AutoCommit} = 1; + } + + return 0; +} + +sub commit { + my ($self) = @_; + + # Commit only if autocommit isn't enabled + if ($self->{instance}->{AutoCommit} != 1) { + if (!defined($self->{instance})) { + $self->{transaction_begin} = 0; + return -1; + } + + my $status = $self->{instance}->commit(); + $self->{transaction_begin} = 0; + + if (!$status) { + $self->error($self->{instance}->errstr, 'commit'); + return -1; + } + } + + return 0; +} + +sub rollback { + my ($self) = @_; + + $self->{instance}->rollback() if (defined($self->{instance})); + $self->{transaction_begin} = 0; +} + +sub kill { + my 
$self = shift; + + if (defined($self->{instance})) { + $self->{logger}->writeLogInfo("KILL QUERY\n"); + my $rv = $self->{instance}->do("KILL QUERY " . $self->{instance}->{'mysql_thread_id'}); + if (!$rv) { + my ($package, $filename, $line) = caller; + $self->{logger}->writeLogError("MySQL error : " . $self->{instance}->errstr . " (caller: $package:$filename:$line)"); + } + } +} + +# Connection initializer +sub connect() { + my $self = shift; + my ($status, $count) = (0, 0); + + while (1) { + $self->{port} = 3306 if (!defined($self->{port}) && $self->{type} eq 'mysql'); + if (defined($self->{dsn})) { + $self->{instance} = DBI->connect( + "DBI:".$self->{dsn}, $self->{user}, $self->{password}, + { + RaiseError => 0, + PrintError => 0, + AutoCommit => 1, + mysql_enable_utf8 => 1 + } + ); + } elsif ($self->{type} =~ /SQLite/i) { + $self->{instance} = DBI->connect( + "DBI:".$self->{type} + .":".$self->{db}, + $self->{user}, + $self->{password}, + { RaiseError => 0, PrintError => 0, AutoCommit => 1, sqlite_unicode => 1 } + ); + } else { + $self->{instance} = DBI->connect( + "DBI:".$self->{type} + .":".$self->{db} + .":".$self->{host} + .":".$self->{port}, + $self->{user}, + $self->{password}, + { + RaiseError => 0, + PrintError => 0, + AutoCommit => 1, + mysql_enable_utf8 => 1 + } + ); + } + if (defined($self->{instance})) { + last; + } + + my ($package, $filename, $line) = caller; + $self->{logger}->writeLogError("MySQL error : cannot connect to database '" . + (defined($self->{db}) ? $self->{db} : $self->{dsn}) . "': " . $DBI::errstr . " (caller: $package:$filename:$line) (try: $count)" + ); + if ($self->{force} == 0 || ($self->{force} == 2 && $count == 1)) { + $self->{lastError} = "MySQL error : cannot connect to database '" . + (defined($self->{db}) ? $self->{db} : $self->{dsn}) . "': " . 
$DBI::errstr; + $status = -1; + last; + } + sleep(1); + $count++; + } + + return $status; +} + +# Destroy connection +sub disconnect { + my $self = shift; + my $instance = $self->{instance}; + if (defined($instance)) { + $instance->disconnect; + $self->{instance} = undef; + } +} + +sub do { + my ($self, $query) = @_; + + if (!defined($self->{instance})) { + if ($self->connect() == -1) { + $self->{logger}->writeLogError("Cannot connect to database"); + return -1; + } + } + my $numrows = $self->{instance}->do($query); + die $self->{instance}->errstr if !defined $numrows; + return $numrows; +} + +sub error { + my ($self, $error, $query) = @_; + my ($package, $filename, $line) = caller 1; + + chomp($query); + $self->{lastError} = "SQL error: $error (caller: $package:$filename:$line) +Query: $query +"; + $self->{logger}->writeLogError($error); + if ($self->{transaction_begin} == 1) { + $self->rollback(); + } + $self->disconnect(); + $self->{instance} = undef; +} + +sub prepare { + my ($self, $query) = @_; + + return $self->query({ query => $query, prepare_only => 1 }); +} + +sub query { + my ($self) = shift; + my ($status, $count) = (0, -1); + my $statement_handle; + + while (1) { + if (!defined($self->{instance})) { + $status = $self->connect(); + if ($status == -1) { + last; + } + } + + $count++; + $statement_handle = $self->{instance}->prepare($_[0]->{query}); + if (!defined($statement_handle)) { + $self->error($self->{instance}->errstr, $_[0]->{query}); + $status = -1; + last if ($self->{force} == 0 || ($self->{force} == 2 && $count == 1)); + sleep(1); + next; + } + + if (defined($_[0]->{prepare_only})) { + return $statement_handle if ($self->{die} == 1); + return ($status, $statement_handle); + } + + my $rv; + if (defined($_[0]->{bind_values}) && scalar(@{$_[0]->{bind_values}}) > 0) { + $rv = $statement_handle->execute(@{$_[0]->{bind_values}}); + } else { + $rv = $statement_handle->execute(); + } + if (!$rv) { + $self->error($statement_handle->errstr, 
$_[0]->{query}); + $status = -1; + last if ($self->{force} == 0 || ($self->{force} == 2 && $count == 1)); + sleep(1); + next; + } + + last; + } + + if ($self->{die} == 1) { + die $self->{lastError} if ($status == -1); + return $statement_handle; + } + + return ($status, $statement_handle); +} + +1; diff --git a/gorgone/gorgone/class/fingerprint/backend/sql.pm b/gorgone/gorgone/class/fingerprint/backend/sql.pm new file mode 100644 index 00000000000..a36542cd7c9 --- /dev/null +++ b/gorgone/gorgone/class/fingerprint/backend/sql.pm @@ -0,0 +1,85 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::fingerprint::backend::sql; + +use base qw(gorgone::class::db); + +use strict; +use warnings; + +sub new { + my ($class, %options) = @_; + my $self = $class->SUPER::new( + logger => $options{logger}, + type => defined($options{config}->{gorgone_db_type}) && $options{config}->{gorgone_db_type} ne '' ? + $options{config}->{gorgone_db_type} : $options{config_core}->{gorgone_db_type}, + db => defined($options{config}->{gorgone_db_name}) && $options{config}->{gorgone_db_name} ne '' ? 
+ $options{config}->{gorgone_db_name} : $options{config_core}->{gorgone_db_name}, + host => defined($options{config}->{gorgone_db_host}) && $options{config}->{gorgone_db_host} ne '' ? + $options{config}->{gorgone_db_host} : $options{config_core}->{gorgone_db_host}, + port => defined($options{config}->{gorgone_db_port}) && $options{config}->{gorgone_db_port} ne '' ? + $options{config}->{gorgone_db_port} : $options{config_core}->{gorgone_db_port}, + user => defined($options{config}->{gorgone_db_user}) && $options{config}->{gorgone_db_user} ne '' ? + $options{config}->{gorgone_db_user} : $options{config_core}->{gorgone_db_user}, + password => defined($options{config}->{gorgone_db_password}) && $options{config}->{gorgone_db_password} ne '' ? + $options{config}->{gorgone_db_password} : $options{config_core}->{gorgone_db_password}, + force => 2 + ); + bless $self, $class; + + $self->{fingerprint_mode} = $options{config_core}->{fingerprint_mode}; + + return $self; +} + +sub check_fingerprint { + my ($self, %options) = @_; + + return 1 if ($self->{fingerprint_mode} eq 'always'); + + my ($status, $sth) = $self->query({ + query => "SELECT `id`, `fingerprint` FROM gorgone_target_fingerprint WHERE target = ? ORDER BY id ASC LIMIT 1", + bind_values => [$options{target}] + }); + return (0, "cannot get fingerprint for target '$options{target}'") if ($status == -1); + my $row = $sth->fetchrow_hashref(); + + if (!defined($row)) { + if ($self->{fingerprint_mode} eq 'strict') { + return (0, "no fingerprint found for target '" . $options{target} . 
"' [strict mode] [fingerprint: $options{fingerprint}]"); + } + ($status) = $self->query({ + query => "INSERT INTO gorgone_target_fingerprint (`target`, `fingerprint`) VALUES (?, ?)", + bind_values => [$options{target}, $options{fingerprint}] + }); + return (0, "cannot insert target '$options{target}' fingerprint") if ($status == -1); + return 1; + } + + if ($row->{fingerprint} ne $options{fingerprint}) { + return (0, "fingerprint changed for target '" . $options{target} . "' [id: $row->{id}] [old fingerprint: $row->{fingerprint}] [new fingerprint: $options{fingerprint}]"); + } + return 1; +} + +1; + +__END__ diff --git a/gorgone/gorgone/class/frame.pm b/gorgone/gorgone/class/frame.pm new file mode 100644 index 00000000000..14688e2da23 --- /dev/null +++ b/gorgone/gorgone/class/frame.pm @@ -0,0 +1,190 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::class::frame; + +use strict; +use warnings; + +use JSON::XS; +use Try::Tiny; +use MIME::Base64; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + if (defined($options{rawData})) { + $self->setRawData($options{rawData}); + } + if (defined($options{data})) { + $self->setData($options{data}); + } + + return $self; +} + +sub setData { + my ($self) = shift; + + $self->{data} = $_[0]; +} + +sub setRawData { + my ($self) = shift; + + $self->{rawData} = $_[0]; +} + +sub setFrame { + my ($self) = shift; + + $self->{frame} = $_[0]; +} + +sub getFrame { + my ($self) = shift; + + return $self->{frame}; +} + +sub getLastError { + my ($self) = shift; + + return $self->{lastError}; +} + +sub decrypt { + my ($self, $options) = (shift, shift); + + my $plaintext; + try { + $plaintext = $options->{cipher}->decrypt(MIME::Base64::decode_base64(${$self->{frame}}), $options->{key}, $options->{iv}); + }; + if (defined($plaintext) && $plaintext =~ /^\[[A-Za-z0-9_\-]+?\]/) { + $self->{frame} = \$plaintext; + return 0; + } + + $self->{lastError} = $_ ? 
$_ : 'no message'; + return 1; +} + +sub parse { + my ($self, $options) = (shift, shift); + + if (${$self->{frame}} =~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+/g) { + $self->{action} = $1; + $self->{token} = $2; + $self->{target} = $3; + + if (defined($options) && defined($options->{decode})) { + try { + $self->{data} = JSON::XS->new->decode(substr(${$self->{frame}}, pos(${$self->{frame}}))); + } catch { + $self->{lastError} = $_; + return 1; + } + } else { + $self->{rawData} = substr(${$self->{frame}}, pos(${$self->{frame}})); + } + + if (defined($options) && defined($options->{releaseFrame})) { + $self->{frame} = undef; + } + + return 0; + } + + return 1; +} + +sub getData { + my ($self) = shift; + + if (!defined($self->{data})) { + try { + $self->{data} = JSON::XS->new->decode($self->{rawData}); + } catch { + $self->{lastError} = $_; + return undef; + } + } + + return $self->{data}; +} + +sub decodeData { + my ($self) = shift; + + if (!defined($self->{data})) { + try { + $self->{data} = JSON::XS->new->decode($self->{rawData}); + } catch { + $self->{lastError} = $_; + return undef; + } + } + + return $self->{data}; +} + +sub getRawData { + my ($self) = shift; + + if (!defined($self->{rawData})) { + try { + $self->{rawData} = JSON::XS->new->encode($self->{data}); + } catch { + $self->{lastError} = $_; + return undef; + } + } + return \$self->{rawData}; +} + +sub getAction { + my ($self) = shift; + + return $self->{action}; +} + +sub getToken { + my ($self) = shift; + + return $self->{token}; +} + +sub getTarget { + my ($self) = shift; + + return $self->{target}; +} + +sub DESTROY { + my ($self) = shift; + + $self->{frame} = undef; + $self->{data} = undef; + $self->{rawData} = undef; +} + +1; diff --git a/gorgone/gorgone/class/http/backend/curl.pm b/gorgone/gorgone/class/http/backend/curl.pm new file mode 100644 index 00000000000..f2801bafd45 --- /dev/null +++ b/gorgone/gorgone/class/http/backend/curl.pm @@ -0,0 +1,450 @@ +# +# Copyright 2019 Centreon 
(http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::http::backend::curl;

use strict;
use warnings;
use URI;
use gorgone::standard::misc;

sub new {
    my ($class, %options) = @_;
    my $self = {};
    bless $self, $class;

    $self->{logger} = $options{logger};

    return $self;
}

# Load Net::Curl::Easy and the constants helper, and normalize curl_opt.
# Returns 0 on success, 1 when a required module cannot be loaded.
sub check_options {
    my ($self, %options) = @_;

    if (gorgone::standard::misc::mymodule_load(
        logger => $self->{logger}, module => 'Net::Curl::Easy',
        error_msg => "Cannot load module 'Net::Curl::Easy'."
    ) == 1) {
        return 1;
    }
    if (gorgone::standard::misc::mymodule_load(
        logger => $self->{logger}, module => 'gorgone::class::http::backend::curlconstants',
        error_msg => "Cannot load module 'gorgone::class::http::backend::curlconstants'."
    ) == 1) {
        return 1;
    }
    $self->{constant_cb} = \&gorgone::class::http::backend::curlconstants::get_constant_value;

    if (!defined($options{request}->{curl_opt})) {
        $options{request}->{curl_opt} = [];
    }

    # explicit success, consistent with the error paths above (the
    # original fell off the end of the sub)
    return 0;
}

# HTTP status texts, including two custom codes (450/451) used by
# request() to report timeout / connection failures.
my $http_code_explained = {
    100 => 'Continue',
    101 => 'Switching Protocols',
    200 => 'OK',
    201 => 'Created',
    202 => 'Accepted',
    203 => 'Non-Authoritative Information',
    204 => 'No Content',
    205 => 'Reset Content',
    206 => 'Partial Content',
    300 => 'Multiple Choices',
    301 => 'Moved Permanently',
    302 => 'Found',
    303 => 'See Other',
    304 => 'Not Modified',
    305 => 'Use Proxy',
    306 => '(Unused)',
    307 => 'Temporary Redirect',
    400 => 'Bad Request',
    401 => 'Unauthorized',
    402 => 'Payment Required',
    403 => 'Forbidden',
    404 => 'Not Found',
    405 => 'Method Not Allowed',
    406 => 'Not Acceptable',
    407 => 'Proxy Authentication Required',
    408 => 'Request Timeout',
    409 => 'Conflict',
    410 => 'Gone',
    411 => 'Length Required',
    412 => 'Precondition Failed',
    413 => 'Request Entity Too Large',
    414 => 'Request-URI Too Long',
    415 => 'Unsupported Media Type',
    416 => 'Requested Range Not Satisfiable',
    417 => 'Expectation Failed',
    450 => 'Timeout reached', # custom code
    451 => 'Failed Connection Host', # custom code
    500 => 'Internal Server Error',
    501 => 'Not Implemented',
    502 => 'Bad Gateway',
    503 => 'Service Unavailable',
    504 => 'Gateway Timeout',
    505 => 'HTTP Version Not Supported'
};

# CURLOPT_DEBUGFUNCTION callback: routes libcurl trace lines to the
# logger (SSL payloads are skipped). Must return 0 per the curl contract.
sub cb_debug {
    my ($easy, $type, $data, $uservar) = @_;

    chomp $data;
    $data =~ s/\r//mg;

    my $msg = '';
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_TEXT')) {
        $msg = sprintf("== Info: %s", $data);
    }
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_HEADER_OUT')) {
        $msg = sprintf("=> Send header: %s", $data);
    }
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_DATA_OUT')) {
        $msg = sprintf("=> Send data: %s", $data);
    }
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_SSL_DATA_OUT')) {
        #$msg = sprintf("=> Send SSL data: %s", $data);
        return 0;
    }
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_HEADER_IN')) {
        $msg = sprintf("=> Recv header: %s", $data);
    }
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_DATA_IN')) {
        $msg = sprintf("=> Recv data: %s", $data);
    }
    if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_SSL_DATA_IN')) {
        #$msg = sprintf("=> Recv SSL data: %s", $data);
        return 0;
    }

    $uservar->{logger}->writeLogDebug($msg);
    return 0;
}

# setopt wrapper: a failing option is logged but never fatal.
sub curl_setopt {
    my ($self, %options) = @_;

    eval {
        $self->{curl_easy}->setopt($options{option}, $options{parameter});
    };
    if ($@) {
        $self->{logger}->writeLogError("curl setopt error: '" . $@ . "'.");
    }
}

# Configure the HTTP verb and request body. Resets verb-related options
# first so the handle can be reused across requests.
sub set_method {
    my ($self, %options) = @_;

    $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CUSTOMREQUEST'), parameter => undef);
    $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POSTFIELDS'), parameter => undef);
    $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPGET'), parameter => 1);

    if ($options{request}->{method} eq 'GET') {
        return ;
    }

    if ($options{content_type_forced} == 1) {
        $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POSTFIELDS'), parameter => $options{request}->{query_form_post})
            if (defined($options{request}->{query_form_post}) && $options{request}->{query_form_post} ne '');
    } elsif (defined($options{request}->{post_params})) {
        my $uri_post = URI->new();
        $uri_post->query_form($options{request}->{post_params});
        push @{$options{headers}}, 'Content-Type: application/x-www-form-urlencoded';
        $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POSTFIELDS'), parameter => $uri_post->query);
    }

    if ($options{request}->{method} eq 'POST') {
        $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POST'), parameter => 1);
+ } + if ($options{request}->{method} eq 'PUT') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CUSTOMREQUEST'), parameter => $options{request}->{method}); + } + if ($options{request}->{method} eq 'DELETE') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CUSTOMREQUEST'), parameter => $options{request}->{method}); + } +} + +sub set_auth { + my ($self, %options) = @_; + + if (defined($options{request}->{credentials})) { + if (defined($options{request}->{basic})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPAUTH'), parameter => $self->{constant_cb}->(name => 'CURLAUTH_BASIC')); + } elsif (defined($options{request}->{ntlmv2})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPAUTH'), parameter => $self->{constant_cb}->(name => 'CURLAUTH_NTLM')); + } else { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPAUTH'), parameter => $self->{constant_cb}->(name => 'CURLAUTH_ANY')); + } + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_USERPWD'), parameter => $options{request}->{username} . ':' . 
$options{request}->{password}); + } + + if (defined($options{request}->{cert_file}) && $options{request}->{cert_file} ne '') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLCERT'), parameter => $options{request}->{cert_file}); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLKEY'), parameter => $options{request}->{key_file}); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_KEYPASSWD'), parameter => $options{request}->{cert_pwd}); + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLCERTTYPE'), parameter => "PEM"); + if (defined($options{request}->{cert_pkcs12})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLCERTTYPE'), parameter => "P12"); + } +} + +sub set_proxy { + my ($self, %options) = @_; + + if (defined($options{request}->{proxyurl}) && $options{request}->{proxyurl} ne '') { + if ($options{request}->{proxyurl} =~ /^(?:http|https):\/\/(.*?):(.*?)@/) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_PROXYUSERNAME'), parameter => $1); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_PROXYPASSWORD'), parameter => $2); + $options{request}->{proxyurl} =~ s/\/\/$1:$2@//; + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_PROXY'), parameter => $options{request}->{proxyurl}); + } + + if (defined($options{request}->{proxypac}) && $options{request}->{proxypac} ne '') { + $self->{logger}->writeLogError('Unsupported proxypac option'); + } +} + +sub set_extra_curl_opt { + my ($self, %options) = @_; + + my $fields = { key => '', value => '' }; + foreach (@{$options{request}->{curl_opt}}) { + ($fields->{key}, $fields->{value}) = split /=>/; + foreach my $label ('key', 'value') { + $fields->{$label} = gorgone::standard::misc::trim($fields->{$label}); + if ($fields->{$label} =~ /^CURLOPT|CURL/) { + $fields->{$label} = $self->{constant_cb}->(name => $fields->{$label}); + } + } 
+ + $self->curl_setopt(option => $fields->{key}, parameter => $fields->{value}); + } +} + +sub cb_get_header { + my ($easy, $header, $uservar) = @_; + + $header =~ s/[\r\n]//g; + if ($header =~ /^[\r\n]*$/) { + $uservar->{nheaders}++; + } else { + $uservar->{response_headers}->[$uservar->{nheaders}] = {} + if (!defined($uservar->{response_headers}->[$uservar->{nheaders}])); + if ($header =~ /^(\S(?:.*?))\s*:\s*(.*)/) { + my $header_name = lc($1); + $uservar->{response_headers}->[$uservar->{nheaders}]->{$header_name} = [] + if (!defined($uservar->{response_headers}->[$uservar->{nheaders}]->{$header_name})); + push @{$uservar->{response_headers}->[$uservar->{nheaders}]->{$header_name}}, $2; + } else { + $uservar->{response_headers}->[$uservar->{nheaders}]->{response_line} = $header; + } + } + + return length($_[1]); +} + +sub request { + my ($self, %options) = @_; + + if (!defined($self->{curl_easy})) { + $self->{curl_easy} = Net::Curl::Easy->new(); + } + + if ($self->{logger}->is_debug()) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_DEBUGFUNCTION'), parameter => \&cb_debug); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_DEBUGDATA'), parameter => $self); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_VERBOSE'), parameter => 1); + } + + if (defined($options{request}->{timeout})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_TIMEOUT'), parameter => $options{request}->{timeout}); + } + if (defined($options{request}->{cookies_file})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_COOKIEFILE'), parameter => $options{request}->{cookies_file}); + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_FOLLOWLOCATION'), parameter => 1); + if (defined($options{request}->{no_follow})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_FOLLOWLOCATION'), parameter => 0); + } + + my $url; + if 
(defined($options{request}->{full_url})) { + $url = $options{request}->{full_url}; + } elsif (defined($options{request}->{port}) && $options{request}->{port} =~ /^[0-9]+$/) { + $url = $options{request}->{proto}. "://" . $options{request}->{hostname} . ':' . $options{request}->{port} . $options{request}->{url_path}; + } else { + $url = $options{request}->{proto}. "://" . $options{request}->{hostname} . $options{request}->{url_path}; + } + + if (defined($options{request}->{http_peer_addr}) && $options{request}->{http_peer_addr} ne '') { + $url =~ /^(?:http|https):\/\/(.*?)(\/|\:|$)/; + $self->{curl_easy}->setopt( + $self->{constant_cb}->(name => 'CURLOPT_RESOLVE'), + [$1 . ':' . $options{request}->{port_force} . ':' . $options{request}->{http_peer_addr}] + ); + } + + my $uri = URI->new($url); + if (defined($options{request}->{get_params})) { + $uri->query_form($options{request}->{get_params}); + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_URL'), parameter => $uri); + + my $headers = []; + my $content_type_forced = 0; + foreach my $key (keys %{$options{request}->{headers}}) { + push @$headers, $key . ':' . 
$options{request}->{headers}->{$key}; + if ($key =~ /content-type/i) { + $content_type_forced = 1; + } + } + + $self->set_method(%options, content_type_forced => $content_type_forced, headers => $headers); + + if (scalar(@$headers) > 0) { + $self->{curl_easy}->setopt($self->{constant_cb}->(name => 'CURLOPT_HTTPHEADER'), $headers); + } + + if (defined($options{request}->{cacert_file}) && $options{request}->{cacert_file} ne '') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CAINFO'), parameter => $options{request}->{cacert_file}); + } + + $self->set_auth(%options); + $self->set_proxy(%options); + $self->set_extra_curl_opt(%options); + + $self->{response_body} = ''; + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_FILE'), parameter => \$self->{response_body}); + $self->{nheaders} = 0; + $self->{response_headers} = [{}]; + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HEADERDATA'), parameter => $self); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HEADERFUNCTION'), parameter => \&cb_get_header); + + eval { + $SIG{__DIE__} = sub {}; + + $self->{curl_easy}->perform(); + }; + if ($@) { + my $err = $@; + if (ref($@) eq "Net::Curl::Easy::Code") { + my $num = $@; + if ($num == $self->{constant_cb}->(name => 'CURLE_OPERATION_TIMEDOUT')) { + $self->{response_code} = 450; + } elsif ($num == $self->{constant_cb}->(name => 'CURLE_COULDNT_CONNECT')) { + $self->{response_code} = 451; + } + } + + if (!defined($self->{response_code})) { + $self->{logger}->writeLogError('curl perform error : ' . 
$err); + } + + return 1; + } + + $self->{response_code} = $self->{curl_easy}->getinfo($self->{constant_cb}->(name => 'CURLINFO_RESPONSE_CODE')); + + return (0, $self->{response_body}); +} + +sub get_headers { + my ($self, %options) = @_; + + my $headers = ''; + foreach (keys %{$self->{response_headers}->[$options{nheader}]}) { + next if (/response_line/); + foreach my $value (@{$self->{response_headers}->[$options{nheader}]->{$_}}) { + $headers .= "$_: " . $value . "\n"; + } + } + + return $headers; +} + +sub get_first_header { + my ($self, %options) = @_; + + if (!defined($options{name})) { + return $self->get_headers(nheader => 0); + } + + return undef + if (!defined($self->{response_headers}->[0]->{ lc($options{name}) })); + return wantarray ? @{$self->{response_headers}->[0]->{ lc($options{name}) }} : $self->{response_headers}->[0]->{ lc($options{name}) }->[0]; +} + +sub get_header { + my ($self, %options) = @_; + + if (!defined($options{name})) { + return $self->get_headers(nheader => -1); + } + + return undef + if (!defined($self->{response_headers}->[-1]->{ lc($options{name}) })); + return wantarray ? @{$self->{response_headers}->[-1]->{ lc($options{name}) }} : $self->{response_headers}->[-1]->{ lc($options{name}) }->[0]; +} + +sub get_code { + my ($self, %options) = @_; + + return $self->{response_code}; +} + +sub get_message { + my ($self, %options) = @_; + + return $http_code_explained->{$self->{response_code}}; +} + +1; + +__END__ + +=head1 NAME + +HTTP Curl backend layer. + +=head1 SYNOPSIS + +HTTP Curl backend layer. + +=head1 BACKEND CURL OPTIONS + +=over 8 + +=item B<--curl-opt> + +Set CURL Options (--curl-opt="CURLOPT_SSL_VERIFYPEER => 0" --curl-opt="CURLOPT_SSLVERSION => CURL_SSLVERSION_TLSv1_1" ). + +=back + +=head1 DESCRIPTION + +B. 

=cut
diff --git a/gorgone/gorgone/class/http/backend/curlconstants.pm b/gorgone/gorgone/class/http/backend/curlconstants.pm
new file mode 100644
index 00000000000..41045c38bf0
--- /dev/null
+++ b/gorgone/gorgone/class/http/backend/curlconstants.pm
@@ -0,0 +1,33 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::http::backend::curlconstants;

use strict;
use warnings;
use Net::Curl::Easy qw(:constants);

# Resolve a Net::Curl::Easy constant (e.g. 'CURLOPT_URL') by name.
# Returns undef for an unknown or malformed name.
sub get_constant_value {
    my (%options) = @_;

    # guard the string-eval: constant names are plain identifiers, so
    # reject anything else and no caller-supplied code can ever reach eval
    return undef if (!defined($options{name}) || $options{name} !~ /^[A-Za-z0-9_]+$/);
    return eval $options{name};
}

1;
diff --git a/gorgone/gorgone/class/http/backend/lwp.pm b/gorgone/gorgone/class/http/backend/lwp.pm
new file mode 100644
index 00000000000..f396a35093d
--- /dev/null
+++ b/gorgone/gorgone/class/http/backend/lwp.pm
@@ -0,0 +1,299 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::http::backend::lwp;

use strict;
use warnings;
use gorgone::class::http::backend::useragent;
use URI;
use IO::Socket::SSL;
use gorgone::standard::misc;

sub new {
    my ($class, %options) = @_;
    my $self = {};
    bless $self, $class;

    $self->{logger} = $options{logger};
    $self->{ua} = undef;
    $self->{debug_handlers} = 0;

    return $self;
}

# Build the IO::Socket::SSL context string from the ssl/cert related
# request options; the string is later eval'ed in request().
sub check_options {
    my ($self, %options) = @_;

    $self->{ssl_context} = '';
    if (!defined($options{request}->{ssl_opt})) {
        $options{request}->{ssl_opt} = [];
    }
    if (defined($options{request}->{ssl}) && $options{request}->{ssl} ne '') {
        push @{$options{request}->{ssl_opt}}, 'SSL_version => ' . $options{request}->{ssl};
    }
    if (defined($options{request}->{cert_file}) && !defined($options{request}->{cert_pkcs12})) {
        push @{$options{request}->{ssl_opt}}, 'SSL_use_cert => 1';
        push @{$options{request}->{ssl_opt}}, 'SSL_cert_file => "' . $options{request}->{cert_file} . '"';
        push @{$options{request}->{ssl_opt}}, 'SSL_key_file => "' . $options{request}->{key_file} . '"'
            if (defined($options{request}->{key_file}));
        push @{$options{request}->{ssl_opt}}, 'SSL_ca_file => "' . $options{request}->{cacert_file} . '"'
            if (defined($options{request}->{cacert_file}));
    }
    my $append = '';
    foreach (@{$options{request}->{ssl_opt}}) {
        if ($_ ne '') {
            $self->{ssl_context} .= $append . $_;
            $append = ', ';
        }
    }
}

# Configure the UA proxy, either from a proxy pac (url or local file) or
# from a direct proxy url. Returns 1 on error.
sub set_proxy {
    my ($self, %options) = @_;

    if (defined($options{request}->{proxypac}) && $options{request}->{proxypac} ne '') {
        if (gorgone::standard::misc::mymodule_load(
            logger => $self->{logger}, module => 'HTTP::ProxyPAC',
            error_msg => "Cannot load module 'HTTP::ProxyPAC'."
        ) == 1) {
            return 1;
        }
        my ($pac, $pac_uri);
        eval {
            if ($options{request}->{proxypac} =~ /^(http|https):\/\//) {
                $pac_uri = URI->new($options{request}->{proxypac});
                $pac = HTTP::ProxyPAC->new($pac_uri);
            } else {
                $pac = HTTP::ProxyPAC->new($options{request}->{proxypac});
            }
        };
        if ($@) {
            $self->{logger}->writeLogError('issue to load proxypac: ' . $@);
            return 1;
        }
        my $res = $pac->find_proxy($options{url});
        if (defined($res->direct) && $res->direct != 1) {
            my $proxy_uri = URI->new($res->proxy);
            # $pac_uri is only set when the pac came from an url, not from a
            # local file - guard it before calling userinfo() on it
            $proxy_uri->userinfo($pac_uri->userinfo) if (defined($pac_uri) && defined($pac_uri->userinfo));
            $self->{ua}->proxy(['http', 'https'], $proxy_uri->as_string);
        }
    }
    if (defined($options{request}->{proxyurl}) && $options{request}->{proxyurl} ne '') {
        $self->{ua}->proxy(['http', 'https'], $options{request}->{proxyurl});
    }
}

# Perform the HTTP request. Returns (0, body) on success, 1 on failure.
sub request {
    my ($self, %options) = @_;

    my $request_options = $options{request};
    if (!defined($self->{ua})) {
        # instantiate our own useragent subclass - the original referenced
        # centreon::plugins::backend::http::useragent, a namespace that does
        # not exist in gorgone (the correct class is loaded at the top of
        # this file)
        $self->{ua} = gorgone::class::http::backend::useragent->new(
            keep_alive => 1, protocols_allowed => ['http', 'https'], timeout => $request_options->{timeout},
            credentials => $request_options->{credentials}, username => $request_options->{username}, password => $request_options->{password});
        if (defined($request_options->{cookies_file})) {
            if (gorgone::standard::misc::mymodule_load(
                logger => $self->{logger}, module => 'HTTP::Cookies',
                error_msg => "Cannot load module 'HTTP::Cookies'."
            ) == 1) {
                return 1;
            }
            # autosave: cookies are written back to the file on destruction
            $self->{ua}->cookie_jar(HTTP::Cookies->new(file => $request_options->{cookies_file},
                autosave => 1));
        }
    }

    # install request/response dump handlers once per UA instance
    if ($self->{logger}->is_debug() && $self->{debug_handlers} == 0) {
        $self->{debug_handlers} = 1;
        $self->{ua}->add_handler("request_send", sub {
            my ($response, $ua, $handler) = @_;

            $self->{logger}->writeLogDebug("======> request send");
            $self->{logger}->writeLogDebug($response->as_string);
            return ;
        });
        $self->{ua}->add_handler("response_done", sub {
            my ($response, $ua, $handler) = @_;

            $self->{logger}->writeLogDebug("======> response done");
            $self->{logger}->writeLogDebug($response->as_string);
            return ;
        });
    }

    if (defined($request_options->{no_follow})) {
        $self->{ua}->requests_redirectable(undef);
    } else {
        $self->{ua}->requests_redirectable([ 'GET', 'HEAD', 'POST' ]);
    }
    if (defined($request_options->{http_peer_addr})) {
        # NOTE(review): this pushes on every request, so repeated calls
        # accumulate duplicate PeerAddr entries in the global list - confirm
        # whether request() is only invoked once per peer-addr setup
        push @LWP::Protocol::http::EXTRA_SOCK_OPTS, PeerAddr => $request_options->{http_peer_addr};
    }

    # build the target url: explicit full_url wins, otherwise
    # proto://hostname[:port]url_path
    my ($req, $url);
    if (defined($request_options->{full_url})) {
        $url = $request_options->{full_url};
    } elsif (defined($request_options->{port}) && $request_options->{port} =~ /^[0-9]+$/) {
        $url = $request_options->{proto}. "://" . $request_options->{hostname} . ':' . $request_options->{port} . $request_options->{url_path};
    } else {
        $url = $request_options->{proto}. "://" . $request_options->{hostname} . $request_options->{url_path};
    }

    my $uri = URI->new($url);
    if (defined($request_options->{get_params})) {
        $uri->query_form($request_options->{get_params});
    }
    $req = HTTP::Request->new($request_options->{method}, $uri);

    # a caller-supplied Content-Type header switches POST to a raw body
    # (query_form_post) instead of form-encoded post_params
    my $content_type_forced;
    foreach my $key (keys %{$request_options->{headers}}) {
        if ($key !~ /content-type/i) {
            $req->header($key => $request_options->{headers}->{$key});
        } else {
            $content_type_forced = $request_options->{headers}->{$key};
        }
    }

    if ($request_options->{method} eq 'POST') {
        if (defined($content_type_forced)) {
            $req->content_type($content_type_forced);
            $req->content($request_options->{query_form_post});
        } else {
            my $uri_post = URI->new();
            if (defined($request_options->{post_params})) {
                $uri_post->query_form($request_options->{post_params});
            }
            $req->content_type('application/x-www-form-urlencoded');
            $req->content($uri_post->query);
        }
    }

    if (defined($request_options->{credentials}) && defined($request_options->{ntlmv2})) {
        if (gorgone::standard::misc::mymodule_load(
            logger => $self->{logger}, module => 'Authen::NTLM',
            error_msg => "Cannot load module 'Authen::NTLM'."
        ) == 1) {
            return 1;
        }
        Authen::NTLM::ntlmv2(1);
    }

    if (defined($request_options->{credentials}) && defined($request_options->{basic})) {
        $req->authorization_basic($request_options->{username}, $request_options->{password});
    }

    $self->set_proxy(request => $request_options, url => $url);

    if (defined($request_options->{cert_pkcs12}) && $request_options->{cert_file} ne '' && $request_options->{cert_pwd} ne '') {
        # Net::SSL reads the pkcs12 material from these environment variables
        eval "use Net::SSL"; die $@ if $@;
        $ENV{HTTPS_PKCS12_FILE} = $request_options->{cert_file};
        $ENV{HTTPS_PKCS12_PASSWORD} = $request_options->{cert_pwd};
    }

    if (defined($self->{ssl_context}) && $self->{ssl_context} ne '') {
        # ssl_context was assembled (and is eval'ed) as perl source by
        # check_options(); it becomes the process-wide default SSL context
        my $context = new IO::Socket::SSL::SSL_Context(eval $self->{ssl_context});
        IO::Socket::SSL::set_default_context($context);
    }

    $self->{response} = $self->{ua}->request($req);

    $self->{headers} = $self->{response}->headers();
    return (0, $self->{response}->content);
}

# Flatten a response's headers to "name: value" lines.
sub get_headers {
    my ($self, %options) = @_;

    my $headers = '';
    foreach ($options{response}->header_field_names()) {
        $headers .= "$_: " . $options{response}->header($_) . "\n";
    }

    return $headers;
}

# Headers of the first response in the redirect chain (falls back to the
# final response when there was no redirect).
sub get_first_header {
    my ($self, %options) = @_;

    my @redirects = $self->{response}->redirects();
    if (!defined($options{name})) {
        return $self->get_headers(response => defined($redirects[0]) ? $redirects[0] : $self->{response});
    }

    return
        defined($redirects[0]) ?
        $redirects[0]->headers()->header($options{name}) :
        $self->{headers}->header($options{name})
    ;
}

# Headers of the final response.
sub get_header {
    my ($self, %options) = @_;

    if (!defined($options{name})) {
        return $self->get_headers(response => $self->{response});
    }
    return $self->{headers}->header($options{name});
}

sub get_code {
    my ($self, %options) = @_;

    return $self->{response}->code();
}

sub get_message {
    my ($self, %options) = @_;

    return $self->{response}->message();
}

1;

__END__

=head1 NAME

HTTP LWP backend layer.

=head1 SYNOPSIS

HTTP LWP backend layer.

=head1 BACKEND LWP OPTIONS

=over 8

=item B<--ssl-opt>

Set SSL Options (--ssl-opt="SSL_version => TLSv1" --ssl-opt="SSL_verify_mode => SSL_VERIFY_NONE").

=item B<--ssl>

Set SSL version (--ssl=TLSv1).

=back

=head1 DESCRIPTION

B.

=cut
diff --git a/gorgone/gorgone/class/http/backend/useragent.pm b/gorgone/gorgone/class/http/backend/useragent.pm
new file mode 100644
index 00000000000..e3c2d56b3ee
--- /dev/null
+++ b/gorgone/gorgone/class/http/backend/useragent.pm
@@ -0,0 +1,50 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::http::backend::useragent;

use strict;
use warnings;
use base 'LWP::UserAgent';

# LWP::UserAgent subclass that carries basic credentials so
# get_basic_credentials() can answer 401 challenges automatically.
sub new {
    my ($class, %options) = @_;

    # build on the LWP::UserAgent constructor directly; the original
    # blessed a throwaway hash and immediately discarded it
    my $self = LWP::UserAgent::new(@_);
    $self->agent('gorgone::class::http::backend::useragent');

    $self->{credentials} = $options{credentials} if defined($options{credentials});
    $self->{username} = $options{username} if defined($options{username});
    $self->{password} = $options{password} if defined($options{password});

    return $self;
}

# LWP callback for 401 challenges: hand back the stored credentials
# (never for proxy authentication).
sub get_basic_credentials {
    my ($self, $realm, $uri, $proxy) = @_;
    return if $proxy;
    return $self->{username}, $self->{password} if $self->{credentials} and wantarray;
    return $self->{username} . ":" . $self->{password} if $self->{credentials};
    return undef;
}

1;
diff --git a/gorgone/gorgone/class/http/http.pm b/gorgone/gorgone/class/http/http.pm
new file mode 100644
index 00000000000..fc659354edb
--- /dev/null
+++ b/gorgone/gorgone/class/http/http.pm
@@ -0,0 +1,240 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::http::http;

use strict;
use warnings;
use gorgone::standard::misc;

# Frontend HTTP class: validates and normalizes the request options,
# then delegates to the lwp or curl backend.
sub new {
    my ($class, %options) = @_;
    my $self = {};
    bless $self, $class;

    $self->{logger} = $options{logger};
    $self->{options} = {
        proto => 'http',
        url_path => '/',
        timeout => 5,
        method => 'GET',
    };

    $self->{add_headers} = {};
    return $self;
}

# Merge defined options into the stored defaults.
sub set_options {
    my ($self, %options) = @_;

    $self->{options} = { %{$self->{options}} };
    foreach (keys %options) {
        $self->{options}->{$_} = $options{$_} if (defined($options{$_}));
    }
}

sub add_header {
    my ($self, %options) = @_;

    $self->{add_headers}->{$options{key}} = $options{value};
}

# Validate request options, load the backend class on first use and
# normalize headers/get/post parameters. Returns 0 on success, 1 on error.
sub check_options {
    my ($self, %options) = @_;

    $options{request}->{http_backend} = 'curl'
        if (!defined($options{request}->{http_backend}) || $options{request}->{http_backend} eq '');
    $self->{http_backend} = $options{request}->{http_backend};
    # (?:...) required: in /^\s*lwp|curl\s*$/ the anchors bound to only one
    # alternative each, so any value merely ending in "curl" was accepted
    if ($self->{http_backend} !~ /^\s*(?:lwp|curl)\s*$/i) {
        $self->{logger}->writeLogError("Unsupported http backend specified '" . $self->{http_backend} . "'.");
        return 1;
    }

    if (!defined($self->{backend_lwp}) && !defined($self->{backend_curl})) {
        if ($options{request}->{http_backend} eq 'lwp' && gorgone::standard::misc::mymodule_load(
            logger => $options{logger}, module => 'gorgone::class::http::backend::lwp',
            error_msg => "Cannot load module 'gorgone::class::http::backend::lwp'."
        ) == 0) {
            $self->{backend_lwp} = gorgone::class::http::backend::lwp->new(%options, logger => $self->{logger});
        }

        if ($options{request}->{http_backend} eq 'curl' && gorgone::standard::misc::mymodule_load(
            logger => $options{logger}, module => 'gorgone::class::http::backend::curl',
            error_msg => "Cannot load module 'gorgone::class::http::backend::curl'."
        ) == 0) {
            $self->{backend_curl} = gorgone::class::http::backend::curl->new(%options, logger => $self->{logger});
        }
    }

    if (($options{request}->{proto} ne 'http') && ($options{request}->{proto} ne 'https')) {
        # report the actual offending value; $self->{option_results} is
        # never set on this class, so the original always printed ''
        $self->{logger}->writeLogError("Unsupported protocol specified '" . $options{request}->{proto} . "'.");
        return 1;
    }
    if (!defined($options{request}->{hostname})) {
        $self->{logger}->writeLogError("Please set the hostname option");
        return 1;
    }
    if ((defined($options{request}->{credentials})) && (!defined($options{request}->{username}) || !defined($options{request}->{password}))) {
        $self->{logger}->writeLogError("You need to set --username= and --password= options when --credentials is used");
        return 1;
    }
    if ((defined($options{request}->{cert_pkcs12})) && (!defined($options{request}->{cert_file}) && !defined($options{request}->{cert_pwd}))) {
        $self->{logger}->writeLogError("You need to set --cert-file= and --cert-pwd= options when --pkcs12 is used");
        return 1;
    }

    $options{request}->{port_force} = $self->get_port();

    # split "Name:value" header strings into a hash, then layer the
    # programmatic add_header() entries on top
    $options{request}->{headers} = {};
    if (defined($options{request}->{header})) {
        foreach (@{$options{request}->{header}}) {
            if (/^(.*?):(.*)/) {
                $options{request}->{headers}->{$1} = $2;
            }
        }
    }
    foreach (keys %{$self->{add_headers}}) {
        $options{request}->{headers}->{$_} = $self->{add_headers}->{$_};
    }

    # "key=value" get/post parameters; a repeated key is promoted to an
    # array of values, a bare key defaults to 1
    foreach my $method (('get', 'post')) {
        if (defined($options{request}->{$method . '_param'})) {
            $options{request}->{$method . '_params'} = {};
            foreach (@{$options{request}->{$method . '_param'}}) {
                if (/^([^=]+)={0,1}(.*)$/) {
                    my $key = $1;
                    my $value = defined($2) ? $2 : 1;
                    if (defined($options{request}->{$method . '_params'}->{$key})) {
                        if (ref($options{request}->{$method . '_params'}->{$key}) ne 'ARRAY') {
                            $options{request}->{$method . '_params'}->{$key} = [ $options{request}->{$method . '_params'}->{$key} ];
                        }
                        push @{$options{request}->{$method . '_params'}->{$key}}, $value;
                    } else {
                        $options{request}->{$method . '_params'}->{$key} = $value;
                    }
                }
            }
        }
    }

    $self->{'backend_' . $self->{http_backend}}->check_options(%options);
    return 0;
}

# Port from the stored options, defaulting to 80/443 from the protocol.
sub get_port {
    my ($self, %options) = @_;

    my $port = '';
    if (defined($self->{options}->{port}) && $self->{options}->{port} ne '') {
        $port = $self->{options}->{port};
    } else {
        $port = 80 if ($self->{options}->{proto} eq 'http');
        $port = 443 if ($self->{options}->{proto} eq 'https');
    }

    return $port;
}

# Explicitly configured port only ('' when none was set).
sub get_port_request {
    my ($self, %options) = @_;

    my $port = '';
    if (defined($self->{options}->{port}) && $self->{options}->{port} ne '') {
        $port = $self->{options}->{port};
    }
    return $port;
}

# Merge per-call options over the defaults, validate, and delegate to the
# selected backend. Returns the backend's (0, body) or 1 on error.
sub request {
    my ($self, %options) = @_;

    my $request_options = { %{$self->{options}} };
    foreach (keys %options) {
        $request_options->{$_} = $options{$_} if (defined($options{$_}));
    }
    return 1 if ($self->check_options(request => $request_options));

    return $self->{'backend_' . $self->{http_backend}}->request(request => $request_options);
}

sub get_first_header {
    my ($self, %options) = @_;

    return $self->{'backend_' . $self->{http_backend}}->get_first_header(%options);
}

sub get_header {
    my ($self, %options) = @_;

    return $self->{'backend_' . $self->{http_backend}}->get_header(%options);
}

sub get_code {
    my ($self, %options) = @_;

    return $self->{'backend_' . $self->{http_backend}}->get_code();
}

sub get_message {
    my ($self, %options) = @_;

    return $self->{'backend_' . $self->{http_backend}}->get_message();
}

1;

__END__

=head1 NAME

HTTP abstraction layer.

=head1 SYNOPSIS

HTTP abstraction layer for lwp and curl backends

=head1 HTTP GLOBAL OPTIONS

=over 8

=item B<--http-peer-addr>

Set the address you want to connect (Useful if hostname is only a vhost.
no ip resolve) + +=item B<--proxyurl> + +Proxy URL + +=item B<--proxypac> + +Proxy pac file (can be an url or local file) + +=item B<--http-backend> + +Set the backend used (Default: 'lwp') +For curl: --http-backend=curl + +=back + +=head1 DESCRIPTION + +B. + +=cut diff --git a/gorgone/gorgone/class/listener.pm b/gorgone/gorgone/class/listener.pm new file mode 100644 index 00000000000..61d5421001d --- /dev/null +++ b/gorgone/gorgone/class/listener.pm @@ -0,0 +1,126 @@ +# +# Copyright 2020 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::class::listener; + +use strict; +use warnings; +use gorgone::standard::constants qw(:all); +use gorgone::standard::library; +use gorgone::class::frame; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{logger} = $options{logger}; + $self->{gorgone_core} = $options{gorgone}; + $self->{tokens} = {}; + + return $self; +} + +sub event_log { + my ($self) = shift; + + return if (!defined($self->{tokens}->{ $_[0]->{token}})); + + # we want to avoid loop + my $events = $self->{tokens}->{ $_[0]->{token} }; + if ($_[0]->{code} == GORGONE_ACTION_FINISH_KO || $_[0]->{code} == GORGONE_ACTION_FINISH_OK) { + delete $self->{tokens}->{ $_[0]->{token} }; + } + + foreach (keys %{$events->{events}}) { + $self->{logger}->writeLogDebug("[listener] trigger event '$_[0]->{token}'"); + + my $message = '[' . $_ . '] [' . $_[0]->{token} . '] [] { "code": ' . $_[0]->{code} . ', "data": ' . ${$_[0]->{data}} . ' }'; + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$message); + + $self->{gorgone_core}->message_run({ frame => $frame, router_type => 'internal' }); + } +} + +sub add_listener { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug("[listener] add token '$options{token}'"); + # an issue can happen if the event is unknown (recursive loop) + if (!defined($self->{tokens}->{$options{token}})) { + my ($log_pace, $timeout) = (30, 600); + $log_pace = $1 if (defined($options{log_pace}) && $options{log_pace} =~ /(\d+)/); + $timeout = $1 if (defined($options{timeout}) && $options{timeout} =~ /(\d+)/); + $self->{tokens}->{$options{token}} = { + target => $options{target}, + log_pace => $log_pace, + timeout => $timeout, + events => { $options{event} => $options{identity} }, + getlog_last => -1, + created => time() + }; + } else { + $self->{tokens}->{$options{token}}->{events}->{$options{event}} = $options{identity}; + } + + $self->check_getlog_token(token => $options{token}); +} + +sub check_getlog_token 
{ + my ($self, %options) = @_; + + if (defined($self->{tokens}->{$options{token}}->{target}) && + $self->{tokens}->{$options{token}}->{target}) { + + return if (defined($self->{gorgone_core}->{id}) && $self->{gorgone_core}->{id} == $self->{tokens}->{$options{token}}->{target}); + + if ((time() - $self->{tokens}->{$options{token}}->{log_pace}) > $self->{tokens}->{$options{token}}->{getlog_last}) { + my $message = "[GETLOG] [] [$self->{tokens}->{$options{token}}->{target}] {}"; + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$message); + + $self->{gorgone_core}->message_run({ frame => $frame, router_type => 'internal' }); + + $self->{tokens}->{$options{token}}->{getlog_last} = time(); + } + } +} + +sub check { + my ($self, %options) = @_; + + foreach my $token (keys %{$self->{tokens}}) { + if (time() - $self->{tokens}->{$token}->{created} > $self->{tokens}->{$token}->{timeout}) { + $self->{logger}->writeLogDebug("[listener] delete token '$token': timeout"); + gorgone::standard::library::add_history({ + dbh => $self->{gorgone_core}->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => '{ "message": "listener token ' . $token . ' timeout reached" }' + }); + delete $self->{tokens}->{$token}; + next; + } + $self->check_getlog_token(token => $token); + } +} + +1; diff --git a/gorgone/gorgone/class/lock.pm b/gorgone/gorgone/class/lock.pm new file mode 100644 index 00000000000..6b84e07423a --- /dev/null +++ b/gorgone/gorgone/class/lock.pm @@ -0,0 +1,167 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::lock; + +use strict; +use warnings; + +sub new { + my ($class, $name, %options) = @_; + my %defaults = (name => $name, pid => $$, timeout => 10); + my $self = {%defaults, %options}; + + bless $self, $class; + return $self; +} + +sub is_set { + die "Not implemented"; +} + +sub set { + my $self = shift; + + for (my $i = 0; $self->is_set() && $i < $self->{timeout}; $i++) { + sleep 1; + } + die "Failed to set lock for $self->{name}" if $self->is_set(); +} + +package gorgone::class::lock::file; + +use base qw(gorgone::class::lock); + +sub new { + my $class = shift; + my $self = $class->SUPER::new(@_); + + if (!defined $self->{storagedir}) { + die "Can't build lock, required arguments not provided"; + } + bless $self, $class; + $self->{pidfile} = "$self->{storagedir}/$self->{name}.lock"; + return $self; +} + +sub is_set { + return -e shift->{pidfile}; +} + +sub set { + my $self = shift; + + $self->SUPER::set(); + open LOCK, ">", $self->{pidfile}; + print LOCK $self->{pid}; + close LOCK; +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{pidfile} && -e $self->{pidfile}) { + unlink $self->{pidfile}; + } +} + +package gorgone::class::lock::sql; + +use base qw(gorgone::class::lock); + +sub new { + my $class = shift; + my $self = $class->SUPER::new(@_); + + if (!defined $self->{dbc}) { + die "Can't build lock, required arguments not provided"; + } + bless $self, $class; + $self->{launch_time} = time(); + return $self; +} + +sub is_set { + my $self = shift; + my ($status, $sth) = $self->{dbc}->query({ + query => "SELECT 
id,running,pid,time_launch FROM cron_operation WHERE name LIKE '$self->{name}'" + }); + + return 1 if ($status == -1); + my $data = $sth->fetchrow_hashref(); + + if (!defined $data->{id}) { + $self->{not_created_yet} = 1; + $self->{previous_launch_time} = 0; + return 0; + } + $self->{id} = $data->{id}; + $data->{pid} = -1 if (!defined($data->{pid})); + $self->{pid} = $data->{pid}; + $self->{previous_launch_time} = $data->{time_launch}; + if (defined $data->{running} && $data->{running} == 1) { + my $line = `ps -ef | grep -v grep | grep $self->{pid} | grep $self->{name}`; + return 0 if !length $line; + return 1; + } + return 0; +} + +sub set { + my $self = shift; + my $status; + + $self->SUPER::set(); + if (defined $self->{not_created_yet}) { + $status = $self->{dbc}->do(<<"EOQ"); +INSERT INTO cron_operation +(name, system, activate) +VALUES ('$self->{name}', '1', '1') +EOQ + goto error if $status == -1; + $self->{id} = $self->{dbc}->last_insert_id(); + return; + } + $status = $self->{dbc}->do(<<"EOQ"); +UPDATE cron_operation +SET running = '1', time_launch = '$self->{launch_time}', pid = '$self->{pid}' +WHERE id = '$self->{id}' +EOQ + goto error if $status == -1; + return; + + error: + die "Failed to set lock for $self->{name}"; +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{dbc}) { + my $exectime = time() - $self->{launch_time}; + $self->{dbc}->do(<<"EOQ"); +UPDATE cron_operation +SET running = '0', last_execution_time = '$exectime', pid = '-1' +WHERE id = '$self->{id}' +EOQ + } +} + +1; diff --git a/gorgone/gorgone/class/logger.pm b/gorgone/gorgone/class/logger.pm new file mode 100644 index 00000000000..90b13859819 --- /dev/null +++ b/gorgone/gorgone/class/logger.pm @@ -0,0 +1,256 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::class::logger;
+
+=head1 NAME
+
+gorgone::class::logger - Simple logging module
+
+=head1 SYNOPSIS
+
+ #!/usr/bin/perl -w
+
+ use strict;
+ use warnings;
+
+ use centreon::polling;
+
+ my $logger = new gorgone::class::logger();
+
+ $logger->writeLogInfo("information");
+
+=head1 DESCRIPTION
+
+This module offers a simple interface to write log messages to various output:
+
+* standard output
+* file
+* syslog
+
+=cut
+
+use strict;
+use warnings;
+use Sys::Syslog qw(:standard :macros);
+use IO::Handle;
+use Encode;
+
+# Map internal severity bits to syslog priorities.
+# Bugfix: bit 1 is the ERROR bit and bit 2 the INFO bit (see writeLogError
+# and writeLogInfo below); the previous mapping was swapped and sent errors
+# to syslog as LOG_INFO and informational messages as LOG_ERR.
+my %severities = (
+    1 => LOG_ERR,
+    2 => LOG_INFO,
+    4 => LOG_DEBUG
+);
+
+sub new {
+    my $class = shift;
+
+    my $self = bless
+      {
+        file => 0,
+        filehandler => undef,
+        # 0 = nothing, 1 = critical, 3 = info, 7 = debug
+        severity => 3,
+        old_severity => 3,
+        # 0 = stdout, 1 = file, 2 = syslog
+        log_mode => 0,
+        # Output pid of current process
+        withpid => 0,
+        # syslog
+        log_facility => undef,
+        log_option => LOG_PID,
+      }, $class;
+    return $self;
+}
+
+# Switch logging to append mode on $file; returns 1 on success, 0 on failure.
+sub file_mode($$) {
+    my ($self, $file) = @_;
+
+    if (defined($self->{filehandler})) {
+        $self->{filehandler}->close();
+    }
+    if (open($self->{filehandler}, ">>", $file)){
+        $self->{log_mode} = 1;
+        $self->{filehandler}->autoflush(1);
+        $self->{file_name} = $file;
+        return 1;
+    }
+    $self->{filehandler} = undef;
+    print STDERR "Cannot open file $file: $!\n";
+    return 0;
+}
+
+sub is_file_mode {
+    my $self = shift;
+
+    if ($self->{log_mode} == 1) {
+        return 1;
+    }
+    return 0;
+}
+
+sub is_debug {
+    my $self = shift;
+
+    if (($self->{severity} & 4) == 0) {
+        return 0;
+    }
+    return 1;
+}
+
+sub syslog_mode($$$) {
+    my ($self, $logopt, $facility) = @_;
+
+    $self->{log_mode} = 2;
+    openlog($0, $logopt, $facility);
+    return 1;
+}
+
+# For daemons: point STDOUT/STDERR at the log file so stray prints land there.
+sub redirect_output {
+    my $self = shift;
+
+    if ($self->is_file_mode()) {
+        open my $lfh, '>>', $self->{file_name};
+        open STDOUT, '>&', $lfh;
+        open STDERR, '>&', $lfh;
+    }
+}
+
+sub flush_output {
+    my ($self, %options) = @_;
+
+    $| = 1 if (defined($options{enabled}));
+}
+
+sub force_default_severity {
+    my ($self, %options) = @_;
+
+    $self->{old_severity} = defined($options{severity}) ? $options{severity} : $self->{severity};
+}
+
+sub set_default_severity {
+    my $self = shift;
+
+    $self->{severity} = $self->{old_severity};
+}
+
+# Getter/Setter Log severity
+sub severity {
+    my $self = shift;
+    if (@_) {
+        my $save_severity = $self->{severity};
+        if ($_[0] =~ /^[012347]$/) {
+            $self->{severity} = $_[0];
+        } elsif ($_[0] eq 'none') {
+            $self->{severity} = 0;
+        } elsif ($_[0] eq 'error') {
+            $self->{severity} = 1;
+        } elsif ($_[0] eq 'info') {
+            $self->{severity} = 3;
+        } elsif ($_[0] eq 'debug') {
+            $self->{severity} = 7;
+        } else {
+            $self->writeLogError('Wrong severity value set.');
+            return -1;
+        }
+        $self->{old_severity} = $save_severity;
+    }
+    return $self->{severity};
+}
+
+sub withpid {
+    my $self = shift;
+    if (@_) {
+        $self->{withpid} = $_[0];
+    }
+    return $self->{withpid};
+}
+
+sub get_date {
+    my $self = shift;
+    my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time());
+    return sprintf(
+        '%04d-%02d-%02d %02d:%02d:%02d',
+        $year+1900, $mon+1, $mday, $hour, $min, $sec
+    );
+}
+
+# Core writer: $_[0] is a hashref { severity, severity_str, message,
+# withdate?, withseverity? }. Messages longer than 20000 chars are truncated.
+sub writeLog {
+    my ($self) = shift;
+
+    my $withdate = (defined $_[0]->{withdate}) ? $_[0]->{withdate} : 1;
+    my $withseverity = (defined $_[0]->{withseverity}) ? $_[0]->{withseverity} : 1;
+
+    if (($self->{severity} & $_[0]->{severity}) == 0) {
+        return;
+    }
+
+    if (length($_[0]->{message}) > 20000) {
+        $_[0]->{message} = substr($_[0]->{message}, 0, 20000) . '...';
+    }
+    if ($self->{log_mode} == 2) {
+        # Bugfix: pass the message through a '%s' format; Sys::Syslog treats
+        # the first string argument as a printf format, so a literal '%' in
+        # a log message could die or corrupt the output.
+        syslog($severities{$_[0]->{severity}}, '%s', $_[0]->{message});
+        return;
+    }
+
+    $_[0]->{message} = (($self->{withpid} == 1) ? "$$ - $_[0]->{message} " : $_[0]->{message});
+    $_[0]->{message} = ($withseverity)
+      ? $_[0]->{severity_str} . " - $_[0]->{message}" : $_[0]->{message};
+    $_[0]->{message} = ($withdate)
+      ? $self->get_date . " - $_[0]->{message}" : $_[0]->{message};
+
+    chomp($_[0]->{message});
+    if ($self->{log_mode} == 0) {
+        print "$_[0]->{message}\n";
+    } elsif ($self->{log_mode} == 1) {
+        if (defined $self->{filehandler}) {
+            print { $self->{filehandler} } "$_[0]->{message}\n";
+        }
+    }
+}
+
+sub writeLogDebug {
+    my ($self) = shift;
+
+    $self->writeLog({ severity => 4, severity_str => 'DEBUG', message => $_[0] });
+}
+
+sub writeLogInfo {
+    my ($self) = shift;
+
+    $self->writeLog({ severity => 2, severity_str => 'INFO', message => $_[0] });
+}
+
+sub writeLogError {
+    my ($self) = shift;
+
+    $self->writeLog({ severity => 1, severity_str => 'ERROR', message => $_[0] });
+}
+
+sub DESTROY {
+    my $self = shift;
+
+    if (defined $self->{filehandler}) {
+        $self->{filehandler}->close();
+    }
+}
+
+1;
diff --git a/gorgone/gorgone/class/module.pm b/gorgone/gorgone/class/module.pm
new file mode 100644
index 00000000000..f80b525dae1
--- /dev/null
+++ b/gorgone/gorgone/class/module.pm
@@ -0,0 +1,401 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::module; + +use strict; +use warnings; + +use gorgone::standard::constants qw(:all); +use gorgone::standard::library; +use gorgone::standard::misc; +use gorgone::class::tpapi; +use ZMQ::FFI qw(ZMQ_DONTWAIT); +use JSON::XS; +use Crypt::Mode::CBC; +use Try::Tiny; +use EV; +use MIME::Base64; + +my %handlers = (DIE => {}); + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + { + local $SIG{__DIE__}; + $self->{zmq_context} = ZMQ::FFI->new(); + } + + $self->{internal_socket} = undef; + $self->{module_id} = $options{module_id}; + $self->{container_id} = $options{container_id}; + $self->{container} = ''; + $self->{container} = ' container ' . $self->{container_id} . ':' if (defined($self->{container_id})); + + $self->{core_id} = $options{core_id}; + $self->{logger} = $options{logger}; + $self->{config} = $options{config}; + $self->{exit_timeout} = (defined($options{config}->{exit_timeout}) && $options{config}->{exit_timeout} =~ /(\d+)/) ? 
$1 : 30; + $self->{config_core} = $options{config_core}; + $self->{config_db_centreon} = $options{config_db_centreon}; + $self->{config_db_centstorage} = $options{config_db_centstorage}; + $self->{stop} = 0; + $self->{fork} = 0; + + $self->{loop} = new EV::Loop(); + + $self->{internal_crypt} = { enabled => 0 }; + if ($self->get_core_config(name => 'internal_com_crypt') == 1) { + $self->{cipher} = Crypt::Mode::CBC->new( + $self->get_core_config(name => 'internal_com_cipher'), + $self->get_core_config(name => 'internal_com_padding') + ); + + $self->{internal_crypt} = { + enabled => 1, + rotation => $self->get_core_config(name => 'internal_com_rotation'), + cipher => $self->get_core_config(name => 'internal_com_cipher'), + padding => $self->get_core_config(name => 'internal_com_padding'), + iv => $self->get_core_config(name => 'internal_com_iv'), + core_keys => [$self->get_core_config(name => 'internal_com_core_key'), $self->get_core_config(name => 'internal_com_core_oldkey')], + identity_keys => $self->get_core_config(name => 'internal_com_identity_keys') + }; + } + + $self->{tpapi} = gorgone::class::tpapi->new(); + $self->{tpapi}->load_configuration(configuration => $options{config_core}->{tpapi}); + + $SIG{__DIE__} = \&class_handle_DIE; + $handlers{DIE}->{$self} = sub { $self->handle_DIE($_[0]) }; + + return $self; +} + +sub class_handle_DIE { + my ($msg) = @_; + + foreach (keys %{$handlers{DIE}}) { + &{$handlers{DIE}->{$_}}($msg); + } +} + +sub handle_DIE { + my ($self, $msg) = @_; + + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} Receiving DIE: $msg"); +} + +sub generate_token { + my ($self, %options) = @_; + + return gorgone::standard::library::generate_token(length => $options{length}); +} + +sub set_fork { + my ($self, %options) = @_; + + $self->{fork} = 1; +} + +sub event { + my ($self, %options) = @_; + + my $socket = defined($options{socket}) ? 
$options{socket} : $self->{internal_socket}; + while ($socket->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[$self->{module_id}]$self->{container} Event: $message"); + if ($message =~ /^\[(.*?)\]/) { + if ((my $method = $self->can('action_' . lc($1)))) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + next if ($rv); + + $method->($self, token => $token, data => $data); + } + } + } +} + +sub get_core_config { + my ($self, %options) = @_; + + return $self->{config_core}->{gorgonecore} if (!defined($options{name})); + + return $self->{config_core}->{gorgonecore}->{ $options{name} }; +} + +sub read_message { + my ($self, %options) = @_; + + my ($rv, $message) = gorgone::standard::library::zmq_dealer_read_message( + socket => defined($options{socket}) ? $options{socket} : $self->{internal_socket}, + frame => $options{frame} + ); + return (undef, 1) if ($rv); + if ($self->{internal_crypt}->{enabled} == 0) { + if (defined($options{frame})) { + return (undef, 0); + } + return ($message, 0); + } + + foreach my $key (@{$self->{internal_crypt}->{core_keys}}) { + next if (!defined($key)); + + if (defined($options{frame})) { + if ($options{frame}->decrypt({ cipher => $self->{cipher}, key => $key, iv => $self->{internal_crypt}->{iv} }) == 0) { + return (undef, 0); + } + } else { + my $plaintext; + try { + $plaintext = $self->{cipher}->decrypt(MIME::Base64::decode_base64($message), $key, $self->{internal_crypt}->{iv}); + }; + if (defined($plaintext) && $plaintext =~ /^\[[A-Za-z_\-]+?\]/) { + $message = undef; + return ($plaintext, 0); + } + } + } + + if (defined($options{frame})) { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} decrypt issue: " . 
$options{frame}->getLastError()); + } else { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} decrypt issue: " . ($_ ? $_ : 'no message')); + } + return (undef, 1); +} + +sub renew_internal_key { + my ($self, %options) = @_; + + my $message = gorgone::standard::library::build_protocol( + action => 'SETMODULEKEY', + data => { key => unpack('H*', $options{key}) }, + json_encode => 1 + ); + try { + $message = $self->{cipher}->encrypt($message, $options{encrypt_key}, $self->{internal_crypt}->{iv}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} encrypt issue: $_"); + return -1; + }; + + return (0, $message); +} + +sub send_internal_action { + my ($self, $options) = (shift, shift); + + if (!defined($options->{message})) { + $options->{message} = gorgone::standard::library::build_protocol( + token => $options->{token}, + action => $options->{action}, + target => $options->{target}, + data => $options->{data}, + json_encode => defined($options->{data_noencode}) ? undef : 1 + ); + } + $self->{logger}->writeLogDebug("[$self->{module_id}]$self->{container} internal message: $options->{message}"); + + my $socket = defined($options->{socket}) ? $options->{socket} : $self->{internal_socket}; + my $message_key; + if ($self->{internal_crypt}->{enabled} == 1) { + my $identity = gorgone::standard::library::zmq_get_routing_id(socket => $socket); + + my $key = $self->{internal_crypt}->{core_keys}->[0]; + if ($self->{fork} == 0) { + if (!defined($self->{internal_crypt}->{identity_keys}->{$identity}) || + (time() - $self->{internal_crypt}->{identity_keys}->{$identity}->{ctime}) > ($self->{internal_crypt}->{rotation})) { + my ($rv, $genkey) = gorgone::standard::library::generate_symkey( + keysize => $self->get_core_config(name => 'internal_com_keysize') + ); + + ($rv, $message_key) = $self->renew_internal_key( + key => $genkey, + encrypt_key => defined($self->{internal_crypt}->{identity_keys}->{$identity}) ? 
+ $self->{internal_crypt}->{identity_keys}->{$identity}->{key} : $self->{internal_crypt}->{core_keys}->[0] + ); + return undef if ($rv == -1); + + $self->{internal_crypt}->{identity_keys}->{$identity} = { + key => $genkey, + ctime => time() + }; + } + + $key = $self->{internal_crypt}->{identity_keys}->{$identity}->{key}; + } + + try { + $options->{message} = $self->{cipher}->encrypt($options->{message}, $key, $self->{internal_crypt}->{iv}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} encrypt issue: $_"); + return undef; + }; + + $options->{message} = MIME::Base64::encode_base64($options->{message}, ''); + } + + $socket->send(MIME::Base64::encode_base64($message_key, ''), ZMQ_DONTWAIT) if (defined($message_key)); + $socket->send($options->{message}, ZMQ_DONTWAIT); + if ($socket->has_error) { + $self->{logger}->writeLogError( + "[$self->{module_id}]$self->{container} Cannot send message: " . $socket->last_strerror + ); + } + $self->event(socket => $socket); +} + +sub send_log_msg_error { + my ($self, %options) = @_; + + return if (!defined($options{token})); + + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} -$options{subname}- $options{number} $options{message}"); + $self->send_internal_action({ + socket => (defined($options{socket})) ? $options{socket} : $self->{internal_socket}, + action => 'PUTLOG', + token => $options{token}, + data => { code => GORGONE_ACTION_FINISH_KO, etime => time(), instant => $options{instant}, token => $options{token}, data => { message => $options{message} } }, + json_encode => 1 + }); +} + +sub send_log { + my ($self, %options) = @_; + + return if (!defined($options{token})); + + return if (defined($options{logging}) && $options{logging} =~ /^(?:false|0)$/); + + $self->send_internal_action({ + socket => (defined($options{socket})) ? 
$options{socket} : $self->{internal_socket}, + action => 'PUTLOG', + token => $options{token}, + data => { code => $options{code}, etime => time(), instant => $options{instant}, token => $options{token}, data => $options{data} }, + json_encode => 1 + }); +} + +sub json_encode { + my ($self, %options) = @_; + + my $encoded_arguments; + try { + $encoded_arguments = JSON::XS->new->encode($options{argument}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} $options{method} - cannot encode json: $_"); + return 1; + }; + + return (0, $encoded_arguments); +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded_arguments; + try { + $decoded_arguments = JSON::XS->new->decode($options{argument}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} $options{method} - cannot decode json: $_"); + if (defined($options{token})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot decode json' } + ); + } + return 1; + }; + + return (0, $decoded_arguments); +} + +sub execute_shell_cmd { + my ($self, %options) = @_; + + my $timeout = defined($options{timeout}) && $options{timeout} =~ /(\d+)/ ? $1 : 30; + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => $options{cmd}, + logger => $self->{logger}, + timeout => $timeout, + wait_exit => 1, + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} command execution issue $options{cmd} : " . 
$stdout); + return -1; + } + + return 0; +} + +sub change_macros { + my ($self, %options) = @_; + + $options{template} =~ s/%\{(.*?)\}/$options{macros}->{$1}/g; + if (defined($options{escape})) { + $options{template} =~ s/([\Q$options{escape}\E])/\\$1/g; + } + return $options{template}; +} + +sub action_bcastlogger { + my ($self, %options) = @_; + + my $data = $options{data}; + if (defined($options{frame})) { + $data = $options{frame}->decodeData(); + } + + if (defined($data->{content}->{severity}) && $data->{content}->{severity} ne '') { + if ($data->{content}->{severity} eq 'default') { + $self->{logger}->set_default_severity(); + } else { + $self->{logger}->severity($data->{content}->{severity}); + } + } +} + +sub action_bcastcorekey { + my ($self, %options) = @_; + + return if ($self->{internal_crypt}->{enabled} == 0); + + my $data = $options{data}; + if (defined($options{frame})) { + $data = $options{frame}->decodeData(); + } + + if (defined($data->{key})) { + $self->{logger}->writeLogDebug("[$self->{module_id}]$self->{container} core key changed"); + $self->{internal_crypt}->{core_keys}->[1] = $self->{internal_crypt}->{core_keys}->[0]; + $self->{internal_crypt}->{core_keys}->[0] = pack('H*', $data->{key}); + } +} + +1; diff --git a/gorgone/gorgone/class/script.pm b/gorgone/gorgone/class/script.pm new file mode 100644 index 00000000000..a5891101799 --- /dev/null +++ b/gorgone/gorgone/class/script.pm @@ -0,0 +1,264 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::script;

use strict;
use warnings;
use FindBin;
# FIX: File::Basename::dirname()/basename() are called below (yaml_get_include,
# yaml_load_config) but the module was never loaded explicitly.
use File::Basename;
use Getopt::Long;
use Pod::Usage;
use gorgone::class::logger;
use gorgone::class::db;
use gorgone::class::lock;
use YAML::XS;
use Hash::Merge;
Hash::Merge::set_behavior('RIGHT_PRECEDENT');
$YAML::XS::Boolean = 'JSON::PP';
$YAML::XS::LoadBlessed = 1;

# Any uncaught die is printed and turns into exit code 1.
$SIG{__DIE__} = sub {
    my $error = shift;
    print "Error: $error";
    exit 1;
};

# Constructor: base class for gorgone command-line scripts.
# $name is the script name (used for the SQL lock); %options may override
# log_file, centreon_db_conn, centstorage_db_conn, severity, noroot.
sub new {
    my ($class, $name, %options) = @_;
    my %defaults = (
        log_file => undef,
        centreon_db_conn => 0,
        centstorage_db_conn => 0,
        severity => 'info',
        noroot => 0
    );
    my $self = {%defaults, %options};

    bless $self, $class;
    $self->{name} = $name;
    $self->{logger} = gorgone::class::logger->new();
    # Getopt::Long specs mapped to references into $self.
    $self->{options} = {
        'config=s' => \$self->{config_file},
        'logfile=s' => \$self->{log_file},
        'severity=s' => \$self->{severity},
        'flushoutput' => \$self->{flushoutput},
        'help|?' => \$self->{help},
        'version' => \$self->{version}
    };
    return $self;
}

# Configure the logger and, if requested, open the Centreon/Centstorage
# database connections (taking the SQL lock for the centreon one).
sub init {
    my $self = shift;

    if (defined $self->{log_file}) {
        $self->{logger}->file_mode($self->{log_file});
    }
    $self->{logger}->flush_output(enabled => $self->{flushoutput});
    $self->{logger}->severity($self->{severity});
    $self->{logger}->force_default_severity();

    if ($self->{noroot} == 1) {
        # Stop exec if root
        if ($< == 0) {
            $self->{logger}->writeLogError("Can't execute script as root.");
            die('Quit');
        }
    }

    if ($self->{centreon_db_conn}) {
        $self->{cdb} = gorgone::class::db->new(
            db => $self->{centreon_config}->{centreon_db},
            host => $self->{centreon_config}->{db_host},
            user => $self->{centreon_config}->{db_user},
            password => $self->{centreon_config}->{db_passwd},
            logger => $self->{logger}
        );
        $self->{lock} = gorgone::class::lock::sql->new($self->{name}, dbc => $self->{cdb});
        $self->{lock}->set();
    }
    if ($self->{centstorage_db_conn}) {
        $self->{csdb} = gorgone::class::db->new(
            db => $self->{centreon_config}->{centstorage_db},
            host => $self->{centreon_config}->{db_host},
            user => $self->{centreon_config}->{db_user},
            password => $self->{centreon_config}->{db_passwd},
            logger => $self->{logger}
        );
    }
}

sub DESTROY {
    my $self = shift;

    if (defined $self->{cdb}) {
        $self->{cdb}->disconnect();
    }
    if (defined $self->{csdb}) {
        $self->{csdb}->disconnect();
    }
}

# Merge extra Getopt::Long specs into the option table (subclass hook).
sub add_options {
    my ($self, %options) = @_;

    $self->{options} = {%{$self->{options}}, %options};
}

# Parse the command line; --help shows the script's POD, --version exits.
sub parse_options {
    my $self = shift;

    Getopt::Long::Configure('bundling');
    die "Command line error" if (!GetOptions(%{$self->{options}}));
    pod2usage(-exitval => 1, -input => $FindBin::Bin . "/" . $FindBin::Script) if ($self->{help});
    if ($self->{version}) {
        print "version: " . $self->get_version() . "\n";
        exit(0);
    }
}

sub run {
    my $self = shift;

    $self->parse_options();
    $self->init();
}

# Expand an !include directive: a comma-separated list of glob-ish paths
# (only '*' is supported) resolved relative to current_dir. Returns the
# matching files, sorted per directory.
sub yaml_get_include {
    my ($self, %options) = @_;

    my @all_files = ();
    my @dirs = split(/,/, $options{include});
    foreach my $dir (@dirs) {
        next if ($dir eq '');
        my $dirname = File::Basename::dirname($dir);
        $dirname = $options{current_dir} . '/' . $dirname if ($dirname !~ /^\//);
        # Turn the basename into a \Q-quoted pattern where '*' means '.*'.
        my $match_files = File::Basename::basename($dir);
        $match_files =~ s/\*/\\E.*\\Q/g;
        $match_files = '\Q' . $match_files . '\E';

        my @sorted_files = ();
        my $DIR;
        if (!opendir($DIR, $dirname)) {
            $self->{logger}->writeLogError("config - cannot opendir '$dirname' error: $!");
            return ();
        }

        while (readdir($DIR)) {
            # FIX: match the (already \Q-quoted) pattern directly instead of
            # string-eval'ing it — same result, no code-injection surface.
            if (-f "$dirname/$_" && /^$match_files$/) {
                push @sorted_files, "$dirname/$_";
            }
        }
        closedir($DIR);
        @sorted_files = sort { $a cmp $b } @sorted_files;
        push @all_files, @sorted_files;
    }

    return @all_files;
}

# Recursively walk a loaded YAML structure (by reference):
#  - applies the optional 'filter' expression (string-eval'ed on purpose:
#    it can inspect $ariane, the '##'-separated path of the current node),
#  - expands 'include'-blessed scalars via yaml_get_include,
#  - stringifies JSON::PP booleans to 'true'/'false'.
sub yaml_parse_config {
    my ($self, %options) = @_;

    if (ref(${$options{config}}) eq 'HASH') {
        foreach (keys %{${$options{config}}}) {
            my $ariane = $options{ariane} . $_ . '##';
            if (defined($options{filter}) && eval "$options{filter}") {
                delete ${$options{config}}->{$_};
                next;
            }
            $self->yaml_parse_config(
                config => \${$options{config}}->{$_},
                current_dir => $options{current_dir},
                filter => $options{filter},
                ariane => $ariane
            );
        }
    } elsif (ref(${$options{config}}) eq 'ARRAY') {
        my $size = @{${$options{config}}};
        my $ariane = $options{ariane} . 'ARRAY##';
        for (my $i = 0; $i < $size; $i++) {
            if (defined($options{filter}) && eval "$options{filter}") {
                ${$options{config}} = undef;
                last;
            }
            $self->yaml_parse_config(
                config => \${$options{config}}->[$i],
                current_dir => $options{current_dir},
                filter => $options{filter},
                ariane => $ariane
            );
        }
    } elsif (ref(${$options{config}}) eq 'include') {
        my @files = $self->yaml_get_include(
            include => ${${$options{config}}},
            current_dir => $options{current_dir},
            filter => $options{filter}
        );
        ${$options{config}} = undef;
        foreach (@files) {
            if (! -r $_) {
                $self->{logger}->writeLogError("config - cannot read file '$_'");
                next;
            }
            my $config = $self->yaml_load_config(file => $_, filter => $options{filter}, ariane => $options{ariane});
            next if (!defined($config));
            # Merge included content into the current node according to type.
            if (ref($config) eq 'ARRAY') {
                ${$options{config}} = [] if (ref(${$options{config}}) ne 'ARRAY');
                push @{${$options{config}}}, @$config;
            } elsif (ref($config) eq 'HASH') {
                ${$options{config}} = {} if (ref(${$options{config}}) ne 'HASH');
                ${$options{config}} = Hash::Merge::merge(${$options{config}}, $config);
            } else {
                ${$options{config}} = $config;
            }
        }
    } elsif (ref(${$options{config}}) eq 'JSON::PP::Boolean') {
        if (${${$options{config}}}) {
            ${$options{config}} = 'true';
        } else {
            ${$options{config}} = 'false';
        }
    }
}

# Load a YAML file, then resolve includes/filters via yaml_parse_config.
# Returns the parsed structure, or undef on load error.
sub yaml_load_config {
    my ($self, %options) = @_;

    my $config;
    eval {
        $config = YAML::XS::LoadFile($options{file});
    };
    if ($@) {
        $self->{logger}->writeLogError("config - yaml load file '$options{file}' error: $@");
        return undef;
    }

    my $current_dir = File::Basename::dirname($options{file});
    $self->yaml_parse_config(
        config => \$config,
        current_dir => $current_dir,
        filter => $options{filter},
        ariane => defined($options{ariane}) ?
$options{ariane} : ''
    );
    return $config;
}

1;

# ---------------------------------------------------------------------------
# new file: gorgone/gorgone/class/sqlquery.pm
# ---------------------------------------------------------------------------
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::sqlquery;

use strict;
use warnings;

# Thin query helper around a gorgone::class::db handle (db_centreon).
sub new {
    my ($class, %options) = @_;
    my $self = {};
    $self->{logger} = $options{logger};
    $self->{db_centreon} = $options{db_centreon};

    bless $self, $class;
    return $self;
}

# Assemble "<request> <fields> FROM <tables>[ WHERE <where>][<extra_suffix>]".
sub builder {
    my ($self, %options) = @_;

    my $where = defined($options{where}) ? ' WHERE ' . $options{where} : '';
    my $extra_suffix = defined($options{extra_suffix}) ? $options{extra_suffix} : '';
    my $request = $options{request} . " " . join(', ', @{$options{fields}}) .
        ' FROM ' . join(', ', @{$options{tables}}) . $where . $extra_suffix;
    return $request;
}

# Run a query. mode 0: return the statement handle; mode 1: fetchall_hashref
# keyed on $options{keys}; otherwise fetchall_arrayref.
# Returns (status, result); status is -1 on error.
sub do {
    my ($self, %options) = @_;
    my $mode = defined($options{mode}) ? $options{mode} : 0;

    my ($status, $sth) = $self->{db_centreon}->query({ query => $options{request}, bind_values => $options{bind_values} });
    if ($status == -1) {
        return (-1, undef);
    }
    if ($mode == 0) {
        return ($status, $sth);
    } elsif ($mode == 1) {
        my $result = $sth->fetchall_hashref($options{keys});
        if (!defined($result)) {
            $self->{logger}->writeLogError("[core] Cannot fetch database data: " . $sth->errstr . " [request = $options{request}]");
            return (-1, undef);
        }
        return ($status, $result);
    }
    my $result = $sth->fetchall_arrayref();
    if (!defined($result)) {
        $self->{logger}->writeLogError("[core] Cannot fetch database data: " . $sth->errstr . " [request = $options{request}]");
        return (-1, undef);
    }
    return ($status, $result);
}

sub custom_execute {
    my ($self, %options) = @_;

    return $self->do(%options);
}

# builder() + do() in one call.
sub execute {
    my ($self, %options) = @_;

    my $request = $self->builder(%options);
    return $self->do(request => $request, %options);
}

# Execute a (possibly multi-statement) request inside a transaction.
# Returns 0 on success, -1 on failure (rolled back).
sub transaction_query_multi {
    my ($self, %options) = @_;

    my ($status, $sth);

    $status = $self->transaction_mode(1);
    return -1 if ($status == -1);

    ($status, $sth) = $self->{db_centreon}->query({ query => $options{request}, prepare_only => 1 });
    if ($status == -1) {
        $self->rollback();
        return -1;
    }

    if (defined($options{bind_values}) && scalar(@{$options{bind_values}}) > 0) {
        $sth->execute(@{$options{bind_values}});
    } else {
        $sth->execute();
    }
    # Drain every result set so an error in any statement is caught.
    do {
        if ($sth->err) {
            $self->rollback();
            $self->{db_centreon}->error($sth->errstr, $options{request});
            return -1;
        }
    } while ($sth->more_results);

    $status = $self->commit();
    return -1 if ($status == -1);

    return 0;
}

# Execute a single request inside a transaction. 0 on success, -1 on failure.
sub transaction_query {
    my ($self, %options) = @_;
    my $status;

    $status = $self->transaction_mode(1);
    return -1 if ($status == -1);

    ($status) = $self->do(request => $options{request});
    if ($status == -1) {
        $self->rollback();
        return -1;
    }

    $status = $self->commit();
    return -1 if ($status == -1);

    return 0;
}

# FIX: unpack the mode argument explicitly instead of mixing
# "my ($self) = @_" with a positional "$_[1]" (and drop the stray ';'
# after the closing brace) — consistent with the sibling methods.
sub transaction_mode {
    my ($self, $mode) = @_;

    return $self->{db_centreon}->transaction_mode($mode);
}

sub commit {
    my ($self, %options) = @_;

    return $self->{db_centreon}->commit();
}

sub rollback {
    my ($self, %options) = @_;

    return $self->{db_centreon}->rollback();
}

1;

# ---------------------------------------------------------------------------
# new file: gorgone/gorgone/class/tpapi.pm
# ---------------------------------------------------------------------------
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::tpapi;

use strict;
use warnings;

# Registry of third-party API configurations, indexed by their 'name' key.
sub new {
    my ($class, %options) = @_;

    my $self = bless { configs => {} }, $class;
    return $self;
}

# Return the configuration entry registered under $options{name}
# (undef when unknown).
sub get_configuration {
    my ($self, %options) = @_;

    return $self->{configs}->{ $options{name} };
}

# Replace the registry with the entries of $options{configuration}
# (an arrayref of hashrefs); entries without a 'name' key are ignored.
sub load_configuration {
    my ($self, %options) = @_;

    $self->{configs} = {};
    return unless defined($options{configuration});

    foreach my $entry (@{$options{configuration}}) {
        $self->{configs}->{ $entry->{name} } = $entry if defined($entry->{name});
    }
}

1;

# ---------------------------------------------------------------------------
# new file: gorgone/gorgone/class/tpapi/centreonv2.pm
# ---------------------------------------------------------------------------
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::tpapi::centreonv2;

use strict;
use warnings;
use gorgone::class::http::http;
use JSON::XS;

# Client for the Centreon web API v2 (token-based authentication).
sub new {
    my ($class, %options) = @_;
    my $self = {};
    bless $self, $class;

    $self->{is_error} = 1;
    $self->{error} = 'configuration missing';
    $self->{is_logged} = 0;

    return $self;
}

# Decode a JSON payload; on failure sets is_error/error and returns undef.
sub json_decode {
    my ($self, %options) = @_;

    my $decoded;
    eval {
        $decoded = JSON::XS->new->decode($options{content});
    };
    if ($@) {
        $self->{is_error} = 1;
        $self->{error} = "cannot decode json response: $@";
        return undef;
    }

    return $decoded;
}

sub error {
    my ($self, %options) = @_;

    return $self->{error};
}

# Validate and store the connection settings (base_url, username, password,
# optional backend/curlopts). Returns 0 on success, 1 on missing settings.
sub set_configuration {
    my ($self, %options) = @_;

    if (!defined($options{config})) {
        return 1;
    }

    foreach (('base_url', 'username', 'password')) {
        if (!defined($options{config}->{$_}) ||
            $options{config}->{$_} eq '') {
            $self->{error} = $_ . ' configuration missing';
            return 1;
        }

        $self->{$_} = $options{config}->{$_};
    }

    $self->{base_url} =~ s/\/$//;

    $self->{http_backend} = defined($options{config}->{backend}) ? $options{config}->{backend} : 'curl';

    # Default curl options; fully replaced when curlopts is configured.
    $self->{curl_opts} = ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'];
    my $curl_opts = [];
    if (defined($options{config}->{curlopts})) {
        foreach (keys %{$options{config}->{curlopts}}) {
            push @{$curl_opts}, $_ . ' => ' . $options{config}->{curlopts}->{$_};
        }
    }
    if (scalar(@$curl_opts) > 0) {
        $self->{curl_opts} = $curl_opts;
    }

    $self->{http} = gorgone::class::http::http->new(logger => $options{logger});
    $self->{is_error} = 0;
    return 0;
}

# POST /login with the configured credentials; stores the security token
# and sets is_logged on success.
sub authenticate {
    my ($self, %options) = @_;

    my $json_request = {
        security => {
            credentials => {
                login => $self->{username},
                password => $self->{password}
            }
        }
    };
    my $encoded;
    eval {
        $encoded = encode_json($json_request);
    };
    if ($@) {
        $self->{is_error} = 1;
        $self->{error} = "cannot encode json request: $@";
        return undef;
    }

    my ($code, $content) = $self->{http}->request(
        http_backend => $self->{http_backend},
        method => 'POST',
        hostname => '',
        full_url => $self->{base_url} . '/login',
        query_form_post => $encoded,
        header => [
            'Accept-Type: application/json; charset=utf-8',
            'Content-Type: application/json; charset=utf-8',
        ],
        curl_opt => $self->{curl_opts},
        warning_status => '',
        unknown_status => '',
        critical_status => ''
    );
    if ($code) {
        $self->{is_error} = 1;
        $self->{error} = 'http request error';
        return undef;
    }
    if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) {
        $self->{is_error} = 1;
        $self->{error} = "Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']";
        return undef;
    }

    my $decoded = $self->json_decode(content => $content);
    return if (!defined($decoded));

    my $token = defined($decoded->{security}->{token}) ? $decoded->{security}->{token} : undef;
    if (!defined($token)) {
        $self->{is_error} = 1;
        $self->{error} = 'authenticate issue - cannot get token';
        return undef;
    }

    $self->{token} = $token;
    $self->{is_logged} = 1;
}

# Perform an authenticated API call. Re-authenticates when needed, resets
# the session on HTTP 403. Returns (0, decoded) on success, 1 on failure.
sub request {
    my ($self, %options) = @_;

    if (!defined($self->{base_url})) {
        $self->{is_error} = 1;
        $self->{error} = 'configuration missing';
        return 1;
    }

    $self->{is_error} = 0;
    if ($self->{is_logged} == 0) {
        $self->authenticate();
    }

    return 1 if ($self->{is_logged} == 0);

    # TODO: manage it properly
    my $get_param = ['page=1', 'limit=10000'];
    if (defined($options{get_param})) {
        push @$get_param, @{$options{get_param}};
    }

    my ($code, $content) = $self->{http}->request(
        http_backend => $self->{http_backend},
        method => $options{method},
        hostname => '',
        full_url => $self->{base_url} . $options{endpoint},
        query_form_post => $options{query_form_post},
        get_param => $get_param,
        header => [
            'Accept-Type: application/json; charset=utf-8',
            'Content-Type: application/json; charset=utf-8',
            'X-AUTH-TOKEN: ' . $self->{token}
        ],
        curl_opt => $self->{curl_opts},
        warning_status => '',
        unknown_status => '',
        critical_status => ''
    );
    # FIX: check the transport status like authenticate() does; without it a
    # transport failure was reported as a misleading JSON decode error.
    if ($code) {
        $self->{is_error} = 1;
        $self->{error} = 'http request error';
        return 1;
    }

    my $decoded = $self->json_decode(content => $content);

    # code 403 means forbidden (token not good maybe)
    if ($self->{http}->get_code() == 403) {
        $self->{token} = undef;
        $self->{is_logged} = 0;
        $self->{is_error} = 1;
        $self->{error} = 'token forbidden';
        $self->{error} = $decoded->{message} if (defined($decoded) && defined($decoded->{message}));
        return 1;
    }

    if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) {
        $self->{is_error} = 1;
        my $message = $self->{http}->get_message();
        $message = $decoded->{message} if (defined($decoded) && defined($decoded->{message}));
        $self->{error} = "request error [code: '" . $self->{http}->get_code() . "'] [message: '" . $message . "']";
        return 1;
    }

    return 1 if (!defined($decoded));

    return (0, $decoded);
}

sub get_token {
    my ($self, %options) = @_;

    return $self->{token};
}

# GET /monitoring/hosts[/<host_id>], optionally with a search expression.
sub get_monitoring_hosts {
    my ($self, %options) = @_;

    my $endpoint = '/monitoring/hosts';
    $endpoint .= '/' . $options{host_id} if (defined($options{host_id}));

    my $get_param;
    if (defined($options{search})) {
        $get_param = ['search=' . $options{search}];
    }

    return $self->request(
        method => 'GET',
        endpoint => $endpoint,
        get_param => $get_param
    );
}

sub get_platform_versions {
    my ($self, %options) = @_;

    return $self->request(
        method => 'GET',
        endpoint => '/platform/versions'
    );
}

# GET /auto-discovery/scheduling/jobs, optionally with a search expression.
sub get_scheduling_jobs {
    my ($self, %options) = @_;

    my $get_param;
    if (defined($options{search})) {
        $get_param = ['search=' . $options{search}];
    }

    my $endpoint = '/auto-discovery/scheduling/jobs';
    return $self->request(
        method => 'GET',
        endpoint => $endpoint,
        get_param => $get_param
    );
}

# Best-effort logout when the session is still open.
sub DESTROY {
    my ($self) = @_;

    if ($self->{is_logged} == 1) {
        $self->request(
            method => 'GET',
            endpoint => '/logout'
        );
    }
}

1;

# ---------------------------------------------------------------------------
# new file: gorgone/gorgone/class/tpapi/clapi.pm
# ---------------------------------------------------------------------------
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::class::tpapi::clapi;

use strict;
use warnings;

# Holder for Centreon CLAPI credentials (username/password).
sub new {
    my ($class, %options) = @_;
    my $self = {};
    bless $self, $class;

    $self->{is_error} = 1;
    $self->{error} = 'configuration missing';
    $self->{username} = undef;
    $self->{password} = undef;

    return $self;
}

sub error {
    my ($self, %options) = @_;

    return $self->{error};
}

# Username, or undef while unconfigured.
sub get_username {
    my ($self, %options) = @_;

    if ($self->{is_error} == 1) {
        return undef;
    }

    return $self->{username};
}

# Password, or undef while unconfigured. With protected => 1, '$' and '"'
# are backslash-escaped for safe use inside a double-quoted shell argument.
sub get_password {
    my ($self, %options) = @_;

    if ($self->{is_error} == 1) {
        return undef;
    }

    if (defined($options{protected}) && $options{protected} == 1) {
        my $password = $self->{password};
        $password =~ s/\$/\\\$/g;
        $password =~ s/"/\\"/g;
        return $password;
    }

    return $self->{password};
}

# Validate and store the credentials. Returns 0 on success, 1 when either
# field is missing or empty.
sub set_configuration {
    my ($self, %options) = @_;

    if (!defined($options{config}) ||
        !defined($options{config}->{username}) ||
        $options{config}->{username} eq '') {
        $self->{error} = 'username configuration missing';
        return 1;
    }

    if (!defined($options{config}->{password}) ||
        $options{config}->{password} eq '') {
        $self->{error} = 'password configuration missing';
        return 1;
    }

    $self->{is_error} = 0;
    $self->{username} = $options{config}->{username};
    $self->{password} = $options{config}->{password};
    return 0;
}

# Build the 'centreon ... -a APPLYCFG' command for a poller.
sub get_applycfg_command {
    my ($self, %options) = @_;

    if ($self->{is_error} == 1) {
        return undef;
    }

    # FIX: escape '$' and '"' in the username exactly like the protected
    # password — a raw username inside double quotes could break or inject
    # into the generated shell command.
    my $username = $self->{username};
    $username =~ s/\$/\\\$/g;
    $username =~ s/"/\\"/g;

    return 'centreon -u "' . $username . '" -p "' .
$self->get_password(protected => 1) . '" -a APPLYCFG -v ' . $options{poller_id};
}

1;

# ---------------------------------------------------------------------------
# new file: gorgone/gorgone/modules/centreon/anomalydetection/class.pm
# ---------------------------------------------------------------------------
#
# Copyright 2020 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::modules::centreon::anomalydetection::class;

use base qw(gorgone::class::module);

use strict;
use warnings;
use gorgone::standard::library;
use gorgone::standard::constants qw(:all);
use gorgone::class::sqlquery;
use gorgone::class::http::http;
use JSON::XS;
use IO::Compress::Bzip2;
use MIME::Base64;
use EV;

my %handlers = (TERM => {}, HUP => {});
my ($connector);

# Module syncing Centreon anomaly-detection metrics with the SaaS platform.
sub new {
    my ($class, %options) = @_;
    $connector = $class->SUPER::new(%options);
    bless $connector, $class;

    $connector->{resync_time} = (defined($options{config}->{resync_time}) && $options{config}->{resync_time} =~ /(\d+)/) ? $1 : 600;
    $connector->{thresholds_sync_time} = (defined($options{config}->{thresholds_sync_time}) && $options{config}->{thresholds_sync_time} =~ /(\d+)/) ? $1 : 28800;
    $connector->{last_resync_time} = -1;
    $connector->{saas_token} = undef;
    $connector->{saas_url} = undef;
    $connector->{proxy_url} = undef; # format http://[username:password@]server:port
    $connector->{centreon_metrics} = {};
    $connector->{unregister_metrics_centreon} = {};

    $connector->set_signal_handlers();
    return $connector;
}

sub set_signal_handlers {
    my $self = shift;

    $SIG{TERM} = \&class_handle_TERM;
    $handlers{TERM}->{$self} = sub { $self->handle_TERM() };
    $SIG{HUP} = \&class_handle_HUP;
    $handlers{HUP}->{$self} = sub { $self->handle_HUP() };
}

sub handle_HUP {
    my $self = shift;
    $self->{reload} = 0;
}

sub handle_TERM {
    my $self = shift;
    $self->{logger}->writeLogDebug("[anomalydetection] $$ Receiving order to stop...");
    $self->{stop} = 1;
}

sub class_handle_TERM {
    foreach (keys %{$handlers{TERM}}) {
        &{$handlers{TERM}->{$_}}();
    }
}

sub class_handle_HUP {
    foreach (keys %{$handlers{HUP}}) {
        &{$handlers{HUP}->{$_}}();
    }
}

# Log and report an HTTP failure; returns 1 on error, 0 when the code
# matches $options{http_code_continue}.
sub http_check_error {
    my ($self, %options) = @_;

    if ($options{status} == 1) {
        $self->{logger}->writeLogError("[anomalydetection] -class- $options{endpoint} issue");
        return 1;
    }

    my $code = $self->{http}->get_code();
    if ($code !~ /$options{http_code_continue}/) {
        $self->{logger}->writeLogError("[anomalydetection] -class- $options{endpoint} issue - " . $self->{http}->get_message());
        return 1;
    }

    return 0;
}

# Return the instance id of the poller flagged localhost (undef if none).
sub get_localhost_poller {
    my ($self, %options) = @_;

    my $instance;
    foreach (keys %{$self->{pollers}}) {
        if ($self->{pollers}->{$_}->{localhost} == 1) {
            $instance = $_;
            last;
        }
    }

    return $instance;
}

sub get_poller {
    my ($self, %options) = @_;

    return $self->{pollers}->{$options{instance}};
}

# Write content to a file; returns 0 on success, 1 on open failure.
sub write_file {
    my ($self, %options) = @_;

    my $fh;
    if (!open($fh, '>', $options{file})) {
        $self->{logger}->writeLogError("[anomalydetection] -class- cannot open file '" . $options{file} . "': $!");
        return 1;
    }
    print $fh $options{content};
    close($fh);
    return 0;
}

# Call the SaaS API (through the optional proxy). Returns (0, decoded)
# on success, 1 on any transport/HTTP/JSON failure.
sub saas_api_request {
    my ($self, %options) = @_;

    my ($status, $payload);
    if (defined($options{payload})) {
        ($status, $payload) = $self->json_encode(argument => $options{payload});
        return 1 if ($status == 1);
    }
    my $accept = defined $options{accept} ? $options{accept} : '*/*';

    ($status, my $response) = $self->{http}->request(
        method => $options{method}, hostname => '',
        full_url => $self->{saas_url} . $options{endpoint},
        query_form_post => $payload,
        header => [
            'Accept-Type: application/json; charset=utf-8',
            'Content-Type: application/json; charset=utf-8',
            'Accept: ' . $accept,
            'x-api-key: ' . $self->{saas_token}
        ],
        proxyurl => $self->{proxy_url},
        curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0']
    );
    return 1 if ($self->http_check_error(status => $status, endpoint => $options{endpoint}, http_code_continue => $options{http_code_continue}) == 1);

    ($status, my $result) = $self->json_decode(argument => $response);
    return 1 if ($status == 1);

    return (0, $result);
}

# Load saas_url/saas_token and proxy settings from the 'options' table.
# Returns 0 when usable, 1 otherwise.
sub connection_informations {
    my ($self, %options) = @_;

    my ($status, $datas) = $self->{class_object_centreon}->custom_execute(
        request => "select `key`, `value` from options WHERE `key` IN ('saas_url', 'saas_token', 'proxy_url', 'proxy_port', 'proxy_user', 'proxy_password')",
        mode => 2
    );
    if ($status == -1) {
        $self->{logger}->writeLogError('[anomalydetection] -class- cannot get connection informations');
        return 1;
    }

    $self->{$_->[0]} = $_->[1] foreach (@$datas);

    if (!defined($self->{saas_url}) || $self->{saas_url} eq '') {
        $self->{logger}->writeLogInfo('[anomalydetection] -class- database: saas_url is not defined');
        return 1;
    }
    $self->{saas_url} =~ s/\/$//g;

    if (!defined($self->{saas_token}) || $self->{saas_token} eq '') {
        $self->{logger}->writeLogInfo('[anomalydetection] -class- database: saas_token is not defined');
        return 1;
    }

    # Assemble http://[user:pass@]host[:port] from the individual settings.
    if (defined($self->{proxy_url})) {
        if ($self->{proxy_url} eq '') {
            $self->{proxy_url} = undef;
            return 0;
        }

        $self->{proxy_url} = $self->{proxy_user} . ':' . $self->{proxy_password} . '@' . $self->{proxy_url}
            if (defined($self->{proxy_user}) && $self->{proxy_user} ne '' &&
                defined($self->{proxy_password}) && $self->{proxy_password} ne '');
        $self->{proxy_url} = $self->{proxy_url} . ':' . $self->{proxy_port}
            if (defined($self->{proxy_port}) && $self->{proxy_port} =~ /(\d+)/);
        $self->{proxy_url} = 'http://' . $self->{proxy_url};
    }

    return 0;
}

# Load poller definitions and anomaly metrics from the Centreon database
# into $self->{pollers} / $self->{centreon_metrics}.
sub get_centreon_anomaly_metrics {
    my ($self, %options) = @_;

    my ($status, $datas) = $self->{class_object_centreon}->custom_execute(
        request =>
            'SELECT nagios_server_id, cfg_dir, centreonbroker_cfg_path, localhost, ' .
            'engine_start_command, engine_stop_command, engine_restart_command, engine_reload_command, ' .
            'broker_reload_command ' .
            'FROM cfg_nagios ' .
            'JOIN nagios_server ' .
            'WHERE id = nagios_server_id',
        mode => 1,
        keys => 'nagios_server_id'
    );
    if ($status == -1) {
        $self->{logger}->writeLogError('[anomalydetection] cannot get configuration for pollers');
        return 1;
    }
    $self->{pollers} = $datas;

    ($status, $datas) = $self->{class_object_centreon}->custom_execute(
        request => '
            SELECT mas.*, hsr.host_host_id as host_id, nhr.nagios_server_id as instance_id
            FROM mod_anomaly_service mas
            LEFT JOIN (host_service_relation hsr, ns_host_relation nhr) ON
                (mas.service_id = hsr.service_service_id AND hsr.host_host_id = nhr.host_host_id)
        ',
        keys => 'id',
        mode => 1
    );
    if ($status == -1) {
        $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot get metrics from centreon');
        return 1;
    }

    $self->{centreon_metrics} = $datas;

    return 0;
}

# Retry persisting SaaS registrations that failed to be saved previously.
sub save_centreon_previous_register {
    my ($self, %options) = @_;

    my ($query, $query_append) = ('', '');
    my @bind_values = ();
    foreach (keys %{$self->{unregister_metrics_centreon}}) {
        $query .= $query_append .
            'UPDATE mod_anomaly_service SET' .
            ' saas_model_id = ?,' .
            ' saas_metric_id = ?,' .
            ' saas_creation_date = ?, ' .
            ' saas_update_date = ?' .
            ' WHERE `id` = ?';
        $query_append = ';';
        push @bind_values, $self->{unregister_metrics_centreon}->{$_}->{saas_model_id}, $self->{unregister_metrics_centreon}->{$_}->{saas_metric_id},
            $self->{unregister_metrics_centreon}->{$_}->{creation_date}, $self->{unregister_metrics_centreon}->{$_}->{creation_date}, $_;
    }

    if ($query ne '') {
        my $status = $self->{class_object_centreon}->transaction_query_multi(request => $query, bind_values => \@bind_values);
        if ($status == -1) {
            $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot save centreon previous register');
            return 1;
        }

        foreach (keys %{$self->{unregister_metrics_centreon}}) {
            $self->{centreon_metrics}->{$_}->{saas_creation_date} = $self->{unregister_metrics_centreon}->{$_}->{creation_date};
            $self->{centreon_metrics}->{$_}->{saas_update_date} = $self->{unregister_metrics_centreon}->{$_}->{creation_date};
            $self->{centreon_metrics}->{$_}->{saas_model_id} = $self->{unregister_metrics_centreon}->{$_}->{saas_model_id};
            $self->{centreon_metrics}->{$_}->{saas_metric_id} = $self->{unregister_metrics_centreon}->{$_}->{saas_metric_id};
        }
    }

    $self->{unregister_metrics_centreon} = {};
    return 0;
}

# Register new metrics on the SaaS platform, then persist the returned
# model/metric ids in mod_anomaly_service.
sub saas_register_metrics {
    my ($self, %options) = @_;

    my $register_centreon_metrics = {};
    my ($query, $query_append) = ('', '');
    my @bind_values = ();

    $self->{generate_metrics_lua} = 0;
    foreach (keys %{$self->{centreon_metrics}}) {
        # saas_creation_date is set when we need to register it
        next if (defined($self->{centreon_metrics}->{$_}->{saas_creation_date}));
        next if ($self->{centreon_metrics}->{$_}->{saas_to_delete} == 1);

        my $payload = {
            metrics => [
                {
                    name => $self->{centreon_metrics}->{$_}->{metric_name},
                    labels => {
                        host_id => "" . $self->{centreon_metrics}->{$_}->{host_id},
                        service_id => "" . $self->{centreon_metrics}->{$_}->{service_id}
                    },
                    preprocessingOptions => {
                        bucketize => {
                            bucketizeFunction => 'mean',
                            period => 300
                        }
                    }
                }
            ],
            algorithm => {
                type => $self->{centreon_metrics}->{$_}->{ml_model_name},
                options => {
                    period => '30d'
                }
            }
        };

        my ($status, $result) = $self->saas_api_request(
            endpoint => '/machinelearning',
            method => 'POST',
            payload => $payload,
            http_code_continue => '^2'
        );
        return 1 if ($status);

        $self->{logger}->writeLogDebug(
            "[anomalydetection] -class- saas: metric '$self->{centreon_metrics}->{$_}->{host_id}/$self->{centreon_metrics}->{$_}->{service_id}/$self->{centreon_metrics}->{$_}->{metric_name}' registered"
        );

        # Response shape (abridged):
        # {
        #   "metrics": [ { "name": "...", "labels": {...}, "id": "<metric uuid>" } ],
        #   "algorithm": { "type": "...", "options": {...} },
        #   "id": "<model uuid>"
        # }

        $self->{generate_metrics_lua} = 1;
        $register_centreon_metrics->{$_} = {
            saas_creation_date => time(),
            saas_model_id => $result->{id},
            saas_metric_id => $result->{metrics}->[0]->{id}
        };

        $query .= $query_append .
            'UPDATE mod_anomaly_service SET' .
            ' saas_model_id = ?,' .
            ' saas_metric_id = ?,' .
            ' saas_creation_date = ?,' .
            ' saas_update_date = ?' .
            ' WHERE `id` = ?';
        $query_append = ';';
        push @bind_values, $register_centreon_metrics->{$_}->{saas_model_id}, $register_centreon_metrics->{$_}->{saas_metric_id},
            $register_centreon_metrics->{$_}->{saas_creation_date}, $register_centreon_metrics->{$_}->{saas_creation_date}, $_;
    }

    return 0 if ($query eq '');

    my $status = $self->{class_object_centreon}->transaction_query_multi(request => $query, bind_values => \@bind_values);
    if ($status == -1) {
        # Keep the registrations so save_centreon_previous_register can retry.
        $self->{unregister_metrics_centreon} = $register_centreon_metrics;
        $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot update centreon register');
        return 1;
    }

    foreach (keys %$register_centreon_metrics) {
        $self->{centreon_metrics}->{$_}->{saas_creation_date} = $register_centreon_metrics->{$_}->{saas_creation_date};
        $self->{centreon_metrics}->{$_}->{saas_update_date} = $register_centreon_metrics->{$_}->{saas_creation_date};
        $self->{centreon_metrics}->{$_}->{saas_metric_id} = $register_centreon_metrics->{$_}->{saas_metric_id};
        $self->{centreon_metrics}->{$_}->{saas_model_id} = $register_centreon_metrics->{$_}->{saas_model_id};
    }

    return 0;
}

# Delete SaaS models flagged saas_to_delete, then purge the matching rows
# from mod_anomaly_service.
sub saas_delete_metrics {
    my ($self, %options) = @_;

    my $delete_ids = [];
    foreach (keys %{$self->{centreon_metrics}}) {
        next if ($self->{centreon_metrics}->{$_}->{saas_to_delete} == 0);

        if (defined($self->{centreon_metrics}->{$_}->{saas_model_id})) {
            my ($status, $result) = $self->saas_api_request(
                endpoint => '/machinelearning/' . $self->{centreon_metrics}->{$_}->{saas_model_id},
                method => 'DELETE',
                http_code_continue => '^(?:2|404)'
            );
            next if ($status);

            $self->{logger}->writeLogDebug(
                "[anomalydetection] -class- saas: metric '$self->{centreon_metrics}->{$_}->{service_id}/$self->{centreon_metrics}->{$_}->{metric_name}' deleted"
            );

            next if (!defined($result->{message}) ||
                $result->{message} !~ /machine learning request id is not found/i);
        }

        push @$delete_ids, $_;
    }

    return 0 if (scalar(@$delete_ids) <= 0);

    my $status = $self->{class_object_centreon}->transaction_query(
        request => 'DELETE FROM mod_anomaly_service WHERE id IN (' . join(', ', @$delete_ids) . ')'
    );
    if ($status == -1) {
        $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot delete centreon saas');
        return 1;
    }

    return 0;
}

# Write the broker lua filter file (host -> service -> metric) on the
# localhost poller and ask broker to reload it.
sub generate_lua_filter_file {
    my ($self, %options) = @_;

    my $data = { filters => { } };
    foreach (values %{$self->{centreon_metrics}}) {
        next if ($_->{saas_to_delete} == 1);
        next if (!defined($_->{saas_creation_date}));
        next if (!defined($_->{host_id}));

        $data->{filters}->{ $_->{host_id} } = {}
            if (!defined($data->{filters}->{ $_->{host_id} }));
        $data->{filters}->{ $_->{host_id} }->{ $_->{service_id} } = {}
            if (!defined($data->{filters}->{ $_->{host_id} }->{ $_->{service_id} }));
        $data->{filters}->{ $_->{host_id} }->{ $_->{service_id} }->{ $_->{metric_name} } = 1;
    }

    my ($status, $content) = $self->json_encode(argument => $data);
    if ($status == 1) {
        $self->{logger}->writeLogError('[anomalydetection] -class- cannot encode lua filter file');
        return 1;
    }

    my $instance = $self->get_localhost_poller();
    # FIX: the original re-tested the stale $status from json_encode() here,
    # so a missing localhost poller was never detected and $instance could be
    # used while undef. Test the actual lookup result instead.
    if (!defined($instance)) {
        $self->{logger}->writeLogError('[anomalydetection] -class- cannot find localhost poller');
        return 1;
    }

    my $poller = $self->get_poller(instance => $instance);
    my $file = $poller->{centreonbroker_cfg_path} . '/anomaly-detection-filters.json';
    if (! -w $poller->{centreonbroker_cfg_path}) {
        $self->{logger}->writeLogError("[anomalydetection] -class- cannot write file '" . $file . "'");
        return 1;
    }

    return 1 if ($self->write_file(file => $file, content => $content));

    $self->{logger}->writeLogDebug('[anomalydetection] -class- reload centreon-broker');

    $self->send_internal_action({
        action => 'COMMAND',
        token => $options{token},
        data => {
            content => [ { command => 'sudo ' . $poller->{broker_reload_command} } ]
        }
    });

    return 0;
}

# Fetch predicted thresholds from the SaaS platform, push them to the
# owning pollers (bzip2+base64 over a COMMAND action) and record the sync.
sub saas_get_predicts {
    my ($self, %options) = @_;

    my ($query, $query_append, $status) = ('', '');
    my $engine_reload = {};
    foreach (keys %{$self->{centreon_metrics}}) {
        next if ($self->{centreon_metrics}->{$_}->{saas_to_delete} == 1);
        #next if (!defined($self->{centreon_metrics}->{$_}->{thresholds_file}) ||
        #    $self->{centreon_metrics}->{$_}->{thresholds_file} eq '');
        next if (!defined($self->{centreon_metrics}->{$_}->{saas_update_date}) ||
            $self->{centreon_metrics}->{$_}->{saas_update_date} > time() - $self->{thresholds_sync_time});

        ($status, my $result) = $self->saas_api_request(
            endpoint => '/machinelearning/' . $self->{centreon_metrics}->{$_}->{saas_model_id} . '/predicts',
            method => 'GET',
            http_code_continue => '^2',
            accept => 'application/vnd.centreon.v2+json'
        );
        next if ($status);

        $self->{logger}->writeLogDebug(
            "[anomalydetection] -class- saas: get predict metric '$self->{centreon_metrics}->{$_}->{host_id}/$self->{centreon_metrics}->{$_}->{service_id}/$self->{centreon_metrics}->{$_}->{metric_name}'"
        );

        next if (!defined($result->[0]) || !defined($result->[0]->{predict}));

        my $data = [
            {
                host_id => $self->{centreon_metrics}->{$_}->{host_id},
                service_id => $self->{centreon_metrics}->{$_}->{service_id},
                metric_name => $self->{centreon_metrics}->{$_}->{metric_name},
                predict => $result->[0]->{predict}
            }
        ];
        ($status, my $content) = $self->json_encode(argument => $data);
        next if ($status == 1);

        my $encoded_content;
        if (!IO::Compress::Bzip2::bzip2(\$content, \$encoded_content)) {
            $self->{logger}->writeLogError('[anomalydetection] -class- cannot compress content: ' . $IO::Compress::Bzip2::Bzip2Error);
            next;
        }

        $encoded_content = MIME::Base64::encode_base64($encoded_content, '');

        my $poller = $self->get_poller(instance => $self->{centreon_metrics}->{$_}->{instance_id});
        $self->send_internal_action({
            action => 'COMMAND',
            target => $self->{centreon_metrics}->{$_}->{instance_id},
            token => $options{token},
            data => {
                content => [ { command => 'mkdir -p ' . $poller->{cfg_dir} . '/anomaly/' . '; echo -n ' . $encoded_content . ' | base64 -d | bzcat -d > "' . $poller->{cfg_dir} . '/anomaly/' . $_ . '.json"' } ]
            }
        });

        $engine_reload->{ $self->{centreon_metrics}->{$_}->{instance_id} } = [] if (!defined($engine_reload->{ $self->{centreon_metrics}->{$_}->{instance_id} }));
        push @{$engine_reload->{ $self->{centreon_metrics}->{$_}->{instance_id} }}, $poller->{cfg_dir} . '/anomaly/' . $_ . '.json';

        $query .= $query_append .
            'UPDATE mod_anomaly_service SET' .
            ' saas_update_date = ' . time() .
            ' WHERE `id` = ' .
$_; + $query_append = ';'; + } + + return 0 if ($query eq ''); + + foreach my $instance_id (keys %$engine_reload) { + $self->{logger}->writeLogDebug('[anomalydetection] -class- send engine threshold files external command ' . $instance_id); + my $contents = []; + foreach (@{$engine_reload->{$instance_id}}) { + push @$contents, { + target => $instance_id, + command => 'EXTERNALCMD', + param => '[' . time() . '] NEW_THRESHOLDS_FILE;' . $_ + }; + } + + $self->send_internal_action({ + action => 'CENTREONCOMMAND', + token => $options{token}, + data => { + content => $contents + } + }); + } + + $status = $self->{class_object_centreon}->transaction_query_multi(request => $query); + if ($status == -1) { + $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot update predicts'); + return 1; + } + + return 0; +} + +sub action_saaspredict { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[anomalydetection] -class - start saaspredict'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action saaspredict proceed' }); + + $self->saas_get_predicts(token => $options{token}); + + $self->{logger}->writeLogDebug('[anomalydetection] -class- finish saaspredict'); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action saaspredict finished' }); + return 0; +} + +sub action_saasregister { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[anomalydetection] -class- start saasregister'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action saasregister proceed' }); + + if ($self->connection_informations()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get connection informations' 
}); + return 1; + } + + if ($self->save_centreon_previous_register()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot save previous register' }); + return 1; + } + + if ($self->get_centreon_anomaly_metrics()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get metrics from centreon' }); + return 1; + } + + if ($self->saas_register_metrics()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get declare metrics in saas' }); + return 1; + } + + if ($self->{generate_metrics_lua} == 1) { + $self->generate_lua_filter_file(token => $options{token}); + } + + if ($self->saas_delete_metrics()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot delete metrics in saas' }); + return 1; + } + + $self->{logger}->writeLogDebug('[anomalydetection] -class- finish saasregister'); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action saasregister finished' }); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[anomalydetection] -class- $$ has quit"); + exit(0); + } + + if (time() - $connector->{resync_time} > $connector->{last_resync_time}) { + $connector->{last_resync_time} = time(); + $connector->action_saasregister(); + $connector->action_saaspredict(); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn} . 
';mysql_multi_statements=1', + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + + $self->{class_object_centreon} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-anomalydetection', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONADREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/anomalydetection/hooks.pm b/gorgone/gorgone/modules/centreon/anomalydetection/hooks.pm new file mode 100644 index 00000000000..479287383ca --- /dev/null +++ b/gorgone/gorgone/modules/centreon/anomalydetection/hooks.pm @@ -0,0 +1,158 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::anomalydetection::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::anomalydetection::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'anomalydetection'; +use constant EVENTS => [ + { event => 'CENTREONADREADY' } +]; + +my $config_core; +my $config; +my ($config_db_centreon, $config_db_centstorage); +my $process = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config_db_centstorage = $options{config_db_centstorage}; + $config->{resync_time} = defined($config->{resync_time}) && $config->{resync_time} =~ /(\d+)/ ? $1 : 600; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CENTREONADREADY') { + $process->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$process->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-anomalydetection: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-anomalydetection', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($process->{running}) && $process->{running} == 1) { + $options{logger}->writeLogDebug("[anomalydetection] Send TERM signal $process->{pid}"); + CORE::kill('TERM', $process->{pid}); + } +} + +sub kill { + my 
(%options) = @_; + + if ($process->{running} == 1) { + $options{logger}->writeLogDebug("[anomalydetection] Send KILL signal for pool"); + CORE::kill('KILL', $process->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($process->{pid}) || $process->{pid} != $pid); + + $process = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($process->{running}) && $process->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[anomalydetection] Create module 'anomalydetection' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-anomalydetection'; + my $module = gorgone::modules::centreon::anomalydetection::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[anomalydetection] PID $child_pid (gorgone-anomalydetection)"); + $process = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/class.pm b/gorgone/gorgone/modules/centreon/audit/class.pm new file mode 100644 index 00000000000..b579299e72a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/class.pm @@ -0,0 +1,372 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use gorgone::class::sqlquery; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +my @sampling_modules = ( + 'system::cpu', + 'system::diskio' +); +my @metrics_modules = ( + 'centreon::database', + 'centreon::packages', + 'centreon::pluginpacks', + 'centreon::realtime', + 'centreon::rrd', + 'system::cpu', + 'system::disk', + 'system::diskio', + 'system::load', + 'system::memory', + 'system::os' +); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{audit_tokens} = {}; + $connector->{sampling} = {}; + $connector->{sampling_modules} = {}; + $connector->{metrics_modules} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[audit] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub 
class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub load_modules { + my ($self, %options) = @_; + + foreach (@sampling_modules) { + my $mod_name = 'gorgone::modules::centreon::audit::sampling::' . $_; + my $ret = gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, + module => $mod_name, + error_msg => "Cannot load sampling module '$_'" + ); + next if ($ret == 1); + $self->{sampling_modules}->{$_} = $mod_name->can('sample'); + } + + foreach (@metrics_modules) { + my $mod_name = 'gorgone::modules::centreon::audit::metrics::' . $_; + my $ret = gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, + module => $mod_name, + error_msg => "Cannot load metrics module '$_'" + ); + next if ($ret == 1); + $self->{metrics_modules}->{$_} = $mod_name->can('metrics'); + } +} + +sub action_centreonauditnode { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[audit] action node starting'); + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action node starting' }); + + my $metrics = {}; + foreach my $name (keys %{$self->{metrics_modules}}) { + my $result = $self->{metrics_modules}->{$name}->( + os => $self->{os}, + centreon_sqlquery => $self->{centreon_sqlquery}, + centstorage_sqlquery => $self->{centstorage_sqlquery}, + sampling => $self->{sampling}, + params => $options{data}->{content}, + logger => $self->{logger} + ); + next if (!defined($result)); + $metrics->{$name} = $result; + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + message => 'action node finished', + metrics => $metrics + } + ); + $self->{logger}->writeLogDebug('[audit] action node finished'); +} + +sub action_centreonauditnodelistener { + my 
($self, %options) = @_; + + return 0 if (!defined($options{token}) || $options{token} !~ /^audit-(.*?)-(.*)$/); + my ($audit_token, $audit_node) = ($1, $2); + + return 0 if (!defined($self->{audit_tokens}->{ $audit_token }) || !defined($self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node })); + + if ($options{data}->{code} == GORGONE_ACTION_FINISH_KO) { + $self->{logger}->writeLogError("[audit] audit node listener - node '" . $audit_node . "' error"); + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_code} = 2; + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_message} = $options{data}->{data}->{message}; + } elsif ($options{data}->{code} == GORGONE_ACTION_FINISH_OK) { + $self->{logger}->writeLogDebug("[audit] audit node listener - node '" . $audit_node . "' ok"); + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_code} = 0; + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_message} = 'ok'; + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{metrics} = $options{data}->{data}->{metrics}; + } else { + return 0; + } + $self->{audit_tokens}->{ $audit_token }->{done_nodes}++; + + if ($self->{audit_tokens}->{ $audit_token }->{done_nodes} == $self->{audit_tokens}->{ $audit_token }->{count_nodes}) { + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $audit_token, + instant => 1, + data => { + message => 'finished', + audit => $self->{audit_tokens}->{ $audit_token } + } + ); + delete $self->{audit_tokens}->{ $audit_token }; + return 1; + } + + my $progress = $self->{audit_tokens}->{ $audit_token }->{done_nodes} * 100 / $self->{audit_tokens}->{ $audit_token }->{count_nodes}; + my $div = int(int($progress) / 5); + if (int($progress) % 3 == 0) { + $self->send_log( + code => GORGONE_MODULE_CENTREON_AUDIT_PROGRESS, + token => $audit_token, + instant => 1, + data => { + message => 'current progress', + complete => sprintf('%.2f', 
$progress) + } + ); + } + + return 1; +} + +sub action_centreonauditschedule { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[audit] starting schedule action'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action schedule proceed' }); + + my $params = {}; + + my ($status, $datas) = $self->{centstorage_sqlquery}->custom_execute( + request => 'SELECT RRDdatabase_path, RRDdatabase_status_path FROM config', + mode => 2 + ); + if ($status == -1) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find centstorage config' }); + $self->{logger}->writeLogError('[audit] Cannot find centstorage configuration'); + return 1; + } + $params->{rrd_metrics_path} = $datas->[0]->[0]; + $params->{rrd_status_path} = $datas->[0]->[1]; + + ($status, $datas) = $self->{centreon_sqlquery}->custom_execute( + request => "SELECT id, name FROM nagios_server WHERE ns_activate = '1'", + mode => 2 + ); + if ($status == -1) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find nodes configuration' }); + $self->{logger}->writeLogError('[audit] Cannot find nodes configuration'); + return 1; + } + + $self->{audit_tokens}->{ $options{token} } = { + started => time(), + count_nodes => 0, + done_nodes => 0, + nodes => {} + }; + foreach (@$datas) { + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgone-audit', + event => 'CENTREONAUDITNODELISTENER', + token => 'audit-' . $options{token} . '-' . $_->[0], + timeout => 300 + } + ] + }); + $self->send_internal_action({ + action => 'CENTREONAUDITNODE', + target => $_->[0], + token => 'audit-' . $options{token} . '-' . 
$_->[0], + data => { + instant => 1, + content => $params + } + }); + + $self->{audit_tokens}->{ $options{token} }->{nodes}->{$_->[0]} = { + name => $_->[1], + status_code => 1, + status_message => 'wip' + }; + $self->{audit_tokens}->{ $options{token} }->{count_nodes}++; + } + + return 0; +} + +sub sampling { + my ($self, %options) = @_; + + return if (defined($self->{sampling_last}) && (time() - $self->{sampling_last}) < 60); + $self->{logger}->writeLogDebug('[audit] sampling starting'); + foreach (keys %{$self->{sampling_modules}}) { + $self->{sampling_modules}->{$_}->(sampling => $self->{sampling}); + } + + $self->{sampling_last} = time(); +} + +sub get_system { + my ($self, %options) = @_; + + $self->{os} = 'unknown'; + + my ($rv, $message, $content) = gorgone::standard::misc::slurp(file => '/etc/os-release'); + if ($rv && $content =~ /^ID="(.*?)"/mi) { + $self->{os} = $1; + return ; + } + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'lsb_release -a', + timeout => 5, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error == 0 && $stdout =~ /^Description:\s+(.*)$/mi) { + $self->{os} = $1; + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[audit] $$ has quit"); + exit(0); + } + + $connector->sampling(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-audit', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONAUDITREADY', + data => {} + }); + + if (defined($self->{config_db_centreon})) { + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + 
password => $self->{config_db_centreon}->{password}, + force => 0, + logger => $self->{logger} + ); + $self->{centreon_sqlquery} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + } + + if (defined($self->{config_db_centstorage})) { + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 0, + logger => $self->{logger} + ); + $self->{centstorage_sqlquery} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + } + + $self->load_modules(); + $self->get_system(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/hooks.pm b/gorgone/gorgone/modules/centreon/audit/hooks.pm new file mode 100644 index 00000000000..b95de9dedd1 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/hooks.pm @@ -0,0 +1,161 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::audit::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'audit'; +use constant EVENTS => [ + { event => 'CENTREONAUDITSCHEDULE', uri => '/schedule', method => 'POST' }, + { event => 'CENTREONAUDITNODE', uri => '/node', method => 'POST' }, + { event => 'CENTREONAUDITNODELISTENER' }, + { event => 'CENTREONAUDITREADY' } +]; + +my $config_core; +my $config; +my $audit = {}; +my $stop = 0; +my ($config_db_centreon, $config_db_centstorage); + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_db_centreon = $options{config_db_centreon}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CENTREONAUDITREADY') { + $audit->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$audit->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-audit: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-audit', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($audit->{running}) && $audit->{running} == 1) { + $options{logger}->writeLogDebug("[audit] Send TERM signal $audit->{pid}"); + CORE::kill('TERM', $audit->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($audit->{running} == 1) { + $options{logger}->writeLogDebug("[audit] Send 
KILL signal for child"); + CORE::kill('KILL', $audit->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($audit->{pid}) || $audit->{pid} != $pid); + + $audit = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($audit->{running}) && $audit->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[audit] Create module 'audit' process"); + + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-audit'; + my $module = gorgone::modules::centreon::audit::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[audit] PID $child_pid (gorgone-audit)"); + $audit = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/database.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/database.pm new file mode 100644 index 00000000000..de32ac931ad --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/database.pm @@ -0,0 +1,110 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::centreon::database; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{centstorage_sqlquery})); + + my $metrics = { + status_code => 0, + status_message => 'ok', + space_free_bytes => 0, + space_used_bytes => 0, + databases => {} + }; + + my ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => q{show variables like 'innodb_file_per_table'}, + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get innodb_file_per_table configuration'; + return $metrics; + } + my $innodb_per_table = 0; + $innodb_per_table = 1 if ($datas->[0]->[1] =~ /on/i); + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => q{SELECT table_schema, table_name, engine, data_free, data_length+index_length as data_used, (DATA_FREE / (DATA_LENGTH+INDEX_LENGTH)) as TAUX_FRAG FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND engine IN ('InnoDB', 'MyISAM')}, + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get schema information'; + return $metrics; + } + + my $innodb_ibdata_done = 0; + foreach my $row (@$datas) { + if (!defined($metrics->{databases}->{ $row->[0] })) { + $metrics->{databases}->{ $row->[0] } = { + space_free_bytes => 0, + space_used_bytes => 0, + tables => {} + }; + } + + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] 
} = {}; + + # For a table located in the shared tablespace, this is the free space of the shared tablespace. + if ($row->[2] !~ /innodb/i || $innodb_per_table == 1) { + $metrics->{space_free_bytes} += $row->[3]; + $metrics->{databases}->{ $row->[0] }->{space_free_bytes} += $row->[3]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{space_free_bytes} = $row->[3]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{frag} = $row->[5]; + } elsif ($innodb_ibdata_done == 0) { + $metrics->{space_free_bytes} += $row->[3]; + $innodb_ibdata_done = 1; + } + $metrics->{space_used_bytes} += $row->[4]; + $metrics->{databases}->{ $row->[0] }->{space_used_bytes} += $row->[4]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{space_used_bytes} = $row->[4]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{engine} = $row->[2]; + } + + my $rm_table_size = 10 * 1024 * 1024; + + $metrics->{space_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{space_free_bytes}, format => '%.2f')); + $metrics->{space_used_human} = join('', gorgone::standard::misc::scale(value => $metrics->{space_used_bytes}, format => '%.2f')); + foreach my $db (keys %{$metrics->{databases}}) { + $metrics->{databases}->{$db}->{space_used_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{space_used_bytes}, format => '%.2f')); + $metrics->{databases}->{$db}->{space_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{space_free_bytes}, format => '%.2f')); + foreach my $table (keys %{$metrics->{databases}->{$db}->{tables}}) { + if ($metrics->{databases}->{$db}->{tables}->{$table}->{space_used_bytes} < $rm_table_size) { + delete $metrics->{databases}->{$db}->{tables}->{$table}; + next; + } + $metrics->{databases}->{$db}->{tables}->{$table}->{space_free_human} = join('', gorgone::standard::misc::scale(value => 
$metrics->{databases}->{$db}->{tables}->{$table}->{space_free_bytes}, format => '%.2f')) + if (defined($metrics->{databases}->{$db}->{tables}->{$table}->{space_free_bytes})); + $metrics->{databases}->{$db}->{tables}->{$table}->{space_used_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{tables}->{$table}->{space_used_bytes}, format => '%.2f')); + } + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/packages.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/packages.pm new file mode 100644 index 00000000000..a8bacc19397 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/packages.pm @@ -0,0 +1,94 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::centreon::packages; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub dpkg_list { + my (%options) = @_; + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => "dpkg-query -W -f='\${binary:Package}\\t\${Version}\\n' 'centreon*'", + timeout => 30, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error != 0 || $return_code != 0) { + $options{metrics}->{status_code} = 1; + $options{metrics}->{status_message} = $stdout; + return ; + } + + foreach (split(/\n/, $stdout)) { + my ($name, $version) = split(/\t/); + push @{$options{metrics}->{list}}, [$name, $version]; + } +} + +sub rpm_list { + my (%options) = @_; + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'rpm -qa --queryformat "%{NAME}\t%{RPMTAG_VERSION}-%{RPMTAG_RELEASE}\n" | grep centreon', + timeout => 30, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error != 0 || $return_code != 0) { + $options{metrics}->{status_code} = 1; + $options{metrics}->{status_message} = $stdout; + return ; + } + + foreach (split(/\n/, $stdout)) { + my ($name, $version) = split(/\t/); + push @{$options{metrics}->{list}}, [$name, $version]; + } +} + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + list => [] + }; + + if ($options{os} =~ /Debian|Ubuntu/i) { + dpkg_list(metrics => $metrics); + } elsif ($options{os} =~ /CentOS|Redhat|rhel|almalinux|rocky/i) { + rpm_list(metrics => $metrics); + } elsif ($options{os} eq 'ol' || $options{os} =~ /Oracle Linux/i) { + rpm_list(metrics => $metrics); + } else { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'unsupported os'; + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/pluginpacks.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/pluginpacks.pm 
new file mode 100644 index 00000000000..fa790e20225 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/pluginpacks.pm @@ -0,0 +1,53 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::centreon::pluginpacks; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{centreon_sqlquery})); + + my $metrics = { + status_code => 0, + status_message => 'ok', + installed => [] + }; + + my ($status, $datas) = $options{centreon_sqlquery}->custom_execute( + request => "SELECT slug, version FROM mod_ppm_pluginpack", + mode => 2 + ); + if ($status == -1) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get plugin-packs installed'; + return $metrics; + } + foreach (@$datas) { + push @{$metrics->{installed}}, { slug => $_->[0], version => $_->[1] }; + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/realtime.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/realtime.pm new file mode 100644 index 00000000000..41567275d25 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/realtime.pm @@ -0,0 +1,99 @@ +# +# Copyright 2019 Centreon 
(http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::centreon::realtime; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{centstorage_sqlquery})); + + my $metrics = { + status_code => 0, + status_message => 'ok', + hosts_count => 0, + services_count => 0, + hostgroups_count => 0, + servicegroups_count => 0, + acl_count => 0 + }; + + my ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => "SELECT count(*) FROM instances, hosts, services WHERE instances.running = '1' AND hosts.instance_id = instances.instance_id AND hosts.enabled = '1' AND services.host_id = hosts.host_id AND services.enabled = '1'", + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of services'; + return $metrics; + } + $metrics->{services_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => "SELECT count(*) FROM instances, hosts WHERE instances.running = '1' AND hosts.instance_id = instances.instance_id AND hosts.enabled = '1'", + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + 
$metrics->{status_message} = 'cannot get number of hosts'; + return $metrics; + } + $metrics->{hosts_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => 'SELECT count(*) FROM hostgroups', + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of hostgroups'; + return $metrics; + } + $metrics->{hostgroups_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => 'SELECT count(*) FROM servicegroups', + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of servicegroups'; + return $metrics; + } + $metrics->{servicegroups_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => 'SELECT count(*) FROM centreon_acl', + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of acl'; + return $metrics; + } + $metrics->{acl_count} = $datas->[0]->[0]; + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/rrd.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/rrd.pm new file mode 100644 index 00000000000..c2c961b190e --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/rrd.pm @@ -0,0 +1,68 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::centreon::rrd; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{params}->{rrd_metrics_path})); + return undef if (! -d $options{params}->{rrd_metrics_path}); + + my $metrics = { + status_code => 0, + status_message => 'ok', + rrd_metrics_count => 0, + rrd_status_count => 0, + rrd_metrics_bytes => 0, + rrd_status_bytes => 0, + rrd_metrics_outdated => 0, + rrd_status_outdated => 0 + }; + + my $outdated_time = time() - (180 * 86400); + my $dh; + foreach my $type (('metrics', 'status')) { + if (!opendir($dh, $options{params}->{'rrd_' . $type . '_path'})) { + $metrics->{status_code} = 1; + $metrics->{status_message} = "Could not open directoy for reading: $!"; + next; + } + while (my $file = readdir($dh)) { + next if ($file !~ /\.rrd/); + $metrics->{'rrd_' . $type . '_count'}++; + my @attrs = stat($options{params}->{'rrd_' . $type . '_path'} . '/' . $file); + $metrics->{'rrd_' . $type . '_bytes'} += $attrs[7] if (defined($attrs[7])); + $metrics->{'rrd_' . $type . 
'_outdated'}++ if ($attrs[9] < $outdated_time); + } + closedir($dh); + } + + $metrics->{rrd_metrics_human} = join('', gorgone::standard::misc::scale(value => $metrics->{rrd_metrics_bytes}, format => '%.2f')); + $metrics->{rrd_status_human} = join('', gorgone::standard::misc::scale(value => $metrics->{rrd_status_bytes}, format => '%.2f')); + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/cpu.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/cpu.pm new file mode 100644 index 00000000000..ea8fad5bc0f --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/cpu.pm @@ -0,0 +1,62 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::system::cpu; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + num_cpu => 0 + }; + if ($options{sampling}->{cpu}->{status_code} != 0) { + $metrics->{status_code} = $options{sampling}->{cpu}->{status_code}; + $metrics->{status_message} = $options{sampling}->{cpu}->{status_message}; + return $metrics; + } + + $metrics->{num_cpu} = $options{sampling}->{cpu}->{num_cpu}; + foreach (([1, '1min'], [4, '5min'], [14, '15min'], [59, '60min'])) { + $metrics->{ 'avg_used_' . $_->[1] } = 'n/a'; + $metrics->{ 'avg_iowait_' . $_->[1] } = 'n/a'; + next if (!defined($options{sampling}->{cpu}->{values}->[ $_->[0] ])); + $metrics->{ 'avg_used_' . $_->[1] } = sprintf( + '%.2f', + 100 - ( + 100 * ($options{sampling}->{cpu}->{values}->[0]->[1] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[1]) + / ($options{sampling}->{cpu}->{values}->[0]->[0] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[0]) + ) + ); + $metrics->{ 'avg_iowait_' . $_->[1] } = sprintf( + '%.2f', + 100 * ($options{sampling}->{cpu}->{values}->[0]->[2] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[2]) + / ($options{sampling}->{cpu}->{values}->[0]->[0] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[0]) + ); + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/disk.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/disk.pm new file mode 100644 index 00000000000..ad9a59433d0 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/disk.pm @@ -0,0 +1,68 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::system::disk; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + partitions => {} + }; + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'df -P -k -T', + timeout => 5, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error != 0) { + $metrics->{status_code} = 1; + $metrics->{status_message} = $stdout; + return $metrics; + } + + foreach my $line (split(/\n/, $stdout)) { + next if ($line !~ /^(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(.*)/); + $metrics->{partitions}->{$7} = { + mount => $7, + filesystem => $1, + type => $2, + space_size_bytes => $3 * 1024, + space_size_human => join('', gorgone::standard::misc::scale(value => $3 * 1024, format => '%.2f')), + space_used_bytes => $4 * 1024, + space_used_human => join('', gorgone::standard::misc::scale(value => $4 * 1024, format => '%.2f')), + space_free_bytes => $5 * 1024, + space_free_human => join('', gorgone::standard::misc::scale(value => $5 * 1024, format => '%.2f')), + inodes_used_percent => $6 + }; + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/diskio.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/diskio.pm new file mode 100644 index 
00000000000..387d41dea6a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/diskio.pm @@ -0,0 +1,75 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::system::diskio; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + partitions => {} + }; + if ($options{sampling}->{diskio}->{status_code} != 0) { + $metrics->{status_code} = $options{sampling}->{diskio}->{status_code}; + $metrics->{status_message} = $options{sampling}->{diskio}->{status_message}; + return $metrics; + } + + foreach my $partname (keys %{$options{sampling}->{diskio}->{partitions}}) { + $metrics->{partitions}->{$partname} = {}; + foreach (([1, '1min'], [4, '5min'], [14, '15min'], [59, '60min'])) { + $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_bytes' } = 'n/a'; + $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_bytes' } = 'n/a'; + $metrics->{partitions}->{$partname}->{ 'read_time_' . $_->[1] . '_ms' } = 'n/a'; + $metrics->{partitions}->{$partname}->{ 'write_time_' . $_->[1] . 
'_ms' } = 'n/a'; + next if (!defined($options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ])); + + $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_bytes' } = sprintf( + '%.2f', + ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[1] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[1]) + / ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[0] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[0]) + ); + $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_human' } = join('', gorgone::standard::misc::scale(value => $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_bytes' }, format => '%.2f')); + + $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_bytes' } = sprintf( + '%.2f', + ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[2] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[2]) + / ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[0] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[0]) + ); + $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_human' } = join('', gorgone::standard::misc::scale(value => $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_bytes' }, format => '%.2f')); + + $metrics->{partitions}->{$partname}->{ 'read_time_' . $_->[1] . '_ms' } = sprintf( + '%s', ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[3] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[3]) + ); + $metrics->{partitions}->{$partname}->{ 'write_time_' . $_->[1] . 
'_ms' } = sprintf( + '%s', ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[4] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[4]) + ); + } + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/load.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/load.pm new file mode 100644 index 00000000000..eb4dba4a5b3 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/load.pm @@ -0,0 +1,53 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::system::load; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok' + }; + my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/loadavg'); + if ($ret == 0) { + $metrics->{status_code} = 1; + $metrics->{status_message} = $message; + return $metrics; + } + + if ($buffer !~ /^([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)/mi) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot find load information'; + return $metrics; + } + + $metrics->{load1m} = $1; + $metrics->{load5m} = $2; + $metrics->{load15m} = $3; + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/memory.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/memory.pm new file mode 100644 index 00000000000..98f5a734ea8 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/memory.pm @@ -0,0 +1,70 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::system::memory; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + ram_total_bytes => 0, + ram_available_bytes => 0, + swap_total_bytes => 0, + swap_free_bytes => 0 + }; + my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/meminfo'); + if ($ret == 0) { + $metrics->{status_code} = 1; + $metrics->{status_message} = $message; + return $metrics; + } + + if ($buffer !~ /^MemTotal:\s+(\d+)/mi) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot find memory information'; + return $metrics; + } + + $metrics->{ram_total_bytes} = $1 * 1024; + $metrics->{ram_total_human} = join('', gorgone::standard::misc::scale(value => $metrics->{ram_total_bytes}, format => '%.2f')); + + if ($buffer =~ /^MemAvailable:\s+(\d+)/mi) { + $metrics->{ram_available_bytes} = $1 * 1024; + $metrics->{ram_available_human} = join('', gorgone::standard::misc::scale(value => $metrics->{ram_available_bytes}, format => '%.2f')); + } + if ($buffer =~ /^SwapTotal:\s+(\d+)/mi) { + $metrics->{swap_total_bytes} = $1 * 1024; + $metrics->{swap_total_human} = join('', gorgone::standard::misc::scale(value => $metrics->{swap_total_bytes}, format => '%.2f')); + } + if ($buffer =~ /^SwapFree:\s+(\d+)/mi) { + $metrics->{swap_free_bytes} = $1 * 1024; + $metrics->{swap_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{swap_free_bytes}, format => '%.2f')); + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/os.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/os.pm new file mode 100644 index 00000000000..1bd0d4a5b1b --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/os.pm @@ -0,0 +1,56 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution 
that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::system::os; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + my $metrics = { + kernel => { + status_code => 0, + status_message => 'ok', + value => 'n/a' + } + }; + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'uname -a', + timeout => 5, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error != 0) { + $metrics->{kernel}->{status_code} = 1; + $metrics->{kernel}->{status_message} = $stdout; + } else { + $metrics->{kernel}->{value} = $stdout; + } + + $metrics->{os}->{value} = $options{os}; + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/sampling/system/cpu.pm b/gorgone/gorgone/modules/centreon/audit/sampling/system/cpu.pm new file mode 100644 index 00000000000..3dd99e412bc --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/sampling/system/cpu.pm @@ -0,0 +1,68 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::sampling::system::cpu; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub sample { + my (%options) = @_; + + if (!defined($options{sampling}->{cpu})) { + $options{sampling}->{cpu} = { + status_code => 0, + status_message => 'ok', + round => 0, + values => [] + }; + } + + $options{sampling}->{cpu}->{round}++; + my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/stat'); + if ($ret == 0) { + $options{sampling}->{cpu}->{status_code} = 1; + $options{sampling}->{cpu}->{status_message} = $message; + return ; + } + + if ($buffer !~ /^cpu\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)/) { + $options{sampling}->{cpu}->{status_code} = 1; + $options{sampling}->{cpu}->{status_message} = 'cannot find cpu information'; + return ; + } + + $options{sampling}->{cpu}->{num_cpu} = 0; + while ($buffer =~ /^cpu(\d+)/mg) { + $options{sampling}->{cpu}->{num_cpu}++; + } + + unshift @{$options{sampling}->{cpu}->{values}}, [ + $1 + $2 + $3 + $4 + $5 + $6 + $7, + $4, + $5 + ]; + if (scalar(@{$options{sampling}->{cpu}->{values}}) > 60) { + pop @{$options{sampling}->{cpu}->{values}}; + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/sampling/system/diskio.pm b/gorgone/gorgone/modules/centreon/audit/sampling/system/diskio.pm new file mode 100644 index 00000000000..7ca7dac342e --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/sampling/system/diskio.pm @@ -0,0 +1,63 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged 
# Sample per-device disk I/O counters from /proc/diskstats.
# For every active device, keeps a sliding window (most recent first,
# max 60 entries) of [timestamp, sectors_read, sectors_written,
# ms_reading, ms_writing] snapshots in
# $options{sampling}->{diskio}->{partitions}->{<name>}.
sub sample {
    my (%options) = @_;

    # first call: initialize the sampling bucket
    if (!defined($options{sampling}->{diskio})) {
        $options{sampling}->{diskio} = {
            status_code => 0,
            status_message => 'ok',
            partitions => {}
        };
    }

    my $time = time();
    my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/diskstats');
    # slurp returns 0 on failure; record the error and keep previous samples
    if ($ret == 0) {
        $options{sampling}->{diskio}->{status_code} = 1;
        $options{sampling}->{diskio}->{status_message} = $message;
        return ;
    }

    # /proc/diskstats: major minor name reads reads_merged sectors_read ms_reading
    #                  writes writes_merged sectors_written ms_writing ...
    # Captures: $1=name, $2=sectors_read, $3=ms_reading, $4=sectors_written,
    # $5=ms_writing; $6 is captured but unused (presumably the weighted io-time
    # field — matching it also requires the line to carry the extra fields).
    while ($buffer =~ /^\s*\S+\s+\S+\s+(\S+)\s+\d+\s+\d+\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+(\d+)/mg) {
        # note the deliberate $3/$4 swap so names line up with capture order
        my ($partition_name, $read_sector, $write_sector, $read_ms, $write_ms) = ($1, $2, $4, $3, $5);
        # skip devices that never did any I/O
        next if ($read_sector == 0 && $write_sector == 0);
        if (!defined($options{sampling}->{diskio}->{partitions}->{$partition_name})) {
            $options{sampling}->{diskio}->{partitions}->{$partition_name} = [];
        }
        unshift @{$options{sampling}->{diskio}->{partitions}->{$partition_name}}, [
            $time,
            $read_sector, $write_sector,
            $read_ms, $write_ms
        ];
        # cap the window at 60 samples per device
        if (scalar(@{$options{sampling}->{diskio}->{partitions}->{$partition_name}}) > 60) {
            pop @{$options{sampling}->{diskio}->{partitions}->{$partition_name}};
        }
    }
}

1;
+# + +package gorgone::modules::centreon::autodiscovery::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::autodiscovery::services::discovery; +use gorgone::class::tpapi::clapi; +use gorgone::class::tpapi::centreonv2; +use gorgone::class::sqlquery; +use gorgone::class::frame; +use JSON::XS; +use Time::HiRes; +use POSIX qw(strftime); +use Digest::MD5 qw(md5_hex); +use Try::Tiny; +use EV; + +use constant JOB_SCHEDULED => 0; +use constant JOB_FINISH => 1; +use constant JOB_FAILED => 2; +use constant JOB_RUNNING => 3; +use constant SAVE_RUNNING => 4; +use constant SAVE_FINISH => 5; +use constant SAVE_FAILED => 6; + +use constant CRON_ADDED_NONE => 0; +use constant CRON_ADDED_OK => 1; +use constant CRON_ADDED_KO => 2; +use constant CRON_ADDED_PROGRESS => 3; + +use constant EXECUTION_MODE_IMMEDIATE => 0; +use constant EXECUTION_MODE_CRON => 1; +use constant EXECUTION_MODE_PAUSE => 2; + +use constant MAX_INSERT_BY_QUERY => 100; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{global_timeout} = (defined($options{config}->{global_timeout}) && + $options{config}->{global_timeout} =~ /(\d+)/) ? $1 : 300; + $connector->{check_interval} = (defined($options{config}->{check_interval}) && + $options{config}->{check_interval} =~ /(\d+)/) ? $1 : 15; + $connector->{tpapi_clapi_name} = defined($options{config}->{tpapi_clapi}) && $options{config}->{tpapi_clapi} ne '' ? $options{config}->{tpapi_clapi} : 'clapi'; + $connector->{tpapi_centreonv2_name} = defined($options{config}->{tpapi_centreonv2}) && $options{config}->{tpapi_centreonv2} ne '' ? 
# Tell whether a host discovery job is in an active state, i.e. the
# discovery command is running or its results are being saved.
# Returns 1 when active, 0 otherwise.
sub hdisco_is_running_job {
    my ($self, %options) = @_;

    return ($options{status} == JOB_RUNNING || $options{status} == SAVE_RUNNING) ? 1 : 0;
}
# Register a new host discovery job or refresh an existing one in the
# in-memory caches ({hdisco_jobs_ids} keyed by job id, {hdisco_jobs_tokens}
# keyed by discovery token). Handles the bookkeeping around the job's cron
# definition and result listener, and allocates a discovery token in the
# database when the job has none yet.
#
# options: job - job hashref as returned by the centreon v2 API
# Returns 0 on success, (1, $error_message) on failure.
sub hdisco_addupdate_job {
    my ($self, %options) = @_;
    my ($status, $message);

    my $update = 0;
    my $extra_infos = { cron_added => CRON_ADDED_NONE, listener_added => 0 };
    if (defined($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} })) {
        # known job: keep its cron/listener bookkeeping across the refresh
        $extra_infos = $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{extra_infos};
        $update = 1;
    } else {
        $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - new job '" . $options{job}->{job_id} . "'");
        # it's running so we have a token
        if ($self->hdisco_is_running_job(status => $options{job}->{status})) {
            $extra_infos->{listener_added} = 1;
            $self->hdisco_add_joblistener(
                jobs => [
                    { job_id => $options{job}->{job_id}, target => $options{job}->{target}, token => $options{job}->{token} }
                ]
            );
        }
    }

    # cron changed: we remove old definition
    # right now: can be immediate or schedule (not both)
    if ($update == 1 &&
        ($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{mode} == EXECUTION_MODE_IMMEDIATE ||
         (defined($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{parameters}->{cron_definition}) &&
          defined($options{job}->{execution}->{parameters}->{cron_definition}) &&
          $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{parameters}->{cron_definition} ne $options{job}->{execution}->{parameters}->{cron_definition}
         )
        )
    ) {
        $self->hdisco_delete_cron(discovery_token => $options{job}->{token});
        $extra_infos->{cron_added} = CRON_ADDED_NONE;
    }

    # replace the cached job wholesale, then re-attach the bookkeeping
    $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} } = $options{job};
    $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{extra_infos} = $extra_infos;
    if (!defined($options{job}->{token})) {
        # no discovery token yet: generate one and persist it in the database
        my $discovery_token = 'discovery_' . $options{job}->{job_id} . '_' . $self->generate_token(length => 4);
        if ($self->update_job_information(
            values => {
                token => $discovery_token
            },
            where_clause => [
                { id => $options{job}->{job_id} }
            ]
        ) == -1) {
            return (1, 'cannot add discovery token');
        }

        $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{token} = $discovery_token;
        $options{job}->{token} = $discovery_token;
    }

    if (defined($options{job}->{token})) {
        $self->{hdisco_jobs_tokens}->{ $options{job}->{token} } = $options{job}->{job_id};
    }

    # (re-)register the cron definition when the job is cron-driven and the
    # cron is not already registered or previously failed to register
    if ($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{mode} == EXECUTION_MODE_CRON &&
        ($extra_infos->{cron_added} == CRON_ADDED_NONE || $extra_infos->{cron_added} == CRON_ADDED_KO)
    ) {
        ($status, $message) = $self->hdisco_add_cron(
            job => $options{job},
            discovery_token => $options{job}->{token}
        );
        return ($status, $message) if ($status);
        # listener will flip this to CRON_ADDED_OK/KO when the cron module answers
        $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{extra_infos}->{cron_added} = CRON_ADDED_PROGRESS;
    }

    return 0;
}
# Fetch a single host discovery job definition from the centreon v2 API.
# Returns (1, $error_message) on API failure, otherwise (0, 'ok', $job)
# where $job is undef when no entry matches the requested id.
sub get_host_job {
    my ($self, %options) = @_;

    my ($rv, $response) = $self->{tpapi_centreonv2}->get_scheduling_jobs(search => '{"id": ' . $options{job_id} . '}');
    return (1, "cannot get host discovery job '$options{job_id}' - " . $self->{tpapi_centreonv2}->error()) if ($rv != 0);

    # pick the entry whose job_id matches (the search filter is advisory)
    my $found;
    foreach my $candidate (@{$response->{result}}) {
        next if ($candidate->{job_id} != $options{job_id});
        $found = $candidate;
        last;
    }

    return (0, 'ok', $found);
}
"'"); + + $self->send_internal_action({ + action => 'DELETECRON', + token => $options{token}, + data => { + variables => [ $options{discovery_token} ] + } + }); +} + +sub action_addhostdiscoveryjob { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + if (!$self->is_hdisco_synced()) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => 'host discovery synchronization issue' + } + ); + return ; + } + + my $data = $options{frame}->getData(); + + my ($status, $message, $job); + ($status, $message, $job) = $self->get_host_job(job_id => $data->{content}->{job_id}); + if ($status != 0) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$data->{content}->{job_id}' - " . $self->{tpapi_centreonv2}->error()); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "cannot get job '$data->{content}->{job_id}'" + } + ); + return 1; + } + + if (!defined($job)) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$data->{content}->{job_id}' - " . $self->{tpapi_centreonv2}->error()); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "cannot get job '$data->{content}->{job_id}'" + } + ); + return 1; + } + + $job->{timeout} = $data->{content}->{timeout}; + ($status, $message) = $self->hdisco_addupdate_job(job => $job); + if ($status) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - add job '$data->{content}->{job_id}' - $message"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "add job '$data->{content}->{job_id}' - $message" + } + ); + return 1; + } + + # Launch a immediate job. 
# Launch the discovery command of a host discovery job: mark the job
# running in database and cache, register a result listener, then send the
# discovery command to the job's target poller.
#
# options: job_id, timeout (optional, digits), source ('immediate'|'cron')
# Returns (0, $message) on success or when the job is paused and cron-driven,
# (1, $error_message) on failure.
sub launchhostdiscovery {
    my ($self, %options) = @_;

    return (1, 'host discovery sync not done') if (!$self->is_hdisco_synced());

    my $job_id = $options{job_id};

    if (!defined($job_id) || !defined($self->{hdisco_jobs_ids}->{$job_id})) {
        return (1, 'trying to launch discovery for inexistant job');
    }
    if ($self->hdisco_is_running_job(status => $self->{hdisco_jobs_ids}->{$job_id}->{status})) {
        return (1, 'job is already running');
    }
    # a paused job is skipped only when triggered by cron (manual launch still works)
    if ($self->{hdisco_jobs_ids}->{$job_id}->{execution}->{mode} == EXECUTION_MODE_PAUSE && $options{source} eq 'cron') {
        return (0, "job '$job_id' is paused");
    }

    $self->{logger}->writeLogInfo("[autodiscovery] -class- host discovery - launching discovery for job '" . $job_id . "'");

    # Running
    if ($self->update_job_information(
        values => {
            status => JOB_RUNNING,
            message => 'Running',
            last_execution => strftime("%F %H:%M:%S", localtime),
            duration => 0,
            discovered_items => 0
        },
        where_clause => [
            {
                id => $job_id
            }
        ]
    ) == -1) {
        return (1, 'cannot update job status');
    }
    $self->{hdisco_jobs_ids}->{$job_id}->{status} = JOB_RUNNING;
    # per-job timeout overrides the module-wide default when it is numeric
    my $timeout = (defined($options{timeout}) && $options{timeout} =~ /(\d+)/) ? $1 : $self->{global_timeout};

    # listen for the command result before sending it; allow some slack on
    # top of the command timeout so the listener outlives the command
    $self->send_internal_action({
        action => 'ADDLISTENER',
        data => [
            {
                identity => 'gorgoneautodiscovery',
                event => 'HOSTDISCOVERYJOBLISTENER',
                target => $self->{hdisco_jobs_ids}->{$job_id}->{target},
                token => $self->{hdisco_jobs_ids}->{$job_id}->{token},
                timeout => $timeout + $self->{check_interval} + 15,
                log_pace => $self->{check_interval}
            }
        ]
    });

    # plugins attribute format:
    #    "plugins": {
    #        "centreon-plugin-Cloud-Aws-Ec2-Api": 20220727,
    #        ...
    #    }

    $self->send_internal_action({
        action => 'COMMAND',
        target => $self->{hdisco_jobs_ids}->{$job_id}->{target},
        token => $self->{hdisco_jobs_ids}->{$job_id}->{token},
        data => {
            instant => 1,
            content => [
                {
                    command => $self->{hdisco_jobs_ids}->{$job_id}->{command_line},
                    timeout => $timeout,
                    metadata => {
                        job_id => $job_id,
                        source => 'autodiscovery-host-job-discovery',
                        pkg_install => $self->{hdisco_jobs_ids}->{$job_id}->{plugins}
                    }
                }
            ]
        }
    });

    return (0, "job '$job_id' launched");
}
host discovery - cannot get host discovery job '$job_id' - " . $self->{tpapi_centreonv2}->error()); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "cannot get job '$job_id'" + } + ); + return 1; + } + + if (!defined($job)) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$job_id' - " . $self->{tpapi_centreonv2}->error()); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "cannot get job '$job_id'" + } + ); + return 1; + } + + ($status, $message) = $self->hdisco_addupdate_job(job => $job); + if ($status) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - add job '$job_id' - $message"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "add job '$job_id' - $message" + } + ); + return 1; + } + + ($status, $message) = $self->launchhostdiscovery( + job_id => $job_id, + timeout => $timeout, + source => $source + ); + if ($status) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - launch discovery job '$job_id' - $message"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + instant => 1, + data => { + message => $message + } + ); + return 1; + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + instant => 1, + data => { + message => $message + } + ); +} + +sub discovery_postcommand_result { + my ($self, %options) = @_; + + my $data = $options{frame}->getData(); + + return 1 if (!defined($data->{data}->{metadata}->{job_id})); + + my $job_id = $data->{data}->{metadata}->{job_id}; + if (!defined($self->{hdisco_jobs_ids}->{$job_id})) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - found result for inexistant job '" . $job_id . 
# Append one discovered host to the bulk INSERT being assembled in
# $options{builder}, flushing the accumulated rows to the database every
# MAX_INSERT_BY_QUERY entries.
#
# options: host (hashref), job_id, uuid_parameters (arrayref of attribute
# names used to build a stable uuid), builder (shared accumulator).
# Returns 0 on success, 1 when a flush failed (job is marked JOB_FAILED).
sub discovery_add_host_result {
    my ($self, %options) = @_;

    # batch full: flush the pending rows before adding more
    if ($options{builder}->{num_lines} == MAX_INSERT_BY_QUERY) {
        my ($status) = $self->{class_object_centreon}->custom_execute(
            request => $options{builder}->{query} . $options{builder}->{values},
            bind_values => $options{builder}->{bind_values}
        );
        if ($status == -1) {
            $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to insert job '$options{job_id}' results");
            $self->update_job_status(
                job_id => $options{job_id},
                status => JOB_FAILED,
                message => 'Failed to insert job results'
            );
            return 1;
        }
        $options{builder}->{num_lines} = 0;
        $options{builder}->{values} = '';
        $options{builder}->{append} = '';
        # reset to an array reference (assigning an empty list would leave an
        # undef scalar here, while every other site treats this as an arrayref)
        $options{builder}->{bind_values} = [];
    }

    # Generate uuid based on attributs
    my $uuid_char = '';
    foreach (@{$options{uuid_parameters}}) {
        $uuid_char .= $options{host}->{$_} if (defined($options{host}->{$_}) && $options{host}->{$_} ne '');
    }
    # md5_hex is imported at the top of the module; format as 8-4-4-4-12
    my $digest = md5_hex($uuid_char);
    my $uuid = substr($digest, 0, 8) . '-' . substr($digest, 8, 4) . '-' . substr($digest, 12, 4) . '-' .
        substr($digest, 16, 4) . '-' . substr($digest, 20, 12);
    my $encoded_host = JSON::XS->new->encode($options{host});

    # Build bulk insert
    $options{builder}->{values} .= $options{builder}->{append} . '(?, ?, ?)';
    $options{builder}->{append} = ', ';
    push @{$options{builder}->{bind_values}}, $options{job_id}, $encoded_host, $uuid;
    $options{builder}->{num_lines}++;
    $options{builder}->{total_lines}++;

    return 0;
}
+ $data->{data}->{result}->{stderr} : $data->{data}->{result}->{stdout} + ); + return 1; + } + + # Delete previous results + my $query = "DELETE FROM mod_host_disco_host WHERE job_id = ?"; + my ($status) = $self->{class_object_centreon}->custom_execute(request => $query, bind_values => [$job_id]); + if ($status == -1) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to delete previous job '$job_id' results"); + $self->update_job_status( + job_id => $job_id, + status => JOB_FAILED, + message => 'Failed to delete previous job results' + ); + return 1; + } + + # Add new results + my $builder = { + query => "INSERT INTO mod_host_disco_host (job_id, discovery_result, uuid) VALUES ", + num_lines => 0, + total_lines => 0, + values => '', + append => '', + bind_values => [] + }; + my $duration = 0; + + try { + my $json = JSON::XS->new(); + $json->incr_parse($data->{data}->{result}->{stdout}); + while (my $obj = $json->incr_parse()) { + if (ref($obj) eq 'HASH') { + foreach my $host (@{$obj->{results}}) { + my $rv = $self->discovery_add_host_result(host => $host, job_id => $job_id, uuid_parameters => $uuid_parameters, builder => $builder); + return 1 if ($rv); + } + $duration = $obj->{duration}; + } elsif (ref($obj) eq 'ARRAY') { + foreach my $host (@$obj) { + my $rv = $self->discovery_add_host_result(host => $host, job_id => $job_id, uuid_parameters => $uuid_parameters, builder => $builder); + return 1 if ($rv); + } + } + } + } catch { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to decode discovery plugin response job '$job_id'"); + $self->update_job_status( + job_id => $job_id, + status => JOB_FAILED, + message => 'Failed to decode discovery plugin response' + ); + return 1; + }; + + if ($builder->{values} ne '') { + ($status) = $self->{class_object_centreon}->custom_execute(request => $builder->{query} . 
$builder->{values}, bind_values => $builder->{bind_values}); + if ($status == -1) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to insert job '$job_id' results"); + $self->update_job_status( + job_id => $job_id, + status => JOB_FAILED, + message => 'Failed to insert job results' + ); + return 1; + } + } + + if (defined($self->{hdisco_jobs_ids}->{$job_id}->{post_execution}->{commands}) && + scalar(@{$self->{hdisco_jobs_ids}->{$job_id}->{post_execution}->{commands}}) > 0) { + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - execute post command job '$job_id'"); + my $post_command = $self->{hdisco_jobs_ids}->{$job_id}->{post_execution}->{commands}->[0]; + + $self->send_internal_action({ + action => $post_command->{action}, + token => $self->{hdisco_jobs_ids}->{$job_id}->{token}, + data => { + instant => 1, + content => [ + { + command => $post_command->{command_line} . ' --token=' . $self->{tpapi_centreonv2}->get_token(), + metadata => { + job_id => $job_id, + source => 'autodiscovery-host-job-postcommand' + } + } + ] + } + }); + } + + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - finished discovery command job '$job_id'"); + $self->update_job_status( + job_id => $job_id, + status => JOB_FINISH, + message => 'Finished', + duration => $duration, + discovered_items => $builder->{total_lines} + ); + + return 0; +} + +sub action_deletehostdiscoveryjob { + my ($self, %options) = @_; + + # delete is call when it's in pause (execution_mode 2). + # in fact, we do a curl to sync. If don't exist in database, we remove it. 
otherwise we do nothing + $options{token} = $self->generate_token() if (!defined($options{token})); + if (!$self->is_hdisco_synced()) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => 'host discovery synchronization issue' + } + ); + return ; + } + + my $data = $options{frame}->getData(); + + my $discovery_token = $data->{variables}->[0]; + my $job_id = (defined($discovery_token) && defined($self->{hdisco_jobs_tokens}->{$discovery_token})) ? + $self->{hdisco_jobs_tokens}->{$discovery_token} : undef; + if (!defined($discovery_token) || $discovery_token eq '') { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - missing ':token' variable to delete discovery"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing discovery token' } + ); + return 1; + } + + my ($status, $message, $job); + ($status, $message, $job) = $self->get_host_job(job_id => $job_id); + if ($status != 0) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$job_id' - " . $self->{tpapi_centreonv2}->error()); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "cannot get job '$job_id'" + } + ); + return 1; + } + + if (!defined($job)) { + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - delete job '" . $job_id . "'"); + if (defined($self->{hdisco_jobs_ids}->{$job_id}->{token})) { + $self->hdisco_delete_cron(discovery_token => $discovery_token); + delete $self->{hdisco_jobs_tokens}->{$discovery_token}; + } + delete $self->{hdisco_jobs_ids}->{$job_id}; + } else { + $self->hdisco_addupdate_job(job => $job); + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'job ' . $discovery_token . 
# Update columns of the mod_host_disco_job table.
#
# options:
#   values       - hashref of column => new value (mandatory, non empty)
#   where_clause - arrayref of single-pair hashrefs { column => value }
#                  (mandatory, non empty)
#
# Returns 0 on success, 1 on invalid parameters, -1 on database error.
sub update_job_information {
    my ($self, %options) = @_;

    # count the entries of the arrayref: scalar() on the reference itself
    # would numify to its address and never be < 1, letting an empty
    # where_clause through and producing a broken 'WHERE ' query
    return 1 if (!defined($options{where_clause}) || ref($options{where_clause}) ne 'ARRAY' || scalar(@{$options{where_clause}}) < 1);
    return 1 if (!defined($options{values}) || ref($options{values}) ne 'HASH' || !keys %{$options{values}});

    my $query = "UPDATE mod_host_disco_job SET ";
    my @bind_values = ();
    my $append = '';
    foreach (keys %{$options{values}}) {
        $query .= $append . $_ . ' = ?';
        $append = ', ';
        push @bind_values, $options{values}->{$_};
    }

    $query .= " WHERE ";
    $append = '';
    foreach (@{$options{where_clause}}) {
        # each entry is expected to carry exactly one column => value pair
        my ($key, $value) = each %{$_};
        $query .= $append . $key . " = ?";
        # spaces around AND so successive conditions don't glue into '?AND'
        $append = ' AND ';
        push @bind_values, $value;
    }

    my ($status) = $self->{class_object_centreon}->custom_execute(request => $query, bind_values => \@bind_values);
    if ($status == -1) {
        $self->{logger}->writeLogError('[autodiscovery] Failed to update job information');
        return -1;
    }

    return 0;
}
# Listener callback for ADDCRON acknowledgements (tokens of the form
# 'cron-<discovery_token>'): records in the job's extra_infos whether its
# cron definition was successfully registered.
sub action_hostdiscoverycronlistener {
    my ($self, %options) = @_;

    return 0 if (!defined($options{token}) || $options{token} !~ /^cron-(.*)/);
    my $discovery_token = $1;

    my $job_id = $self->{hdisco_jobs_tokens}->{ $discovery_token };
    return 0 if (!defined($job_id));

    my $data = $options{frame}->getData();

    if ($data->{code} == GORGONE_ACTION_FINISH_OK) {
        $self->{logger}->writeLogInfo("[autodiscovery] -class- host discovery - job '" . $job_id . "' add cron ok");
        $self->{hdisco_jobs_ids}->{$job_id}->{extra_infos}->{cron_added} = CRON_ADDED_OK;
    } elsif ($data->{code} == GORGONE_ACTION_FINISH_KO) {
        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - job '" . $job_id . "' add cron error");
        $self->{hdisco_jobs_ids}->{$job_id}->{extra_infos}->{cron_added} = CRON_ADDED_KO;
    }

    return 1;
}
# Listener callback for service discovery events; routes the event to the
# matching in-flight discovery object and runs post-execution once it is done.
sub action_servicediscoverylistener {
    my ($self, %options) = @_;

    return 0 if (!defined($options{token}));

    # 'svc-disco-UUID-RULEID-HOSTID' . $self->{service_uuid} . '-' . $service_number . '-' . $rule_id . '-' . $host->{host_id}
    return 0 if ($options{token} !~ /^svc-disco-(.*?)-(\d+)-(\d+)/);
    my ($uuid, $rule_id, $host_id) = ($1, $2, $3);

    return 0 unless (defined($self->{service_discoveries}->{ $uuid }));

    $self->{service_discoveries}->{ $uuid }->discoverylistener(
        rule_id => $rule_id,
        host_id => $host_id,
        %options
    );

    # look the entry up again: the callback above may have dropped it
    my $discovery = $self->{service_discoveries}->{ $uuid };
    if (defined($discovery) && $discovery->is_finished()) {
        return 0 if ($discovery->is_post_execution());
        $discovery->service_discovery_post_exec();
        delete $self->{service_discoveries}->{ $uuid };
    }
}
$svc_discovery->get_uuid() } = $svc_discovery; + my $status = $svc_discovery->launchdiscovery( + token => $options{token}, + frame => $options{frame} + ); + if ($status == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot launch discovery' } + ); + delete $self->{service_discoveries}->{ $svc_discovery->get_uuid() }; + } +} + +sub is_module_installed { + my ($self) = @_; + + return 1 if ($self->{is_module_installed} == 1); + return 0 if ((time() - $self->{is_module_installed_check_interval}) < $self->{is_module_installed_last_check}); + + $self->{logger}->writeLogDebug('[autodiscovery] -class- host discovery - check centreon module installed'); + $self->{is_module_installed_last_check} = time(); + + my ($status, $results) = $self->{tpapi_centreonv2}->get_platform_versions(); + if ($status != 0) { + $self->{logger}->writeLogError('[autodiscovery] -class- host discovery - cannot get platform versions - ' . $self->{tpapi_centreonv2}->error()); + return 0; + } + + if (defined($results->{modules}) && ref($results->{modules}) eq 'HASH' && + defined($results->{modules}->{'centreon-autodiscovery-server'})) { + $self->{logger}->writeLogDebug('[autodiscovery] -class- host discovery - module autodiscovery installed'); + $self->{is_module_installed} = 1; + } + + return $self->{is_module_installed}; +} + +sub is_hdisco_synced { + my ($self) = @_; + + return $self->{hdisco_synced} == 1 ? 1 : 0; +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my $frame = gorgone::class::frame->new(); + my (undef, $rv) = $self->read_message(frame => $frame); + next if ($rv); + + my $raw = $frame->getFrame(); + $self->{logger}->writeLogDebug("[autodiscovery] Event: " . $$raw) if ($connector->{logger}->is_debug()); + if ($$raw =~ /^\[(.*?)\]/) { + if ((my $method = $connector->can('action_' . 
lc($1)))) { + next if ($frame->parse({ releaseFrame => 1, decode => 1 })); + + $method->($self, token => $frame->getToken(), frame => $frame); + } + } + } +} + +sub periodic_exec { + $connector->is_module_installed(); + $connector->hdisco_sync(); + + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[autodiscovery] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{tpapi_clapi} = gorgone::class::tpapi::clapi->new(); + $self->{tpapi_clapi}->set_configuration( + config => $self->{tpapi}->get_configuration(name => $self->{tpapi_clapi_name}) + ); + $self->{tpapi_centreonv2} = gorgone::class::tpapi::centreonv2->new(); + my ($status) = $self->{tpapi_centreonv2}->set_configuration( + config => $self->{tpapi}->get_configuration(name => $self->{tpapi_centreonv2_name}), + logger => $self->{logger} + ); + if ($status) { + $self->{logger}->writeLogError('[autodiscovery] -class- host discovery - configure api centreonv2 - ' . $self->{tpapi_centreonv2}->error()); + } + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + + $self->{class_object_centreon} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centreon} + ); + $self->{class_object_centstorage} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centstorage} + ); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-autodiscovery', + logger => 
$self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'AUTODISCOVERYREADY', + data => {} + }); + + $self->is_module_installed(); + $self->hdisco_sync(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/autodiscovery/hooks.pm b/gorgone/gorgone/modules/centreon/autodiscovery/hooks.pm new file mode 100644 index 00000000000..ae20261c054 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/autodiscovery/hooks.pm @@ -0,0 +1,167 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::autodiscovery::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::autodiscovery::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'autodiscovery'; +use constant EVENTS => [ + { event => 'AUTODISCOVERYREADY' }, + { event => 'HOSTDISCOVERYJOBLISTENER' }, + { event => 'HOSTDISCOVERYCRONLISTENER' }, + { event => 'SERVICEDISCOVERYLISTENER' }, + { event => 'ADDHOSTDISCOVERYJOB', uri => '/hosts', method => 'POST' }, + { event => 'DELETEHOSTDISCOVERYJOB', uri => '/hosts', method => 'DELETE' }, + { event => 'LAUNCHHOSTDISCOVERY', uri => '/hosts', method => 'GET' }, + { event => 'LAUNCHSERVICEDISCOVERY', uri => '/services', method => 'POST' } +]; + +my $config_core; +my $config; +my ($config_db_centreon, $config_db_centstorage); +my $autodiscovery = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config_db_centstorage = $options{config_db_centstorage}; + + $config->{vault_file} = defined($config->{vault_file}) ? 
$config->{vault_file} : '/var/lib/centreon/vault/vault.json'; + + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'AUTODISCOVERYREADY') { + $autodiscovery->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$autodiscovery->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { msg => 'gorgoneautodiscovery: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-autodiscovery', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($autodiscovery->{running}) && $autodiscovery->{running} == 1) { + $options{logger}->writeLogDebug("[autodiscovery] Send TERM signal $autodiscovery->{pid}"); + CORE::kill('TERM', $autodiscovery->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($autodiscovery->{running} == 1) { + $options{logger}->writeLogDebug("[autodiscovery] Send KILL signal for pool"); + CORE::kill('KILL', $autodiscovery->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($autodiscovery->{pid}) || $autodiscovery->{pid} != $pid); + + $autodiscovery = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($autodiscovery->{running}) && $autodiscovery->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + 
$options{logger}->writeLogInfo("[autodiscovery] Create module 'autodiscovery' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-autodiscovery'; + my $module = gorgone::modules::centreon::autodiscovery::class->new( + module_id => NAME, + logger => $options{logger}, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[autodiscovery] PID $child_pid (gorgone-autodiscovery)"); + $autodiscovery = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/autodiscovery/services/discovery.pm b/gorgone/gorgone/modules/centreon/autodiscovery/services/discovery.pm new file mode 100644 index 00000000000..41d8efc231f --- /dev/null +++ b/gorgone/gorgone/modules/centreon/autodiscovery/services/discovery.pm @@ -0,0 +1,1011 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::autodiscovery::services::discovery; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::autodiscovery::services::resources; +use Net::SMTP; +use XML::Simple; +use POSIX qw(strftime); +use Safe; +use JSON::XS; + +sub new { + my ($class, %options) = @_; + my $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{internal_socket} = $options{internal_socket}; + $connector->{class_object_centreon} = $options{class_object_centreon}; + $connector->{class_object_centstorage} = $options{class_object_centstorage}; + $connector->{class_autodiscovery} = $options{class_autodiscovery}; + $connector->{tpapi_clapi} = $options{tpapi_clapi}; + $connector->{mail_subject} = defined($connector->{config}->{mail_subject}) ? $connector->{config}->{mail_subject} : 'Centreon Auto Discovery'; + $connector->{mail_from} = defined($connector->{config}->{mail_from}) ? $connector->{config}->{mail_from} : 'centreon-autodisco'; + + $connector->{service_pollers} = {}; + $connector->{audit_user_id} = undef; + $connector->{service_parrallel_commands_poller} = 8; + $connector->{service_current_commands_poller} = {}; + $connector->{finished} = 0; + $connector->{post_execution} = 0; + + $connector->{safe_display} = Safe->new(); + $connector->{safe_display}->share('$values'); + $connector->{safe_display}->share('$description'); + $connector->{safe_display}->permit_only(':default'); + $connector->{safe_display}->share_from( + 'gorgone::modules::centreon::autodiscovery::services::resources', + ['change_bytes'] + ); + + $connector->{safe_cv} = Safe->new(); + $connector->{safe_cv}->share('$values'); + $connector->{safe_cv}->permit_only(':default'); + + $connector->{uuid} = $connector->generate_token(length => 4) . ':' . 
$options{service_number}; + return $connector; +} + +sub database_init_transaction { + my ($self, %options) = @_; + + my $status = $self->{class_object_centreon}->{db_centreon}->transaction_mode(1); + if ($status == -1) { + $self->{logger}->writeLogError("$@"); + return -1; + } + return 0; +} + +sub database_commit_transaction { + my ($self, %options) = @_; + + my $status = $self->{class_object_centreon}->commit(); + if ($status == -1) { + $self->{logger}->writeLogError("$@"); + return -1; + } + + $self->{class_object_centreon}->transaction_mode(0); + return 0; +} + +sub database_error_rollback { + my ($self, %options) = @_; + + $self->{logger}->writeLogError($options{message}); + eval { + $self->{class_object_centreon}->rollback(); + $self->{class_object_centreon}->transaction_mode(0); + }; + if ($@) { + $self->{logger}->writeLogError("$@"); + } + return -1; +} + +sub get_uuid { + my ($self, %options) = @_; + + return $self->{uuid}; +} + +sub is_finished { + my ($self, %options) = @_; + + return $self->{finished}; +} + +sub is_post_execution { + my ($self, %options) = @_; + + return $self->{post_execution}; +} + +sub send_email { + my ($self, %options) = @_; + + my $messages = {}; + foreach my $journal (@{$self->{discovery}->{journal}}) { + $messages->{ $journal->{rule_id } } = [] if (!defined($messages->{ $journal->{rule_id } })); + push @{$messages->{ $journal->{rule_id } }}, $journal->{type} . " service '" . $journal->{service_name} . "' on host '" . $journal->{host_name} . 
"'."; + } + + my $contact_send = {}; + foreach my $rule_id (keys %{$self->{discovery}->{rules}}) { + next if (!defined($self->{discovery}->{rules}->{$rule_id}->{contact})); + next if (!defined($messages->{$rule_id})); + + foreach my $contact_id (keys %{$self->{discovery}->{rules}->{$rule_id}->{contact}}) { + next if (defined($contact_send->{$contact_id})); + $contact_send->{$contact_id} = 1; + + my $body = []; + foreach my $rule_id2 (keys %{$messages}) { + if (defined($self->{discovery}->{rules}->{$rule_id2}->{contact}->{$contact_id})) { + push @$body, @{$messages->{$rule_id2}}; + } + } + + if (scalar(@$body) > 0) { + $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} send email to '" . $contact_id . "' (" . $self + ->{discovery} + ->{rules} + ->{$rule_id} + ->{contact} + ->{$contact_id} + ->{contact_email} . ")"); + + my $smtp = Net::SMTP->new('localhost', Timeout => 15); + if (!defined($smtp)) { + $self->{logger}->writeLogError("[autodiscovery] -servicediscovery- sent email error - " . $@); + next; + } + $smtp->mail($self->{mail_from}); + if (!$smtp->to($self->{discovery}->{rules}->{$rule_id}->{contact}->{$contact_id}->{contact_email})) { + $self->{logger}->writeLogError("[autodiscovery] -servicediscovery- sent email error - " . $smtp->message()); + next; + } + + $smtp->data(); + $smtp->datasend( + 'Date: ' . strftime('%a, %d %b %Y %H:%M:%S %z', localtime(time())) . "\n" . + 'From: ' . $self->{mail_from} . "\n" . + 'To: ' . $self->{discovery}->{rules}->{$rule_id}->{contact}->{$contact_id}->{contact_email} . "\n" . + 'Subject: ' . $self->{mail_subject} . "\n" . + "\n" . + join("\n", @$body) . 
"\n" + ); + $smtp->dataend(); + $smtp->quit(); + } + } + } +} + +sub restart_pollers { + my ($self, %options) = @_; + + return if ($self->{discovery}->{no_generate_config} == 1); + + my $poller_ids = {}; + foreach my $poller_id (keys %{$self->{discovery}->{pollers_reload}}) { + $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} generate poller config '" . $poller_id . "'"); + $self->send_internal_action({ + action => 'COMMAND', + token => $self->{discovery}->{token} . ':config', + data => { + content => [ + { + command => $self->{tpapi_clapi}->get_applycfg_command(poller_id => $poller_id) + } + ] + } + }); + } +} + +sub audit_update { + my ($self, %options) = @_; + + return if ($self->{discovery}->{audit_enable} != 1); + + my $query = 'INSERT INTO log_action (action_log_date, object_type, object_id, object_name, action_type, log_contact_id) VALUES (?, ?, ?, ?, ?, ?)'; + my ($status, $sth) = $self->{class_object_centstorage}->custom_execute( + request => $query, + bind_values => [time(), $options{object_type}, $options{object_id}, $options{object_name}, $options{action_type}, $options{contact_id}] + ); + + return if (!defined($options{fields})); + + my $action_log_id = $self->{class_object_centstorage}->{db_centreon}->last_insert_id(); + foreach (keys %{$options{fields}}) { + $query = 'INSERT INTO log_action_modification (action_log_id, field_name, field_value) VALUES (?, ?, ?)'; + ($status) = $self->{class_object_centstorage}->custom_execute( + request => $query, + bind_values => [$action_log_id, $_, $options{fields}->{$_}] + ); + if ($status == -1) { + return -1; + } + } +} + +sub custom_variables { + my ($self, %options) = @_; + + if (defined($options{rule}->{rule_variable_custom}) && $options{rule}->{rule_variable_custom} ne '') { + local $SIG{__DIE__} = 'IGNORE'; + + our $values = { attributes => $options{discovery_svc}->{attributes}, service_name => $options{discovery_svc}->{service_name} }; + 
$self->{safe_cv}->reval($options{rule}->{rule_variable_custom}, 1); + if ($@) { + $self->{logger}->writeLogError("$options{logger_pre_message} custom variable code execution problem: " . $@); + } else { + $options{discovery_svc}->{attributes} = $values->{attributes}; + } + } +} + +sub get_description { + my ($self, %options) = @_; + + my $desc = $options{discovery_svc}->{service_name}; + if (defined($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_scan_display_custom}) && $self->{discovery}->{rules}->{ $options{rule_id} }->{rule_scan_display_custom} ne '') { + local $SIG{__DIE__} = 'IGNORE'; + + our $description = $desc; + our $values = { attributes => $options{discovery_svc}->{attributes}, service_name => $options{discovery_svc}->{service_name} }; + $self->{safe_display}->reval($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_scan_display_custom}, 1); + if ($@) { + $self->{logger}->writeLogError("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] custom description code execution problem: " . $@); + } else { + $desc = $description; + } + } + + return $desc; +} + +sub link_service_autodisco { + my ($self, %options) = @_; + + my $query = 'INSERT IGNORE INTO mod_auto_disco_rule_service_relation (rule_rule_id, service_service_id) VALUES (' . $options{rule_id} . ', ' . $options{service_id} . 
')'; + my ($status, $sth) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return -1; + } + + return 0; +} + +sub update_service { + my ($self, %options) = @_; + my %query_update = (); + my @journal = (); + my @update_macros = (); + my @insert_macros = (); + + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} } = { + type => 0, + macros => {}, + description => $self->get_description(%options) + }; + } + + return if ($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_update} == 0); + + if ($options{service}->{template_id} != $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}) { + $query_update{service_template_model_stm_id} = $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}; + push @journal, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'update', + msg => 'template', + rule_id => $options{rule_id} + }; + + $self->{logger}->writeLogDebug("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . 
"] -> service update template"); + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{service_template_model_stm_id} = $self + ->{discovery} + ->{rules} + ->{ $options{rule_id} } + ->{service_template_model_id}; + } + } + if ($options{service}->{activate} == '0') { + $query_update{service_activate} = "'1'"; + push @journal, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'enable', + rule_id => $options{rule_id} + }; + $self->{logger}->writeLogDebug("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> service enable"); + } + + foreach my $macro_name (keys %{$options{macros}}) { + if (!defined($options{service}->{macros}->{'$_SERVICE' . $macro_name . '$'})) { + push @insert_macros, { + name => $macro_name, + value => $options{macros}->{$macro_name} + }; + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{macros}->{$macro_name} = + { value => $options{macros} ->{$macro_name}, type => 1 }; + } + } elsif ($options{service}->{macros}->{'$_SERVICE' . $macro_name . 
'$'} ne $options{macros}->{$macro_name}) { + push @update_macros, { + name => $macro_name, + value => $options{macros}->{$macro_name} + }; + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{macros}->{$macro_name} = + { value => $options{macros}->{$macro_name}, type => 0 }; + } + } + } + + if (scalar(@insert_macros) > 0 || scalar(@update_macros) > 0) { + push @journal, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'update', + msg => 'macros', + rule_id => $options{rule_id} + }; + $self->{logger}->writeLogDebug("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> service update/insert macros"); + } + + return $options{service}->{id} if ($self->{discovery}->{dry_run} == 1 || scalar(@journal) == 0); + + return -1 if ($self->database_init_transaction() == -1); + + if (scalar(keys %query_update) > 0) { + my $set = ''; + my $set_append = ''; + foreach (keys %query_update) { + $set .= $set_append . $_ . ' = ' . $query_update{$_}; + $set_append = ', '; + } + my $query = 'UPDATE service SET ' . $set . ' WHERE service_id = ' . $options{service}->{id}; + my ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot update service"); + } + } + + foreach (@update_macros) { + my $query = 'UPDATE on_demand_macro_service SET svc_macro_value = ? WHERE svc_svc_id = ' . $options{service}->{id} . ' AND svc_macro_name = ?'; + my ($status) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => [$_->{value}, '$_SERVICE' . $_->{name} . 
'$'] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot update macro"); + } + } + foreach (@insert_macros) { + my $query = 'INSERT on_demand_macro_service (svc_svc_id, svc_macro_name, svc_macro_value) VALUES (' . $options{service}->{id} . ', ?, ?)'; + my ($status) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => ['$_SERVICE' . $_->{name} . '$', $_->{value}] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot insert macro"); + } + } + + if ($self->link_service_autodisco(%options, service_id => $options{service}->{id}) == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot link service to autodisco"); + } + + return -1 if ($self->database_commit_transaction() == -1); + + $self->{discovery}->{pollers_reload}->{ $options{poller_id} } = 1; + push @{$self->{discovery}->{journal}}, @journal; + + if (defined($query_update{service_activate})) { + $self->audit_update( + object_type => 'service', + action_type => 'enable', + object_id => $options{service}->{id}, + object_name => $options{discovery_svc}->{service_name}, + contact_id => $self->{audit_user_id} + ); + } + if (defined($query_update{service_template_model_stm_id})) { + $self->audit_update( + object_type => 'service', + action_type => 'c', + object_id => $options{service}->{id}, + object_name => $options{discovery_svc}->{service_name}, + contact_id => $self->{audit_user_id}, + fields => { service_template_model_stm_id => $query_update{service_template_model_stm_id} } + ); + } + + return $options{service}->{id}; +} + +sub create_service { + my ($self, %options) = @_; + + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ 
$options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} } = { + type => 1, + service_template_model_stm_id => $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}, + macros => {}, + description => $self->get_description(%options) + }; + foreach (keys %{$options{macros}}) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{macros}->{$_} = { + value => $options{macros}->{$_}, + type => 1 + }; + } + } + + return 0 if ($self->{discovery}->{dry_run} == 1); + # We create the service + + return -1 if ($self->database_init_transaction() == -1); + + my $query = "INSERT INTO service (service_template_model_stm_id, service_description, service_register) VALUES (?, ?, '1')"; + my ($status, $sth) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => [$self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}, $options{discovery_svc}->{service_name}] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot create service"); + } + my $service_id = $self->{class_object_centreon}->{db_centreon}->last_insert_id(); + + $query = 'INSERT INTO host_service_relation (host_host_id, service_service_id) VALUES (' . $options{host_id} . ', ' . $service_id . ')'; + ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot link service to host"); + } + + $query = 'INSERT INTO extended_service_information (service_service_id) VALUES (' . $service_id . 
')'; + ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot service extended information"); + } + + foreach (keys %{$options{macros}}) { + $query = 'INSERT INTO on_demand_macro_service (svc_svc_id, svc_macro_name, svc_macro_value) VALUES (' . $service_id . ', ?, ?)'; + ($status) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => ['$_SERVICE' . $_ . '$', $options{macros}->{$_}] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot create macro '$_' => '$options{macros}->{$_}'"); + } + } + + if ($self->link_service_autodisco(%options, service_id => $service_id) == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot link service to autodisco"); + } + + return -1 if ($self->database_commit_transaction() == -1); + + $self->{discovery}->{pollers_reload}->{ $options{poller_id} } = 1; + + $self->audit_update( + object_type => 'service', + action_type => 'a', + object_id => $service_id, + object_name => $options{discovery_svc}->{service_name}, + contact_id => $self->{audit_user_id}, + fields => { + service_template_model_id => $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}, + service_description => $options{discovery_svc}->{service_name}, + service_register => '1', + service_hPars => $options{host_id} + } + ); + + return $service_id; +} + +sub crud_service { + my ($self, %options) = @_; + + my $service_id; + if (!defined($options{service})) { + $service_id = $self->create_service(%options); + $self->{logger}->writeLogDebug("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . 
"] -> service created"); + if ($service_id != -1) { + push @{$self->{discovery}->{journal}}, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'created', + rule_id => $options{rule_id} + }; + } + } else { + $service_id = $self->update_service(%options); + } + + return 0; +} + +sub disable_services { + my ($self, %options) = @_; + + return if ($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_disable} != 1 || !defined($self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} })); + foreach my $service (keys %{$self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }}) { + my $service_description = $self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }->{$service}->{service_description}; + + if (!defined($options{discovery_svc}->{discovered_services}->{$service_description}) && + $self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }->{$service}->{service_activate} == 1) { + $self->{logger}->writeLogDebug("$options{logger_pre_message} -> disable service '" . $service_description . "'"); + next if ($self->{discovery}->{dry_run} == 1); + + my $query = "UPDATE service SET service_activate = '0' WHERE service_id = " . $service; + my ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + $self->{logger}->writeLogError("$options{logger_pre_message} -> cannot disable service '" . $service_description . 
# Parse the XML emitted by a discovery plugin run and drive the per-service
# workflow: compute the service name, apply inclusion/exclusion rules, resolve
# macros, then create/update (crud_service) and finally disable stale services.
# Returns -1 on XML parse failure, otherwise the disable_services() result.
sub service_response_parsing {
    my ($self, %options) = @_;

    my $rule_alias = $self->{discovery}->{rules}->{ $options{rule_id} }->{rule_alias};
    my $poller_name = $self->{service_pollers}->{ $options{poller_id} }->{name};
    my $host_name = $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name};
    my $logger_pre_message = "[autodiscovery] -servicediscovery- $self->{uuid} [" . $rule_alias . "] [" . $poller_name . "] [" . $host_name . "]";

    my $xml;
    eval {
        # ForceArray so single <label> entries still come back as a list.
        $xml = XMLin($options{response}, ForceArray => 1, KeyAttr => []);
    };
    if ($@) {
        # For manual discoveries the failure is stored so it can be reported
        # back in the final result payload.
        if ($self->{discovery}->{is_manual} == 1) {
            $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{failed} = 1;
            $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{message} = 'load xml issue';
        }
        $self->{logger}->writeLogError("$logger_pre_message -> load xml issue");
        $self->{logger}->writeLogDebug("$logger_pre_message -> load xml error: $@");
        return -1;
    }

    my $discovery_svc = { discovered_services => {} };
    # One <label> element = one candidate service with its attributes.
    foreach my $attributes (@{$xml->{label}}) {
        $discovery_svc->{service_name} = '';
        $discovery_svc->{attributes} = $attributes;

        $self->custom_variables(
            discovery_svc => $discovery_svc,
            rule => $self->{discovery}->{rules}->{ $options{rule_id} },
            logger_pre_message => $logger_pre_message
        );

        # Applies the rule's change/substitution rules and fills service_name.
        gorgone::modules::centreon::autodiscovery::services::resources::change_vars(
            discovery_svc => $discovery_svc,
            rule => $self->{discovery}->{rules}->{ $options{rule_id} },
            logger => $self->{logger},
            logger_pre_message => $logger_pre_message
        );
        if ($discovery_svc->{service_name} eq '') {
            $self->{logger}->writeLogError("$logger_pre_message -> no value for service name");
            next;
        }

        # Guard against two XML entries resolving to the same service name.
        if (defined($discovery_svc->{discovered_services}->{ $discovery_svc->{service_name} })) {
            $self->{logger}->writeLogError("$logger_pre_message -> service '" . $discovery_svc->{service_name} . "' already created");
            next;
        }

        $discovery_svc->{discovered_services}->{ $discovery_svc->{service_name} } = 1;

        # Skip services excluded (or not included) by the rule's exinc filters.
        next if (
            gorgone::modules::centreon::autodiscovery::services::resources::check_exinc(
                discovery_svc => $discovery_svc,
                rule => $self->{discovery}->{rules}->{ $options{rule_id} },
                logger => $self->{logger},
                logger_pre_message => $logger_pre_message
            )
        );

        my $macros = gorgone::modules::centreon::autodiscovery::services::resources::get_macros(
            discovery_svc => $discovery_svc,
            rule => $self->{discovery}->{rules}->{ $options{rule_id} }
        );

        # Look up any existing service with this description on the host.
        my ($status, $service) = gorgone::modules::centreon::autodiscovery::services::resources::get_service(
            class_object_centreon => $self->{class_object_centreon},
            host_id => $options{host_id},
            service_name => $discovery_svc->{service_name},
            logger => $self->{logger},
            logger_pre_message => $logger_pre_message
        );
        next if ($status == -1);

        $self->crud_service(
            discovery_svc => $discovery_svc,
            rule_id => $options{rule_id},
            host_id => $options{host_id},
            poller_id => $options{poller_id},
            service => $service,
            macros => $macros,
            logger_pre_message => $logger_pre_message
        );
    }

    # Finally, disable linked services that were not rediscovered this run.
    $self->disable_services(
        discovery_svc => $discovery_svc,
        rule_id => $options{rule_id},
        host_id => $options{host_id},
        poller_id => $options{poller_id},
        logger_pre_message => $logger_pre_message
    );
}
# Listener callback for a single per-host discovery command result. Updates
# per-poller in-flight counters, schedules the next queued command, tracks
# progress, and emits the final FINISH_OK log once every discovery is done.
sub discoverylistener {
    my ($self, %options) = @_;

    my $data = $options{frame}->getData();

    # Only command results and hard failures are of interest here.
    return 0 if ($data->{code} != GORGONE_MODULE_ACTION_COMMAND_RESULT && $data->{code} != GORGONE_ACTION_FINISH_KO);

    if ($self->{discovery}->{is_manual} == 1) {
        # Lazily initialize the per-host / per-rule result slots for manual runs.
        $self->{discovery}->{manual}->{ $options{host_id} } = { rules => {} } if (!defined($self->{discovery}->{manual}->{ $options{host_id} }));
        $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} } = { failed => 0, discovery => {} } if (!defined($self
            ->{discovery}
            ->{manual}
            ->{ $options{host_id} }
            ->{rules}
            ->{ $options{rule_id} }));
    }

    # if i have GORGONE_MODULE_ACTION_COMMAND_RESULT, i can't have GORGONE_ACTION_FINISH_KO
    if ($data->{code} == GORGONE_MODULE_ACTION_COMMAND_RESULT) {
        my $exit_code = $data->{data}->{result}->{exit_code};
        if ($exit_code == 0) {
            # Plugin succeeded: parse its XML output and apply the rule.
            $self->service_response_parsing(
                rule_id => $options{rule_id},
                host_id => $options{host_id},
                poller_id => $self->{discovery}->{hosts}->{ $options{host_id} }->{poller_id},
                response => $data->{data}->{result}->{stdout}
            );
        } else {
            $self->{discovery}->{failed_discoveries}++;
            if ($self->{discovery}->{is_manual} == 1) {
                $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{failed} = 1;
                $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{message} = $data->{data}->{message};
                $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{data} = $data->{data};
            }
        }
    } elsif ($data->{code} == GORGONE_ACTION_FINISH_KO) {
        if ($self->{discovery}->{is_manual} == 1) {
            $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{failed} = 1;
            $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{message} = $data->{data}->{message};
        }
        $self->{discovery}->{failed_discoveries}++;
    } else {
        return 0;
    }

    # One slot freed on this poller: try to launch the next queued command.
    $self->{service_current_commands_poller}->{ $self->{discovery}->{hosts}->{ $options{host_id} }->{poller_id} }--;
    $self->service_execute_commands();

    $self->{discovery}->{done_discoveries}++;
    my $progress = $self->{discovery}->{done_discoveries} * 100 / $self->{discovery}->{count_discoveries};
    # Emit a progress log only every 5% step to avoid log spam.
    my $div = int(int($progress) / 5);
    if ($div > $self->{discovery}->{progress_div}) {
        $self->{discovery}->{progress_div} = $div;
        $self->send_log(
            code => GORGONE_MODULE_CENTREON_AUTODISCO_SVC_PROGRESS,
            token => $self->{discovery}->{token},
            instant => 1,
            data => {
                message => 'current progress',
                complete => sprintf('%.2f', $progress)
            }
        );
    }

    $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} current count $self->{discovery}->{done_discoveries}/$self->{discovery}->{count_discoveries}");
    if ($self->{discovery}->{done_discoveries} == $self->{discovery}->{count_discoveries}) {
        $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} discovery finished");
        $self->{finished} = 1;

        $self->send_log(
            code => GORGONE_ACTION_FINISH_OK,
            token => $self->{discovery}->{token},
            data => {
                message => 'discovery finished',
                failed_discoveries => $self->{discovery}->{failed_discoveries},
                count_discoveries => $self->{discovery}->{count_discoveries},
                journal => $self->{discovery}->{journal},
                manual => $self->{discovery}->{manual}
            }
        );
    }

    return 0;
}
# Post-discovery hook: mark the post-execution phase as reached and, for
# automatic (non-manual) discoveries, restart impacted pollers and send the
# summary e-mail. Always returns 0.
sub service_discovery_post_exec {
    my ($self, %options) = @_;

    $self->{post_execution} = 1;

    my $manual = $self->{discovery}->{is_manual};
    if ($manual == 0) {
        # Automatic run: reload configuration on flagged pollers and notify.
        $self->restart_pollers();
        $self->send_email();
    }

    return 0;
}
# Drain the per-rule / per-poller host queues: for each queued host, build the
# discovery command line and dispatch it to the poller through internal
# ADDLISTENER + COMMAND actions. At most service_parrallel_commands_poller
# commands run concurrently on a given poller; the counter is decremented by
# discoverylistener() when a result comes back.
sub service_execute_commands {
    my ($self, %options) = @_;

    foreach my $rule_id (keys %{$self->{discovery}->{rules}}) {
        foreach my $poller_id (keys %{$self->{discovery}->{rules}->{$rule_id}->{hosts}}) {
            next if (scalar(@{$self->{discovery}->{rules}->{$rule_id}->{hosts}->{$poller_id}}) <= 0);
            $self->{service_current_commands_poller}->{$poller_id} = 0 if (!defined($self->{service_current_commands_poller}->{$poller_id}));

            while (1) {
                # Stop when the poller's concurrency budget is exhausted.
                last if ($self->{service_current_commands_poller}->{$poller_id} >= $self->{service_parrallel_commands_poller});
                my $host_id = shift @{$self->{discovery}->{rules}->{$rule_id}->{hosts}->{$poller_id}};
                last if (!defined($host_id));

                my $host = $self->{discovery}->{hosts}->{$host_id};
                $self->{service_current_commands_poller}->{$poller_id}++;

                # Substitute host macros, poller resources and vault options
                # into the rule's command line.
                my $command = gorgone::modules::centreon::autodiscovery::services::resources::substitute_service_discovery_command(
                    command_line => $self->{discovery}->{rules}->{$rule_id}->{command_line},
                    host => $host,
                    poller => $self->{service_pollers}->{$poller_id},
                    vault_count => $options{vault_count}
                );

                $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} [" .
                    $self->{discovery}->{rules}->{$rule_id}->{rule_alias} . "] [" .
                    $self->{service_pollers}->{$poller_id}->{name} . "] [" .
                    $host->{host_name} . "] -> substitute string: " . $command
                );

                # Register a listener first so the command result is routed back
                # to SERVICEDISCOVERYLISTENER with the matching token.
                $self->send_internal_action({
                    action => 'ADDLISTENER',
                    data => [
                        {
                            identity => 'gorgoneautodiscovery',
                            event => 'SERVICEDISCOVERYLISTENER',
                            target => $poller_id,
                            token => 'svc-disco-' . $self->{uuid} . '-' . $rule_id . '-' . $host_id,
                            timeout => 120,
                            log_pace => 15
                        }
                    ]
                });

                $self->send_internal_action({
                    action => 'COMMAND',
                    target => $poller_id,
                    token => 'svc-disco-' . $self->{uuid} . '-' . $rule_id . '-' . $host_id,
                    data => {
                        instant => 1,
                        content => [
                            {
                                command => $command,
                                timeout => 90
                            }
                        ]
                    }
                });
            }
        }
    }
}
# Entry point of a service-discovery run. Loads pollers, audit settings, vault
# configuration, rules and target hosts, initializes $self->{discovery} state,
# then dispatches the first batch of discovery commands.
# Returns 0 on success, -1 (after sending an error log) on any setup failure.
#
# FIX: the "no hosts found for rule" log used $options{rule}->{rule_alias},
# but no 'rule' option exists in this sub, so the rule name always logged
# empty/undef. It now reads the alias from $rules->{$rule_id}.
sub launchdiscovery {
    my ($self, %options) = @_;

    my $data = $options{frame}->getData();

    $options{token} = $self->generate_token() if (!defined($options{token}));

    $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} discovery start");
    $self->send_log(
        code => GORGONE_ACTION_BEGIN,
        token => $options{token},
        data => { message => 'servicediscovery start' }
    );

    ################
    # get pollers
    ################
    $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} load pollers configuration");
    my ($status, $message, $pollers) = gorgone::modules::centreon::autodiscovery::services::resources::get_pollers(
        class_object_centreon => $self->{class_object_centreon}
    );
    if ($status < 0) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
        return -1;
    }
    $self->{service_pollers} = $pollers;

    ################
    # get audit user
    ################
    $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} load audit configuration");

    ($status, $message, my $audit_enable) = gorgone::modules::centreon::autodiscovery::services::resources::get_audit(
        class_object_centstorage => $self->{class_object_centstorage}
    );
    if ($status < 0) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
        return -1;
    }

    if (!defined($self->{tpapi_clapi}->get_username())) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => 'clapi ' . $self->{tpapi_clapi}->error());
        return -1;
    }
    ($status, $message, my $user_id) = gorgone::modules::centreon::autodiscovery::services::resources::get_audit_user_id(
        class_object_centreon => $self->{class_object_centreon},
        clapi_user => $self->{tpapi_clapi}->get_username()
    );
    if ($status < 0) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
        return -1;
    }
    $self->{audit_user_id} = $user_id;

    ##################
    # get vault config
    ##################

    ($status, $message, my $vault_count) = $self->get_vault_count();
    if ($status < 0) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
        return -1;
    }

    ################
    # get rules
    ################

    $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} load rules configuration");

    ($status, $message, my $rules) = gorgone::modules::centreon::autodiscovery::services::resources::get_rules(
        class_object_centreon => $self->{class_object_centreon},
        filter_rules => $data->{content}->{filter_rules},
        force_rule => (defined($data->{content}->{force_rule}) && $data->{content}->{force_rule} =~ /^1$/) ? 1 : 0
    );
    if ($status < 0) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
        return -1;
    }

    #################
    # get hosts
    #################
    gorgone::modules::centreon::autodiscovery::services::resources::reset_macro_hosts();
    my $all_hosts = {};
    my $total = 0;
    foreach my $rule_id (keys %$rules) {
        ($status, $message, my $hosts, my $count) = gorgone::modules::centreon::autodiscovery::services::resources::get_hosts(
            host_template => $rules->{$rule_id}->{host_template},
            poller_id => $rules->{$rule_id}->{poller_id},
            class_object_centreon => $self->{class_object_centreon},
            with_macro => 1,
            host_lookup => $data->{content}->{filter_hosts},
            poller_lookup => $data->{content}->{filter_pollers},
            vault_count => $vault_count
        );
        if ($status < 0) {
            $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
            return -1;
        }

        if (!defined($hosts) || scalar(keys %$hosts) == 0) {
            # BUGFIX: was $options{rule}->{rule_alias}, which is never passed to
            # this sub and always logged an empty rule name.
            $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} no hosts found for rule '" . $rules->{$rule_id}->{rule_alias} . "'");
            next;
        }

        $total += $count;
        $rules->{$rule_id}->{hosts} = $hosts->{pollers};
        $all_hosts = { %$all_hosts, %{$hosts->{infos}} };

        # Pre-translate $XXX$ / @SERVICENAME@ placeholders of the custom display
        # and variable expressions into Perl-evaluable accessors.
        foreach (('rule_scan_display_custom', 'rule_variable_custom')) {
            if (defined($rules->{$rule_id}->{$_}) && $rules->{$rule_id}->{$_} ne '') {
                $rules->{$rule_id}->{$_} =~ s/\$([a-zA-Z_\-\.]*?)\$/\$values->{attributes}->{$1}/msg;
                $rules->{$rule_id}->{$_} =~ s/\@SERVICENAME\@/\$values->{service_name}/msg;
            }
        }
    }

    if ($total == 0) {
        $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => 'no hosts found');
        return -1;
    }

    # Central mutable state of this discovery run, consumed by the listener.
    $self->{discovery} = {
        token => $options{token},
        count_discoveries => $total,
        failed_discoveries => 0,
        done_discoveries => 0,
        progress_div => 0,
        rules => $rules,
        manual => {},
        is_manual => (defined($data->{content}->{manual}) && $data->{content}->{manual} =~ /^1$/) ? 1 : 0,
        dry_run => (defined($data->{content}->{dry_run}) && $data->{content}->{dry_run} =~ /^1$/) ? 1 : 0,
        audit_enable => $audit_enable,
        no_generate_config => (defined($data->{content}->{no_generate_config}) && $data->{content}->{no_generate_config} =~ /^1$/) ? 1 : 0,
        options => defined($data->{content}) ? $data->{content} : {},
        hosts => $all_hosts,
        journal => [],
        pollers_reload => {}
    };

    $self->service_execute_commands(vault_count => $vault_count);

    return 0;
}
sub event {
    my ($self, %options) = @_;

    # Delegate event processing to the autodiscovery class instance.
    $self->{class_autodiscovery}->event();
}

# Detect whether a vault is configured. Reads $self->{config}->{vault_file};
# returns (status, message, count) where count is 1 when a valid vault JSON
# configuration file exists, 0 otherwise, and status -1 on read/parse errors.
#
# FIXES: the JSON decode error message reported $! (errno) instead of $@
# (the eval error); the unused $size lexical was removed; the needless empty
# prototype on the sub name was dropped (prototypes are ignored for method
# calls anyway).
sub get_vault_count {
    my ($self, %options) = @_;

    # Check if vault config file exists
    if (-e $self->{config}->{vault_file}) {
        my $fh;
        # Read config file
        if (!open($fh, '<', $self->{config}->{vault_file})) {
            return (-1, "Could not open $self->{config}->{vault_file}: $!");
        }
        my $content = do {
            local $/;
            <$fh>
        };
        close $fh;
        # Check JSON validity; eval failures are reported in $@, not $!.
        my $vault_config;
        eval {
            $vault_config = JSON::XS->new->decode($content);
        };
        if ($@) {
            return (-1, "Cannot decode json $self->{config}->{vault_file}: $@");
        }
        return (0, '', 1);
    }
    return (0, '', 0);
}

1;
package gorgone::modules::centreon::autodiscovery::services::resources;

use strict;
use warnings;

# Load all pollers (nagios_server) keyed by id, each enriched with its active
# $USERn$-style resource macros. Returns (status, message, pollers).
sub get_pollers {
    my (%options) = @_;

    my ($status, $pollers) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT id, name FROM nagios_server',
        mode => 1,
        keys => 'id'
    );
    if ($status == -1) {
        return (-1, 'cannot get poller list');
    }

    if (scalar(keys %$pollers) == 0) {
        return (-1, 'no pollers found in configuration');
    }

    foreach my $poller_id (keys %$pollers) {
        $pollers->{$poller_id}->{resources} = {};
        ($status, my $resources) = $options{class_object_centreon}->custom_execute(
            request =>
                'SELECT resource_name, resource_line FROM cfg_resource_instance_relations, cfg_resource WHERE cfg_resource_instance_relations.instance_id = ?' .
                " AND cfg_resource_instance_relations.resource_id = cfg_resource.resource_id AND resource_activate = '1'",
            bind_values => [$poller_id],
            mode => 2
        );
        if ($status == -1) {
            return (-1, 'cannot get rules resource list');
        }

        foreach (@$resources) {
            # resource_name => resource_line (e.g. $USER1$ => /usr/lib/plugins)
            $pollers->{$poller_id}->{resources}->{ $_->[0] } = $_->[1];
        }
    }

    return (0, '', $pollers);
}

# Read the audit_log_option flag from centstorage. Returns (status, message,
# audit_flag).
# NOTE(review): returns 1 (not 0) on success, unlike the other getters in this
# module; the only visible caller tests `$status < 0`, so this is harmless but
# inconsistent — confirm before normalizing.
sub get_audit {
    my (%options) = @_;
    my $audit = 0;

    my ($status, $rows) = $options{class_object_centstorage}->custom_execute(
        request =>
            'SELECT audit_log_option FROM config LIMIT 1',
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get audit configuration');
    }
    if (defined($rows->[0]->[0])) {
        $audit = $rows->[0]->[0];
    }

    return (1, '', $audit);
}

# Resolve the contact_id of the CLAPI user used to sign audit entries.
# Returns (status, message, user_id); user_id is 0 when the alias is unknown.
sub get_audit_user_id {
    my (%options) = @_;
    my $user_id = 0;

    my ($status, $contacts) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT contact_id FROM contact WHERE contact_alias = ?',
        bind_values => [$options{clapi_user}],
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get audit user');
    }

    if (defined($contacts->[0])) {
        $user_id = $contacts->[0]->[0];
    }

    return (0, '', $user_id);
}
# Load discovery rules and all of their related configuration (change rules,
# inclusion/exclusion filters, macros, poller and host-template relations,
# services previously created by autodiscovery, and notified contacts).
# Returns (status, message, rules) keyed by rule_id.
sub get_rules {
    my (%options) = @_;

    # Active rules only, unless force_rule is set.
    my $filter = "rule_activate = '1' AND ";
    if (defined($options{force_rule}) && $options{force_rule} == 1) {
        $filter = '';
    }

    my @bind_values = ();
    if (defined($options{filter_rules}) && scalar(@{$options{filter_rules}}) > 0) {
        my $append = '';
        $filter .= 'rule_alias IN (';
        foreach my $rule (@{$options{filter_rules}}) {
            $filter .= $append . '?';
            $append = ', ';
            push @bind_values, $rule;
        }
        $filter .= ') AND ';
    }

    my ($status, $rules) = $options{class_object_centreon}->custom_execute(
        request =>
            "SELECT rule_id, rule_alias, service_display_name, rule_disable, rule_update, command_line, service_template_model_id, rule_scan_display_custom, rule_variable_custom
            FROM mod_auto_disco_rule, command WHERE " . $filter . " mod_auto_disco_rule.command_command_id = command.command_id",
        bind_values => \@bind_values,
        mode => 1,
        keys => 'rule_id'
    );
    if ($status == -1) {
        return (-1, 'cannot get rules list');
    }
    if (scalar(keys %$rules) == 0) {
        return (-1, 'no rules found in configuration');
    }

    # IN-clause reused by all the relation queries below; built from our own
    # numeric rule ids, so direct interpolation is safe here.
    $filter = '(' . join(',', keys %$rules) . ')';

    ############################
    # Get mod_auto_disco_change
    ($status, my $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_id, change_str, change_regexp, change_replace, change_modifier FROM mod_auto_disco_change WHERE rule_id IN ' . $filter . ' ORDER BY rule_id, change_order ASC',
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules change list');
    }
    foreach (@$datas) {
        $rules->{ $_->[0] }->{change} = [] if (!defined($rules->{ $_->[0] }->{change}));
        push @{$rules->{ $_->[0] }->{change}}, { change_str => $_->[1], change_regexp => $_->[2], change_replace => $_->[3], change_modifier => $_->[4] };
    }

    #########################################
    # Get mod_auto_disco_inclusion_exclusion
    ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_id, exinc_type, exinc_str, exinc_regexp FROM mod_auto_disco_inclusion_exclusion WHERE rule_id IN ' . $filter . ' ORDER BY rule_id, exinc_order ASC',
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules exinc list');
    }
    foreach (@$datas) {
        $rules->{ $_->[0] }->{exinc} = [] if (!defined($rules->{ $_->[0] }->{exinc}));
        push @{$rules->{ $_->[0] }->{exinc}}, { exinc_type => $_->[1], exinc_str => $_->[2], exinc_regexp => $_->[3] };
    }

    #########################################
    # Get mod_auto_disco_macro
    ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_id, macro_name, macro_value, is_empty FROM mod_auto_disco_macro WHERE rule_id IN ' . $filter,
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules macro list');
    }
    foreach (@$datas) {
        $rules->{ $_->[0] }->{macro} = {} if (!defined($rules->{ $_->[0] }->{macro}));
        $rules->{ $_->[0] }->{macro}->{ $_->[1] } = { macro_value => $_->[2], is_empty => $_->[3] };
    }

    #########################################
    # Get mod_auto_disco_inst_rule_relation
    ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_rule_id as rule_id, instance_id FROM mod_auto_disco_inst_rule_relation WHERE rule_rule_id IN ' . $filter,
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules instance list');
    }
    foreach (@$datas) {
        $rules->{ $_->[0] }->{poller_id} = [] if (!defined($rules->{ $_->[0] }->{poller_id}));
        push @{$rules->{ $_->[0] }->{poller_id}}, $_->[1];
    }

    #########################################
    # Get mod_auto_disco_ht_rule_relation
    ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_rule_id as rule_id, host_host_id FROM mod_auto_disco_ht_rule_relation WHERE rule_rule_id IN ' . $filter,
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules host template list');
    }
    foreach (@$datas) {
        $rules->{ $_->[0] }->{host_template} = [] if (!defined($rules->{ $_->[0] }->{host_template}));
        push @{$rules->{ $_->[0] }->{host_template}}, $_->[1];
    }

    ########################################
    # Get services added by autodisco
    ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_rule_id as rule_id, host_host_id as host_id, service_id, service_activate, service_description FROM mod_auto_disco_rule_service_relation, service, host_service_relation WHERE rule_rule_id IN ' . $filter .
            " AND mod_auto_disco_rule_service_relation.service_service_id = service.service_id AND service.service_id = host_service_relation.service_service_id",
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules host template list');
    }
    foreach (@$datas) {
        $rules->{ $_->[0] }->{linked_services} = {} if (!defined($rules->{ $_->[0] }->{linked_services}));
        $rules->{ $_->[0] }->{linked_services}->{ $_->[1] } = {} if (!defined($rules->{ $_->[0] }->{linked_services}->{ $_->[1] }));
        $rules->{ $_->[0] }->{linked_services}->{ $_->[1] }->{ $_->[2] } = {
            service_activate => $_->[3], service_description => $_->[4]
        };
    }

    #########################################
    # Get Contact
    ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => 'SELECT rule_id, contact_id, cg_id FROM mod_auto_disco_rule_contact_relation WHERE rule_id IN ' . $filter,
        mode => 2
    );
    if ($status == -1) {
        return (-1, 'cannot get rules contact list');
    }
    foreach (@$datas) {
        if (defined($_->[1])) {
            # Already add it
            next if (defined($rules->{ $_->[0] }->{contact}->{ $_->[1] }));
            if ((my $contact = get_contact(class_object_centreon => $options{class_object_centreon}, contact_id => $_->[1]))) {
                $rules->{ $_->[0] }->{contact} = {} if (!defined($rules->{ $_->[0] }->{contact}));
                $rules->{ $_->[0] }->{contact}->{ $contact->{contact_id} } = { contact_email => $contact->{contact_email} };
            }
        } elsif (defined($_->[2])) {
            # Contact group: expand to its member contacts.
            # NOTE(review): cg_id is interpolated into the SQL string rather
            # than bound; it comes from the database itself, but binding would
            # be safer and consistent with the rest of the module.
            ($status, my $datas2) = $options{class_object_centreon}->custom_execute(
                request => "SELECT contact_contact_id as contact_id FROM contactgroup, contactgroup_contact_relation WHERE contactgroup.cg_id = '" . $_->[2] . "' AND contactgroup.cg_id = contactgroup_contact_relation.contactgroup_cg_id",
                mode => 2
            );
            if ($status == -1) {
                return (-1, 'cannot get rules contactgroup list');
            }
            foreach my $row (@$datas2) {
                # Already add it
                next if (defined($rules->{ $_->[0] }->{contact}->{ $row->[0] }));
                if ((my $contact = get_contact(class_object_centreon => $options{class_object_centreon}, contact_id => $row->[0]))) {
                    $rules->{ $_->[0] }->{contact} = {} if (!defined($rules->{ $_->[0] }->{contact}));
                    $rules->{ $_->[0] }->{contact}->{ $contact->{contact_id} } = { contact_email => $contact->{contact_email} };
                }
            }
        }
    }

    # Filter rules
    # NOTE(review): filter_rules is passed as an ARRAY ref everywhere else in
    # this sub, but this guard tests ref(...) eq 'SCALAR', so this post-filter
    # branch appears unreachable (relevant when force_rule bypasses the SQL
    # filter) — confirm intent before changing to 'ARRAY'.
    if (defined($options{filter_rules}) && ref($options{filter_rules}) eq 'SCALAR') {
        foreach (keys %$rules) {
            my $find = 0;
            foreach my $opt_rule (@{$options{filter_rules}}) {
                if ($opt_rule eq $rules->{$_}->{rule_alias}) {
                    $find = 1;
                    last;
                }
            }

            if ($find == 0) {
                delete $rules->{$_};
            }
        }
    }

    return (0, '', $rules);
}

# Fetch one active contact (id + email). Returns the contact hashref, undef
# when the contact is missing/inactive, or 0 on query error.
sub get_contact {
    my (%options) = @_;

    my ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => "SELECT contact_id, contact_email FROM contact WHERE contact_id = '" . $options{contact_id} . "' AND contact_activate = '1'",
        mode => 1,
        keys => 'contact_id'
    );

    if ($status == -1) {
        return 0;
    }

    return defined($datas->{ $options{contact_id} }) ? $datas->{ $options{contact_id} } : undef;
}
# Per-process cache of host macros so hosts shared by several rules are only
# resolved once per discovery run (cleared by reset_macro_hosts()).
my $done_macro_host = {};

sub reset_macro_hosts {
    $done_macro_host = {};
}

# Resolve the hosts targeted by a rule: hosts carrying one of the rule's host
# templates, optionally restricted by host name and/or poller filters, with
# their macros resolved when with_macro is set.
# Returns (status, message, hosts, count) where hosts is
# { pollers => { poller_id => [host_id, ...] }, infos => { host_id => {...} } }.
#
# FIXES: the "no host templates" early return handed back an ARRAY ref with a
# success status, which callers then dereferenced as a hash (keys %$hosts ->
# runtime die); it now returns undef, which callers already treat as "no hosts
# found". Also fixed the 'cannot host list' error-message typo.
sub get_hosts {
    my (%options) = @_;

    if (!defined($options{host_template}) || scalar(@{$options{host_template}}) == 0) {
        # Nothing to look up: report success with no hosts (callers check
        # defined($hosts)).
        return (0, '', undef, 0);
    }

    my $filter = '';
    my $filter_append = '';
    my @bind_values = ();

    # Optional host-name restriction.
    my $filter_host = '';
    if (defined($options{host_lookup}) && ref($options{host_lookup}) eq 'ARRAY' && scalar(@{$options{host_lookup}}) > 0) {
        my $filter_append = '';
        foreach (@{$options{host_lookup}}) {
            $filter_host .= $filter_append . '?';
            $filter_append = ', ';
            push @bind_values, $_;
        }
        $filter_host = ' host.host_name IN (' . $filter_host . ') AND ';
    }

    # Mandatory host-template restriction (the rule's templates).
    foreach (@{$options{host_template}}) {
        $filter .= $filter_append . '?';
        $filter_append = ', ';
        push @bind_values, $_;
    }
    $filter = ' host_template_relation.host_tpl_id IN (' . $filter . ') AND ';

    # Optional poller restriction: by poller name (lookup) or by poller id.
    my $filter_poller = '';
    my $join_table = '';
    if (defined($options{poller_lookup}) && ref($options{poller_lookup}) eq 'ARRAY' && scalar(@{$options{poller_lookup}}) > 0) {
        my $filter_append = '';
        foreach (@{$options{poller_lookup}}) {
            $filter_poller .= $filter_append . '?';
            $filter_append = ', ';
            push @bind_values, $_;
        }
        $filter_poller = ' nagios_server.name IN (' . $filter_poller . ') AND nagios_server.id = ns_host_relation.nagios_server_id AND ';
        $join_table = ', nagios_server ';
    } elsif (defined($options{poller_id}) && scalar(@{$options{poller_id}}) > 0) {
        my $filter_append = '';
        foreach (@{$options{poller_id}}) {
            $filter_poller .= $filter_append . '?';
            $filter_append = ', ';
            push @bind_values, $_;
        }
        $filter_poller = ' ns_host_relation.nagios_server_id IN (' . $filter_poller . ') AND nagios_server.id = ns_host_relation.nagios_server_id AND ';
        $join_table = ', nagios_server ';
    }

    my ($status, $datas) = $options{class_object_centreon}->custom_execute(
        request => "SELECT host_id, host_address, host_name, nagios_server_id as poller_id
            FROM host_template_relation, host, ns_host_relation " . $join_table . "
            WHERE " . $filter_host . $filter . " host_template_relation.host_host_id = host.host_id
                AND " . $filter_poller . " host.host_id = ns_host_relation.host_host_id
                AND `host_activate` = '1'
            ",
        bind_values => \@bind_values,
        mode => 1,
        keys => 'host_id'
    );
    if ($status == -1) {
        return (-1, 'cannot get host list');
    }

    my $hosts = { pollers => {}, infos => {} };
    my $count = 0;
    foreach my $host_id (keys %$datas) {
        if (defined($options{with_macro}) && $options{with_macro} == 1) {
            # Reuse macros already resolved for this host in this run.
            if (defined($done_macro_host->{ $host_id })) {
                $datas->{$host_id}->{macros} = $done_macro_host->{ $host_id };
            } else {
                ($status, my $message, my $macros) = get_macros_host(
                    host_id => $host_id,
                    class_object_centreon => $options{class_object_centreon},
                    vault_count => $options{vault_count}
                );
                if ($status == -1) {
                    return (-1, $message);
                }
                $datas->{$host_id}->{macros} = $macros;
                $done_macro_host->{ $host_id } = $macros;
            }
        }

        $count++;
        push @{$hosts->{pollers}->{ $datas->{$host_id}->{poller_id} }}, $host_id;
        $hosts->{infos}->{$host_id} = $datas->{$host_id};
    }

    return (0, '', $hosts, $count);
}

# Set a macro only if it has not been set yet (first writer wins: a host's own
# macros take precedence over those inherited from its templates).
sub set_macro {
    my ($macros, $name, $value) = @_;

    if (!defined($macros->{$name})) {
        $macros->{$name} = $value;
    }
}
# Resolve the effective macros of a host by walking the host plus its template
# chain (depth-first via a stack, templates in `order` precedence). Earlier
# definitions win thanks to set_macro's first-writer-wins behavior.
# Returns (status, message, \%macros).
sub get_macros_host {
    my (%options) = @_;
    my ($status, $datas);
    my %macros = ();
    my %loop_stop = ();
    my @stack = ($options{host_id});

    while ((my $lhost_id = shift(@stack))) {
        if (defined($loop_stop{$lhost_id})) {
            # Already done the host
            next;
        }
        $loop_stop{$lhost_id} = 1;

        # NOTE(review): $lhost_id is interpolated into the SQL; it originates
        # from host_id columns (numeric), but binding would be safer.
        ($status, $datas) = $options{class_object_centreon}->custom_execute(
            request => "SELECT host_snmp_community, host_snmp_version FROM host WHERE host_id = " . $lhost_id . " LIMIT 1",
            mode => 2
        );
        if ($status == -1) {
            return (-1, 'get macro: cannot get snmp information');
        }

        if (defined($datas->[0]->[0]) && $datas->[0]->[0] ne '') {
            set_macro(\%macros, '$_HOSTSNMPCOMMUNITY$', $datas->[0]->[0]);
        }
        if (defined($datas->[0]->[1]) && $datas->[0]->[1] ne '') {
            set_macro(\%macros, '$_HOSTSNMPVERSION$', $datas->[0]->[1]);
        }

        ($status, $datas) = $options{class_object_centreon}->custom_execute(
            request => "SELECT host_macro_name, host_macro_value, is_password FROM on_demand_macro_host WHERE host_host_id = " . $lhost_id,
            mode => 2
        );
        if ($status == -1) {
            return (-1, 'get macro: cannot get on_demand_macro_host');
        }
        foreach (@$datas) {
            my $macro_name = $_->[0];
            my $macro_value = $_->[1];
            my $is_password = $_->[2];
            # Replace macro value if a vault is used
            if (defined($options{vault_count}) && $options{vault_count} > 0 && defined($is_password) && $is_password == 1) {
                set_macro(\%macros, $macro_name, "{" . $macro_name . "::secret::" . $macro_value . "}");
            } else {
                set_macro(\%macros, $macro_name, $macro_value);
            }
        }

        # Queue parent templates; unshift + DESC order preserves precedence.
        ($status, $datas) = $options{class_object_centreon}->custom_execute(
            request => "SELECT host_tpl_id FROM host_template_relation WHERE host_host_id = " . $lhost_id . " ORDER BY `order` DESC",
            mode => 2
        );
        if ($status == -1) {
            return (-1, 'get macro: cannot get host_template_relation');
        }
        foreach (@$datas) {
            unshift @stack, $_->[0];
        }
    }

    return (0, '', \%macros);
}

# Build the final discovery command line: substitute host macros ($_HOST...$),
# poller resource macros ($USERn$/$CENTREONPLUGINS$), $HOSTADDRESS$/$HOSTNAME$,
# and append the vault pass-manager option when a vault is configured.
sub substitute_service_discovery_command {
    my (%options) = @_;

    my $command = $options{command_line};
    while ($command =~ /(\$_HOST.*?\$)/) {
        my ($substitute_str, $macro) = ('', $1);
        # Unknown macros are replaced by an empty string so the loop terminates.
        $substitute_str = $options{host}->{macros}->{$macro} if (defined($options{host}->{macros}->{$macro}));
        $command =~ s/\Q$macro\E/$substitute_str/g;
    }
    while ($command =~ /(\$(?:USER.*?|CENTREONPLUGINS)\$)/) {
        my ($substitute_str, $macro) = ('', $1);
        $substitute_str = $options{poller}->{resources}->{$macro} if (defined($options{poller}->{resources}->{$macro}));
        $command =~ s/\Q$macro\E/$substitute_str/g;
    }

    $command =~ s/\$HOSTADDRESS\$/$options{host}->{host_address}/g;
    $command =~ s/\$HOSTNAME\$/$options{host}->{host_name}/g;

    if (defined($options{vault_count}) && $options{vault_count} > 0) {
        $command .= ' --pass-manager="centreonvault"';
    }

    return $command;
}
# Apply a rule's change (search/replace) definitions to the discovered
# attributes, build the service name from service_display_name, then apply the
# @SERVICENAME@-targeted change definitions to the resulting name.
# Mutates $options{discovery_svc} in place.
sub change_vars {
    my (%options) = @_;

    # First we change '$$' values
    if (defined($options{rule}->{change})) {
        foreach my $change (@{$options{rule}->{change}}) {
            next if (!defined($change->{change_str}) || $change->{change_str} eq '' ||
                !defined($change->{change_regexp}) || $change->{change_regexp} eq '' ||
                $change->{change_str} =~ /\@SERVICENAME\@/);

            if ($change->{change_str} !~ /\$(.+?)\$/) {
                $options{logger}->writeLogError("$options{logger_pre_message} -> not a valid change configuration");
                next;
            }
            # $1 holds the attribute name captured by the match above.
            my $attr = $1;
            if (!defined($options{discovery_svc}->{attributes}->{$attr})) {
                $options{logger}->writeLogError("$options{logger_pre_message} -> change: '$attr' not exist in XML");
                next;
            }

            # NOTE(review): the admin-configured regexp/replacement/modifier are
            # interpolated into a string eval — a malformed or malicious rule
            # can execute arbitrary code here; inputs come from the Centreon
            # configuration database, not from monitored hosts.
            eval "\$options{discovery_svc}->{attributes}->{\$attr} =~ s{$change->{change_regexp}}{$change->{change_replace}}$change->{change_modifier}";
        }
    }

    $options{discovery_svc}->{service_name} = substitute_vars(
        value => $options{rule}->{service_display_name},
        service_name => $options{discovery_svc}->{service_name},
        attributes => $options{discovery_svc}->{attributes}
    );

    if (defined($options{rule}->{change})) {
        # Second pass for service_name now
        foreach my $change (@{$options{rule}->{change}}) {
            next if (!defined($change->{change_str}) || $change->{change_str} eq '' ||
                !defined($change->{change_regexp}) || $change->{change_regexp} eq '' ||
                $change->{change_str} !~ /\@SERVICENAME\@/);
            eval "\$options{discovery_svc}->{service_name} =~ s{$change->{change_regexp}}{$change->{change_replace}}$change->{change_modifier}";
        }
    }
}

# Expand $attr$ placeholders from the discovered attributes and the
# @SERVICENAME@ placeholder in a template string; unknown placeholders become
# empty strings. Returns the expanded string.
sub substitute_vars {
    my (%options) = @_;

    my $value = $options{value};
    while ($value =~ /\$(.+?)\$/) {
        my ($substitute_str, $macro) = ('', $1);
        $substitute_str = $options{attributes}->{$macro} if (defined($options{attributes}->{$macro}));
        $value =~ s/\$\Q$macro\E\$/$substitute_str/g;
    }
    $value =~ s/\@SERVICENAME\@/$options{service_name}/g;
    return $value;
}
'b' : 'B')); +} + +sub check_exinc { + my (%options) = @_; + + return 0 if (!defined($options{rule}->{exinc})); + foreach my $exinc (@{$options{rule}->{exinc}}) { + next if (!defined($exinc->{exinc_str}) || $exinc->{exinc_str} eq ''); + my $value = substitute_vars( + value => $exinc->{exinc_str}, + service_name => $options{discovery_svc}->{service_name}, + attributes => $options{discovery_svc}->{attributes} + ); + if ($exinc->{exinc_type} == 1 && $value =~ /$exinc->{exinc_regexp}/) { + $options{logger}->writeLogDebug("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> inclusion '$exinc->{exinc_regexp}'"); + return 0; + } elsif ($exinc->{exinc_type} == 0 && $value =~ /$exinc->{exinc_regexp}/) { + $options{logger}->writeLogDebug("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> exclusion '$exinc->{exinc_regexp}'"); + return 1; + } + } + + return 0; +} + +sub get_macros { + my (%options) = @_; + my $macros = {}; + + return $macros if (!defined($options{rule}->{macro})); + foreach my $macro (keys %{$options{rule}->{macro}}) { + $macros->{$macro} = substitute_vars( + value => $options{rule}->{macro}->{$macro}->{macro_value}, + service_name => $options{discovery_svc}->{service_name}, + attributes => $options{discovery_svc}->{attributes} + ); + } + + return $macros; +} + +sub get_service { + my (%options) = @_; + + my $service; + my ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT service_id, service_template_model_stm_id, service_activate, svc_macro_name, svc_macro_value FROM host, host_service_relation, service LEFT JOIN on_demand_macro_service ON on_demand_macro_service.svc_svc_id = service.service_id WHERE host_id = ' . $options{host_id} . 
+ " AND host.host_id = host_service_relation.host_host_id AND host_service_relation.service_service_id = service.service_id AND service.service_description = ?", + bind_values => [$options{service_name}], + mode => 2 + ); + if ($status == -1) { + $options{logger}->writeLogError("$options{logger_pre_message} [" . $options{service_name} . "] -> cannot check service in configuration"); + return 1; + } + + foreach (@$datas) { + $service = { + id => $_->[0], + template_id => $_->[1], + activate => $_->[2], + macros => {} + } if (!defined($service->{id})); + if (defined($_->[3])) { + $service->{macros}->{ $_->[3] } = $_->[4]; + } + } + + return (0, $service); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/engine/class.pm b/gorgone/gorgone/modules/centreon/engine/class.pm new file mode 100644 index 00000000000..178092a33ee --- /dev/null +++ b/gorgone/gorgone/modules/centreon/engine/class.pm @@ -0,0 +1,317 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::engine::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use JSON::XS; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{timeout} = defined($connector->{config}->{timeout}) ? $connector->{config}->{timeout} : 5; + + $connector->set_signal_handlers; + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[engine] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub action_enginecommand { + my ($self, %options) = @_; + + my $command_file = ''; + if (defined($options{data}->{content}->{command_file}) && $options{data}->{content}->{command_file} ne '') { + $command_file = $options{data}->{content}->{command_file}; + } elsif (defined($self->{config}->{command_file}) && $self->{config}->{command_file} ne '') { + $command_file = $self->{config}->{command_file}; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + ); + + if (!defined($command_file) || $command_file eq '') { + 
$self->{logger}->writeLogError("[engine] Need command_file (config or call) argument"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "need command_file (config or call) argument" + } + ); + return -1; + } + if (! -e $command_file) { + $self->{logger}->writeLogError("[engine] Command file '$command_file' must exist"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command file '$command_file' must exist" + } + ); + return -1; + } + if (! -p $command_file) { + $self->{logger}->writeLogError("[engine] Command file '$command_file' must be a pipe file"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command file '$command_file' must be a pipe file" + } + ); + return -1; + } + if (! -w $command_file) { + $self->{logger}->writeLogError("[engine] Command file '$command_file' must be writeable"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command file '$command_file' must be writeable" + } + ); + return -1; + } + + my $fh; + eval { + local $SIG{ALRM} = sub { die 'Timeout command' }; + alarm $self->{timeout}; + open($fh, ">", $command_file) or die "cannot open '$command_file': $!"; + + foreach my $command (@{$options{data}->{content}->{commands}}) { + $self->{logger}->writeLogInfo("[engine] Processing external command '" . $command . "'"); + print $fh $command . 
"\n"; + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command has been submitted", + command => $command + } + ); + } + + close $fh; + alarm 0; + }; + if ($@) { + close $fh if (defined($fh)); + $self->{logger}->writeLogError("[engine] Submit engine command issue: $@"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "submit engine command issue: $@" + } + ); + return -1 + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has finished" + } + ); + + return 0; +} + +sub action_run { + my ($self, %options) = @_; + + my $context; + { + local $SIG{__DIE__}; + $context = ZMQ::FFI->new(); + } + + my $socket_log = gorgone::standard::library::connect_com( + context => $context, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-engine-'. 
$$, + logger => $self->{logger}, + zmq_linger => 60000, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + + if ($options{action} eq 'ENGINECOMMAND') { + $self->action_enginecommand(%options, socket_log => $socket_log); + } else { + $self->send_log( + socket => $socket_log, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'action unknown' } + ); + return -1; + } + + $socket_log->close(); +} + +sub create_child { + my ($self, %options) = @_; + + $options{message} =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + return undef if ($rv); + + if ($action =~ /^BCAST.*/) { + if ((my $method = $self->can('action_' . lc($action)))) { + $method->($self, token => $token, data => $data); + } + return undef; + } + + $self->{logger}->writeLogDebug('[engine] Create sub-process'); + my $child_pid = fork(); + if (!defined($child_pid)) { + $self->{logger}->writeLogError("[engine] Cannot fork process: $!"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { message => "cannot fork: $!" 
} + ); + return undef; + } + + if ($child_pid == 0) { + $self->set_fork(); + $self->action_run(action => $action, token => $token, data => $data); + exit(0); + } +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[engine] Event: $message"); + + if ($message !~ /^\[ACK\]/) { + $self->create_child(message => $message); + } + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[engine] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-engine', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'ENGINEREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/engine/hooks.pm b/gorgone/gorgone/modules/centreon/engine/hooks.pm new file mode 100644 index 00000000000..ef402d9b0be --- /dev/null +++ b/gorgone/gorgone/modules/centreon/engine/hooks.pm @@ -0,0 +1,154 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::engine::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::engine::class; + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'engine'; +use constant EVENTS => [ + { event => 'ENGINEREADY' }, + { event => 'ENGINECOMMAND', uri => '/command', method => 'POST' } +]; + +my $config_core; +my $config; +my $engine = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config->{command_file} = defined($config->{command_file}) ? 
$config->{command_file} : '/var/lib/centreon-engine/rw/centengine.cmd'; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'ENGINEREADY') { + $engine->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$engine->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { msg => 'gorgoneengine: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-engine', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($engine->{running}) && $engine->{running} == 1) { + $options{logger}->writeLogDebug("[engine] Send TERM signal $engine->{pid}"); + CORE::kill('TERM', $engine->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($engine->{running} == 1) { + $options{logger}->writeLogDebug("[engine] Send KILL signal for pool"); + CORE::kill('KILL', $engine->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($engine->{pid}) || $engine->{pid} != $pid); + + $engine = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($engine->{running}) && $engine->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[engine] Create module 'engine' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 
'gorgone-engine'; + my $module = gorgone::modules::centreon::engine::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[engine] PID $child_pid (gorgone-engine)"); + $engine = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/judge/class.pm b/gorgone/gorgone/modules/centreon/judge/class.pm new file mode 100644 index 00000000000..45fc4b23f64 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/judge/class.pm @@ -0,0 +1,576 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::judge::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use gorgone::modules::centreon::judge::type::distribute; +use gorgone::modules::centreon::judge::type::spare; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{timeout} = 600; + $connector->{check_alive_sync} = defined($connector->{config}->{check_alive}) && $connector->{config}->{check_alive} =~ /(\d+)/ ? $1 : 60; + $connector->{check_alive_last} = -1; + $connector->{check_alive} = 0; + + $connector->{cache_dir} = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + + $connector->check_config(); + $connector->set_signal_handlers(); + return $connector; +} + +sub check_config { + my ($self, %options) = @_; + + $self->{clusters_spare} = {}; + $self->{clusters_distribute} = {}; + $self->{nodes} = {}; + if (defined($self->{config}->{cluster})) { + foreach (@{$self->{config}->{cluster}}) { + if (!defined($_->{name}) || $_->{name} eq '') { + $self->{logger}->writeLogError('[judge] -class- missing name for cluster in config'); + next; + } + + if (!defined($_->{type}) || $_->{type} !~ /distribute|spare/) { + $self->{logger}->writeLogError('[judge] -class- missing/unknown type for cluster in config'); + next; + } + + my $config; + if ($_->{type} =~ /(distribute)/) { + $config = gorgone::modules::centreon::judge::type::distribute::check_config(config => $_, logger => $self->{logger}); + } elsif ($_->{type} =~ /(spare)/) { + $config = gorgone::modules::centreon::judge::type::spare::check_config(config => $_, logger => $self->{logger}); + } + + next if (!defined($config)); + + 
$self->{'clusters_' . $1}->{$_->{name}} = $config; + + foreach (@{$config->{nodes}}) { + $self->{nodes}->{$_} = {}; + } + $self->{nodes}->{ $config->{spare} } = {} if (defined($config->{spare})); + } + } +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[judge] -class- $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub get_pollers_config { + my ($self, %options) = @_; + + $self->{pollers_config} = {}; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'SELECT nagios_server_id, command_file, cfg_dir, centreonbroker_cfg_path, snmp_trapd_path_conf, ' . + 'engine_start_command, engine_stop_command, engine_restart_command, engine_reload_command, ' . + 'broker_reload_command, init_script_centreontrapd ' . + 'FROM cfg_nagios ' . + 'JOIN nagios_server ' . 
+ 'WHERE id = nagios_server_id', + mode => 1, + keys => 'nagios_server_id' + ); + if ($status == -1 || !defined($datas)) { + $self->{logger}->writeLogError('[judge] -class- cannot get configuration for pollers'); + return -1; + } + + $self->{pollers_config} = $datas; + + return 0; +} + +sub get_clapi_user { + my ($self, %options) = @_; + + $self->{clapi_user} = $self->{config}->{clapi_user}; + $self->{clapi_password} = $self->{config}->{clapi_password}; + + if (!defined($self->{clapi_password})) { + $self->{logger}->writeLogError('[judge] -class- cannot get configuration for CLAPI user'); + return -1; + } + + return 0; + +=pod + $self->{clapi_user} = undef; + $self->{clapi_password} = undef; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => "SELECT contact_alias, contact_passwd " . + "FROM `contact` " . + "WHERE `contact_admin` = '1' " . + "AND `contact_activate` = '1' " . + "AND `contact_passwd` IS NOT NULL " . + "LIMIT 1 ", + mode => 2 + ); + + if ($status == -1 || !defined($datas->[0]->[0])) { + $self->{logger}->writeLogError('[judge] -class- cannot get configuration for CLAPI user'); + return -1; + } + + my $clapi_user = $datas->[0]->[0]; + my $clapi_password = $datas->[0]->[1]; + if ($clapi_password =~ m/^md5__(.*)/) { + $clapi_password = $1; + } + + $self->{clapi_user} = $clapi_user; + $self->{clapi_password} = $clapi_password; +=cut + + return 0; +} + +sub action_judgemove { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + # { content => { cluster_name => 'moncluster', node_move => 2 } } + + return -1 if (!defined($options{data}->{content}->{cluster_name}) || $options{data}->{content}->{cluster_name} eq ''); + return -1 if (!defined($options{data}->{content}->{node_move}) || $options{data}->{content}->{node_move} eq ''); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'failover start' } + ); + + if 
(!defined($self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown cluster_name '" . $options{data}->{content}->{cluster_name} . "' in config" } + ); + return -1; + } + + my $node_configured = 0; + foreach (@{$self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} }->{nodes}}) { + if ($_ eq $options{data}->{content}->{node_move}) { + $node_configured = 1; + last; + } + } + if ($node_configured == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown node '" . $options{data}->{content}->{node_move} . "' in cluster config" } + ); + return -1; + } + + $self->check_alive(); + if ($self->{check_alive} == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot check cluster nodes status' } + ); + return -1; + } + + if (!gorgone::modules::centreon::judge::type::spare::is_ready_status(status => $self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} }->{live}->{status})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cluster status not ready to move' } + ); + return -1; + } + if (!gorgone::modules::centreon::judge::type::spare::is_spare_ready(module => $self, ctime => time(), cluster => $self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cluster spare not ready' } + ); + return -1; + } + + gorgone::modules::centreon::judge::type::spare::migrate_steps_1_2_3( + token => $options{token}, + module => $self, + node_src => $options{data}->{content}->{node_move}, + cluster => $options{data}->{content}->{cluster_name}, + clusters => $self->{clusters_spare}, + no_update_running_failed => 1 + ); + + return 0; +} 
+ +sub action_judgefailback { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + # { content => { cluster_name => 'moncluster' } } + + return -1 if (!defined($options{data}->{content}->{cluster_name}) || $options{data}->{content}->{cluster_name} eq ''); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'failback start' } + ); + + if (!defined($self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown cluster_name '" . $options{data}->{content}->{cluster_name} . "' in config" } + ); + return -1; + } + + $self->check_alive(); + if ($self->{check_alive} == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot check cluster nodes status' } + ); + return -1; + } + + if ($self->get_clapi_user() != 0 || + $self->get_pollers_config() != 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot get clapi user informations and/or poller config' } + ); + return -1; + } + + if (!gorgone::modules::centreon::judge::type::spare::is_failover_status(status => $self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} }->{live}->{status})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cluster status not ready to failback' } + ); + return -1; + } + + gorgone::modules::centreon::judge::type::spare::failback_start( + token => $options{token}, + module => $self, + cluster => $options{data}->{content}->{cluster_name}, + clusters => $self->{clusters_spare} + ); + + return 0; +} + +sub action_judgeclean { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + # { content => { cluster_name => 
'moncluster' } } + + return -1 if (!defined($options{data}->{content}->{cluster_name}) || $options{data}->{content}->{cluster_name} eq ''); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'clean start' } + ); + + if (!defined($self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown cluster_name '" . $options{data}->{content}->{cluster_name} . "' in config" } + ); + return -1; + } + + gorgone::modules::centreon::judge::type::spare::clean( + token => $options{token}, + module => $self, + cluster => $options{data}->{content}->{cluster_name}, + clusters => $self->{clusters_spare} + ); + + return 0; +} + +sub action_judgelistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token})); + + if ($options{token} =~ /^judge-spare##(.*?)##(\d+)##/) { + gorgone::modules::centreon::judge::type::spare::migrate_steps_listener_response( + token => $options{token}, + cluster => $1, + state => $2, + clusters => $self->{clusters_spare}, + module => $self, + code => $options{data}->{code} + ); + } + + return 1; +} + +sub check_alive { + my ($self, %options) = @_; + + return if (time() - $self->{check_alive_sync} < $self->{check_alive_last}); + $self->{check_alive_last} = time(); + $self->{check_alive} = 0; + + my $request = q( + SELECT instances.instance_id, instances.running, instances.last_alive, count(hosts.instance_id) + FROM instances LEFT JOIN hosts ON hosts.instance_id = instances.instance_id AND hosts.enabled = 1 + GROUP BY instances.instance_id + ); + my ($status, $datas) = $self->{class_object_centstorage}->custom_execute( + request => $request, + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError('[judge] -class- cannot get pollers status'); + return 1; + } + + foreach (@$datas) { + if (defined($self->{nodes}->{ $_->[0] })) { + $self->{nodes}->{ 
$_->[0] }->{running} = $_->[1]; + $self->{nodes}->{ $_->[0] }->{last_alive} = $_->[2]; + $self->{nodes}->{ $_->[0] }->{count_hosts} = $_->[3]; + } + } + + $self->{check_alive} = 1; +} + +sub add_pipeline_config_reload_poller { + my ($self, %options) = @_; + + my $actions = [ + { + action => 'REMOTECOPY', + target => $options{poller_id}, + timeout => 120, + log_pace => 5, + data => { + content => { + source => $self->{cache_dir} . '/config/engine/' . $options{poller_id}, + destination => $self->{pollers_config}->{ $options{poller_id} }->{cfg_dir} . '/', + cache_dir => $self->{cache_dir}, + owner => 'centreon-engine', + group => 'centreon-engine', + } + } + }, + { + action => 'REMOTECOPY', + target => $options{poller_id}, + timeout => 120, + log_pace => 5, + data => { + content => { + source => $self->{cache_dir} . '/config/broker/' . $options{poller_id}, + destination => $self->{pollers_config}->{ $options{poller_id} }->{centreonbroker_cfg_path} . '/', + cache_dir => $self->{cache_dir}, + owner => 'centreon-broker', + group => 'centreon-broker', + } + } + }, + { + action => 'COMMAND', + target => $options{poller_id}, + timeout => 60, + data => { + content => [ { + command => 'sudo ' . $self->{pollers_config}->{ $options{poller_id} }->{engine_reload_command} + } ] + } + } + ]; + + if (!defined($options{no_generate_config})) { + my $cmd = 'centreon -u ' . $self->{clapi_user} . ' -p ' . $self->{clapi_password} . ' -a POLLERGENERATE -v ' . 
$options{poller_id}; + unshift @$actions, { + action => 'COMMAND', + data => { + content => [ { + command => $cmd + } ] + } + }; + } + + $self->send_internal_action({ + action => 'ADDPIPELINE', + token => $options{token}, + timeout => $options{pipeline_timeout}, + data => $actions + }); +} + +sub test_types { + my ($self, %options) = @_; + + # we don't test if we cannot do check_alive + return if ($self->{check_alive} == 0); + + # distribute clusters + my $all_pollers = {}; + foreach (values %{$self->{clusters_distribute}}) { + my $pollers = gorgone::modules::centreon::judge::type::distribute::assign(cluster => $_, module => $self); + $all_pollers = { %$pollers, %$all_pollers }; + } + + if (scalar(keys %$all_pollers) > 0 && + $self->get_clapi_user() == 0 && + $self->get_pollers_config() == 0 + ) { + foreach (keys %$all_pollers) { + $self->add_pipeline_config_reload_poller(poller_id => $_); + } + } + + # spare clusters + gorgone::modules::centreon::judge::type::spare::init( + clusters => $self->{clusters_spare}, + module => $self + ); + gorgone::modules::centreon::judge::type::spare::check_migrate( + clusters => $self->{clusters_spare}, + module => $self + ); +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[judge] -class- $$ has quit"); + exit(0); + } + + $connector->check_alive(); + $connector->test_types(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-judge', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $connector->send_internal_action({ + action => 'JUDGEREADY', + data => {} + }); + + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => 
$self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{class_object_centstorage} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + $self->{class_object_centreon} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + + $self->{db_gorgone} = gorgone::class::db->new( + type => $self->get_core_config(name => 'gorgone_db_type'), + db => $self->get_core_config(name => 'gorgone_db_name'), + host => $self->get_core_config(name => 'gorgone_db_host'), + port => $self->get_core_config(name => 'gorgone_db_port'), + user => $self->get_core_config(name => 'gorgone_db_user'), + password => $self->get_core_config(name => 'gorgone_db_password'), + force => 2, + logger => $self->{logger} + ); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/judge/hooks.pm b/gorgone/gorgone/modules/centreon/judge/hooks.pm new file mode 100644 index 00000000000..29154a078d5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/judge/hooks.pm @@ -0,0 +1,161 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::judge::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::judge::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'judge'; +use constant EVENTS => [ + { event => 'JUDGEREADY' }, + { event => 'JUDGELISTENER' }, + { event => 'JUDGEFAILBACK', uri => '/failback', method => 'POST' }, + { event => 'JUDGEMOVE', uri => '/move', method => 'POST' }, + { event => 'JUDGECLEAN', uri => '/clean', method => 'POST' } +]; + +my $config_core; +my $config; +my ($config_db_centreon, $config_db_centstorage); +my $judge = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_db_centreon = $options{config_db_centreon}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'JUDGEREADY') { + $judge->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$judge->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-judge: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-judge', + action => $options{action}, + 
raw_data_ref => $options{frame}->getRawData(),
        token => $options{token}
    );
}

# Ask the judge subprocess to stop gracefully (TERM signal).
sub gently {
    my (%options) = @_;

    $stop = 1;
    if (defined($judge->{running}) && $judge->{running} == 1) {
        $options{logger}->writeLogDebug("[judge] Send TERM signal $judge->{pid}");
        CORE::kill('TERM', $judge->{pid});
    }
}

# Force-stop the judge subprocess (KILL signal).
sub kill {
    my (%options) = @_;

    # fix: guard 'running' with defined(), matching gently() and check();
    # the original warned on undef when the child was never started.
    if (defined($judge->{running}) && $judge->{running} == 1) {
        $options{logger}->writeLogDebug('[judge] Send KILL signal for subprocess');
        CORE::kill('KILL', $judge->{pid});
    }
}

sub kill_internal {
    my (%options) = @_;

}

# Reap a dead judge child and restart it unless a stop was requested.
# Returns the number of judge subprocesses still running (0 or 1).
sub check {
    my (%options) = @_;

    my $count = 0;
    foreach my $pid (keys %{$options{dead_childs}}) {
        # Not me
        next if (!defined($judge->{pid}) || $judge->{pid} != $pid);

        $judge = {};
        delete $options{dead_childs}->{$pid};
        if ($stop == 0) {
            create_child(logger => $options{logger});
        }
    }

    $count++ if (defined($judge->{running}) && $judge->{running} == 1);

    return $count;
}

sub broadcast {
    my (%options) = @_;

    routing(%options);
}

# Specific functions
# Fork the 'gorgone-judge' subprocess that runs the judge class event loop.
sub create_child {
    my (%options) = @_;

    $options{logger}->writeLogInfo("[judge] Create module 'judge' process");
    my $child_pid = fork();
    if ($child_pid == 0) {
        $0 = 'gorgone-judge';
        my $module = gorgone::modules::centreon::judge::class->new(
            logger => $options{logger},
            module_id => NAME,
            config_core => $config_core,
            config => $config,
            config_db_centreon => $config_db_centreon,
            config_db_centstorage => $config_db_centstorage
        );
        $module->run();
        exit(0);
    }
    $options{logger}->writeLogDebug("[judge] PID $child_pid (gorgone-judge)");
    $judge = { pid => $child_pid, ready => 0, running => 1 };
}

1;
diff --git a/gorgone/gorgone/modules/centreon/judge/type/distribute.pm b/gorgone/gorgone/modules/centreon/judge/type/distribute.pm
new file mode 100644
index 00000000000..910c3694c65
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/judge/type/distribute.pm
@@ -0,0 +1,117 @@
#
# Copyright 2019 
Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::modules::centreon::judge::type::distribute;

use strict;
use warnings;

# Validate/normalize a 'distribute' cluster configuration.
# Sets the resync interval (sync, default 3600s) and requires a non-empty
# hcategory plus at least one node. Returns the config hashref, or undef.
sub check_config {
    my (%options) = @_;

    my $config = $options{config};
    my $sync = defined($config->{sync}) && $config->{sync} =~ /(\d+)/ ? $1 : 3600;
    $config->{sync} = $sync;
    $config->{sync_last} = -1;

    # fix: the original tested !defined($config->{sync}), which is always
    # false right after the assignment above; the intent is to validate
    # hcategory (and avoid an undef warning when it is absent).
    if (!defined($config->{hcategory}) || $config->{hcategory} eq '') {
        $options{logger}->writeLogError("[judge] -class- please set hcategory for cluster '" . $config->{name} . "'");
        return undef;
    }

    if (!defined($config->{nodes}) || scalar(@{$config->{nodes}}) <= 0) {
        $options{logger}->writeLogError("[judge] -class- please set nodes for cluster '" . $config->{name} . "'");
        return undef;
    }

    return $config;
}

# Pick the alive poller with the fewest assigned hosts and account for the
# new host on it. A poller is eligible when it is running and was seen alive
# within the last 300 seconds. Returns the poller id, or undef if none.
sub least_poller_hosts {
    my (%options) = @_;

    my $poller_id;
    my $lowest_hosts;
    my $current_time = time();
    foreach (keys %{$options{module}->{nodes}}) {
        next if (!defined($options{module}->{nodes}->{$_}->{running}) || $options{module}->{nodes}->{$_}->{running} == 0);
        next if (($current_time - 300) > $options{module}->{nodes}->{$_}->{last_alive});

        if (!defined($lowest_hosts) || $options{module}->{nodes}->{$_}->{count_hosts} < $lowest_hosts) {
            $lowest_hosts = $options{module}->{nodes}->{$_}->{count_hosts};
            $poller_id = $_;
        }
    }

    if (defined($poller_id)) {
        # fix: increment the selected poller's counter. The original used $_,
        # which is stale outside the foreach and never pointed at the winner,
        # so the load counter was never updated and every host of a sync pass
        # landed on the same poller.
        $options{module}->{nodes}->{$poller_id}->{count_hosts}++;
    }
    return $poller_id;
}

# Assign hosts of the configured hostcategory (currently attached to the
# default poller) to the least loaded alive poller of the cluster.
# Rate-limited to one pass per {sync} seconds. Returns a hashref of poller
# ids whose configuration must be regenerated/reloaded.
sub assign {
    my (%options) = @_;

    return {} if (time() - $options{cluster}->{sync} < $options{cluster}->{sync_last});
    $options{cluster}->{sync_last} = time();

    # NOTE(review): ns.ns_activate = '0' selects hosts sitting on a
    # *deactivated* default poller -- presumably the staging entry point for
    # new hosts; confirm against the Centreon schema.
    my $request = "
        SELECT nhr.host_host_id
        FROM hostcategories hc, hostcategories_relation hcr, ns_host_relation nhr, nagios_server ns
        WHERE hc.hc_activate = '1' AND hc.hc_name = ?
        AND hc.hc_id = hcr.hostcategories_hc_id
        AND hcr.host_host_id = nhr.host_host_id
        AND nhr.nagios_server_id = ns.id
        AND ns.is_default = 1
        AND ns.ns_activate = '0'
    ";
    my ($status, $datas) = $options{module}->{class_object_centreon}->custom_execute(
        request => $request,
        bind_values => [$options{cluster}->{hcategory}],
        mode => 2
    );
    if ($status == -1) {
        $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{cluster}->{name} . "': cannot get hosts");
        return {};
    }

    my $pollers_reload = {};
    foreach (@$datas) {
        my $poller_id = least_poller_hosts(module => $options{module});
        if (!defined($poller_id)) {
            $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{cluster}->{name} . 
"': cannot find poller for host '$_->[0]'"); + next; + } + + $pollers_reload->{$poller_id} = 1; + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{cluster}->{name} . "': assign host '$_->[0]' --> poller '$poller_id'"); + + ($status) = $options{module}->{class_object_centreon}->custom_execute( + request => "UPDATE `ns_host_relation` SET `nagios_server_id` = $poller_id WHERE `host_host_id` = $_->[0]" + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{cluster}->{name} . "': cannot assign host '$_->[0]' --> poller '$poller_id'"); + } + } + + return $pollers_reload; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/judge/type/spare.pm b/gorgone/gorgone/modules/centreon/judge/type/spare.pm new file mode 100644 index 00000000000..9dc28907964 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/judge/type/spare.pm @@ -0,0 +1,1001 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::judge::type::spare; + +use strict; +use warnings; +use gorgone::standard::constants qw(:all); + +=pod +cluster status: +UNKNOWN_STATUS: module restart when failoverProgress or failbackProgress running +NOTREADY_STATUS: init phase or sqlite issue at beginning +READY_STATUS: cluster can migrate +FAILOVER_RUNNING_STATUS +FAILOVER_FAIL_STATUS +FAILOVER_SUCCESS_STATUS +FAILBACK_RUNNING_STATUS +FAILBACK_FAIL_STATUS +FAILBACK_SUCCESS_STATUS + +migrate step: +1) update gorgone sqlite status = FAILOVER_RUNNING_STATUS (state = STATE_MIGRATION_UPDATE_SQLITE) +2) change centreon DB poller configuration (state = STATE_MIGRATION_UPDATE_CENTREON_DB) +3) generate config files for 2 configuration (listener on 2 clapi commands) (state = STATE_MIGRATION_GENERATE_CONFIGS) +4) push config/reload poller failed (listener on a pipeline) (state = STATE_MIGRATION_POLLER_FAILED) (continue even if it's failed) +5) push config/reload poller spare (listener on a pipeline) (state = STATE_MIGRATION_POLLER_SPARE) +6) update 'running' poller failed in centreon DB (state = STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED) + +timeout on each step of a pipeline (default: 600 seconds) (finish and get an error if we have a listener on global pipeline token) +timeout on listener (default: 600 seconds). 
Need to set a listener value higher than each steps + +=cut + +use constant UNKNOWN_STATUS => -2; +use constant NOTREADY_STATUS => -1; +use constant READY_STATUS => 0; +use constant FAILOVER_RUNNING_STATUS => 1; +use constant FAILOVER_FAIL_STATUS => 2; +use constant FAILOVER_SUCCESS_STATUS => 3; +use constant FAILBACK_RUNNING_STATUS => 10; +use constant FAILBACK_FAIL_STATUS => 11; +use constant FAILBACK_SUCCESS_STATUS => 12; + +use constant STATE_MIGRATION_UPDATE_SQLITE => 1; +use constant STATE_MIGRATION_UPDATE_CENTREON_DB => 2; +use constant STATE_MIGRATION_GENERATE_CONFIGS => 3; +use constant STATE_MIGRATION_POLLER_FAILED => 4; +use constant STATE_MIGRATION_POLLER_SPARE => 5; +use constant STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED => 6; + +use constant STATE_FAILBACK_GET_SQLITE => 10; +use constant STATE_FAILBACK_UPDATE_CENTREON_DB => 11; +use constant STATE_FAILBACK_GENERATE_CONFIGS => 12; +use constant STATE_FAILBACK_POLLER_SRC => 13; +use constant STATE_FAILBACK_POLLER_DST => 14; + +sub check_config { + my (%options) = @_; + + my $config = $options{config}; + if (!defined($config->{nodes}) || scalar(@{$config->{nodes}}) <= 0) { + $options{logger}->writeLogError("[judge] -class- please set nodes for cluster '" . $config->{name} . "'"); + return undef; + } + if (!defined($config->{spare})) { + $options{logger}->writeLogError("[judge] -class- please set spare for cluster '" . $config->{name} . "'"); + return undef; + } + + $config->{alive_timeout} = defined($config->{alive_timeout}) && $config->{alive_timeout} =~ /(\d+)/ ? 
$1 : 600; + $config->{live} = { status => NOTREADY_STATUS }; + + return $config; +} + +sub init { + my (%options) = @_; + + foreach (keys %{$options{clusters}}) { + next if ($options{clusters}->{$_}->{live}->{status} != NOTREADY_STATUS); + + my ($status, $sth) = $options{module}->{db_gorgone}->query({ + query => 'SELECT `status` FROM gorgone_centreon_judge_spare WHERE cluster_name = ?', + bind_values => [$options{clusters}->{$_}->{name}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- sqlite error to get cluster information '" . $options{clusters}->{$_}->{name} . "': cannot select"); + next; + } + + if (my $row = $sth->fetchrow_hashref()) { + $options{clusters}->{$_}->{live}->{status} = $row->{status}; + } else { + ($status) = $options{module}->{db_gorgone}->query({ + query => 'INSERT INTO gorgone_centreon_judge_spare (`cluster_name`, `status`) VALUES (?, ' . READY_STATUS . ')', + bind_values => [$options{clusters}->{$_}->{name}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- sqlite error to get cluster information '" . $options{clusters}->{$_}->{name} . "': cannot insert"); + next; + } + $options{clusters}->{$_}->{live}->{status} = READY_STATUS; + } + + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{$_}->{name} . "' init status is " . $options{clusters}->{$_}->{live}->{status}); + } +} + +sub send_log { + my (%options) = @_; + + $options{module}->send_log( + code => $options{code}, + token => defined($options{token}) ? $options{token} : $options{live}->{token}, + data => defined($options{data}) ? 
$options{data} : $options{live}
    );
}

# True when the cluster is idle and may start a new failover.
sub is_ready_status {
    my (%options) = @_;

    if ($options{status} == READY_STATUS) {
        return 1;
    }

    return 0;
}

# True when a failover has completed (success or failure),
# i.e. a failback can be considered.
sub is_failover_status {
    my (%options) = @_;

    if ($options{status} == FAILOVER_FAIL_STATUS || $options{status} == FAILOVER_SUCCESS_STATUS) {
        return 1;
    }

    return 0;
}

# True when the cluster's spare poller is running and was seen alive within
# the cluster alive_timeout window.
sub is_spare_ready {
    my (%options) = @_;

    if (!defined($options{module}->{nodes}->{ $options{cluster}->{spare} }->{running}) ||
        $options{module}->{nodes}->{ $options{cluster}->{spare} }->{running} == 0 ||
        ($options{ctime} - $options{cluster}->{alive_timeout}) > $options{module}->{nodes}->{ $options{cluster}->{spare} }->{last_alive}
    ) {
        return 0;
    }

    return 1;
}

# Persist the cluster status in the gorgone sqlite database.
# Returns 0 on success, -1 on error.
sub update_status {
    my (%options) = @_;

    my ($status) = $options{module}->{db_gorgone}->query({
        query => 'UPDATE gorgone_centreon_judge_spare SET `status` = ' . $options{status} . ' WHERE `cluster_name` = ?',
        bind_values => [$options{cluster}]
    });
    if ($status == -1) {
        $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{cluster} . "' step $options{step}: cannot update status");
        return -1;
    }

    return 0;
}

# Scan READY clusters and trigger a failover when a member node is down
# (not alive within alive_timeout) while the spare poller is available.
sub check_migrate {
    my (%options) = @_;

    my $ctime = time();
    foreach (keys %{$options{clusters}}) {
        next if ($options{clusters}->{$_}->{live}->{status} != READY_STATUS);

        if (!is_spare_ready(module => $options{module}, cluster => $options{clusters}->{$_}, ctime => $ctime)) {
            $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{$_}->{name} . "' cannot migrate - spare poller not alive");
            next;
        }

        my $node_src;
        foreach my $node_id (@{$options{clusters}->{$_}->{nodes}}) {
            if (defined($options{module}->{nodes}->{$node_id}->{running}) && $options{module}->{nodes}->{$node_id}->{running} == 1 &&
                (($ctime - $options{clusters}->{$_}->{alive_timeout}) > $options{module}->{nodes}->{$node_id}->{last_alive})
            ) {
                $node_src = $node_id;
                last;
            }
        }

        if (defined($node_src)) {
            my $token = $options{module}->generate_token();
            send_log(
                module => $options{module},
                code => GORGONE_ACTION_BEGIN,
                token => $token,
                data => { message => 'failover start' }
            );
            migrate_steps_1_2_3(
                # fix: pass the freshly generated token. The original passed
                # $options{token}, which check_migrate's callers never supply,
                # so the whole failover pipeline ran with an undefined token
                # and its progress logs were unaddressable.
                token => $token,
                module => $options{module},
                node_src => $node_src,
                clusters => $options{clusters},
                cluster => $_
            );
        }
    }
}

# Reset a cluster to READY_STATUS (sqlite + in-memory live state); used by
# the JUDGECLEAN event after a failover/failback has ended.
sub clean {
    my (%options) = @_;

    $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS;
    if (update_status(
        module => $options{module},
        cluster => $options{cluster},
        status => READY_STATUS,
        step => 'clean'
    ) == -1) {
        send_log(
            module => $options{module},
            code => GORGONE_ACTION_FINISH_KO,
            token => $options{token},
            data => { message => 'clean: cannot update status' }
        );
        $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' clean: cannot update status");
        return -1;
    }

    $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' clean: status updated");
    send_log(
        module => $options{module},
        code => GORGONE_ACTION_FINISH_OK,
        token => $options{token},
        data => { message => 'clean: status updated' }
    );
    return 0;
}

=pod

**********************
Failover migrate steps
**********************

=cut

# Failover steps 1-3: record the running failover in sqlite (step 1), move
# the failed poller's hosts to the spare in the Centreon DB (step 2), then
# schedule config generation for both pollers (step 3); steps 4-6 continue
# in the listener callbacks. Returns 0 on success/no-op, -1 on error.
sub migrate_steps_1_2_3 {
    my (%options) = @_;

    $options{clusters}->{ $options{cluster} }->{live}->{token} = $options{token};
    $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_RUNNING_STATUS;
    $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed} = undef;
    $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare} = undef;
    $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} = 0;
    $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed} = undef;
    $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare} = undef;
    $options{clusters}->{ $options{cluster} }->{live}->{node_src} = $options{node_src};
    # fix: the failover destination is the spare poller. The original read
    # $options{clusters}->{...}->{token_config_node_spare}, a key that does
    # not exist on the cluster config hash (it lives under {live}), so
    # node_dst was always undef in the progress events.
    $options{clusters}->{ $options{cluster} }->{live}->{node_dst} = $options{clusters}->{ $options{cluster} }->{spare};
    $options{clusters}->{ $options{cluster} }->{live}->{no_update_running_failed} = $options{no_update_running_failed};
    $options{clusters}->{ $options{cluster} }->{live}->{state} = undef;

    send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live});

    if ($options{module}->get_clapi_user() != 0 ||
        $options{module}->get_pollers_config() != 0) {
        $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS;
        send_log(
            module => $options{module},
            code => GORGONE_ACTION_FINISH_KO,
            token => $options{clusters}->{ $options{cluster} }->{live}->{token},
            data => { message => 'cannot get clapi user informations and/or poller config' }
        );
        return -1;
    }

    my ($status, $datas) = 
$options{module}->{class_object_centreon}->custom_execute( + request => 'SELECT host_host_id ' . + 'FROM ns_host_relation ' . + 'WHERE nagios_server_id = ?', + bind_values => [$options{node_src}], + mode => 2 + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' cannot get hosts associated --> poller $options{node_src}"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS; + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot get hosts associated with source poller' } + ); + return -1; + } + if (scalar(@$datas) <= 0) { + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' no hosts associated --> poller $options{node_src}"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS; + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'nothing done - no hosts associated with source poller' } + ); + return 0; + } + + ######## + # Step 1 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_MIGRATION_UPDATE_SQLITE started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_UPDATE_SQLITE; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + my $data = { node_src => $options{node_src}, hosts => [] }; + push @{$data->{hosts}}, $_->[0] foreach (@$datas); + ($status, my $encoded) = $options{module}->json_encode( + argument => $data, + method => "-class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE" + ); + if ($status == 1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE: cannot encode json"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot encode json' } + ); + return -1; + } + + ($status) = $options{module}->{db_gorgone}->query({ + query => 'UPDATE gorgone_centreon_judge_spare SET `status` = ' . FAILOVER_RUNNING_STATUS . ', `data` = ? WHERE `cluster_name` = ?', + bind_values => [$encoded, $options{cluster}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE: cannot update sqlite"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot update sqlite' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . 
$options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 2 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_CENTREON_DB started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_UPDATE_CENTREON_DB; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + ($status) = $options{module}->{class_object_centreon}->custom_execute( + request => 'UPDATE ns_host_relation SET nagios_server_id = ?' . + ' WHERE host_host_id IN (' . join(',', @{$data->{hosts}}) . ')', + bind_values => [$options{clusters}->{ $options{cluster} }->{spare}] + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_CENTREON_DB: cannot update database"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot update database' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_MIGRATION_UPDATE_CENTREON_DB finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 3 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_GENERATE_CONFIGS started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_GENERATE_CONFIGS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_GENERATE_CONFIGS . '##' . $options{module}->generate_token(length => 8); + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_GENERATE_CONFIGS . '##' . $options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . 
$options{node_src}, + } ] + } + } + ] + ); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . $options{clusters}->{ $options{cluster} }->{spare}, + } ] + } + } + ] + ); + + return 0; +} + +sub migrate_step_3 { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + return 0 if ($options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed} && + $options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare}); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_MIGRATION_GENERATE_CONFIGS: generate config error"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILOVER_FAIL_STATUS, + step => 'STATE_MIGRATION_GENERATE_CONFIGS' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'generate config error' } + ); + return -1; + } + + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses}++; + if ($options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} < 2) { + return 0; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_GENERATE_CONFIGS finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 4 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_FAILED started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_POLLER_FAILED; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_POLLER_FAILED . '##' . 
$options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed}, + poller_id => $options{clusters}->{ $options{cluster} }->{live}->{node_src}, + no_generate_config => 1, + pipeline_timeout => 400 + ); + + return 0; +} + +sub migrate_step_4 { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_FAILED finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 5 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_SPARE started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_POLLER_SPARE; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_POLLER_SPARE . '##' . 
$options{module}->generate_token(length => 8);
    send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live});

    $options{module}->send_internal_action(
        action => 'ADDLISTENER',
        data => [
            {
                identity => 'gorgonejudge',
                event => 'JUDGELISTENER',
                token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare},
                timeout => 450
            }
        ]
    );
    $options{module}->add_pipeline_config_reload_poller(
        token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare},
        poller_id => $options{clusters}->{ $options{cluster} }->{spare},
        no_generate_config => 1,
        pipeline_timeout => 400
    );
}

# Failover step 5 callback: handle the spare poller's config push/reload
# result, then finalize the failover (step 6: mark the failed poller as not
# running in centstorage and persist FAILOVER_SUCCESS_STATUS).
sub migrate_step_5 {
    my (%options) = @_;

    return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK);

    if ($options{code} == GORGONE_ACTION_FINISH_KO) {
        $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_SPARE: pipeline error");
        # fix: the live status lives under {live}->{status} (see
        # migrate_step_3). The original wrote to {status} on the cluster
        # config hash, leaving the in-memory state at FAILOVER_RUNNING while
        # sqlite said FAILOVER_FAIL -- the cluster could never be reaped.
        $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_FAIL_STATUS;
        update_status(
            module => $options{module},
            cluster => $options{cluster},
            status => FAILOVER_FAIL_STATUS,
            step => 'STATE_MIGRATION_POLLER_SPARE'
        );
        send_log(
            module => $options{module},
            code => GORGONE_ACTION_FINISH_KO,
            token => $options{clusters}->{ $options{cluster} }->{live}->{token},
            data => { message => 'pipeline error' }
        );
        return -1;
    }

    $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************');
    $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_MIGRATION_POLLER_SPARE finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 6 + ######## + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + if (!defined($options{clusters}->{ $options{cluster} }->{live}->{no_update_running_failed}) || + $options{clusters}->{ $options{cluster} }->{live}->{no_update_running_failed} != 1) { + my ($status) = $options{module}->{class_object_centstorage}->custom_execute( + request => 'UPDATE instances SET running = 0 ' . + ' WHERE instance_id = ?', + bind_values => [$options{clusters}->{ $options{cluster} }->{live}->{node_src}] + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED: cannot update database"); + } + } + + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_SUCCESS_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILOVER_SUCCESS_STATUS, + step => 'STATE_MIGRATION_POLLER_SPARE' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'failover finished' } + ); + + return 0; +} +sub migrate_steps_listener_response { + my (%options) = @_; + + return -1 if (!defined($options{clusters}->{ $options{cluster} })); + if ($options{state} != $options{clusters}->{ $options{cluster} }->{live}->{state}) { + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' wrong or old step responce received"); + return -1; + } + + if ($options{state} == STATE_MIGRATION_GENERATE_CONFIGS) { + return migrate_step_3(%options); + } + if ($options{state} == STATE_MIGRATION_POLLER_FAILED) { + return migrate_step_4(%options); + } + if ($options{state} == STATE_MIGRATION_POLLER_SPARE) { + return migrate_step_5(%options); + } + + if ($options{state} == STATE_FAILBACK_GENERATE_CONFIGS) { + return failback_generate_configs(%options); + } + if ($options{state} == STATE_FAILBACK_POLLER_SRC) { + return failback_poller_src(%options); + } + if ($options{state} == STATE_FAILBACK_POLLER_DST) { + return failback_poller_dst(%options); + } +} + +=pod + +********************** +Failback migrate steps +********************** + +=cut + +sub failback_start { + my (%options) = @_; + + $options{clusters}->{ $options{cluster} }->{live}->{token} = $options{token}; + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_RUNNING_STATUS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} = 0; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{node_src} = $options{clusters}->{ $options{cluster} }->{spare}; + $options{clusters}->{ $options{cluster} }->{live}->{node_dst} = undef; + + ######## + # Step 1 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_FAILBACK_GET_SQLITE started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_GET_SQLITE; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + my ($status, $sth) = $options{module}->{db_gorgone}->query({ + query => 'SELECT `status`, `data` FROM gorgone_centreon_judge_spare WHERE cluster_name = ?', + bind_values => [$options{clusters}->{ $options{cluster} }->{name}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' cannot get sqlite information"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot get sqlite information' } + ); + return -1; + } + my $row = $sth->fetchrow_hashref(); + if (!defined($row)) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' no data in sqlite"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'no data in sqlite' } + ); + return -1; + } + ($status, my $decoded) = $options{module}->json_decode( + argument => $row->{data}, + method => "-class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' cannot decode json information" + ); + if ($status == 1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' cannot decode json"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot decode json' } + ); + return -1; + } + + $options{clusters}->{ $options{cluster} }->{live}->{node_dst} = $decoded->{node_src}; + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GET_SQLITE finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 2 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_UPDATE_CENTREON_DB started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_UPDATE_CENTREON_DB; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + ($status) = $options{module}->{class_object_centreon}->custom_execute( + request => 'UPDATE ns_host_relation SET nagios_server_id = ?' . + ' WHERE host_host_id IN (' . join(',', @{$decoded->{hosts}}) . ')', + bind_values => [$options{clusters}->{ $options{cluster} }->{live}->{node_dst}] + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_FAILBACK_UPDATE_CENTREON_DB: cannot update database"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot update database' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_UPDATE_CENTREON_DB finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 3 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GENERATE_CONFIGS started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_GENERATE_CONFIGS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_GENERATE_CONFIGS . '##' . $options{module}->generate_token(length => 8); + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_GENERATE_CONFIGS . '##' . 
$options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . $options{clusters}->{ $options{cluster} }->{live}->{node_src} + } ] + } + } + ] + ); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . 
$options{clusters}->{ $options{cluster} }->{live}->{node_dst} + } ] + } + } + ] + ); + + return 0; +} + +sub failback_generate_configs { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + return 0 if ($options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src} && + $options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst}); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GENERATE_CONFIGS: generate config error"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_FAIL_STATUS, + step => 'STATE_FAILBACK_GENERATE_CONFIGS' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'generate config error' } + ); + return -1; + } + + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses}++; + if ($options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} < 2) { + return 0; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GENERATE_CONFIGS finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 4 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . 
$options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_SRC started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_POLLER_SRC; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_POLLER_SRC . '##' . $options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src}, + poller_id => $options{clusters}->{ $options{cluster} }->{live}->{node_src}, + no_generate_config => 1, + pipeline_timeout => 400 + ); + + return 0; +} + +sub failback_poller_src { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_FAILBACK_POLLER_SRC: pipeline error"); + $options{clusters}->{ $options{cluster} }->{status} = FAILBACK_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_FAIL_STATUS, + step => 'STATE_FAILBACK_POLLER_SRC' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'pipeline error' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_SRC finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 5 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_DST started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_POLLER_DST; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_POLLER_DST . '##' . 
$options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst}, + poller_id => $options{clusters}->{ $options{cluster} }->{live}->{node_dst}, + no_generate_config => 1, + pipeline_timeout => 400 + ); +} + +sub failback_poller_dst { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_DST: pipeline error"); + $options{clusters}->{ $options{cluster} }->{status} = FAILBACK_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_FAIL_STATUS, + step => 'STATE_FAILBACK_POLLER_DST' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'pipeline error' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' step STATE_FAILBACK_POLLER_DST finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_SUCCESS_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_SUCCESS_STATUS, + step => 'STATE_FAILBACK_POLLER_DST' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'failback finished' } + ); + + return 0; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/legacycmd/class.pm b/gorgone/gorgone/modules/centreon/legacycmd/class.pm new file mode 100644 index 00000000000..67d2a2121ad --- /dev/null +++ b/gorgone/gorgone/modules/centreon/legacycmd/class.pm @@ -0,0 +1,831 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::legacycmd::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use gorgone::class::sqlquery; +use gorgone::class::tpapi::clapi; +use File::Copy; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{tpapi_clapi_name} = defined($options{config}->{tpapi_clapi}) && $options{config}->{tpapi_clapi} ne '' + ? $options{config}->{tpapi_clapi} + : 'clapi'; + if (!defined($connector->{config}->{cmd_file}) || $connector->{config}->{cmd_file} eq '') { + $connector->{config}->{cmd_file} = '/var/lib/centreon/centcore.cmd'; + } + if (!defined($connector->{config}->{cmd_dir}) || $connector->{config}->{cmd_dir} eq '') { + $connector->{config}->{cmd_dir} = '/var/lib/centreon/centcore/'; + } + $connector->{config}->{bulk_external_cmd} = + defined($connector->{config}->{bulk_external_cmd}) && $connector->{config}->{bulk_external_cmd} =~ /(\d+)/ ? $1 : 50; + $connector->{config}->{bulk_external_cmd_sequential} = + defined($connector->{config}->{bulk_external_cmd_sequential}) && $connector->{config}->{bulk_external_cmd_sequential} =~ /^False|0$/i ? 0 : 1; + $connector->{config}->{dirty_mode} = defined($connector->{config}->{dirty_mode}) ? 
$connector->{config}->{dirty_mode} : 1; + $connector->{gorgone_illegal_characters} = '`'; + $connector->{cache_refresh_interval} = 60; + $connector->{cache_refresh_last} = -1; + $connector->{bulk_commands} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[legacycmd] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub cache_refresh { + my ($self, %options) = @_; + + return if ((time() - $self->{cache_refresh_interval}) < $self->{cache_refresh_last}); + $self->{cache_refresh_last} = time(); + + # get pollers config + $self->{pollers} = undef; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'SELECT nagios_server_id, command_file, cfg_dir, centreonbroker_cfg_path, snmp_trapd_path_conf, ' . + 'engine_start_command, engine_stop_command, engine_restart_command, engine_reload_command, ' . + 'broker_reload_command, init_script_centreontrapd ' . + 'FROM cfg_nagios, nagios_server ' . 
+ "WHERE nagios_server.id = cfg_nagios.nagios_server_id AND cfg_nagios.nagios_activate = '1'", + mode => 1, + keys => 'nagios_server_id' + ); + if ($status == -1 || !defined($datas)) { + $self->{logger}->writeLogError('[legacycmd] Cannot get configuration for pollers'); + return ; + } + + $self->{pollers} = $datas; + + # check illegal characters + ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => "SELECT `value` FROM options WHERE `key` = 'gorgone_illegal_characters'", + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError('[legacycmd] Cannot get illegal characters'); + return ; + } + + if (defined($datas->[0]->[0])) { + $self->{gorgone_illegal_characters} = $datas->[0]->[0]; + } +} + +sub check_pollers_config { + my ($self, %options) = @_; + + return defined($self->{pollers}) ? 1 : 0; +} + +sub send_external_commands { + my ($self, %options) = @_; + my $token = $options{token}; + $token = $self->generate_token() if (!defined($token)); + + my $targets = []; + $targets = [$options{target}] if (defined($options{target})); + if (scalar(@$targets) <= 0) { + $targets = [keys %{$self->{bulk_commands}}]; + } + + foreach my $target (@$targets) { + next if (!defined($self->{bulk_commands}->{$target}) || scalar(@{$self->{bulk_commands}->{$target}}) <= 0); + $self->send_internal_action({ + action => 'ENGINECOMMAND', + target => $target, + token => $token, + data => { + logging => $options{logging}, + content => { + command_file => $self->{pollers}->{$target}->{command_file}, + commands => [ + join("\n", @{$self->{bulk_commands}->{$target}}) + ] + } + } + }); + + $self->{logger}->writeLogDebug("[legacycmd] send external commands for '$target'"); + $self->{bulk_commands}->{$target} = []; + } +} + +sub add_external_command { + my ($self, %options) = @_; + + $options{param} =~ s/[\Q$self->{gorgone_illegal_characters}\E]//g + if (defined($self->{gorgone_illegal_characters}) && $self->{gorgone_illegal_characters} ne ''); + if 
($options{action} == 1) { + $self->send_internal_action({ + action => 'ENGINECOMMAND', + target => $options{target}, + token => $options{token}, + data => { + logging => $options{logging}, + content => { + command_file => $self->{pollers}->{ $options{target} }->{command_file}, + commands => [ + $options{param} + ] + } + } + }); + } else { + $self->{bulk_commands}->{ $options{target} } = [] if (!defined($self->{bulk_commands}->{ $options{target} })); + push @{$self->{bulk_commands}->{ $options{target} }}, $options{param}; + if (scalar(@{$self->{bulk_commands}->{ $options{target} }}) > $self->{config}->{bulk_external_cmd}) { + $self->send_external_commands(%options); + } + } +} + +sub execute_cmd { + my ($self, %options) = @_; + + chomp $options{target}; + chomp $options{param} if (defined($options{param})); + my $token = $options{token}; + $token = $self->generate_token() if (!defined($token)); + + my $msg = "[legacycmd] Handling command '" . $options{cmd} . "'"; + $msg .= ", Target: '" . $options{target} . "'" if (defined($options{target})); + $msg .= ", Parameters: '" . $options{param} . "'" if (defined($options{param})); + $self->{logger}->writeLogInfo($msg); + + if ($options{cmd} eq 'EXTERNALCMD') { + $self->add_external_command( + action => $options{action}, + param => $options{param}, + target => $options{target}, + token => $options{token}, + logging => $options{logging} + ); + return 0; + } + + $self->send_external_commands(target => $options{target}) + if (defined($options{target}) && $self->{config}->{bulk_external_cmd_sequential} == 1); + + if ($options{cmd} eq 'SENDCFGFILE') { + my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + # engine + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir . 
'/config/engine/' . $options{target}, + destination => $self->{pollers}->{$options{target}}->{cfg_dir} . '/', + cache_dir => $cache_dir, + owner => 'centreon-engine', + group => 'centreon-engine', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'SENDCFGFILE' + } + } + } + }); + # broker + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir . '/config/broker/' . $options{target}, + destination => $self->{pollers}->{$options{target}}->{centreonbroker_cfg_path} . '/', + cache_dir => $cache_dir, + owner => 'centreon-broker', + group => 'centreon-broker', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'SENDCFGFILE' + } + } + } + }); + } elsif ($options{cmd} eq 'SENDEXPORTFILE') { + if (!defined($self->{clapi_password})) { + return (-1, 'need centreon clapi password to execute SENDEXPORTFILE command'); + } + + my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + my $remote_dir = (defined($connector->{config}->{remote_dir})) ? + $connector->{config}->{remote_dir} : '/var/cache/centreon/config/remote-data/'; + # remote server + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir . '/config/export/' . 
$options{target}, + destination => $remote_dir, + cache_dir => $cache_dir, + owner => 'centreon', + group => 'centreon', + metadata => { + centcore_cmd => 'SENDEXPORTFILE' + } + } + } + }); + + # Forward data use to be done by createRemoteTask as well as task_id in a gorgone command + # Command name: AddImportTaskWithParent + # Data: ['parent_id' => $task->getId()] + $self->send_internal_action({ + action => 'ADDIMPORTTASKWITHPARENT', + token => $options{token}, + target => $options{target}, + data => { + logging => $options{logging}, + content => { + parent_id => $options{param}, + cbd_reload => 'sudo ' . $self->{pollers}->{ $options{target} }->{broker_reload_command} + } + } + }); + } elsif ($options{cmd} eq 'SYNCTRAP') { + my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + my $cache_dir_trap = (defined($connector->{config}->{cache_dir_trap}) && $connector->{config}->{cache_dir_trap} ne '') ? + $connector->{config}->{cache_dir_trap} : '/etc/snmp/centreon_traps/'; + # centreontrapd + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir_trap . '/' . $options{target} . '/centreontrapd.sdb', + destination => $self->{pollers}->{$options{target}}->{snmp_trapd_path_conf} . '/', + cache_dir => $cache_dir, + owner => 'centreon', + group => 'centreon', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'SYNCTRAP' + } + } + } + }); + } elsif ($options{cmd} eq 'ENGINERESTART') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_restart_command}; + $self->send_internal_action({ + action => 'ACTIONENGINE', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + command => 'sudo ' . $cmd, + plugins => $self->{pollers}->{ $options{target} }->{cfg_dir} . 
'/plugins.json', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'ENGINERESTART' + } + } + } + }); + } elsif ($options{cmd} eq 'RESTART') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_restart_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RESTART' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'ENGINERELOAD') { + my $cmd = $self->{pollers}->{ $options{target} }->{engine_reload_command}; + $self->send_internal_action({ + action => 'ACTIONENGINE', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + command => 'sudo ' . $cmd, + plugins => $self->{pollers}->{ $options{target} }->{cfg_dir} . '/plugins.json', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'ENGINERELOAD' + } + } + } + }); + } elsif ($options{cmd} eq 'RELOAD') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_reload_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RELOAD' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'START') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_start_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . 
$cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'START' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'STOP') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_stop_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'STOP' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'RELOADBROKER') { + my $cmd = $self->{pollers}->{$options{target}}->{broker_reload_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RELOADBROKER' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'RESTARTCENTREONTRAPD') { + my $cmd = $self->{pollers}->{$options{target}}->{init_script_centreontrapd}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo service ' . $cmd . ' restart', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RESTARTCENTREONTRAPD' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'RELOADCENTREONTRAPD') { + my $cmd = $self->{pollers}->{$options{target}}->{init_script_centreontrapd}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo service ' . $cmd . 
' reload', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RELOADCENTREONTRAPD' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'STARTWORKER') { + if (!defined($self->{clapi_password})) { + return (-1, 'need centreon clapi password to execute STARTWORKER command'); + } + my $centreon_dir = (defined($connector->{config}->{centreon_dir})) ? + $connector->{config}->{centreon_dir} : '/usr/share/centreon'; + my $cmd = $centreon_dir . '/bin/centreon -u "' . $self->{clapi_user} . '" -p "' . + $self->{clapi_password} . '" -w -o CentreonWorker -a processQueue'; + $self->send_internal_action({ + action => 'COMMAND', + target => undef, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => $cmd, + metadata => { + centcore_cmd => 'STARTWORKER' + } + } + ] + } + }); + } + + return 0; +} + +sub action_addimporttaskwithparent { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{parent_id})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "expected parent_id task ID, found '" . $options{data}->{content}->{parent_id} . "'", + } + ); + return -1; + } + + my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time()); + my $datetime = sprintf('%04d-%02d-%02d %02d:%02d:%02d', $year+1900, $mon+1, $mday, $hour, $min, $sec); + + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => "INSERT INTO task (`type`, `status`, `parent_id`, `created_at`) VALUES ('import', 'pending', '" . $options{data}->{content}->{parent_id} . "', '" . $datetime . "')" + ); + if ($status == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "Cannot add import task on Remote Server.", + } + ); + return -1; + } + + my $centreon_dir = (defined($connector->{config}->{centreon_dir})) ? 
+ $connector->{config}->{centreon_dir} : '/usr/share/centreon'; + my $cmd = $centreon_dir . '/bin/centreon -u "' . $self->{clapi_user} . '" -p "' . + $self->{clapi_password} . '" -w -o CentreonWorker -a processQueue'; + $self->send_internal_action({ + action => 'COMMAND', + token => $options{token}, + data => { + logging => $options{data}->{logging}, + content => [ + { + command => $cmd + } + ], + parameters => { no_fork => 1 } + } + }); + $self->send_internal_action({ + action => 'COMMAND', + token => $options{token}, + data => { + logging => $options{data}->{logging}, + content => [ + { + command => $options{data}->{content}->{cbd_reload} + } + ] + } + }); + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'Task inserted on Remote Server', + } + ); + + return 0; +} + +sub move_cmd_file { + my ($self, %options) = @_; + + my $operator = '+<'; + if ($self->{config}->{dirty_mode} == 1) { + $operator = '<'; + } + my $handle; + if (-e $options{dst}) { + if (!open($handle, $operator, $options{dst})) { + $self->{logger}->writeLogError("[legacycmd] Cannot open file '" . $options{dst} . "': $!"); + return -1; + } + + return (0, $handle); + } + + return -1 if (!defined($options{src})); + return -1 if (! -e $options{src}); + + if (!File::Copy::move($options{src}, $options{dst})) { + $self->{logger}->writeLogError("[legacycmd] Cannot move file '" . $options{src} . "': $!"); + return -1; + } + + if (!open($handle, $operator, $options{dst})) { + $self->{logger}->writeLogError("[legacycmd] Cannot open file '" . $options{dst} . "': $!"); + return -1; + } + + return (0, $handle); +} + +sub handle_file { + my ($self, %options) = @_; + require bytes; + + $self->{logger}->writeLogDebug("[legacycmd] Processing file '" . $options{file} . 
"'"); + my $handle = $options{handle}; + while (my $line = <$handle>) { + if ($self->{stop} == 1) { + close($handle); + return -1; + } + + if ($line =~ /^(.*?):([^:]*)(?::(.*)){0,1}/) { + $self->execute_cmd(action => 0, cmd => $1, target => $2, param => $3, logging => 0); + if ($self->{config}->{dirty_mode} != 1) { + my $current_pos = tell($handle); + seek($handle, $current_pos - bytes::length($line), 0); + syswrite($handle, '-'); + # line is useless + $line = <$handle>; + } + } + } + + close($handle); + unlink($options{file}); + return 0; +} + +sub handle_centcore_cmd { + my ($self, %options) = @_; + + my ($code, $handle) = $self->move_cmd_file( + src => $self->{config}->{cmd_file}, + dst => $self->{config}->{cmd_file} . '_read', + ); + return if ($code == -1); + $self->handle_file(handle => $handle, file => $self->{config}->{cmd_file} . '_read'); +} + +sub handle_centcore_dir { + my ($self, %options) = @_; + + my ($dh, @files); + if (!opendir($dh, $self->{config}->{cmd_dir})) { + $self->{logger}->writeLogError("[legacycmd] Cannot open directory '" . $self->{config}->{cmd_dir} . "': $!"); + return ; + } + @files = sort { + (stat($self->{config}->{cmd_dir} . '/' . $a))[10] <=> (stat($self->{config}->{cmd_dir} . '/' . $b))[10] + } (readdir($dh)); + closedir($dh); + + my ($code, $handle); + foreach (@files) { + next if ($_ =~ /^\./); + my $file = $self->{config}->{cmd_dir} . '/' . $_; + if ($file =~ /_read$/) { + ($code, $handle) = $self->move_cmd_file( + dst => $file, + ); + } else { + ($code, $handle) = $self->move_cmd_file( + src => $file, + dst => $file . 
'_read', + ); + $file .= '_read'; + } + return if ($code == -1); + if ($self->handle_file(handle => $handle, file => $file) == -1) { + return ; + } + } +} + +sub handle_cmd_files { + my ($self, %options) = @_; + + return if ($self->check_pollers_config() == 0); + $self->handle_centcore_cmd(); + $self->handle_centcore_dir(); + $self->send_external_commands(logging => 0); +} + +sub action_centreoncommand { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[legacycmd] -class- start centreoncommand'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action centreoncommand proceed' }); + + if (!defined($options{data}->{content}) || ref($options{data}->{content}) ne 'ARRAY') { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "expected array, found '" . ref($options{data}->{content}) . "'", + } + ); + return -1; + } + + if ($self->check_pollers_config() == 0) { + $self->{logger}->writeLogError('[legacycmd] cannot get centreon database configuration'); + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get centreon database configuration' }); + return 1; + } + + foreach my $command (@{$options{data}->{content}}) { + my ($code, $message) = $self->execute_cmd( + action => 1, + token => $options{token}, + target => $command->{target}, + cmd => $command->{command}, + param => $command->{param}, + logging => 1 + ); + + if ($code == -1) { + $self->{logger}->writeLogError('[legacycmd] -class- ' . 
$message); + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => $message }); + return 1; + } + } + + $self->{logger}->writeLogDebug('[legacycmd] -class- finish centreoncommand'); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[legacycmd] $$ has quit"); + exit(0); + } + + $connector->cache_refresh(); + $connector->handle_cmd_files(); +} + +sub run { + my ($self, %options) = @_; + + $self->{tpapi_clapi} = gorgone::class::tpapi::clapi->new(); + $self->{tpapi_clapi}->set_configuration( + config => $self->{tpapi}->get_configuration(name => $self->{tpapi_clapi_name}) + ); + + $self->{clapi_user} = $self->{tpapi_clapi}->get_username(); + $self->{clapi_password} = $self->{tpapi_clapi}->get_password(protected => 1); + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-legacycmd', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'LEGACYCMDREADY', + data => {} + }); + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{class_object_centreon} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centreon} + ); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/legacycmd/hooks.pm b/gorgone/gorgone/modules/centreon/legacycmd/hooks.pm new file mode 
100644 index 00000000000..25636686c3a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/legacycmd/hooks.pm @@ -0,0 +1,162 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::legacycmd::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::legacycmd::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'legacycmd'; +use constant EVENTS => [ + { event => 'CENTREONCOMMAND', uri => '/command', method => 'POST' }, + { event => 'LEGACYCMDREADY' }, + { event => 'ADDIMPORTTASKWITHPARENT' } +]; + +my $config_core; +my $config; +my $legacycmd = {}; +my $stop = 0; +my $config_db_centreon; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config->{cmd_file} = defined($config->{cmd_file}) ? $config->{cmd_file} : '/var/lib/centreon/centcore.cmd'; + $config->{cache_dir} = defined($config->{cache_dir}) ? $config->{cache_dir} : '/var/cache/centreon/'; + $config->{cache_dir_trap} = defined($config->{cache_dir_trap}) ? 
$config->{cache_dir_trap} : '/etc/snmp/centreon_traps/'; + $config->{remote_dir} = defined($config->{remote_dir}) ? $config->{remote_dir} : '/var/lib/centreon/remote-data/'; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'LEGACYCMDREADY') { + $legacycmd->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$legacycmd->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-legacycmd: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-legacycmd', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($legacycmd->{running}) && $legacycmd->{running} == 1) { + $options{logger}->writeLogDebug("[legacycmd] Send TERM signal $legacycmd->{running}"); + CORE::kill('TERM', $legacycmd->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($legacycmd->{running} == 1) { + $options{logger}->writeLogDebug("[legacycmd] Send KILL signal for pool"); + CORE::kill('KILL', $legacycmd->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($legacycmd->{pid}) || $legacycmd->{pid} != $pid); + + $legacycmd = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($legacycmd->{running}) && $legacycmd->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my 
(%options) = @_; + + $options{logger}->writeLogInfo("[legacycmd] Create module 'legacycmd' process"); + + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-legacycmd'; + my $module = gorgone::modules::centreon::legacycmd::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[legacycmd] PID $child_pid (gorgone-legacycmd)"); + $legacycmd = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/class.pm b/gorgone/gorgone/modules/centreon/mbi/etl/class.pm new file mode 100644 index 00000000000..420342fcc25 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/class.pm @@ -0,0 +1,879 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etl::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use gorgone::class::http::http; +use XML::LibXML::Simple; +use JSON::XS; +use gorgone::modules::centreon::mbi::libs::Messages; +use gorgone::modules::centreon::mbi::etl::import::main; +use gorgone::modules::centreon::mbi::etl::event::main; +use gorgone::modules::centreon::mbi::etl::perfdata::main; +use gorgone::modules::centreon::mbi::libs::centreon::ETLProperties; +use Try::Tiny; +use EV; + +use constant NONE => 0; +use constant RUNNING => 1; +use constant STOP => 2; + +use constant NOTDONE => 0; +use constant DONE => 1; + +use constant UNPLANNED => -1; +use constant PLANNED => 0; +#use constant RUNNING => 1; +use constant FINISHED => 2; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{cbis_profile} = (defined($connector->{config}->{cbis_profile}) && $connector->{config}->{cbis_profile} ne '') ? + $connector->{config}->{cbis_profile} : '/etc/centreon-bi/cbis-profile.xml'; + $connector->{reports_profile} = (defined($connector->{config}->{reports_profile}) && $connector->{config}->{reports_profile} ne '') ? 
+ $connector->{config}->{reports_profile} : '/etc/centreon-bi/reports-profile.xml'; + + $connector->{run} = { status => NONE }; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[nodes] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub reset { + my ($self, %options) = @_; + + $self->{run} = { status => NONE }; +} + +sub runko { + my ($self, %options) = @_; + + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => defined($options{token}) ? $options{token} : $self->{run}->{token}, + data => { + messages => [ ['E', $options{msg} ] ] + } + ); + + $self->check_stopped_ko(); + return 1; +} + +sub db_parse_xml { + my ($self, %options) = @_; + + my ($rv, $message, $content) = gorgone::standard::misc::slurp(file => $options{file}); + return (0, $message) if (!$rv); + eval { + $SIG{__WARN__} = sub {}; + $content = XMLin($content, ForceArray => [], KeyAttr => []); + }; + if ($@) { + die 'cannot read xml file: ' . 
$@; + } + + my $dbcon = {}; + if (!defined($content->{profile})) { + die 'no profile'; + } + foreach my $profile (@{$content->{profile}}) { + my $name = lc($profile->{name}); + $name =~ s/censtorage/centstorage/; + $dbcon->{$name} = { port => 3306 }; + foreach my $prop (@{$profile->{baseproperties}->{property}}) { + if ($prop->{name} eq 'odaURL' && $prop->{value} =~ /jdbc\:[a-z]+\:\/\/([^:]*)(\:\d+)?\/(.*)/) { + $dbcon->{$name}->{host} = $1; + $dbcon->{$name}->{db} = $3; + if (defined($2) && $2 ne '') { + $dbcon->{$name}->{port} = $2; + $dbcon->{$name}->{port} =~ s/\://; + } + $dbcon->{$name}->{db} =~ s/\?autoReconnect\=true//; + } elsif ($prop->{name} eq 'odaUser') { + $dbcon->{$name}->{user} = $prop->{value}; + } elsif ($prop->{name} eq 'odaPassword') { + $dbcon->{$name}->{password} = $prop->{value}; + } + } + } + foreach my $profile ('centreon', 'centstorage') { + die 'cannot find profile ' . $profile if (!defined($dbcon->{$profile})); + foreach ('host', 'db', 'port', 'user', 'password') { + die "property $_ for profile $profile must be defined" + if (!defined($dbcon->{$profile}->{$_}) || $dbcon->{$profile}->{$_} eq ''); + } + } + + return $dbcon; +} + +sub execute_action { + my ($self, %options) = @_; + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgone-' . $self->{module_id}, + event => 'CENTREONMBIETLLISTENER', + token => $self->{module_id} . '-' . $self->{run}->{token} . '-' . 
$options{substep}, + timeout => 43200 + } + ] + }); + + my $content = { + dbmon => $self->{run}->{dbmon}, + dbbi => $self->{run}->{dbbi}, + params => $options{params} + }; + if (defined($options{etlProperties})) { + $content->{etlProperties} = $self->{run}->{etlProperties}; + } + if (defined($options{dataRetention})) { + $content->{dataRetention} = $self->{run}->{dataRetention}; + } + if (defined($options{options})) { + $content->{options} = $self->{run}->{options}; + } + + $self->send_internal_action({ + action => $options{action}, + token => $self->{module_id} . '-' . $self->{run}->{token} . '-' . $options{substep}, + data => { + instant => 1, + content => $content + } + }); +} + +sub watch_etl_event { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{event}->{substeps_executed}++; + my ($idx, $idx2) = split(/-/, $options{indexes}); + $self->{run}->{schedule}->{event}->{stages}->[$idx]->[$idx2]->{status} = FINISHED; + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{event}->{substeps_executed} >= $self->{run}->{schedule}->{event}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][EVENT] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{event}->{status} = FINISHED; + $self->check_stopped_ok(); + return ; + } + + my $stage = $self->{run}->{schedule}->{event}->{current_stage}; + my $stage_finished = 0; + while ($stage <= 2) { + while (my ($idx, $val) = each(@{$self->{run}->{schedule}->{event}->{stages}->[$stage]})) { + if (!defined($val->{status})) { + $self->{logger}->writeLogDebug("[mbi-etl] execute substep event-$stage-$idx"); + $self->{run}->{schedule}->{event}->{substeps_execute}++; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSEVENT', + substep => "event-$stage-$idx", + etlProperties => 1, + options => 1, + params => 
$self->{run}->{schedule}->{event}->{stages}->[$stage]->[$idx] + ); + $self->{run}->{schedule}->{event}->{stages}->[$stage]->[$idx]->{status} = RUNNING; + } elsif ($val->{status} == FINISHED) { + $stage_finished++; + } + } + + if ($stage_finished >= scalar(@{$self->{run}->{schedule}->{event}->{stages}->[$stage]})) { + $self->{run}->{schedule}->{event}->{current_stage}++; + $stage = $self->{run}->{schedule}->{event}->{current_stage}; + } else { + last; + } + } +} + +sub watch_etl_perfdata { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{perfdata}->{substeps_executed}++; + my ($idx, $idx2) = split(/-/, $options{indexes}); + $self->{run}->{schedule}->{perfdata}->{stages}->[$idx]->[$idx2]->{status} = FINISHED; + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{perfdata}->{substeps_executed} >= $self->{run}->{schedule}->{perfdata}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{perfdata}->{status} = FINISHED; + $self->check_stopped_ok(); + return ; + } + + my $stage = $self->{run}->{schedule}->{perfdata}->{current_stage}; + my $stage_finished = 0; + while ($stage <= 2) { + while (my ($idx, $val) = each(@{$self->{run}->{schedule}->{perfdata}->{stages}->[$stage]})) { + if (!defined($val->{status})) { + $self->{logger}->writeLogDebug("[mbi-etl] execute substep perfdata-$stage-$idx"); + $self->{run}->{schedule}->{perfdata}->{substeps_execute}++; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSPERFDATA', + substep => "perfdata-$stage-$idx", + etlProperties => 1, + options => 1, + params => $self->{run}->{schedule}->{perfdata}->{stages}->[$stage]->[$idx] + ); + $self->{run}->{schedule}->{perfdata}->{stages}->[$stage]->[$idx]->{status} = RUNNING; + } elsif ($val->{status} == FINISHED) { + $stage_finished++; + } + } + 
+ if ($stage_finished >= scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[$stage]})) { + $self->{run}->{schedule}->{perfdata}->{current_stage}++; + $stage = $self->{run}->{schedule}->{perfdata}->{current_stage}; + } else { + last; + } + } +} + +sub watch_etl_dimensions { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{dimensions}->{substeps_executed}++; + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{dimensions}->{substeps_executed} >= $self->{run}->{schedule}->{dimensions}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][DIMENSIONS] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{dimensions}->{status} = FINISHED; + $self->run_etl(); + $self->check_stopped_ok(); + return ; + } + + $self->{run}->{schedule}->{dimensions}->{substeps_execute}++; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSDIMENSIONS', + substep => 'dimensions-1', + etlProperties => 1, + options => 1, + params => {} + ); +} + +sub watch_etl_import { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{import}->{substeps_executed}++; + my ($idx, $idx2) = split(/-/, $options{indexes}); + if (defined($idx) && defined($idx2)) { + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{actions}->[$idx2]->{status} = FINISHED; + } else { + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{status} = FINISHED; + } + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{import}->{substeps_executed} >= $self->{run}->{schedule}->{import}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][IMPORT] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{import}->{status} = FINISHED; + $self->run_etl(); + 
$self->check_stopped_ok(); + return ; + } + + while (my ($idx, $val) = each(@{$self->{run}->{schedule}->{import}->{actions}})) { + if (!defined($val->{status})) { + $self->{logger}->writeLogDebug("[mbi-etl] execute substep import-$idx"); + $self->{run}->{schedule}->{import}->{substeps_execute}++; + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{status} = RUNNING; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSIMPORT', + substep => "import-$idx", + params => { + type => $val->{type}, + db => $val->{db}, + sql => $val->{sql}, + command => $val->{command}, + message => $val->{message} + } + ); + } elsif ($val->{status} == FINISHED) { + while (my ($idx2, $val2) = each(@{$val->{actions}})) { + next if (defined($val2->{status})); + + $self->{logger}->writeLogDebug("[mbi-etl] execute substep import-$idx-$idx2"); + $self->{run}->{schedule}->{import}->{substeps_execute}++; + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{actions}->[$idx2]->{status} = RUNNING; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSIMPORT', + substep => "import-$idx-$idx2", + params => $val2 + ); + } + } + } +} + +sub run_etl_import { + my ($self, %options) = @_; + + if ((defined($self->{run}->{etlProperties}->{'host.dedicated'}) && $self->{run}->{etlProperties}->{'host.dedicated'} eq 'false') + || ($self->{run}->{dbbi}->{centstorage}->{host} . ':' . $self->{run}->{dbbi}->{centstorage}->{port} eq $self->{run}->{dbmon}->{centstorage}->{host} . ':' . $self->{run}->{dbmon}->{centstorage}->{port}) + || ($self->{run}->{dbbi}->{centreon}->{host} . ':' . $self->{run}->{dbbi}->{centreon}->{port} eq $self->{run}->{dbmon}->{centreon}->{host} . ':' . $self->{run}->{dbmon}->{centreon}->{port})) { + die 'Do not execute this script if the reporting engine is installed on the monitoring server. 
In case of "all in one" installation, do not consider this message'; + } + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][IMPORT] >>>>>>> start' ] ] }); + + gorgone::modules::centreon::mbi::etl::import::main::prepare($self); + + $self->{run}->{schedule}->{import}->{status} = RUNNING; + + $self->{run}->{schedule}->{import}->{substeps_execute} = 0; + $self->{run}->{schedule}->{import}->{substeps_executed} = 0; + $self->{run}->{schedule}->{import}->{substeps_total} = 0; + foreach (@{$self->{run}->{schedule}->{import}->{actions}}) { + $self->{run}->{schedule}->{import}->{substeps_total}++; + my $num = defined($_->{actions}) ? scalar(@{$_->{actions}}) : 0; + $self->{run}->{schedule}->{import}->{substeps_total} += $num if ($num > 0); + } + + $self->{logger}->writeLogDebug("[mbi-etl] import substeps " . $self->{run}->{schedule}->{import}->{substeps_total}); + + $self->watch_etl_import(); +} + +sub run_etl_dimensions { + my ($self, %options) = @_; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][DIMENSIONS] >>>>>>> start' ] ] }); + $self->{run}->{schedule}->{dimensions}->{status} = RUNNING; + $self->{run}->{schedule}->{dimensions}->{substeps_execute} = 0; + $self->{run}->{schedule}->{dimensions}->{substeps_executed} = 0; + $self->{run}->{schedule}->{dimensions}->{substeps_total} = 1; + $self->watch_etl_dimensions(); +} + +sub run_etl_event { + my ($self, %options) = @_; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][EVENT] >>>>>>> start' ] ] }); + + gorgone::modules::centreon::mbi::etl::event::main::prepare($self); + + $self->{run}->{schedule}->{event}->{status} = RUNNING; + $self->{run}->{schedule}->{event}->{current_stage} = 0; + $self->{run}->{schedule}->{event}->{substeps_execute} = 0; + 
$self->{run}->{schedule}->{event}->{substeps_executed} = 0; + $self->{run}->{schedule}->{event}->{substeps_total} = + scalar(@{$self->{run}->{schedule}->{event}->{stages}->[0]}) + scalar(@{$self->{run}->{schedule}->{event}->{stages}->[1]}) + scalar(@{$self->{run}->{schedule}->{event}->{stages}->[2]}); + + $self->{logger}->writeLogDebug("[mbi-etl] event substeps " . $self->{run}->{schedule}->{event}->{substeps_total}); + + $self->watch_etl_event(); +} + +sub run_etl_perfdata { + my ($self, %options) = @_; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] >>>>>>> start' ] ] }); + + gorgone::modules::centreon::mbi::etl::perfdata::main::prepare($self); + + $self->{run}->{schedule}->{perfdata}->{status} = RUNNING; + $self->{run}->{schedule}->{perfdata}->{current_stage} = 0; + $self->{run}->{schedule}->{perfdata}->{substeps_execute} = 0; + $self->{run}->{schedule}->{perfdata}->{substeps_executed} = 0; + $self->{run}->{schedule}->{perfdata}->{substeps_total} = + scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[0]}) + scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[1]}) + scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[2]}); + + $self->{logger}->writeLogDebug("[mbi-etl] perfdata substeps " . 
$self->{run}->{schedule}->{perfdata}->{substeps_total}); + + $self->watch_etl_perfdata(); +} + +sub run_etl { + my ($self, %options) = @_; + + if ($self->{run}->{schedule}->{import}->{status} == PLANNED) { + $self->run_etl_import(); + return ; + } elsif ($self->{run}->{schedule}->{dimensions}->{status} == PLANNED) { + $self->run_etl_dimensions(); + return ; + } + if ($self->{run}->{schedule}->{event}->{status} == PLANNED) { + $self->run_etl_event(); + } + if ($self->{run}->{schedule}->{perfdata}->{status} == PLANNED) { + $self->run_etl_perfdata(); + } +} + +sub check_stopped_ko_import { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{import}->{substeps_executed} >= $self->{run}->{schedule}->{import}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko_dimensions { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{dimensions}->{substeps_executed} >= $self->{run}->{schedule}->{dimensions}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko_event { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{event}->{substeps_executed} >= $self->{run}->{schedule}->{event}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko_perfdata { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{perfdata}->{substeps_executed} >= $self->{run}->{schedule}->{perfdata}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko { + my ($self, %options) = @_; + + # if nothing planned. 
we stop + if ($self->{run}->{schedule}->{planned} == NOTDONE) { + $self->reset(); + return 0; + } + + return 1 if ($self->{run}->{status} != STOP); + + my $stopped = 0; + $stopped += $self->check_stopped_ko_import() + if ($self->{run}->{schedule}->{import}->{status} == RUNNING); + $stopped += $self->check_stopped_ko_dimensions() + if ($self->{run}->{schedule}->{dimensions}->{status} == RUNNING); + $stopped += $self->check_stopped_ko_event() + if ($self->{run}->{schedule}->{event}->{status} == RUNNING); + $stopped += $self->check_stopped_ko_perfdata() + if ($self->{run}->{schedule}->{perfdata}->{status} == RUNNING); + + if ($stopped == 0) { + $self->reset(); + return 0; + } + + return 1; +} + +sub check_stopped_ok_import { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{import}->{substeps_executed} >= $self->{run}->{schedule}->{import}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok_dimensions { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{dimensions}->{substeps_executed} >= $self->{run}->{schedule}->{dimensions}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok_event { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{event}->{substeps_executed} >= $self->{run}->{schedule}->{event}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok_perfdata { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{perfdata}->{substeps_executed} >= $self->{run}->{schedule}->{perfdata}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok { + my ($self, %options) = @_; + + return 1 if ($self->{run}->{status} == STOP); + + my $stopped = 0; + $stopped += $self->check_stopped_ok_import() + if ($self->{run}->{schedule}->{import}->{status} == RUNNING); + $stopped += $self->check_stopped_ok_dimensions() + if ($self->{run}->{schedule}->{dimensions}->{status} == RUNNING); + $stopped += $self->check_stopped_ok_event() + if 
($self->{run}->{schedule}->{event}->{status} == RUNNING); + $stopped += $self->check_stopped_ok_perfdata() + if ($self->{run}->{schedule}->{perfdata}->{status} == RUNNING); + + if ($stopped == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $self->{run}->{token}, + data => { + messages => [ ['I', '[SCHEDULER] <<<<<<< end' ] ] + } + ); + $self->reset(); + return 0; + } + + return 1; +} + +sub planning { + my ($self, %options) = @_; + + if ($self->{run}->{options}->{import} == 1) { + $self->{run}->{schedule}->{import}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + if ($self->{run}->{options}->{dimensions} == 1) { + $self->{run}->{schedule}->{dimensions}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + if ($self->{run}->{options}->{event} == 1) { + $self->{run}->{schedule}->{event}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + if ($self->{run}->{options}->{perfdata} == 1) { + $self->{run}->{schedule}->{perfdata}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + + if ($self->{run}->{schedule}->{steps_total} == 0) { + die "[SCHEDULING] nothing planned"; + } + + $self->{run}->{schedule}->{steps_executed} = 0; + $self->{run}->{schedule}->{planned} = DONE; +} + +sub check_basic_options { + my ($self, %options) = @_; + + if (($options{daily} == 0 && $options{rebuild} == 0 && $options{create_tables} == 0 && !defined($options{centile})) + || ($options{daily} == 1 && $options{rebuild} == 1)) { + die "Specify one execution method"; + } + if (($options{rebuild} == 1 || $options{create_tables} == 1) + && (($options{start} ne '' && $options{end} eq '') + || ($options{start} eq '' && $options{end} ne ''))) { + die "Specify both options start and end or neither of them to use default data retention options"; + } + if ($options{rebuild} == 1 && $options{start} ne '' && $options{end} ne '' + && ($options{start} !~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/ || 
$options{end} !~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/)) { + die "Verify period start or end date format"; + } +} + +sub action_centreonmbietlrun { + my ($self, %options) = @_; + + try { + $options{token} = $self->generate_token() if (!defined($options{token})); + + return $self->runko(token => $options{token}, msg => '[SCHEDULER] already running') if ($self->{run}->{status} == RUNNING); + return $self->runko(token => $options{token}, msg => '[SCHEDULER] currently wait previous execution finished - can restart gorgone mbi process') if ($self->{run}->{status} == STOP); + + $self->{run}->{token} = $options{token}; + $self->{run}->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + + $self->check_basic_options(%{$options{data}->{content}}); + + $self->{run}->{schedule} = { + steps_total => 0, + steps_executed => 0, + planned => NOTDONE, + import => { status => UNPLANNED, actions => [] }, + dimensions => { status => UNPLANNED }, + event => { status => UNPLANNED, stages => [ [], [], [] ] }, + perfdata => { status => UNPLANNED, stages => [ [], [], [] ] } + }; + $self->{run}->{status} = RUNNING; + + $self->{run}->{options} = $options{data}->{content}; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER] >>>>>>> start' ] ] }); + + $self->{run}->{dbmon} = $self->db_parse_xml(file => $self->{cbis_profile}); + $self->{run}->{dbbi} = $self->db_parse_xml(file => $self->{reports_profile}); + + $self->{run}->{dbmon_centreon_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$self->{run}->{dbmon}->{centreon}} + ); + $self->{run}->{dbmon_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$self->{run}->{dbmon}->{centstorage}} + ); + $self->{run}->{dbbi_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => 
$self->{logger}, + die => 1, + %{$self->{run}->{dbbi}->{centstorage}} + ); + + $self->{etlProp} = gorgone::modules::centreon::mbi::libs::centreon::ETLProperties->new($self->{logger}, $self->{run}->{dbmon_centreon_con}); + ($self->{run}->{etlProperties}, $self->{run}->{dataRetention}) = $self->{etlProp}->getProperties(); + + $self->planning(); + $self->run_etl(); + } catch { + $self->runko(msg => $_); + $self->reset(); + }; + + return 0; +} + +sub action_centreonmbietllistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token}) || $options{token} !~ /^$self->{module_id}-$self->{run}->{token}-(.*?)-(.*)$/); + my ($type, $indexes) = ($1, $2); + + if ($options{data}->{code} == GORGONE_ACTION_FINISH_KO) { + $self->{run}->{status} = STOP; + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $self->{run}->{token}, data => $options{data}->{data}); + } elsif ($options{data}->{code} == GORGONE_ACTION_FINISH_OK) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => $options{data}->{data}); + } else { + return 0; + } + + if ($type eq 'import') { + $self->watch_etl_import(indexes => $indexes); + } elsif ($type eq 'dimensions') { + $self->watch_etl_dimensions(indexes => $indexes); + } elsif ($type eq 'event') { + $self->watch_etl_event(indexes => $indexes); + } elsif ($type eq 'perfdata') { + $self->watch_etl_perfdata(indexes => $indexes); + } + + return 1; +} + +sub action_centreonmbietlkill { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + if ($self->{run}->{status} == NONE) { + $self->{logger}->writeLogDebug('[mbi-etl] kill action - etl not running'); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + messages => 'etl not running' + } + ); + return 0; + } + + $self->{logger}->writeLogDebug('[mbi-etl] kill sent to the module etlworkers'); + + $self->send_internal_action({ + action => 
'KILL', + token => $options{token}, + data => { + content => { + package => 'gorgone::modules::centreon::mbi::etlworkers::hooks' + } + } + }); + + # RUNNING or STOP + $self->send_log( + code => GORGONE_ACTION_CONTINUE, + token => $options{token}, + data => { + messages => 'kill sent to the module etlworkers' + } + ); + + $self->reset(); + + return 0; +} + +sub action_centreonmbietlstatus { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + my $map_etl_status = { + 0 => 'ready', + 1 => 'running', + 2 => 'stopping' + }; + + my $map_planning_status = { + 0 => 'running', + 1 => 'ok' + }; + + my $map_section_status = { + -1 => 'unplanned', + 0 => 'planned', + 1 => 'running', + 2 => 'ok' + }; + + my $section = {}; + foreach ('import', 'dimensions', 'event', 'perfdata') { + next if (!defined($self->{run}->{schedule})); + + $section->{$_} = { + status => $self->{run}->{schedule}->{$_}->{status}, + statusStr => $map_section_status->{ $self->{run}->{schedule}->{$_}->{status} } + }; + if ($self->{run}->{schedule}->{$_}->{status} == RUNNING) { + $section->{$_}->{steps_total} = $self->{run}->{schedule}->{$_}->{substeps_total}; + $section->{$_}->{steps_executed} = $self->{run}->{schedule}->{$_}->{substeps_executed}; + } + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + token => defined($self->{run}->{token}) ? $self->{run}->{token} : undef, + + status => $self->{run}->{status}, + statusStr => $map_etl_status->{ $self->{run}->{status} }, + + planning => defined($self->{run}->{schedule}->{planned}) ? $self->{run}->{schedule}->{planned} : undef, + planningStr => defined($self->{run}->{schedule}->{planned}) ? $map_planning_status->{ $self->{run}->{schedule}->{planned} } : undef, + + sections => $section + } + ); + + return 0; +} + + + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[" . $connector->{module_id} . 
"] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-' . $self->{module_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONMBIETLREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/event/main.pm b/gorgone/gorgone/modules/centreon/mbi/etl/event/main.pm new file mode 100644 index 00000000000..6ccbcc447f5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/event/main.pm @@ -0,0 +1,292 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etl::event::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::Utils; + +my ($biTables, $utils, $liveService, $time); +my ($start, $end); + +sub initVars { + my ($etl) = @_; + + $biTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etl->{run}->{messages}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); +} + +sub emptyTableForRebuild { + my ($etl, %options) = @_; + + my $sql = [ [ '[CREATE] Deleting table [' . $options{name} . ']', 'DROP TABLE IF EXISTS `' . $options{name} . '`' ] ]; + + my $structure = $biTables->dumpTableStructure($options{name}); + $structure =~ s/KEY.*\(\`$options{column}\`\)\,//g; + $structure =~ s/KEY.*\(\`$options{column}\`\)//g; + $structure =~ s/\,[\n\s+]+\)/\n\)/g; + + if (defined($options{start})) { + $structure =~ s/\n.*PARTITION.*//g; + $structure =~ s/\,[\n\s]+\)/\)/; + $structure .= ' PARTITION BY RANGE(`' . $options{column} . '`) ('; + + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + + my $append = ''; + foreach (@$partitionsPerf) { + $structure .= $append . "PARTITION p" . $_->{name} . " VALUES LESS THAN (" . $_->{epoch} . ")"; + $append = ','; + } + $structure .= ');'; + } + + push @$sql, + [ '[CREATE] Add table [' . $options{name} . 
']', $structure ], + [ "[INDEXING] Adding index [idx_$options{name}_$options{column}] on table [$options{name}]", "ALTER TABLE `$options{name}` ADD INDEX `idx_$options{name}_$options{column}` (`$options{column}`)" ]; + + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub deleteEntriesForRebuild { + my ($etl, %options) = @_; + + my $sql = []; + if (!$biTables->isTablePartitioned($options{name})) { + push @$sql, + [ + "[PURGE] Delete table [$options{name}] from $options{start} to $options{end}", + "DELETE FROM $options{name} WHERE time_id >= " . $utils->getDateEpoch($options{start}) . " AND time_id < " . $utils->getDateEpoch($options{end}) + ]; + } else { + my $structure = $biTables->dumpTableStructure($options{name}); + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + foreach (@$partitionsPerf) { + if ($structure =~ /p$_->{name}/m) { + push @$sql, + [ + "[PURGE] Truncate partition $_->{name} on table [$options{name}]", + "ALTER TABLE $options{name} TRUNCATE PARTITION p$_->{name}" + ]; + } else { + push @$sql, + [ + '[PARTITIONS] Add partition [p' . $_->{name} . '] on table [' . $options{name} . ']', + "ALTER TABLE `$options{name}` ADD PARTITION (PARTITION `p$_->{name}` VALUES LESS THAN(" . $_->{epoch} . 
"))" + ]; + } + } + } + + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub purgeAvailabilityTables { + my ($etl, $start, $end) = @_; + + my $firstDayOfMonth = $start; + $firstDayOfMonth =~ s/([1-2][0-9]{3})\-([0-1][0-9])\-[0-3][0-9]/$1\-$2\-01/; + + if ($etl->{run}->{options}->{nopurge} == 0) { + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + emptyTableForRebuild($etl, name => 'mod_bi_hostavailability', column => 'time_id', start => $start, end => $end); + } + + emptyTableForRebuild($etl, name => 'mod_bi_hgmonthavailability', column => 'time_id'); + } + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + emptyTableForRebuild($etl, name => 'mod_bi_serviceavailability', column => 'time_id', start => $start, end => $end); + } + + emptyTableForRebuild($etl, name => 'mod_bi_hgservicemonthavailability', column => 'time_id'); + } + } else { + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + deleteEntriesForRebuild($etl, name => 'mod_bi_hostavailability', start => $start, end => $end); + } + + deleteEntriesForRebuild($etl, name => 'mod_bi_hgmonthavailability', start => $firstDayOfMonth, end => $end); + } + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + deleteEntriesForRebuild($etl, name => 'mod_bi_serviceavailability', start => $start, end => $end); + } + deleteEntriesForRebuild($etl, name => 
'mod_bi_hgservicemonthavailability', start => $firstDayOfMonth, end => $end); + } + } +} + +sub processByDay { + my ($etl, $liveServices, $start, $end) = @_; + + while (my ($liveserviceName, $liveserviceId) = each (%$liveServices)) { + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[1]}, { + type => 'availability_day_hosts', + liveserviceName => $liveserviceName, + liveserviceId => $liveserviceId, + start => $start, + end => $end + }; + } + + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[1]}, { + type => 'availability_day_services', + liveserviceName => $liveserviceName, + liveserviceId => $liveserviceId, + start => $start, + end => $end + }; + } + } +} + +sub processHostgroupAvailability { + my ($etl, $start, $end) = @_; + + $time->insertTimeEntriesForPeriod($start, $end); + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[2]}, { + type => 'availability_month_services', + start => $start, + end => $end + }; + } + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[2]}, { + type => 'availability_month_hosts', + start => $start, + end => $end + }; + } +} + +sub dailyProcessing { + my ($etl, $liveServices) = @_; + + # getting yesterday start and end date to process yesterday data + my ($start, $end) = $utils->getYesterdayTodayDate(); + # daily mod_bi_time table filling + $time->insertTimeEntriesForPeriod($start, $end); + + my ($epoch, $partName) = $utils->getDateEpoch($end); + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . 
'] on table [mod_bi_hostavailability]', + "ALTER TABLE `mod_bi_hostavailability` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_serviceavailability]', + "ALTER TABLE `mod_bi_serviceavailability` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + + # Calculating availability of hosts and services for the current day + processByDay($etl, $liveServices, $start, $end); + + # Calculating statistics for last month if day of month is 1 + my ($year, $mon, $day) = split('-', $end); + if ($day == 1) { + processHostgroupAvailability($etl, $utils->subtractDateMonths($end, 1), $utils->subtractDateDays($end, 1)); + } + + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, + { type => 'events', services => 1, start => $start, end => $end }, { type => 'events', hosts => 1, start => $start, end => $end }; +} + +# rebuild availability statistics +sub rebuildAvailability { + my ($etl, $start, $end, $liveServices) = @_; + + my $days = $utils->getRangePartitionDate($start, $end); + foreach (@$days) { + $end = $_->{date}; + processByDay($etl, $liveServices, $start, $end); + + my ($year, $mon, $day) = split('-', $end); + if ($day == 1) { + processHostgroupAvailability($etl, $utils->subtractDateMonths($end, 1), $utils->subtractDateDays($end, 1)); + } + + $start = $end; + } +} + +sub rebuildProcessing { + my ($etl, $liveServices) = @_; + + if ($etl->{run}->{options}->{start} ne '' && $etl->{run}->{options}->{end} ne '') { + # setting manually start and end dates for each granularity of perfdata + ($start, $end) = ($etl->{run}->{options}->{start}, $etl->{run}->{options}->{end}); + }else { + # getting max perfdata retention period to fill mod_bi_time + my $periods = $etl->{etlProp}->getRetentionPeriods(); + ($start, $end) = 
($periods->{'availability.daily'}->{start}, $periods->{'availability.daily'}->{end}); + } + + # insert entries into table mod_bi_time + $time->insertTimeEntriesForPeriod($start, $end); + if (!defined($etl->{run}->{options}->{events_only}) || $etl->{run}->{options}->{events_only} == 0) { + purgeAvailabilityTables($etl, $start, $end); + rebuildAvailability($etl, $start, $end, $liveServices); + } + + if (!defined($etl->{run}->{options}->{availability_only}) || $etl->{run}->{options}->{availability_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, + { type => 'events', services => 1, start => $start, end => $end }, { type => 'events', hosts => 1, start => $start, end => $end }; + } +} + +sub prepare { + my ($etl) = @_; + + initVars($etl); + + my $liveServiceList = $liveService->getLiveServicesByNameForTpIds($etl->{run}->{etlProperties}->{'liveservices.availability'}); + + if ($etl->{run}->{options}->{daily} == 1) { + dailyProcessing($etl, $liveServiceList); + } elsif ($etl->{run}->{options}->{rebuild} == 1) { + rebuildProcessing($etl, $liveServiceList); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/hooks.pm b/gorgone/gorgone/modules/centreon/mbi/etl/hooks.pm new file mode 100644 index 00000000000..bc210ca41e6 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/hooks.pm @@ -0,0 +1,156 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etl::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::mbi::etl::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'mbietl'; +use constant EVENTS => [ + { event => 'CENTREONMBIETLRUN', uri => '/run', method => 'POST' }, + { event => 'CENTREONMBIETLKILL', uri => '/kill', method => 'GET' }, + { event => 'CENTREONMBIETLSTATUS', uri => '/status', method => 'GET' }, + { event => 'CENTREONMBIETLLISTENER' }, + { event => 'CENTREONMBIETLREADY' } +]; + +my $config_core; +my $config; +my $run = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CENTREONMBIETLREADY') { + $run->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$run->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-' . NAME . ': still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-' . 
NAME, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($run->{running}) && $run->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send TERM signal $run->{pid}"); + CORE::kill('TERM', $run->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($run->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send KILL signal $run->{pid}"); + CORE::kill('KILL', $run->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($run->{pid}) || $run->{pid} != $pid); + + $run = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($run->{running}) && $run->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[" . NAME . "] Create module '" . NAME . "' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-' . NAME; + my $module = gorgone::modules::centreon::mbi::etl::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[" . NAME . "] PID $child_pid (gorgone-" . NAME . 
")"); + $run = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/import/main.pm b/gorgone/gorgone/modules/centreon/mbi/etl/import/main.pm new file mode 100644 index 00000000000..54bf7b0c6f5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/import/main.pm @@ -0,0 +1,427 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etl::import::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::Utils; + +my ($biTables, $monTables, $utils); +my ($argsMon, $argsBi); + +sub initVars { + my ($etl) = @_; + + $biTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $monTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbmon_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etl->{run}->{messages}); + $argsMon = $utils->buildCliMysqlArgs($etl->{run}->{dbmon}->{centstorage}); + $argsBi = $utils->buildCliMysqlArgs($etl->{run}->{dbbi}->{centstorage}); +} + +# Create tables for centstorage database on reporting server +sub createTables { + my ($etl, $periods, $options, $notTimedTables) = @_; + + #Creating all centreon bi tables except the one already created + my $sth = $etl->{run}->{dbmon_centstorage_con}->query({ query => "SHOW TABLES LIKE 'mod_bi_%'" }); + while (my @row = $sth->fetchrow_array()) { + my $name = $row[0]; + if (!$biTables->tableExists($name)) { + my $structure = $monTables->dumpTableStructure($name); + push @{$etl->{run}->{schedule}->{import}->{actions}}, + { + type => 1, db => 'centstorage', sql => [ ["[CREATE] add table [$name]", $structure] ], actions => [] + }; + } + } + + # Manage centreonAcl + my $action; + if ($options->{create_tables} == 0) { + #Update centreon_acl table each time centreon-only is started - not the best way but needed for Widgets + my $cmd = sprintf( + "mysqldump --replace --no-create-info --skip-add-drop-table --skip-add-locks --skip-comments %s '%s' %s | mysql %s '%s'", + $argsMon, + $etl->{run}->{dbmon}->{centstorage}->{db}, + 'centreon_acl', + $argsBi, + $etl->{run}->{dbbi}->{centstorage}->{db} + ); + $action = { type => 2, message => '[LOAD] import table 
[centreon_acl]', command => $cmd }; + } + + if (!$biTables->tableExists('centreon_acl')) { + my $structure = $monTables->dumpTableStructure('centreon_acl'); + push @{$etl->{run}->{schedule}->{import}->{actions}}, + { + type => 1, db => 'centstorage', sql => [ ["[CREATE] add table [centreon_acl]", $structure] ], actions => defined($action) ? [$action] : [] + }; + } elsif (defined($action)) { + if ($options->{rebuild} == 1 && $options->{nopurge} == 0) { + push @{$etl->{run}->{schedule}->{import}->{actions}}, { + type => 1, db => 'centstorage', sql => [ ["[TRUNCATE] table [centreon_acl]", 'TRUNCATE table centreon_acl'] ], actions => defined($action) ? [$action] : [] + }; + } else { + push @{$etl->{run}->{schedule}->{import}->{actions}}, $action; + } + } + + my $tables = join('|', @$notTimedTables); + $sth = $etl->{run}->{dbmon_centstorage_con}->query({ query => "SHOW TABLES LIKE 'mod_bam_reporting_%'" }); + while (my @row = $sth->fetchrow_array()) { + my $name = $row[0]; + next if ($name =~ /^(?:$tables)$/); + + if (!$biTables->tableExists($name)) { + my $structure = $monTables->dumpTableStructure($name); + push @{$etl->{run}->{schedule}->{import}->{actions}}, + { + type => 1, db => 'centstorage', sql => [ ["[CREATE] Add table [$name]", $structure] ], actions => [] + }; + } + } +} + +# Extract data from Centreon DB server +sub extractData { + my ($etl, $options, $notTimedTables) = @_; + + foreach my $name (@$notTimedTables) { + my $action = { type => 1, db => 'centstorage', sql => [], actions => [] }; + + push @{$action->{sql}}, [ '[CREATE] Deleting table [' . $name . ']', 'DROP TABLE IF EXISTS `' . $name . 
'`' ]; + + my $structure = $monTables->dumpTableStructure($name); + $structure =~ s/(CONSTRAINT.*\n)//g; + $structure =~ s/(\,\n\s+\))/\)/g; + $structure =~ s/auto_increment\=[0-9]+//i; + $structure =~ s/auto_increment//i; + + push @{$action->{sql}}, [ "[CREATE] Add table [$name]", $structure ]; + if ($name eq 'hoststateevents' || $name eq 'servicestateevents') { + # add drop indexes + my $indexes = $etl->{run}->{dbmon_centstorage_con}->query({ query => "SHOW INDEX FROM " . $name }); + my $previous = ''; + while (my $row = $indexes->fetchrow_hashref()) { + if ($row->{Key_name} ne $previous) { + if (lc($row->{Key_name}) eq lc('PRIMARY')) { + push @{$action->{sql}}, + [ + "[INDEXING] Deleting index [PRIMARY KEY] on table [".$name."]", + "ALTER TABLE `" . $name . "` DROP PRIMARY KEY" + ]; + } else { + push @{$action->{sql}}, + [ + "[INDEXING] Deleting index [$row->{Key_name}] on table [".$name."]", + "ALTER TABLE `" . $name . "` DROP INDEX " . $row->{Key_name} + ]; + } + } + $previous = $row->{Key_name}; + } + + push @{$action->{sql}}, + [ + "[INDEXING] Adding index [in_downtime, start_time, end_time] on table [" . $name . "]", + "ALTER TABLE `" . $name . "` ADD INDEX `idx_" . $name . "_downtime_start_end_time` (in_downtime, start_time, end_time)" + ], + [ + "[INDEXING] Adding index [end_time] on table [" . $name . "]", + "ALTER TABLE `" . $name . "` ADD INDEX `idx_" . $name . 
"_end_time` (`end_time`)" + ]; + if ($name eq 'servicestateevents') { + push @{$action->{sql}}, + [ + "[INDEXING] Adding index [host_id, service_id, start_time, end_time, ack_time, state, last_update] on table [servicestateevents]", + "ALTER TABLE `servicestateevents` ADD INDEX `idx_servicestateevents_multi` (host_id, service_id, start_time, end_time, ack_time, state, last_update)" + ]; + } + } + + my $cmd = sprintf( + "mysqldump --no-create-info --skip-add-drop-table --skip-add-locks --skip-comments %s '%s' %s | mysql %s '%s'", + $argsMon, + $etl->{run}->{dbmon}->{centstorage}->{db}, + $name, + $argsBi, + $etl->{run}->{dbbi}->{centstorage}->{db} + ); + push @{$action->{actions}}, { type => 2, message => '[LOAD] import table [' . $name . ']', command => $cmd }; + push @{$etl->{run}->{schedule}->{import}->{actions}}, $action; + } +} + +# load data into the reporting server from files copied from the monitoring server +sub extractCentreonDB { + my ($etl, $etlProperties) = @_; + + my $tables = 'host hostgroup_relation hostgroup hostcategories_relation hostcategories ' . + 'host_service_relation service service_categories service_categories_relation ' . + 'timeperiod mod_bi_options servicegroup mod_bi_options_centiles servicegroup_relation contact contactgroup_service_relation '. + 'host_template_relation command contact_host_relation contactgroup_host_relation contactgroup contact_service_relation'; + + my $mon = $utils->buildCliMysqlArgs($etl->{run}->{dbmon}->{centreon}); + my $bi = $utils->buildCliMysqlArgs($etl->{run}->{dbbi}->{centreon}); + + my $cmd = sprintf( + "mysqldump --skip-add-drop-table --skip-add-locks --skip-comments %s '%s' %s | mysql --force %s '%s'", + $mon, + $etl->{run}->{dbmon}->{centreon}->{db}, + $tables, + $bi, + $etl->{run}->{dbbi}->{centreon}->{db} + ); + + push @{$etl->{run}->{schedule}->{import}->{actions}}, { + type => 1, + db => 'centreon', + sql => [ + [ '[DROPDB] database ' . 
$etl->{run}->{dbbi}->{centreon}->{db}, "DROP DATABASE `$etl->{run}->{dbbi}->{centreon}->{db}`" ], + [ '[CREATEDB] database ' . $etl->{run}->{dbbi}->{centreon}->{db}, "CREATE DATABASE `$etl->{run}->{dbbi}->{centreon}->{db}`" ], + ], + actions => [ + { type => 2, message => '[LOAD] import table [' . $tables . ']', command => $cmd } + ] + }; +} + +sub dataBin { + my ($etl, $etlProperties, $options, $periods) = @_; + + return if ($options->{ignore_databin} == 1 || $options->{centreon_only} == 1 || (defined($options->{bam_only}) && $options->{bam_only} == 1)); + + my $action = { type => 1, db => 'centstorage', sql => [], actions => [] }; + + my $drop = 0; + if ($options->{rebuild} == 1 && $options->{nopurge} == 0) { + push @{$action->{sql}}, [ '[CREATE] Deleting table [data_bin]', 'DROP TABLE IF EXISTS `data_bin`' ]; + $drop = 1; + } + + my $isExists = 0; + $isExists = 1 if ($biTables->tableExists('data_bin')); + + my $partitionsPerf = $utils->getRangePartitionDate($periods->{raw_perfdata}->{start}, $periods->{raw_perfdata}->{end}); + + if ($isExists == 0 || $drop == 1) { + $action->{create} = 1; + + my $structure = $monTables->dumpTableStructure('data_bin'); + $structure =~ s/KEY.*\(\`id_metric\`\)\,//g; + $structure =~ s/KEY.*\(\`id_metric\`\)//g; + $structure =~ s/\n.*PARTITION.*//g; + $structure =~ s/\,[\n\s]+\)/\)/; + $structure .= " PARTITION BY RANGE(`ctime`) ("; + + my $append = ''; + foreach (@$partitionsPerf) { + $structure .= $append . "PARTITION p" . $_->{name} . " VALUES LESS THAN (" . $_->{epoch} . 
")"; + $append = ','; + } + $structure .= ');'; + + push @{$action->{sql}}, + [ '[CREATE] Add table [data_bin]', $structure ], + [ '[INDEXING] Adding index [ctime] on table [data_bin]', "ALTER TABLE `data_bin` ADD INDEX `idx_data_bin_ctime` (`ctime`)" ], + [ '[INDEXING] Adding index [id_metric_id, ctime] on table [data_bin]', "ALTER TABLE `data_bin` ADD INDEX `idx_data_bin_idmetric_ctime` (`id_metric`,`ctime`)" ]; + } + + if ($isExists == 1 && $drop == 0) { + my $start = $biTables->getLastPartRange('data_bin'); + my $partitions = $utils->getRangePartitionDate($start, $periods->{raw_perfdata}->{end}); + foreach (@$partitions) { + push @{$action->{sql}}, + [ '[PARTITIONS] Add partition [' . $_->{name} . '] on table [data_bin]', "ALTER TABLE `data_bin` ADD PARTITION (PARTITION `p$_->{name}` VALUES LESS THAN($_->{epoch}))"]; + } + } + + if ($etl->{run}->{options}->{create_tables} == 0 && ($etlProperties->{'statistics.type'} eq 'all' || $etlProperties->{'statistics.type'} eq 'perfdata')) { + my $epoch = $utils->getDateEpoch($periods->{raw_perfdata}->{start}); + + my $overCond = 'ctime >= ' . $epoch . ' AND '; + foreach (@$partitionsPerf) { + my $cmd = sprintf( + "mysqldump --insert-ignore --single-transaction --no-create-info --skip-add-drop-table --skip-disable-keys --skip-add-locks --skip-comments %s --databases '%s' --tables %s --where=\"%s\" | mysql --init-command='SET SESSION unique_checks=0' %s '%s'", + $argsMon, + $etl->{run}->{dbmon}->{centstorage}->{db}, + 'data_bin', + $overCond . 'ctime < ' . $_->{epoch}, + $argsBi, + $etl->{run}->{dbbi}->{centstorage}->{db} + ); + $overCond = 'ctime >= ' . $_->{epoch} . ' AND '; + push @{$action->{actions}}, { type => 2, message => '[LOAD] partition [' . $_->{name} . '] on table [data_bin]', command => $cmd }; + } + + #my $file = $etlProperties->{'reporting.storage.directory'} . 
'/data_bin.sql'; + #push @{$action->{actions}}, { + # type => 3, + # message => '[LOAD] table [data_bin]', + # table => 'data_bin', + # db => 'centstorage', + # dump => $cmd, + # file => $file, + # load => "LOAD DATA LOCAL INFILE '" . $file . "' INTO TABLE `data_bin` CHARACTER SET UTF8 IGNORE 1 LINES" + #}; + } + + push @{$etl->{run}->{schedule}->{import}->{actions}}, $action; +} + +sub selectTables { + my ($etl, $etlProperties, $options) = @_; + + my @notTimedTables = (); + my %timedTables = (); + + my @ctime = ('ctime', 'ctime'); + my @startEnd = ('date_start', 'date_end'); + my @timeId = ('time_id', 'time_id'); + my $importComment = $etlProperties->{'import.comments'}; + my $importDowntimes = $etlProperties->{'import.downtimes'}; + + if (!defined($etlProperties->{'statistics.type'})) { + die 'cannot determine statistics type or compatibility mode for data integration'; + } + + if (!defined($options->{databin_only}) || $options->{databin_only} == 0) { + if (!defined($options->{bam_only}) || $options->{bam_only} == 0) { + if ($etlProperties->{'statistics.type'} eq 'all') { + push @notTimedTables, 'index_data'; + push @notTimedTables, 'metrics'; + push @notTimedTables, 'hoststateevents'; + push @notTimedTables, 'servicestateevents'; + push @notTimedTables, 'instances'; + push @notTimedTables, 'hosts'; + + if ($importComment eq 'true'){ + push @notTimedTables, 'comments'; + } + if ($importDowntimes eq 'true'){ + push @notTimedTables, 'downtimes'; + } + + push @notTimedTables, 'acknowledgements'; + } + if ($etlProperties->{'statistics.type'} eq 'availability') { + push @notTimedTables, 'hoststateevents'; + push @notTimedTables, 'servicestateevents'; + push @notTimedTables, 'instances'; + push @notTimedTables, 'hosts'; + if ($importComment eq 'true'){ + push @notTimedTables, 'comments'; + } + push @notTimedTables, 'acknowledgements'; + } + if ($etlProperties->{'statistics.type'} eq "perfdata") { + push @notTimedTables, 'index_data'; + push @notTimedTables, 'metrics'; 
+ push @notTimedTables, 'instances'; + push @notTimedTables, 'hosts'; + push @notTimedTables, 'acknowledgements'; + + } + } + + my $sth = $etl->{run}->{dbmon_centreon_con}->query({ query => "SELECT id FROM modules_informations WHERE name='centreon-bam-server'" }); + if (my $row = $sth->fetchrow_array() && $etlProperties->{'statistics.type'} ne 'perfdata') { + push @notTimedTables, "mod_bam_reporting_ba_availabilities"; + push @notTimedTables, "mod_bam_reporting_ba"; + push @notTimedTables, "mod_bam_reporting_ba_events"; + push @notTimedTables, "mod_bam_reporting_ba_events_durations"; + push @notTimedTables, "mod_bam_reporting_bv"; + push @notTimedTables, "mod_bam_reporting_kpi"; + push @notTimedTables, "mod_bam_reporting_kpi_events"; + push @notTimedTables, "mod_bam_reporting_relations_ba_bv"; + push @notTimedTables, "mod_bam_reporting_relations_ba_kpi_events"; + push @notTimedTables, "mod_bam_reporting_timeperiods"; + } + } + + return (\@notTimedTables, \%timedTables); +} + +sub prepare { + my ($etl) = @_; + + initVars($etl); + + # define data extraction period based on program options --start & --end or on data retention period + my %periods; + if ($etl->{run}->{options}->{rebuild} == 1 || $etl->{run}->{options}->{create_tables}) { + if ($etl->{run}->{options}->{start} eq '' && $etl->{run}->{options}->{end} eq '') { + # get max values for retention by type of statistics in order to be able to rebuild hourly and daily stats + my ($start, $end) = $etl->{etlProp}->getMaxRetentionPeriodFor('perfdata'); + + $periods{raw_perfdata} = { start => $start, end => $end }; + ($start, $end) = $etl->{etlProp}->getMaxRetentionPeriodFor('availability'); + $periods{raw_availabilitydata} = { start => $start, end => $end}; + } elsif ($etl->{run}->{options}->{start} ne '' && $etl->{run}->{options}->{end} ne '') { + # set period defined manually + my %dates = (start => $etl->{run}->{options}->{start}, end => $etl->{run}->{options}->{end}); + $periods{raw_perfdata} = \%dates; + 
$periods{raw_availabilitydata} = \%dates; + } + } else { + # set yesterday start and end dates as period (--daily) + my %dates; + ($dates{start}, $dates{end}) = $utils->getYesterdayTodayDate(); + $periods{raw_perfdata} = \%dates; + $periods{raw_availabilitydata} = \%dates; + } + + # identify the Centreon Storage DB tables to extract based on ETL properties + my ($notTimedTables, $timedTables) = selectTables( + $etl, + $etl->{run}->{etlProperties}, + $etl->{run}->{options} + ); + + dataBin( + $etl, + $etl->{run}->{etlProperties}, + $etl->{run}->{options}, + \%periods + ); + + # create non existing tables + createTables($etl, \%periods, $etl->{run}->{options}, $notTimedTables); + + # If we only need to create empty tables, create them then exit program + return if ($etl->{run}->{options}->{create_tables} == 1); + + # extract raw availability and perfdata from monitoring server and insert it into reporting server + if ($etl->{run}->{options}->{centreon_only} == 0) { + extractData($etl, $etl->{run}->{options}, $notTimedTables); + } + + # extract Centreon configuration DB from monitoring server and insert it into reporting server + if ((!defined($etl->{run}->{options}->{databin_only}) || $etl->{run}->{options}->{databin_only} == 0) + && (!defined($etl->{run}->{options}->{bam_only}) || $etl->{run}->{options}->{bam_only} == 0)) { + extractCentreonDB($etl, $etl->{run}->{etlProperties}); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/perfdata/main.pm b/gorgone/gorgone/modules/centreon/mbi/etl/perfdata/main.pm new file mode 100644 index 00000000000..352ef950c9d --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/perfdata/main.pm @@ -0,0 +1,449 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etl::perfdata::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::constants qw(:all); + +my ($biTables, $utils, $liveService, $time); + +sub initVars { + my ($etl) = @_; + + $biTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etl->{run}->{messages}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); +} + +sub emptyTableForRebuild { + my ($etl, %options) = @_; + + my $sql = [ [ '[CREATE] Deleting table [' . $options{name} . ']', 'DROP TABLE IF EXISTS `' . $options{name} . 
'`' ] ]; + + my $structure = $biTables->dumpTableStructure($options{name}); + $structure =~ s/KEY.*\(\`$options{column}\`\)\,//g; + $structure =~ s/KEY.*\(\`$options{column}\`\)//g; + $structure =~ s/\,[\n\s+]+\)/\n\)/g; + + if (defined($options{start})) { + $structure =~ s/\n.*PARTITION.*//g; + $structure =~ s/\,[\n\s]+\)/\)/; + $structure .= ' PARTITION BY RANGE(`' . $options{column} . '`) ('; + + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + + my $append = ''; + foreach (@$partitionsPerf) { + $structure .= $append . "PARTITION p" . $_->{name} . " VALUES LESS THAN (" . $_->{epoch} . ")"; + $append = ','; + } + $structure .= ');'; + } + + push @$sql, + [ '[CREATE] Add table [' . $options{name} . ']', $structure ], + [ "[INDEXING] Adding index [idx_$options{name}_$options{column}] on table [$options{name}]", "ALTER TABLE `$options{name}` ADD INDEX `idx_$options{name}_$options{column}` (`$options{column}`)" ]; + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub deleteEntriesForRebuild { + my ($etl, %options) = @_; + + my $sql = []; + if (!$biTables->isTablePartitioned($options{name})) { + push @$sql, + [ + "[PURGE] Delete table [$options{name}] from $options{start} to $options{end}", + "DELETE FROM $options{name} WHERE time_id >= " . $utils->getDateEpoch($options{start}) . " AND time_id < " . $utils->getDateEpoch($options{end}) + ]; + } else { + my $structure = $biTables->dumpTableStructure($options{name}); + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + foreach (@$partitionsPerf) { + if ($structure =~ /p$_->{name}/m) { + push @$sql, + [ + "[PURGE] Truncate partition $_->{name} on table [$options{name}]", + "ALTER TABLE $options{name} TRUNCATE PARTITION p$_->{name}" + ]; + } else { + push @$sql, + [ + '[PARTITIONS] Add partition [p' . $_->{name} . '] on table [' . $options{name} . 
']', + "ALTER TABLE `$options{name}` ADD PARTITION (PARTITION `p$_->{name}` VALUES LESS THAN(" . $_->{epoch} . "))" + ]; + } + } + } + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub purgeTables { + my ($etl, $periods) = @_; + + my ($daily_start, $daily_end) = ($periods->{'perfdata.daily'}->{'start'}, $periods->{'perfdata.daily'}->{'end'}); + my ($hourly_start, $hourly_end) = ($periods->{'perfdata.hourly'}->{'start'}, $periods->{'perfdata.hourly'}->{'end'}); + + #To prevent from purging monthly data when the no-purge rebuild is made inside one month + my $firstDayOfMonth = $daily_start; + my $firstDayOfMonthEnd = $daily_end; + my $startAndEndSameMonth = 0; + $firstDayOfMonth =~ s/([1-2][0-9]{3})\-([0-1][0-9])\-[0-3][0-9]/$1\-$2\-01/; + $firstDayOfMonthEnd =~ s/([1-2][0-9]{3})\-([0-1][0-9])\-[0-3][0-9]/$1\-$2\-01/; + + if ($firstDayOfMonth eq $firstDayOfMonthEnd) { + $startAndEndSameMonth = 1; + } + + if ($etl->{run}->{options}->{nopurge} == 1) { + # deleting data that will be rewritten + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'hour' && (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0)) { + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metricdailyvalue', start => $daily_start, end => $daily_end); + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne "day" && (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0)) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metrichourlyvalue', start => $hourly_start, end => $hourly_end); + } + + #Deleting monthly data only if start and end are not in the same month + if (!$startAndEndSameMonth) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metricmonthcapacity', start => $firstDayOfMonth, end => $daily_end); + } + } + + if 
((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0)) { + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && $etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + deleteEntriesForRebuild($etl, name => 'mod_bi_metriccentiledailyvalue', start => $daily_start, end => $daily_end); + } + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + deleteEntriesForRebuild($etl, name => 'mod_bi_metriccentileweeklyvalue', start => $daily_start, end => $daily_end); + } + + if (defined($etl->{run}->{etlProperties}->{'centile.month'}) && $etl->{run}->{etlProperties}->{'centile.month'} eq '1' && !$startAndEndSameMonth) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metriccentilemonthlyvalue', start => $firstDayOfMonth, end => $daily_end); + } + } + } + } else { + # deleting and recreating tables, recreating partitions for daily and hourly tables + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne "hour" && (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0)) { + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + emptyTableForRebuild($etl, name => 'mod_bi_metricdailyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + + emptyTableForRebuild($etl, name => 'mod_bi_metricmonthcapacity', column => 'time_id'); + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0)) { + #Managing Daily Centile table + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && $etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + emptyTableForRebuild($etl, name => 'mod_bi_metriccentiledailyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + } + #Managing Weekly Centile table + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && 
$etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + emptyTableForRebuild($etl, name => 'mod_bi_metriccentileweeklyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + } + #Managing Monthly Centile table + if (defined($etl->{run}->{etlProperties}->{'centile.month'}) && $etl->{run}->{etlProperties}->{'centile.month'} eq '1') { + emptyTableForRebuild($etl, name => 'mod_bi_metriccentilemonthlyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + } + } + } + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne "day" && + (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) && + (!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + emptyTableForRebuild($etl, name => 'mod_bi_metrichourlyvalue', column => 'time_id', start => $hourly_start, end => $hourly_end); + } + } +} + +sub processDay { + my ($etl, $liveServices, $start, $end) = @_; + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} eq 'hour' || + (defined($etl->{run}->{options}->{month_only}) && $etl->{run}->{options}->{month_only} == 1)) { + return 1; + } + + my ($currentDayId, $currentDayUtime) = $time->getEntryID($start); + + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + while (my ($liveServiceName, $liveServiceId) = each (%$liveServices)) { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[1]}, { + type => 'perfdata_day', + liveserviceName => $liveServiceName, + liveserviceId => $liveServiceId, + start => $start, + end => $end + }; + } + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0)) { + if (defined($etl->{run}->{etlProperties}->{'centile.include.servicecategories'}) && $etl->{run}->{etlProperties}->{'centile.include.servicecategories'} ne '') { + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && 
$etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'centile_day', + start => $start, + end => $end + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + if ($utils->getDayOfWeek($end) eq $etl->{run}->{etlProperties}->{'centile.weekFirstDay'}) { + processWeek($etl, $end); + } + } + } + } +} + +sub processWeek { + my ($etl, $date) = @_; + + my $start = $utils->subtractDateDays($date, 7); + my $end = $utils->subtractDateDays($date, 1); + + $time->insertTimeEntriesForPeriod($start, $end); + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'centile_week', + start => $start, + end => $end + }; +} + +sub processMonth { + my ($etl, $liveServices, $date) = @_; + + my $start = $utils->subtractDateMonths($date, 1); + my $end = $utils->subtractDateDays($date, 1); + + $time->insertTimeEntriesForPeriod($start, $end); + + my ($previousMonthStartTimeId, $previousMonthStartUtime) = $time->getEntryID($start); + my ($previousMonthEndTimeId, $previousMonthEndUtime) = $time->getEntryID($end); + + if (!defined($etl->{run}->{etlProperties}->{'capacity.include.servicecategories'}) || $etl->{run}->{etlProperties}->{'capacity.include.servicecategories'} eq "" + || !defined($etl->{run}->{etlProperties}->{'capacity.include.liveservices'}) || $etl->{run}->{etlProperties}->{'capacity.include.liveservices'} eq "") { + $etl->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $etl->{run}->{token}, data => { messages => [ ['I', "[SCHEDULER][PERFDATA] Skipping month: [" . $start . "] to [" . $end . 
"]" ] ] }); + return ; + } + + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0) && + $etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'hour') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'perfdata_month', + start => $start, + end => $end + }; + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0) && + $etl->{run}->{etlProperties}->{'centile.month'} && $etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'hour') { + if (defined($etl->{run}->{etlProperties}->{'centile.include.servicecategories'}) && $etl->{run}->{etlProperties}->{'centile.include.servicecategories'} ne '') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'centile_month', + start => $start, + end => $end + }; + } + } +} + +sub processHours { + my ($etl, $start, $end) = @_; + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} eq 'day' || + (defined($etl->{run}->{options}->{month_only}) && $etl->{run}->{options}->{month_only} == 1) || + (defined($etl->{run}->{options}->{centile_only}) && $etl->{run}->{options}->{centile_only} == 1)) { + return 1; + } + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'perfdata_hour', + start => $start, + end => $end + }; +} + +sub processDayAndMonthAgregation { + my ($etl, $liveServices, $start, $end) = @_; + + processDay($etl, $liveServices, $start, $end); + my ($year, $mon, $day) = split ("-", $end); + if ($day == 1) { + processMonth($etl, $liveServices, $end); + } +} + +sub dailyProcessing { + my ($etl, $liveServices) = @_; + + # getting yesterday start and end date to process yesterday data + my ($start, $end) = $utils->getYesterdayTodayDate(); + # daily mod_bi_time table filling + $time->insertTimeEntriesForPeriod($start, $end); + + my ($epoch, $partName) = $utils->getDateEpoch($end); + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 
'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metricdailyvalue]', + "ALTER TABLE `mod_bi_metricdailyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'day') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metrichourlyvalue]', + "ALTER TABLE `mod_bi_metrichourlyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && $etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metriccentiledailyvalue]', + "ALTER TABLE `mod_bi_metriccentiledailyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metriccentileweeklyvalue]', + "ALTER TABLE `mod_bi_metriccentileweeklyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.month'}) && $etl->{run}->{etlProperties}->{'centile.month'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . 
'] on table [mod_bi_metriccentilemonthlyvalue]', + "ALTER TABLE `mod_bi_metriccentilemonthlyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + + # processing agregation by month. If the day is the first day of the month, also processing agregation by month + processDayAndMonthAgregation($etl, $liveServices, $start, $end); + + # processing agregation by hour + processHours($etl, $start, $end); +} + +sub rebuildProcessing { + my ($etl, $liveServices) = @_; + + # getting rebuild period by granularity of perfdata from data retention rules + my $periods = $etl->{etlProp}->getRetentionPeriods(); + + my ($start, $end); + if ($etl->{run}->{options}->{start} ne '' && $etl->{run}->{options}->{end} ne '') { + ($start, $end) = ($etl->{run}->{options}->{start}, $etl->{run}->{options}->{end}); + while (my ($key, $values) = each %$periods) { + $values->{start} = $etl->{run}->{options}->{start}; + $values->{end} = $etl->{run}->{options}->{end}; + } + } else { + # getting max perfdata retention period to fill mod_bi_time + ($start, $end) = $etl->{etlProp}->getMaxRetentionPeriodFor('perfdata'); + } + + # insert entries into table mod_bi_time + $time->insertTimeEntriesForPeriod($start, $end); + + purgeTables($etl, $periods); + + # rebuilding statistics by day and by month + ($start, $end) = ($periods->{'perfdata.daily'}->{start}, $periods->{'perfdata.daily'}->{end}); + + my $days = $utils->getRangePartitionDate($start, $end); + foreach (@$days) { + $end = $_->{date}; + processDayAndMonthAgregation($etl, $liveServices, $start, $end); + $start = $end; + } + + # rebuilding statistics by hour + ($start, $end) = ($periods->{'perfdata.hourly'}->{start}, $periods->{'perfdata.hourly'}->{'end'}); + + $days = $utils->getRangePartitionDate($start, $end); + foreach (@$days) { + $end = $_->{date}; + processHours($etl, $start, $end); + $start = $end; + } +} + +sub prepare { + my ($etl) = @_; + + initVars($etl); + + if 
(!defined($etl->{run}->{etlProperties}->{'statistics.type'}) || $etl->{run}->{etlProperties}->{'statistics.type'} eq "availability") { + $etl->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $etl->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] Performance statistics calculation disabled' ] ] }); + return ; + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0) && + defined($etl->{run}->{etlProperties}->{'centile.include.servicecategories'}) and $etl->{run}->{etlProperties}->{'centile.include.servicecategories'} eq '') { + $etl->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $etl->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] No service categories selected for centile calculation - centile agregation will not be calculated' ] ] }); + } + + my $liveServiceList = $liveService->getLiveServicesByNameForTpIds($etl->{run}->{etlProperties}->{'liveservices.perfdata'}); + + if ($etl->{run}->{options}->{daily} == 1) { + dailyProcessing($etl, $liveServiceList); + } elsif ($etl->{run}->{options}->{rebuild} == 1) { + rebuildProcessing($etl, $liveServiceList); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/class.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/class.pm new file mode 100644 index 00000000000..9ba89780dbc --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/class.pm @@ -0,0 +1,326 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::http::http; +use JSON::XS; +use Try::Tiny; +use gorgone::modules::centreon::mbi::etlworkers::import::main; +use gorgone::modules::centreon::mbi::etlworkers::dimensions::main; +use gorgone::modules::centreon::mbi::etlworkers::event::main; +use gorgone::modules::centreon::mbi::etlworkers::perfdata::main; +use gorgone::modules::centreon::mbi::libs::Messages; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{pool_id} = $options{pool_id}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[nodes] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub db_connections { + my ($self, %options) = 
@_; + + if (!defined($self->{dbmon_centstorage_con}) || $self->{dbmon_centstorage_con}->sameParams(%{$options{dbmon}->{centstorage}}) == 0) { + $self->{dbmon_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbmon}->{centstorage}} + ); + } + if (!defined($self->{dbbi_centstorage_con}) || $self->{dbbi_centstorage_con}->sameParams(%{$options{dbbi}->{centstorage}}) == 0) { + $self->{dbbi_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbbi}->{centstorage}} + ); + } + + if (!defined($self->{dbmon_centreon_con}) || $self->{dbmon_centreon_con}->sameParams(%{$options{dbmon}->{centreon}}) == 0) { + $self->{dbmon_centreon_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbmon}->{centreon}} + ); + } + if (!defined($self->{dbbi_centreon_con}) || $self->{dbbi_centreon_con}->sameParams(%{$options{dbbi}->{centreon}}) == 0) { + $self->{dbbi_centreon_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbbi}->{centreon}} + ); + } +} + +sub action_centreonmbietlworkersimport { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + if ($options{data}->{content}->{params}->{type} == 1) { + gorgone::modules::centreon::mbi::etlworkers::import::main::sql($self, params => $options{data}->{content}->{params}); + } elsif ($options{data}->{content}->{params}->{type} == 2) { + gorgone::modules::centreon::mbi::etlworkers::import::main::command($self, params => $options{data}->{content}->{params}); + 
} elsif ($options{data}->{content}->{params}->{type} == 3) { + gorgone::modules::centreon::mbi::etlworkers::import::main::load($self, params => $options{data}->{content}->{params}); + } + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub action_centreonmbietlworkersdimensions { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + + gorgone::modules::centreon::mbi::etlworkers::dimensions::main::execute( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + params => $options{data}->{content}->{params}, + etlProperties => $options{data}->{content}->{etlProperties}, + options => $options{data}->{content}->{options} + ); + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub action_centreonmbietlworkersevent { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + if ($options{data}->{content}->{params}->{type} eq 'sql') { + gorgone::modules::centreon::mbi::etlworkers::event::main::sql($self, params => $options{data}->{content}->{params}); + } elsif 
($options{data}->{content}->{params}->{type} eq 'events') { + gorgone::modules::centreon::mbi::etlworkers::event::main::events( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params}, + options => $options{data}->{content}->{options} + ); + } elsif ($options{data}->{content}->{params}->{type} =~ /^availability_/) { + gorgone::modules::centreon::mbi::etlworkers::event::main::availability( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params} + ); + } + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub action_centreonmbietlworkersperfdata { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + + if ($options{data}->{content}->{params}->{type} eq 'sql') { + gorgone::modules::centreon::mbi::etlworkers::perfdata::main::sql($self, params => $options{data}->{content}->{params}); + } elsif ($options{data}->{content}->{params}->{type} =~ /^perfdata_/) { + gorgone::modules::centreon::mbi::etlworkers::perfdata::main::perfdata( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params}, + options => 
$options{data}->{content}->{options}, + pool_id => $self->{pool_id} + ); + } elsif ($options{data}->{content}->{params}->{type} =~ /^centile_/) { + gorgone::modules::centreon::mbi::etlworkers::perfdata::main::centile( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params}, + pool_id => $self->{pool_id} + ); + } + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[" . $connector->{module_id} . "] $$ has quit"); + exit(0); + } + + $connector->event(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-' . $self->{module_id} . '-' . 
$self->{pool_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONMBIETLWORKERSREADY', + data => { + pool_id => $self->{pool_id} + } + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/dimensions/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/dimensions/main.pm new file mode 100644 index 00000000000..f0206a4c2fd --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/dimensions/main.pm @@ -0,0 +1,263 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etlworkers::dimensions::main; + +use strict; +use warnings; + +use IO::Socket::INET; + +use gorgone::modules::centreon::mbi::libs::centreon::Host; +use gorgone::modules::centreon::mbi::libs::centreon::HostGroup; +use gorgone::modules::centreon::mbi::libs::centreon::HostCategory; +use gorgone::modules::centreon::mbi::libs::centreon::ServiceCategory; +use gorgone::modules::centreon::mbi::libs::centreon::Service; +use gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; +use gorgone::modules::centreon::mbi::libs::bi::BIHost; +use gorgone::modules::centreon::mbi::libs::bi::BIHostGroup; +use gorgone::modules::centreon::mbi::libs::bi::BIHostCategory; +use gorgone::modules::centreon::mbi::libs::bi::BIServiceCategory; +use gorgone::modules::centreon::mbi::libs::bi::BIService; +use gorgone::modules::centreon::mbi::libs::bi::BIMetric; +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::DataQuality; + +my ($time, $liveService, $host, $service); +my ($hostBI, $biHost, $hostCentreon, $biService, $timePeriod, $biMetric); +my ($biHostgroup, $biServicecategory, $biHostcategory, $hostgroup, $servicecategory, $hostcategory, $biDataQuality); + +# Initialize objects for program +sub initVars { + my ($etlwk, %options) = @_; + + # instance of + $host = gorgone::modules::centreon::mbi::libs::centreon::Host->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $hostcategory = gorgone::modules::centreon::mbi::libs::centreon::HostCategory->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $servicecategory = gorgone::modules::centreon::mbi::libs::centreon::ServiceCategory->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $hostgroup = gorgone::modules::centreon::mbi::libs::centreon::HostGroup->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $service = 
gorgone::modules::centreon::mbi::libs::centreon::Service->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $timePeriod = gorgone::modules::centreon::mbi::libs::centreon::Timeperiod->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $biHost = gorgone::modules::centreon::mbi::libs::bi::BIHost->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biHostgroup = gorgone::modules::centreon::mbi::libs::bi::BIHostGroup->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biHostcategory = gorgone::modules::centreon::mbi::libs::bi::BIHostCategory->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biServicecategory = gorgone::modules::centreon::mbi::libs::bi::BIServiceCategory->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biService = gorgone::modules::centreon::mbi::libs::bi::BIService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biMetric = gorgone::modules::centreon::mbi::libs::bi::BIMetric->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biDataQuality = gorgone::modules::centreon::mbi::libs::bi::DataQuality->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); +} + +# temporary method to list liveservices for job configuration in Centreon +sub copyLiveServicesToMonitoringDB { + my ($etlwk, %options) = @_; + + return if ($etlwk->{dbmon_centstorage_con}->sameParams(%{$options{dbbi}->{centstorage}}) == 1); + + $etlwk->{dbmon_centstorage_con}->query({ query => "TRUNCATE TABLE mod_bi_liveservice" }); + my $sth = $etlwk->{dbbi_centstorage_con}->query({ query => "SELECT id, name, timeperiod_id FROM mod_bi_liveservice" }); + while (my $row = $sth->fetchrow_hashref()) { + my $insertQuery = "INSERT INTO mod_bi_liveservice (id, name, timeperiod_id) VALUES (". 
+ $row->{'id'} . ",'" . $row->{name} . "'," . $row->{timeperiod_id} . ")"; + $etlwk->{dbmon_centstorage_con}->query({ query => $insertQuery }); + } +} + +sub truncateDimensionTables { + my ($etlwk, %options) = @_; + + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biHostgroup->truncateTable(); + $biHostcategory->truncateTable(); + $biServicecategory->truncateTable(); + $biHost->truncateTable(); + $biService->truncateTable(); + $biMetric->truncateTable(); + $time->truncateTable(); + $liveService->truncateTable(); + } +} + +sub denormalizeDimensionsFromCentreon { + my ($etlwk, %options) = @_; + + #set etlProperties for all dimensions object to be able to use it when filtering on hg/hc/sc + $host->setEtlProperties($options{etlProperties}); + $hostcategory->setEtlProperties($options{etlProperties}); + $servicecategory->setEtlProperties($options{etlProperties}); + $hostgroup->setEtlProperties($options{etlProperties}); + $service->setEtlProperties($options{etlProperties}); + + $etlwk->{messages}->writeLog("INFO", "Getting host properties from Centreon database"); + my $rows = $host->getHostGroupAndCategories(); + $etlwk->{messages}->writeLog("INFO", "Updating host dimension in Centstorage"); + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biHost->insert($rows); + } else { + $biHost->update($rows, $options{etlProperties}->{'tmp.storage.memory'}); + } + + $etlwk->{messages}->writeLog("INFO", "Getting hostgroup properties from Centreon database"); + $rows = $hostgroup->getAllEntries(); + $etlwk->{messages}->writeLog("INFO", "Updating hostgroup dimension in Centstorage"); + $biHostgroup->insert($rows); + + $etlwk->{messages}->writeLog("INFO", "Getting hostcategories properties from Centreon database"); + $rows = $hostcategory->getAllEntries(); + $etlwk->{messages}->writeLog("INFO", "Updating hostcategories dimension in Centstorage"); + $biHostcategory->insert($rows); + + $etlwk->{messages}->writeLog("INFO", 
"Getting servicecategories properties from Centreon database"); + $rows = $servicecategory->getAllEntries(); + $etlwk->{messages}->writeLog("INFO", "Updating servicecategories dimension in Centstorage"); + $biServicecategory->insert($rows); + $etlwk->{messages}->writeLog("INFO", "Getting service properties from Centreon database"); + + my $hostRows = $biHost->getHostsInfo(); + my $serviceRows = $service->getServicesWithHostAndCategory($hostRows); + $etlwk->{messages}->writeLog("INFO", "Updating service dimension in Centstorage"); + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biService->insert($serviceRows); + } else { + $biService->update($serviceRows, $options{etlProperties}->{'tmp.storage.memory'}); + } + + if (!defined($options{etlProperties}->{'statistics.type'}) || $options{etlProperties}->{'statistics.type'} ne 'availability') { + $etlwk->{messages}->writeLog("INFO", "Updating metric dimension in Centstorage"); + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biMetric->insert(); + } else { + $biMetric->update($options{etlProperties}->{'tmp.storage.memory'}); + } + } + + # Getting live services to calculate reporting by time range + $etlwk->{messages}->writeLog("INFO", "Updating liveservice dimension in Centstorage"); + + my $timeperiods = $timePeriod->getPeriods($options{etlProperties}->{'liveservices.availability'}); + $liveService->insertList($timeperiods); + $timeperiods = $timePeriod->getPeriods($options{etlProperties}->{'liveservices.perfdata'}); + $liveService->insertList($timeperiods); + $timeperiods = $timePeriod->getCentilePeriods(); + $liveService->insertList($timeperiods); +} + +sub insertCentileParamToBIStorage{ + my ($etlwk, %options) = @_; + + my %result; + my $sth; + + #Insert potential missing time periods related to centile calculation in mod_bi_liveservices + $sth = $etlwk->{dbbi_centreon_con}->query({ query => "SELECT tp_id, tp_name FROM timeperiod WHERE tp_id IN (SELECT 
timeperiod_id FROM mod_bi_options_centiles)" });
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{tp_id}} = $row->{tp_name};
+    }
+
+    #If no time period is found in centile configuration, exit the function
+    if (scalar(keys %result) == 0) { # explicit empty-hash test ('%result eq 0' relied on scalar-context coercion)
+        $etlwk->{messages}->writeLog("INFO", "No configuration found for centile calculation");
+        return;
+    }
+    $etlwk->{messages}->writeLog("INFO", "Updating centile properties");
+
+    my $timeperiods = $timePeriod->getPeriods(\%result);
+    $liveService->insertList($timeperiods);
+
+    #In case of rebuild, delete all centile parameters
+    if ($options{options}->{rebuild} == 1){
+        $etlwk->{dbbi_centstorage_con}->query({ query => "TRUNCATE TABLE mod_bi_centiles" });
+    }
+    $sth = $etlwk->{dbbi_centreon_con}->query({ query => "select * from mod_bi_options_centiles" });
+    while (my $row = $sth->fetchrow_hashref()) {
+        my ($tpName,$liveServiceId) = $liveService->getLiveServicesByNameForTpId($row->{'timeperiod_id'});
+        my $insertQuery = "INSERT IGNORE INTO mod_bi_centiles (id, centile_param, liveservice_id,tp_name) VALUES (".$row->{'id'}.",'".$row->{'centile_param'}."',".$liveServiceId.",'".$tpName."')";
+        $etlwk->{dbbi_centstorage_con}->query({ query => $insertQuery });
+    }
+}
+
+sub copyCentileToMonitoringDB {
+    my ($etlwk, %options) = @_;
+
+    return if ($etlwk->{dbmon_centstorage_con}->sameParams(%{$options{dbbi}->{centstorage}}) == 1);
+
+    $etlwk->{dbmon_centstorage_con}->query({ query => "TRUNCATE TABLE mod_bi_centiles" });
+    my $sth = $etlwk->{dbbi_centstorage_con}->query({ query => "SELECT id, centile_param, liveservice_id, tp_name FROM mod_bi_centiles" });
+    while (my $row = $sth->fetchrow_hashref()) {
+        my $insertQuery = "INSERT INTO mod_bi_centiles (id, centile_param, liveservice_id,tp_name) VALUES (".
+            $row->{id} . ",'" . $row->{centile_param} . "'," . $row->{liveservice_id} . ",'" . $row->{tp_name} . 
"')"; + $etlwk->{dbmon_centstorage_con}->query({ query => $insertQuery }); + } +} + +sub startCbisAclSync{ + my ($etlwk, %options) = @_; + + # create a connecting socket + my $socket = new IO::Socket::INET( + PeerHost => 'localhost', + PeerPort => '1234', + Proto => 'tcp' + ); + + if (!$socket){ + $etlwk->{messages}->writeLog("WARNING", "Can't start ACL synchronization, make sure CBIS is started on port 1234"); + return 0; + } + #die "[ERROR] Cannot connect to CBIS on port 1234" unless $socket; + # XML ACL request + my $req = "\n". + "\n". + " \n". + " \n". + " \n". + "\n"; + $etlwk->{messages}->writeLog("INFO", "Send ACL synchronization signal to CBIS"); + my $size = $socket->send($req); + + # notify server that request has been sent + shutdown($socket, 1); + + # receive a response of up to 1024 characters from server + my $response = ""; + $socket->recv($response, 1024); + $socket->close(); +} + +sub execute { + my ($etlwk, %options) = @_; + + initVars($etlwk, %options); + + $biDataQuality->searchAndDeleteDuplicateEntries(); + if (!defined($options{options}->{centile}) || $options{options}->{centile} == 0) { + truncateDimensionTables($etlwk, %options); + denormalizeDimensionsFromCentreon($etlwk, %options); + copyLiveServicesToMonitoringDB($etlwk, %options); + } + + insertCentileParamToBIStorage($etlwk, %options); + copyCentileToMonitoringDB($etlwk, %options); + startCbisAclSync($etlwk, %options); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/event/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/event/main.pm new file mode 100644 index 00000000000..b83dd818a5b --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/event/main.pm @@ -0,0 +1,259 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::event::main; + +use strict; +use warnings; +use gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; +use gorgone::modules::centreon::mbi::libs::bi::HostAvailability; +use gorgone::modules::centreon::mbi::libs::bi::ServiceAvailability; +use gorgone::modules::centreon::mbi::libs::bi::HGMonthAvailability; +use gorgone::modules::centreon::mbi::libs::bi::HGServiceMonthAvailability; +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::bi::BIHostStateEvents; +use gorgone::modules::centreon::mbi::libs::bi::BIServiceStateEvents; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::centstorage::HostStateEvents; +use gorgone::modules::centreon::mbi::libs::centstorage::ServiceStateEvents; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; + +my ($utils, $time, $tablesManager, $timePeriod); +my ($hostAv, $serviceAv); +my ($hgAv, $hgServiceAv); +my ($biHostEvents, $biServiceEvents); +my ($hostEvents, $serviceEvents); +my ($liveService); + +sub initVars { + my ($etlwk, %options) = @_; + + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etlwk->{messages}); + $timePeriod = gorgone::modules::centreon::mbi::libs::centreon::Timeperiod->new($etlwk->{messages}, 
$etlwk->{dbbi_centreon_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $tablesManager = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biHostEvents = gorgone::modules::centreon::mbi::libs::bi::BIHostStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $timePeriod); + $biServiceEvents = gorgone::modules::centreon::mbi::libs::bi::BIServiceStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $timePeriod); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $hostEvents = gorgone::modules::centreon::mbi::libs::centstorage::HostStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $biHostEvents, $timePeriod); + $serviceEvents = gorgone::modules::centreon::mbi::libs::centstorage::ServiceStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $biServiceEvents, $timePeriod); + $hostAv = gorgone::modules::centreon::mbi::libs::bi::HostAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $serviceAv = gorgone::modules::centreon::mbi::libs::bi::ServiceAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $hgAv = gorgone::modules::centreon::mbi::libs::bi::HGMonthAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $hgServiceAv = gorgone::modules::centreon::mbi::libs::bi::HGServiceMonthAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); +} + +sub sql { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{sql})); + + foreach (@{$options{params}->{sql}}) { + $etlwk->{messages}->writeLog('INFO', $_->[0]); + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $_->[1] }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ 
query => $_->[1] });
+        }
+    }
+}
+
+sub processEventsHosts {
+    my ($etlwk, %options) = @_;
+
+    my $mode = 'daily';
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->emptyTableForRebuild($biHostEvents->getName(), $tablesManager->dumpTableStructure($biHostEvents->getName()), $biHostEvents->getTimeColumn());
+        $mode = 'rebuild';
+    } else {
+        $biHostEvents->deleteUnfinishedEvents();
+    }
+
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->dropIndexesFromReportingTable('mod_bi_hoststateevents');
+    }
+
+    #Agreggate events by TP and store them into a temporary table (mod_bi_hoststateevents_tmp)
+    $etlwk->{messages}->writeLog("INFO", "[HOST] Processing host events");
+    $hostEvents->agreggateEventsByTimePeriod(
+        $options{etlProperties}->{'liveservices.availability'},
+        $options{start},
+        $options{end},
+        $options{liveServices},
+        $mode
+    );
+
+    #Dump the result of aggregated data join to dimensions and load this to the final mod_bi_hoststateevents table
+    my $request = "INSERT INTO mod_bi_hoststateevents ";
+    $request .= " SELECT id, t1.modbiliveservice_id, t1.state, t1.start_time, t1.end_time, t1.duration, t1.sla_duration,";
+    $request .= " t1.ack_time, t1.last_update from mod_bi_hoststateevents_tmp t1";
+    $request .= " INNER JOIN mod_bi_tmp_today_hosts t2 on t1.host_id = t2.host_id";
+
+    $etlwk->{messages}->writeLog("INFO", "[HOST] Loading calculated events in reporting table");
+    $etlwk->{dbbi_centstorage_con}->query({ query => $request });
+
+    if ($options{options}->{rebuild} == 1) { # recreate the indexes dropped above for the rebuild (was 'rebuild == 1 && rebuild == 0': always false)
+        $etlwk->{messages}->writeLog("DEBUG", "[HOST] Creating index");
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_hoststateevents ADD INDEX `modbihost_id` (`modbihost_id`,`modbiliveservice_id`,`state`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_hoststateevents ADD INDEX `state` (`state`,`modbiliveservice_id`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_hoststateevents ADD INDEX `idx_mod_bi_hoststateevents_end_time` (`end_time`)' });
+    }
+}
+
+sub processEventsServices {
+    my ($etlwk, %options) = @_;
+
+    my $mode = 'daily';
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->emptyTableForRebuild($biServiceEvents->getName(), $tablesManager->dumpTableStructure($biServiceEvents->getName()), $biServiceEvents->getTimeColumn());
+        $mode = 'rebuild';
+    } else {
+        $biServiceEvents->deleteUnfinishedEvents();
+    }
+
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->dropIndexesFromReportingTable('mod_bi_servicestateevents');
+    }
+
+    #Agreggate events by TP and store them into a temporary table (mod_bi_hoststateevents_tmp)
+    $etlwk->{messages}->writeLog("INFO", "[SERVICE] Processing service events");
+    $serviceEvents->agreggateEventsByTimePeriod(
+        $options{etlProperties}->{'liveservices.availability'},
+        $options{start},
+        $options{end},
+        $options{liveServices},
+        $mode
+    );
+
+    #Dump the result of aggregated data join to dimensions and load this to the final mod_bi_hoststateevents table
+    my $request = "INSERT INTO mod_bi_servicestateevents ";
+    $request .= " SELECT id,t1.modbiliveservice_id,t1.state,t1.start_time,t1.end_time,t1.duration,t1.sla_duration,";
+    $request .= " t1.ack_time,t1.last_update FROM mod_bi_servicestateevents_tmp t1 INNER JOIN mod_bi_tmp_today_services t2 ";
+    $request .= " ON t1.host_id = t2.host_id AND t1.service_id = t2.service_id";
+
+    $etlwk->{messages}->writeLog("INFO", "[SERVICE] Loading calculated events in reporting table");
+    $etlwk->{dbbi_centstorage_con}->query({ query => $request });
+
+    if ($options{options}->{rebuild} == 1) { # recreate the indexes dropped above for the rebuild (was 'rebuild == 1 && rebuild == 0': always false)
+        $etlwk->{messages}->writeLog("DEBUG", "[SERVICE] Creating index");
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_servicestateevents ADD INDEX `modbiservice_id` (`modbiservice_id`,`modbiliveservice_id`,`state`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_servicestateevents ADD INDEX `state` (`state`,`modbiliveservice_id`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_servicestateevents ADD INDEX `idx_mod_bi_servicestateevents_end_time` (`end_time`)' });
+    }
+}
+
+sub events {
+    my ($etlwk, %options) = @_;
+
+    initVars($etlwk, %options);
+
+    my ($startTimeId, $startUtime) = $time->getEntryID($options{params}->{start});
+    my ($endTimeId, $endUtime) = $time->getEntryID($options{params}->{end});
+
+    my $liveServices = $liveService->getLiveServicesByTpId();
+
+    if (defined($options{params}->{hosts}) && $options{params}->{hosts} == 1) {
+        processEventsHosts($etlwk, start => $startUtime, end => $endUtime, liveServices => $liveServices, %options);
+    } elsif (defined($options{params}->{services}) && $options{params}->{services} == 1) {
+        processEventsServices($etlwk, start => $startUtime, end => $endUtime, liveServices => $liveServices, %options);
+    }
+}
+
+sub availabilityDayHosts {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing hosts day: $options{params}->{start} => $options{params}->{end} [$options{params}->{liveserviceName}]");
+    my $ranges = $timePeriod->getTimeRangesForDay($options{startWeekDay}, $options{params}->{liveserviceName}, $options{startUtime});
+    my $dayEvents = $biHostEvents->getDayEvents($options{startUtime}, $options{endUtime}, $options{params}->{liveserviceId}, $ranges);
+    $hostAv->insertStats($dayEvents, $options{startTimeId}, $options{params}->{liveserviceId});
+}
+
+sub availabilityDayServices {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing services day: $options{params}->{start} => $options{params}->{end} [$options{params}->{liveserviceName}]");
+    my $ranges = $timePeriod->getTimeRangesForDay($options{startWeekDay}, $options{params}->{liveserviceName}, $options{startUtime});
+    my $dayEvents = $biServiceEvents->getDayEvents($options{startUtime}, $options{endUtime}, $options{params}->{liveserviceId}, $ranges);
+    $serviceAv->insertStats($dayEvents, $options{startTimeId}, $options{params}->{liveserviceId});
+}
+
+sub availabilityMonthHosts {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing hosts month: $options{params}->{start} => $options{params}->{end}"); # log said 'services' but this sub aggregates hosts ($hostAv)
+    my $data = $hostAv->getHGMonthAvailability($options{params}->{start}, $options{params}->{end}, $biHostEvents);
+    $hgAv->insertStats($options{startTimeId}, $data);
+}
+
+sub availabilityMonthServices {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing services month: $options{params}->{start} => $options{params}->{end}"); # log said 'hosts' but this sub aggregates services ($serviceAv)
+    my $data = $serviceAv->getHGMonthAvailability_optimised($options{params}->{start}, $options{params}->{end}, $biServiceEvents);
+    $hgServiceAv->insertStats($options{startTimeId}, $data);
+}
+
+sub availability {
+    my ($etlwk, %options) = @_;
+
+    initVars($etlwk, %options);
+
+    my ($startTimeId, $startUtime) = $time->getEntryID($options{params}->{start});
+    my ($endTimeId, $endUtime) = $time->getEntryID($options{params}->{end});
+    my $startWeekDay = $utils->getDayOfWeek($options{params}->{start});
+
+    if ($options{params}->{type} eq 'availability_day_hosts') {
+        availabilityDayHosts(
+            $etlwk,
+            startTimeId => $startTimeId,
+            startUtime => $startUtime,
+            endTimeId => $endTimeId,
+            endUtime => $endUtime,
+            startWeekDay => $startWeekDay,
+            %options
+        );
+    } elsif ($options{params}->{type} eq 'availability_day_services') {
+        availabilityDayServices(
+            $etlwk,
+            startTimeId => $startTimeId,
+            startUtime => $startUtime,
+            endTimeId => $endTimeId,
+            endUtime => $endUtime,
+            startWeekDay => $startWeekDay,
+            %options
+        );
+    } elsif ($options{params}->{type} eq 
'availability_month_services') { + availabilityMonthServices( + $etlwk, + startTimeId => $startTimeId, + %options + ); + } elsif ($options{params}->{type} eq 'availability_month_hosts') { + availabilityMonthHosts( + $etlwk, + startTimeId => $startTimeId, + %options + ); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/hooks.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/hooks.pm new file mode 100644 index 00000000000..b286e7192a9 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/hooks.pm @@ -0,0 +1,236 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etlworkers::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::core; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::mbi::etlworkers::class; + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'mbi-etlworkers'; +use constant EVENTS => [ + { event => 'CENTREONMBIETLWORKERSIMPORT' }, + { event => 'CENTREONMBIETLWORKERSDIMENSIONS' }, + { event => 'CENTREONMBIETLWORKERSEVENT' }, + { event => 'CENTREONMBIETLWORKERSPERFDATA' }, + { event => 'CENTREONMBIETLWORKERSREADY' } +]; + +my $config_core; +my $config; + +my $pools = {}; +my $pools_pid = {}; +my $rr_current = 0; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + + $config->{pool} = defined($config->{pool}) && $config->{pool} =~ /(\d+)/ ? $1 : 8; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + for my $pool_id (1..$config->{pool}) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[" . NAME . "] Cannot decode json data: " . $options{frame}->getLastError()); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => NAME . ' - cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'CENTREONMBIETLWORKERSREADY') { + if (defined($data->{pool_id})) { + $pools->{ $data->{pool_id} }->{ready} = 1; + } + return undef; + } + + my $pool_id = rr_pool(); + if (!defined($pool_id)) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => NAME . 
' - no pool ready' }, + json_encode => 1 + }); + return undef; + } + + my $identity = 'gorgone-' . NAME . '-' . $pool_id; + + $options{gorgone}->send_internal_message( + identity => $identity, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $pool_id (keys %$pools) { + if (defined($pools->{$pool_id}->{running}) && $pools->{$pool_id}->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send TERM signal for pool '" . $pool_id . "'"); + CORE::kill('TERM', $pools->{$pool_id}->{pid}); + } + } +} + +sub kill { + my (%options) = @_; + + foreach (keys %$pools) { + if ($pools->{$_}->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send KILL signal for pool '" . $_ . "'"); + CORE::kill('KILL', $pools->{$_}->{pid}); + } + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check_create_child { + my (%options) = @_; + + return if ($stop == 1); + + # Check if we need to create a child + for my $pool_id (1..$config->{pool}) { + if (!defined($pools->{$pool_id})) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pools_pid->{$pid})); + + # If someone dead, we recreate + my $pool_id = $pools_pid->{$pid}; + delete $pools->{$pools_pid->{$pid}}; + delete $pools_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } + + check_create_child(dbh => $options{dbh}, logger => $options{logger}); + + foreach (keys %$pools) { + $count++ if ($pools->{$_}->{running} == 1); + } + + return ($count, 1); +} + +sub broadcast { + my (%options) = @_; + + foreach my $pool_id (keys %$pools) { + next if ($pools->{$pool_id}->{ready} != 
1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-' . NAME . '-' . $pool_id, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); + } +} + +# Specific functions +sub rr_pool { + my (%options) = @_; + + my ($loop, $i) = ($config->{pool}, 0); + while ($i <= $loop) { + $rr_current = $rr_current % $config->{pool}; + if ($pools->{$rr_current + 1}->{ready} == 1) { + $rr_current++; + return $rr_current; + } + $rr_current++; + $i++; + } + + return undef; +} + +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[" . NAME . "] Create module '" . NAME . "' child process for pool id '" . $options{pool_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-' . NAME; + my $module = gorgone::modules::centreon::mbi::etlworkers::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + pool_id => $options{pool_id}, + container_id => $options{pool_id} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[" . NAME . "] PID $child_pid (gorgone-" . NAME . ") for pool id '" . $options{pool_id} . "'"); + $pools->{$options{pool_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $pools_pid->{$child_pid} = $options{pool_id}; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/import/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/import/main.pm new file mode 100644 index 00000000000..2430aea7229 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/import/main.pm @@ -0,0 +1,86 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::modules::centreon::mbi::etlworkers::import::main;

use strict;
use warnings;
use gorgone::standard::misc;
use File::Basename;

# Run a list of [label, statement] SQL pairs against the requested database
# connection (params->{db} is 'centstorage' or 'centreon'); each label is
# logged at INFO before its statement executes.
sub sql {
    my ($etlwk, %options) = @_;

    return if (!defined($options{params}->{sql}));

    foreach (@{$options{params}->{sql}}) {
        $etlwk->{messages}->writeLog('INFO', $_->[0]);
        if ($options{params}->{db} eq 'centstorage') {
            $etlwk->{dbbi_centstorage_con}->query({ query => $_->[1] });
        } elsif ($options{params}->{db} eq 'centreon') {
            $etlwk->{dbbi_centreon_con}->query({ query => $_->[1] });
        }
    }
}

# Execute an external shell command (2 hour timeout, stderr merged into
# stdout). Dies on failure so the worker's surrounding eval records the step
# as failed; logs params->{message} at INFO on success.
sub command {
    my ($etlwk, %options) = @_;

    return if (!defined($options{params}->{command}) || $options{params}->{command} eq '');

    my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
        command => $options{params}->{command},
        timeout => 7200,
        wait_exit => 1,
        redirect_stderr => 1,
        logger => $options{logger}
    );

    if ($error != 0) {
        die $options{params}->{message} . ": execution failed: $stdout";
    }

    $etlwk->{messages}->writeLog('INFO', $options{params}->{message});
    $etlwk->{logger}->writeLogDebug("[mbi-etlworkers] succeeded command (code: $return_code): $stdout");
}

# Dump data with the external command in params->{dump}, LOAD the resulting
# file into the target database with params->{load}, then remove the file.
sub load {
    my ($etlwk, %options) = @_;

    return if (!defined($options{params}->{file}));

    my ($file, $dir) = File::Basename::fileparse($options{params}->{file});

    # FIX: was '&&', which only triggered when the directory did not exist
    # (!-w is always true for a missing path), so an existing but
    # non-writable directory slipped through to the dump command. Either a
    # missing OR a non-writable directory must abort the load; note that
    # writeLog('ERROR', ...) dies, aborting this step.
    if (! -d "$dir" || ! -w "$dir") {
        $etlwk->{messages}->writeLog('ERROR', "Cannot write into directory " .
$dir); + } + + command($etlwk, params => { command => $options{params}->{dump}, message => $options{params}->{message} }); + + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $options{params}->{load} }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ query => $options{params}->{load} }); + } + + unlink($options{params}->{file}); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/perfdata/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/perfdata/main.pm new file mode 100644 index 00000000000..ead6bbd9c61 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/perfdata/main.pm @@ -0,0 +1,190 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etlworkers::perfdata::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; +use gorgone::modules::centreon::mbi::libs::centreon::CentileProperties; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::modules::centreon::mbi::libs::centstorage::Metrics; +use gorgone::modules::centreon::mbi::libs::bi::MetricDailyValue; +use gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue; +use gorgone::modules::centreon::mbi::libs::bi::MetricCentileValue; +use gorgone::modules::centreon::mbi::libs::bi::MetricMonthCapacity; +use gorgone::standard::misc; + +my ($utils, $time, $timePeriod, $centileProperties, $liveService); +my ($metrics); +my ($dayAgregates, $hourAgregates, $centileAgregates, $metricMonthCapacity); + +sub initVars { + my ($etlwk, %options) = @_; + + $timePeriod = gorgone::modules::centreon::mbi::libs::centreon::Timeperiod->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $centileProperties = gorgone::modules::centreon::mbi::libs::centreon::CentileProperties->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etlwk->{messages}); + $metrics = gorgone::modules::centreon::mbi::libs::centstorage::Metrics->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $options{pool_id}); + $dayAgregates = gorgone::modules::centreon::mbi::libs::bi::MetricDailyValue->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $options{pool_id}); + $hourAgregates = 
gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $options{pool_id}); + $metricMonthCapacity = gorgone::modules::centreon::mbi::libs::bi::MetricMonthCapacity->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + + $centileAgregates = gorgone::modules::centreon::mbi::libs::bi::MetricCentileValue->new( + logger => $etlwk->{messages}, + centstorage => $etlwk->{dbbi_centstorage_con}, + centreon => $etlwk->{dbbi_centreon_con}, + time => $time, + centileProperties => $centileProperties, + timePeriod => $timePeriod, + liveService => $liveService + ); +} + +sub sql { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{sql})); + + foreach (@{$options{params}->{sql}}) { + $etlwk->{messages}->writeLog('INFO', $_->[0]); + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $_->[1] }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ query => $_->[1] }); + } + } +} + +sub perfdataDay { + my ($etlwk, %options) = @_; + + my ($currentDayId, $currentDayUtime) = $time->getEntryID($options{params}->{start}); + my $ranges = $timePeriod->getTimeRangesForDayByDateTime( + $options{params}->{liveserviceName}, + $options{params}->{start}, + $utils->getDayOfWeek($options{params}->{start}) + ); + if (scalar(@$ranges)) { + $etlwk->{messages}->writeLog("INFO", "[PERFDATA] Processing day: $options{params}->{start} => $options{params}->{end} [$options{params}->{liveserviceName}]"); + $metrics->getMetricsValueByDay($ranges, $options{etlProperties}->{'tmp.storage.memory'}); + $dayAgregates->insertValues($options{params}->{liveserviceId}, $currentDayId); + } +} + +sub perfdataMonth { + my ($etlwk, %options) = @_; + + my ($previousMonthStartTimeId, $previousMonthStartUtime) = $time->getEntryID($options{params}->{start}); + my ($previousMonthEndTimeId, $previousMonthEndUtime) = $time->getEntryID($options{params}->{end}); 
+ + $etlwk->{messages}->writeLog("INFO", "[PERFDATA] Processing month: $options{params}->{start} => $options{params}->{end}"); + my $data = $dayAgregates->getMetricCapacityValuesOnPeriod($previousMonthStartTimeId, $previousMonthEndTimeId, $options{etlProperties}); + $metricMonthCapacity->insertStats($previousMonthStartTimeId, $data); +} + +sub perfdataHour { + my ($etlwk, %options) = @_; + + $etlwk->{messages}->writeLog("INFO", "[PERFDATA] Processing hours: $options{params}->{start} => $options{params}->{end}"); + + $metrics->getMetricValueByHour($options{params}->{start}, $options{params}->{end}, $options{etlProperties}->{'tmp.storage.memory'}); + $hourAgregates->insertValues(); +} + +sub perfdata { + my ($etlwk, %options) = @_; + + initVars($etlwk, %options); + + if ($options{params}->{type} eq 'perfdata_day') { + perfdataDay($etlwk, %options); + } elsif ($options{params}->{type} eq 'perfdata_month') { + perfdataMonth($etlwk, %options); + } elsif ($options{params}->{type} eq 'perfdata_hour') { + perfdataHour($etlwk, %options); + } +} + +sub centileDay { + my ($etlwk, %options) = @_; + + my ($currentDayId) = $time->getEntryID($options{params}->{start}); + + my $metricsId = $centileAgregates->getMetricsCentile(etlProperties => $options{etlProperties}); + $centileAgregates->calcMetricsCentileValueMultipleDays( + metricsId => $metricsId, + start => $options{params}->{start}, + end => $options{params}->{end}, + granularity => 'day', + timeId => $currentDayId + ); +} + +sub centileMonth { + my ($etlwk, %options) = @_; + + my ($previousMonthStartTimeId) = $time->getEntryID($options{params}->{start}); + + my $metricsId = $centileAgregates->getMetricsCentile(etlProperties => $options{etlProperties}); + $centileAgregates->calcMetricsCentileValueMultipleDays( + metricsId => $metricsId, + start => $options{params}->{start}, + end => $options{params}->{end}, + granularity => 'month', + timeId => $previousMonthStartTimeId + ); +} + +sub centileWeek { + my ($etlwk, %options) = 
@_; + + my ($currentDayId) = $time->getEntryID($options{params}->{start}); + + my $metricsId = $centileAgregates->getMetricsCentile(etlProperties => $options{etlProperties}); + $centileAgregates->calcMetricsCentileValueMultipleDays( + metricsId => $metricsId, + start => $options{params}->{start}, + end => $options{params}->{end}, + granularity => 'week', + timeId => $currentDayId + ); +} + +sub centile { + my ($etlwk, %options) = @_; + + initVars($etlwk, %options); + + if ($options{params}->{type} eq 'centile_day') { + centileDay($etlwk, %options); + } elsif ($options{params}->{type} eq 'centile_month') { + centileMonth($etlwk, %options); + } elsif ($options{params}->{type} eq 'centile_week') { + centileWeek($etlwk, %options); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/Messages.pm b/gorgone/gorgone/modules/centreon/mbi/libs/Messages.pm new file mode 100644 index 00000000000..52fa032ae4c --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/Messages.pm @@ -0,0 +1,55 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::Messages;

# In-memory log accumulator used by the MBI ETL workers: messages are stacked
# as [severity_code, text] pairs and later retrieved with getLogs().
sub new {
    my $class = shift;
    my $self = {};

    # accumulated [code, message] pairs, oldest first
    $self->{messages} = [];

    bless $self, $class;
    return $self;
}

# Record a message with the given severity ('debug', 'info', 'warning',
# 'error', 'fatal' — case-insensitive).
# WARNING: for 'error' and 'fatal' this DIES with the message unless $nodie
# is passed as a true value, so callers are expected to wrap ETL steps in
# eval and collect the message from $@.
# NOTE(review): 'warning' is mapped to the 'I' (info) code and a 'W' code is
# never produced — looks intentional but worth confirming against consumers
# of getLogs().
sub writeLog {
    my ($self, $severity, $message, $nodie) = @_;

    $severity = lc($severity);

    my %severities = ('debug' => 'D', 'info' => 'I', 'warning' => 'I', 'error' => 'E', 'fatal' => 'F');
    if ($severities{$severity} eq 'E' || $severities{$severity} eq 'F') {
        die $message if (!defined($nodie) || $nodie == 0);
    }

    push @{$self->{messages}}, [$severities{$severity}, $message];
}

# Return the accumulated [code, message] pairs (a reference to the live
# internal array, not a copy).
sub getLogs {
    my ($self) = @_;

    return $self->{messages};
}

1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/Utils.pm b/gorgone/gorgone/modules/centreon/mbi/libs/Utils.pm
new file mode 100644
index 00000000000..7a313819a60
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/Utils.pm
@@ -0,0 +1,252 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

use strict;
use warnings;
use POSIX;
use Time::Local;
use Tie::File;
use DateTime;

package gorgone::modules::centreon::mbi::libs::Utils;

# Misc date/CLI helpers shared by the MBI ETL modules.
sub new {
    my $class = shift;
    my $self = {};
    bless $self, $class;

    $self->{logger} = shift;
    # resolved local timezone name, reused by every DateTime construction below
    $self->{tz} = DateTime::TimeZone->new(name => 'local')->name();
    return $self;
}

# Validate the CLI execution options. Returns 1 on error (after logging a
# message through the logger), 0 when the option set is coherent.
sub checkBasicOptions {
    my ($self, $options) = @_;

    # check execution mode daily to extract yesterday data or rebuild to get more historical data
    if (($options->{daily} == 0 && $options->{rebuild} == 0 && (!defined($options->{create_tables}) || $options->{create_tables} == 0) && (!defined($options->{centile}) || $options->{centile} == 0))
        || ($options->{daily} == 1 && $options->{rebuild} == 1)) {
        $self->{logger}->writeLogError("Specify one execution method. Check program help for more informations");
        return 1;
    }

    # check if options are set correctly for rebuild mode
    # FIX: added parentheses around the start/end "exactly one is set" test.
    # Perl's '&&' binds tighter than '||', so the original expression parsed
    # as ((rebuild-mode && start-without-end) || end-without-start) and this
    # rebuild-specific error also fired in daily mode whenever only --end was
    # given.
    if (($options->{rebuild} == 1 || (defined($options->{create_tables}) && $options->{create_tables} == 1))
        && (($options->{start} ne '' && $options->{end} eq '')
        || ($options->{start} eq '' && $options->{end} ne ''))) {
        $self->{logger}->writeLogError("Specify both options --start and --end or neither of them to use default data retention options");
        return 1;
    }
    # check start and end dates format
    if ($options->{rebuild} == 1 && $options->{start} ne '' && $options->{end} ne ''
        && !$self->checkDateFormat($options->{start}, $options->{end})) {
        $self->{logger}->writeLogError("Verify period start or end date format");
        return 1;
    }

    return 0;
}

# Build the mysql CLI connection arguments (-u/-p/-h/-P) from a connection
# profile hashref with user/password/host/port keys.
sub buildCliMysqlArgs {
    my ($self, $con) = @_;
    my $password = $con->{password};
    # as we will use a bash command we need to use single quote to protect against every characters, and escape single quote)
    # FIX: added the /g modifier — without it only the FIRST single quote of
    # the password was rewritten, producing a broken shell command line for
    # any password containing more than one quote.
    $password =~ s/'/'"'"'/g;
    my $args = "-u'" . $con->{user} . "' " .
        "-p'" . $password . "' " .
        "-h '" . $con->{host} . "' " .
        "-P " .
$con->{port}; + return $args; +} +sub getYesterdayTodayDate { + my ($self) = @_; + + my $dt = DateTime->from_epoch( + epoch => time(), + time_zone => $self->{tz} + ); + + my $month = $dt->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt->day(); + $day = '0' . $day if ($day < 10); + my $today = $dt->year() . '-' . $month . '-' . $day; + + $dt->subtract(days => 1); + $month = $dt->month(); + $month = '0' . $month if ($month < 10); + $day = $dt->day(); + $day = '0' . $day if ($day < 10); + my $yesterday = $dt->year() . '-' . $month . '-' . $day; + + return ($yesterday, $today); +} + +sub subtractDateMonths { + my ($self, $date, $num) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + my $dt = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->subtract(months => $num); + + my $month = $dt->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt->day(); + $day = '0' . $day if ($day < 10); + return $dt->year() . '-' . $month . '-' . $day; +} + +sub subtractDateDays { + my ($self, $date, $num) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + my $dt = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->subtract(days => $num); + + my $month = $dt->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt->day(); + $day = '0' . $day if ($day < 10); + return $dt->year() . '-' . $month . '-' . 
$day; +} + +sub getDayOfWeek { + my ($self, $date) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + return lc(DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->day_name()); +} + +sub getDateEpoch { + my ($self, $date) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + my $epoch = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->epoch(); + $date =~ s/-//g; + + return wantarray ? ($epoch, $date) : $epoch; +} + +sub getRangePartitionDate { + my ($self, $start, $end) = @_; + + if ($start !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify period start format"); + } + my $dt1 = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz}); + + if ($end !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify period end format"); + } + my $dt2 = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz}); + + my $epoch = $dt1->epoch(); + my $epoch_end = $dt2->epoch(); + if ($epoch_end <= $epoch) { + $self->{logger}->writeLog('ERROR', "Period end date is older"); + } + + my $partitions = []; + while ($epoch < $epoch_end) { + $dt1->add(days => 1); + + $epoch = $dt1->epoch(); + my $month = $dt1->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt1->day(); + $day = '0' . $day if ($day < 10); + + push @$partitions, { + name => $dt1->year() . $month . $day, + date => $dt1->year() . '-' . $month . '-' . 
$day, + epoch => $epoch + }; + } + + return $partitions; +} + +sub checkDateFormat { + my ($self, $start, $end) = @_; + + if (defined($start) && $start =~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/ + && defined($end) && $end =~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/) { + return 1; + } + return 0; +} + +sub getRebuildPeriods { + my ($self, $start, $end) = @_; + + my ($day,$month,$year) = (localtime($start))[3,4,5]; + $start = POSIX::mktime(0,0,0,$day,$month,$year,0,0,-1); + my $previousDay = POSIX::mktime(0,0,0,$day - 1,$month,$year,0,0,-1); + my @days = (); + while ($start < $end) { + # if there is few hour gap (time change : winter/summer), we also readjust it + if ($start == $previousDay) { + $start = POSIX::mktime(0,0,0, ++$day, $month, $year,0,0,-1); + } + my $dayEnd = POSIX::mktime(0, 0, 0, ++$day, $month, $year, 0, 0, -1); + + my %period = ("start" => $start, "end" => $dayEnd); + $days[scalar(@days)] = \%period; + $previousDay = $start; + $start = $dayEnd; + } + return (\@days); +} + +#parseFlatFile (file, key,value) : replace a line with a key by a value (entire line) to the specified file +sub parseAndReplaceFlatFile{ + my $self = shift; + my $file = shift; + my $key = shift; + my $value = shift; + + if (!-e $file) { + $self->{logger}->writeLog('ERROR', "File missing [".$file."]. 
Make sure you installed all the pre-requisites before executing this script"); + } + + tie my @flatfile, 'Tie::File', $file or die $!; + + foreach my $line(@flatfile) + { + if( $line =~ m/$key/ ) { + my $previousLine = $line; + $line =~ s/$key/$value/g; + $self->{logger}->writeLog('DEBUG', "[".$file."]"); + $self->{logger}->writeLog('DEBUG', "Replacing [".$previousLine."] by [".$value."]"); + } + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHost.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHost.pm new file mode 100644 index 00000000000..5ba2c40e667 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHost.pm @@ -0,0 +1,233 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::BIHost;

# Maintains the BI host dimension: the final table mod_bi_hosts plus the
# temporary tables used to compare, merge and expose "today's" host /
# hostgroup / hostcategory combinations.

# Constructor
# parameters:
# $logger: instance of class CentreonLogger
# $centstorage: instance of centreonDB class for connection to Centstorage database
# $centreon: (optional) instance of centreonDB class for connection to Centreon database
sub new {
    my $class = shift;
    my $self = {};
    $self->{"logger"} = shift;
    $self->{"centstorage"} = shift;
    if (@_) {
        $self->{"centreon"} = shift;
    }
    # Final dimension table and its temporary working copies.
    $self->{"today_table"} = "mod_bi_tmp_today_hosts";
    $self->{"tmp_comp"} = "mod_bi_tmp_hosts";
    $self->{"tmp_comp_storage"} = "mod_bi_tmp_hosts_storage";
    $self->{"table"} = "mod_bi_hosts";
    bless $self, $class;
    return $self;
}

# Reads the "today" table and returns a hash ref keyed by host_id; each value
# is an array ref of "host_id;host_name;hg_id;hg_name;hc_id;hc_name" strings,
# one per hostgroup/hostcategory combination of that host.
sub getHostsInfo {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "SELECT `id`, `host_id`, `host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`";
    $query .= " FROM `".$self->{"today_table"}."`";
    my $sth = $db->query({ query => $query });
    my %result = ();
    while (my $row = $sth->fetchrow_hashref()) {
        if (defined($result{$row->{'host_id'}})) {
            # Host already seen: append this hg/hc combination to its list.
            my $tab_ref = $result{$row->{'host_id'}};
            my @tab = @$tab_ref;
            push @tab , $row->{"host_id"}.";".$row->{"host_name"}.";".
                $row->{"hg_id"}.";".$row->{"hg_name"}.";".
                $row->{"hc_id"}.";".$row->{"hc_name"};
            $result{$row->{'host_id'}} = \@tab;
        }else {
            my @tab = ($row->{"host_id"}.";".$row->{"host_name"}.";".
                $row->{"hg_id"}.";".$row->{"hg_name"}.";".
                $row->{"hc_id"}.";".$row->{"hc_name"});
            $result{$row->{'host_id'}} = \@tab;
        }
    }
    $sth->finish();
    return (\%result);
}

# Initial load: inserts $data (array ref of ";"-separated tuples) into the
# final table, then recreates the "today" table and fills it with a full
# copy of the final table.
sub insert {
    my $self = shift;
    my $data = shift;
    my $db = $self->{"centstorage"};
    $self->insertIntoTable("".$self->{"table"}."", $data);
    $self->createTempTodayTable("false");
    my $fields = "id, host_name, host_id, hc_id, hc_name, hg_id, hg_name";
    my $query = "INSERT INTO ".$self->{"today_table"}." (".$fields.")";
    $query .= " SELECT ".$fields." FROM ".$self->{"table"}." ";
    $db->query({ query => $query });
}

# Incremental update: loads $data into a comparison table, joins it against
# the final table to detect already-known entries, inserts the new ones and
# rebuilds the "today" table. $useMemory ("true"/"false") selects the MEMORY
# engine for the temporary tables.
sub update {
    my ($self, $data, $useMemory) = @_;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    $self->createTempComparisonTable($useMemory);
    $self->insertIntoTable($self->{"tmp_comp"}, $data);
    $self->createTempStorageTable($useMemory);
    $self->joinNewAndCurrentEntries();
    $self->insertNewEntries();
    $db->query({ query => "DROP TABLE `".$self->{"tmp_comp_storage"}."`" });
    $self->createTempTodayTable("false");
    $self->insertTodayEntries();
    $db->query({ query => "DROP TABLE `".$self->{"tmp_comp"}."`" });
}

# Bulk-inserts $data (array ref of "host_id;host_name;hg_id;hg_name;hc_id;hc_name"
# strings) into $table, committing in batches of ~1000 rows. DB errors are
# reported through the logger at FATAL level.
sub insertIntoTable {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my $table = shift;
    my $data = shift;
    my $query = "INSERT INTO `".$table."`".
        " (`host_id`, `host_name`, `hg_id`, `hg_name`, `hc_id`, `hc_name`)".
        " VALUES (?,?,?,?,?,?)";
    my $sth = $db->prepare($query);
    my $inst = $db->getInstance;
    $inst->begin_work;
    my $counter = 0;

    foreach (@$data) {
        my ($host_id, $host_name, $hg_id, $hg_name, $hc_id, $hc_name) = split(";", $_);
        $sth->bind_param(1, $host_id);
        $sth->bind_param(2, $host_name);
        $sth->bind_param(3, $hg_id);
        $sth->bind_param(4, $hg_name);
        $sth->bind_param(5, $hc_id);
        $sth->bind_param(6, $hc_name);
        $sth->execute;
        if (defined($inst->errstr)) {
            $logger->writeLog("FATAL", $self->{"table"}." insertion execute error : ".$inst->errstr);
        }
        # Commit in batches to keep transactions bounded.
        if ($counter >= 1000) {
            $counter = 0;
            $inst->commit;
            if (defined($inst->errstr)) {
                $logger->writeLog("FATAL", $self->{"table"}." insertion commit error : ".$inst->errstr);
            }
            $inst->begin_work;
        }
        $counter++;
    }
    $inst->commit;
}

# Drops and recreates the comparison table. MEMORY engine when $useMemory is
# the string "true", InnoDB otherwise.
sub createTempComparisonTable {
    my ($self, $useMemory) = @_;
    my $db = $self->{"centstorage"};
    $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{"tmp_comp"} . "`" });
    my $query = "CREATE TABLE `".$self->{"tmp_comp"}."` (";
    $query .= "`host_id` int(11) NOT NULL,`host_name` varchar(255) NOT NULL,";
    $query .= "`hc_id` int(11) DEFAULT NULL, `hc_name` varchar(255) NOT NULL,";
    $query .= "`hg_id` int(11) DEFAULT NULL, `hg_name` varchar(255) NOT NULL";
    if (defined($useMemory) && $useMemory eq "true") {
        $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }else {
        $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }
    $db->query({ query => $query });
}

# Drops and recreates the storage table that pairs comparison entries with
# the id they already have in the final table (0 when unknown).
sub createTempStorageTable {
    my ($self,$useMemory) = @_;
    my $db = $self->{"centstorage"};

    $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{"tmp_comp_storage"} . "`" });
    my $query = "CREATE TABLE `".$self->{"tmp_comp_storage"}."` (";
    $query .= "`id` INT NOT NULL,";
    $query .= "`host_id` int(11) NOT NULL,`host_name` varchar(255) NOT NULL,";
    $query .= "`hc_id` int(11) DEFAULT NULL, `hc_name` varchar(255) NOT NULL,";
    $query .= "`hg_id` int(11) DEFAULT NULL, `hg_name` varchar(255) NOT NULL,";
    $query .= " KEY `id` (`id`)";
    if (defined($useMemory) && $useMemory eq "true") {
        $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }else {
        $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }
    $db->query({ query => $query });
}

# Drops and recreates the "today" table.
# NOTE(review): the index is named `id` but is built on host_id — looks
# intentional (lookups are by host_id) but worth confirming.
sub createTempTodayTable {
    my ($self,$useMemory) = @_;
    my $db = $self->{"centstorage"};

    $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"today_table"}."`" });
    my $query = "CREATE TABLE `".$self->{"today_table"}."` (";
    $query .= "`id` INT NOT NULL,";
    $query .= "`host_id` int(11) NOT NULL,`host_name` varchar(255) NOT NULL,";
    $query .= "`hc_id` int(11) DEFAULT NULL, `hc_name` varchar(255) NOT NULL,";
    $query .= "`hg_id` int(11) DEFAULT NULL, `hg_name` varchar(255) NOT NULL,";
    $query .= " KEY `id` (`host_id`)";
    if (defined($useMemory) && $useMemory eq "true") {
        $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }else {
        $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }
    $db->query({ query => $query });
}

# Fills the storage table: each comparison entry with its existing
# final-table id, or 0 (IFNULL) when the entry is not in the final table yet.
sub joinNewAndCurrentEntries {
    my ($self) = @_;
    my $db = $self->{"centstorage"};

    my $query = "INSERT INTO ".$self->{"tmp_comp_storage"}. " (id, host_name, host_id, hc_id, hc_name, hg_id, hg_name)";
    $query .= " SELECT IFNULL(h.id, 0), t.host_name, t.host_id, t.hc_id, t.hc_name, t.hg_id, t.hg_name FROM ".$self->{"tmp_comp"}." t";
    $query .= " LEFT JOIN ".$self->{"table"}." h USING (host_name, host_id, hc_id, hc_name, hg_id, hg_name)";
    $db->query({ query => $query });
}

# Copies into the final table the storage-table rows flagged as new (id = 0).
sub insertNewEntries {
    my ($self) = @_;
    my $db = $self->{"centstorage"};
    my $fields = "host_name, host_id, hc_id, hc_name, hg_id, hg_name";
    my $query = " INSERT INTO `".$self->{"table"}."` (".$fields.") ";
    $query .= " SELECT ".$fields." FROM ".$self->{"tmp_comp_storage"};
    $query .= " WHERE id = 0";
    $db->query({ query => $query });
}

# Fills the "today" table with the comparison entries joined to their
# final-table ids.
sub insertTodayEntries {
    my ($self) = @_;
    my $db = $self->{"centstorage"};
    my $fields = "host_name, host_id, hc_id, hc_name, hg_id, hg_name";
    my $query = "INSERT INTO ".$self->{"today_table"}." (id, host_name, host_id, hc_id, hc_name, hg_id, hg_name)";
    $query .= " SELECT h.id, t.host_name, t.host_id, t.hc_id, t.hc_name, t.hg_id, t.hg_name FROM ".$self->{"tmp_comp"}." t";
    $query .= " JOIN ".$self->{"table"}." h USING (host_name, host_id, hc_id, hc_name, hg_id, hg_name)";
    $db->query({ query => $query });
}

# Empties the final table and resets its auto-increment counter.
sub truncateTable {
    my $self = shift;
    my $db = $self->{"centstorage"};

    $db->query({ query => "TRUNCATE TABLE `".$self->{"table"}."`" });
    $db->query({ query => "ALTER TABLE `".$self->{"table"}."` AUTO_INCREMENT=1" });
}

1;
#

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::BIHostCategory;

# Maintains the mod_bi_hostcategories dimension table (hc_id/hc_name pairs).

# Constructor
# parameters:
# $logger: instance of class CentreonLogger
# $centstorage: instance of centreonDB class for connection to Centstorage database
# $centreon: (optional) instance of centreonDB class for connection to Centreon database
sub new {
    my $class = shift;
    my $self = {};
    $self->{"logger"} = shift;
    $self->{"centstorage"} = shift;
    if (@_) {
        $self->{"centreon"} = shift;
    }
    bless $self, $class;
    return $self;
}

# Returns an array ref of "hc_id;hc_name" strings, one per known category.
sub getAllEntries {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "SELECT `hc_id`, `hc_name`";
    $query .= " FROM `mod_bi_hostcategories`";
    my $sth = $db->query({ query => $query });
    my @entries = ();
    while (my $row = $sth->fetchrow_hashref()) {
        push @entries, $row->{"hc_id"}.";".$row->{"hc_name"};
    }
    $sth->finish();
    return (\@entries);
}

# Returns a hash ref mapping "hc_id;hc_name" to the row id in
# mod_bi_hostcategories.
sub getEntryIds {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "SELECT `id`, `hc_id`, `hc_name`";
    $query .= " FROM `mod_bi_hostcategories`";
    my $sth = $db->query({ query => $query });
    my %entries = ();
    while (my $row = $sth->fetchrow_hashref()) {
        $entries{$row->{"hc_id"}.";".$row->{"hc_name"}} = $row->{"id"};
    }
    $sth->finish();
    return (\%entries);
}

# Linear scan: returns 1 when $value is present in the $entries array ref,
# 0 otherwise.
sub entryExists {
    my $self = shift;
    my ($value, $entries) = (shift, shift);
    foreach(@$entries) {
        if ($value eq $_) {
            return 1;
        }
    }
    return 0;
}

# Inserts the "hc_id;hc_name" tuples from $data that are not already present,
# committing in batches of ~1000 rows; DB errors are logged as FATAL.
sub insert {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my $data = shift;
    my $query = "INSERT INTO `mod_bi_hostcategories`".
        " (`hc_id`, `hc_name`)".
        " VALUES (?,?)";
    my $sth = $db->prepare($query);
    my $inst = $db->getInstance;
    $inst->begin_work;
    my $counter = 0;

    my $existingEntries = $self->getAllEntries;
    foreach (@$data) {
        if (!$self->entryExists($_, $existingEntries)) {
            my ($hc_id, $hc_name) = split(";", $_);
            $sth->bind_param(1, $hc_id);
            $sth->bind_param(2, $hc_name);
            $sth->execute;
            if (defined($inst->errstr)) {
                $logger->writeLog("FATAL", "hostcategories insertion execute error : ".$inst->errstr);
            }
            # Commit in batches to keep transactions bounded.
            if ($counter >= 1000) {
                $counter = 0;
                $inst->commit;
                if (defined($inst->errstr)) {
                    $logger->writeLog("FATAL", "hostcategories insertion commit error : ".$inst->errstr);
                }
                $inst->begin_work;
            }
            $counter++;
        }
    }
    $inst->commit;
}

# Empties the table and resets its auto-increment counter.
sub truncateTable {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "TRUNCATE TABLE `mod_bi_hostcategories`";
    $db->query({ query => $query });
    $db->query({ query => "ALTER TABLE `mod_bi_hostcategories` AUTO_INCREMENT=1" });
}

1;
# See the License for the specific language governing permissions and
# limitations under the License.
#

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::BIHostGroup;

# Maintains the mod_bi_hostgroups dimension table (hg_id/hg_name pairs).

# Constructor
# parameters:
# $logger: instance of class CentreonLogger
# $centstorage: instance of centreonDB class for connection to Centstorage database
# $centreon: (optional) instance of centreonDB class for connection to Centreon database
sub new {
    my $class = shift;
    my $self = {};
    $self->{"logger"} = shift;
    $self->{"centstorage"} = shift;
    if (@_) {
        $self->{"centreon"} = shift;
    }
    bless $self, $class;
    return $self;
}


# Returns an array ref of "hg_id;hg_name" strings, one per known hostgroup.
sub getAllEntries {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "SELECT `id`, `hg_id`, `hg_name`";
    $query .= " FROM `mod_bi_hostgroups`";
    my $sth = $db->query({ query => $query });
    my @entries = ();
    while (my $row = $sth->fetchrow_hashref()) {
        push @entries, $row->{"hg_id"}.";".$row->{"hg_name"};
    }
    $sth->finish();
    return (\@entries);
}

# Returns a hash ref mapping "hg_id;hg_name" to the row id in
# mod_bi_hostgroups.
sub getEntryIds {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "SELECT `id`, `hg_id`, `hg_name`";
    $query .= " FROM `mod_bi_hostgroups`";
    my $sth = $db->query({ query => $query });
    my %entries = ();
    while (my $row = $sth->fetchrow_hashref()) {
        $entries{$row->{"hg_id"}.";".$row->{"hg_name"}} = $row->{"id"};
    }
    $sth->finish();
    return (\%entries);
}

# Linear scan: returns 1 when $value is present in the $entries array ref,
# 0 otherwise.
sub entryExists {
    my $self = shift;
    my ($value, $entries) = (shift, shift);
    foreach(@$entries) {
        if ($value eq $_) {
            return 1;
        }
    }
    return 0;
}

# Inserts the "hg_id;hg_name" tuples from $data that are not already present,
# committing in batches of ~1000 rows; DB errors are logged as FATAL.
sub insert {
    my $self = shift;

    my $db = $self->{centstorage};
    my $logger = $self->{logger};
    my $data = shift;
    my $query = "INSERT INTO `mod_bi_hostgroups`".
        " (`hg_id`, `hg_name`)".
        " VALUES (?,?)";
    my $sth = $db->prepare($query);
    my $inst = $db->getInstance();
    $inst->begin_work();
    my $counter = 0;

    my $existingEntries = $self->getAllEntries();
    foreach (@$data) {
        if (!$self->entryExists($_, $existingEntries)) {
            my ($hg_id, $hg_name) = split(";", $_);
            $sth->bind_param(1, $hg_id);
            $sth->bind_param(2, $hg_name);
            $sth->execute;
            if (defined($inst->errstr)) {
                $logger->writeLog("FATAL", "hostgroups insertion execute error : ".$inst->errstr);
            }
            # Commit in batches to keep transactions bounded.
            if ($counter >= 1000) {
                $counter = 0;
                $inst->commit;
                if (defined($inst->errstr)) {
                    $logger->writeLog("FATAL", "hostgroups insertion commit error : ".$inst->errstr);
                }
                $inst->begin_work;
            }
            $counter++;
        }
    }
    $inst->commit();
}

# Empties the table and resets its auto-increment counter.
sub truncateTable {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "TRUNCATE TABLE `mod_bi_hostgroups`";
    $db->query({ query => $query });
    $db->query({ query => "ALTER TABLE `mod_bi_hostgroups` AUTO_INCREMENT=1" });
}

1;
# See the License for the specific language governing permissions and
# limitations under the License.
#

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::BIHostStateEvents;

# Handles the mod_bi_hoststateevents table (host availability events) and its
# temporary loading table: batched inserts and per-day statistics extraction.

# Constructor
# parameters:
# $logger: instance of class CentreonLogger
# $centstorage: instance of centreonDB class for connection to Centstorage database
# $timeperiod: time-period helper object (stored for later use)
sub new {
    my $class = shift;
    my $self = {};
    $self->{"logger"} = shift;
    $self->{"centstorage"} = shift;
    $self->{'timeperiod'} = shift;
    # Batch-insert state shared by prepare*Query() and bindParam().
    $self->{'bind_counter'} = 0;
    $self->{'statement'} = undef;
    $self->{'name'} = "mod_bi_hoststateevents";
    $self->{'tmp_name'} = "mod_bi_hoststateevents_tmp";
    $self->{'timeColumn'} = "end_time";
    bless $self, $class;
    return $self;
}

# Accessor: name of the final events table.
sub getName() {
    my $self = shift;
    return $self->{'name'};
}

# Accessor: column used for time-based selection/purge.
sub getTimeColumn() {
    my $self = shift;
    return $self->{'timeColumn'};
}

# Drops and recreates the temporary host-state-events loading table.
sub createTempBIEventsTable{
    my ($self) = @_;
    my $db = $self->{"centstorage"};
    $db->query({ query => "DROP TABLE IF EXISTS `mod_bi_hoststateevents_tmp`" });
    my $createTable = " CREATE TABLE `mod_bi_hoststateevents_tmp` (";
    $createTable .= " `host_id` int(11) NOT NULL,";
    $createTable .= " `modbiliveservice_id` tinyint(4) NOT NULL,";
    $createTable .= " `state` tinyint(4) NOT NULL,";
    $createTable .= " `start_time` int(11) NOT NULL,";
    $createTable .= " `end_time` int(11) DEFAULT NULL,";
    $createTable .= " `duration` int(11) NOT NULL,";
    $createTable .= " `sla_duration` int(11) NOT NULL,";
    $createTable .= " `ack_time` int(11) DEFAULT NULL,";
    $createTable .= " `last_update` tinyint(4) NOT NULL DEFAULT '0',";
    $createTable .= " KEY `modbihost_id` (`host_id`)";
    $createTable .= " ) ENGINE=InnoDB DEFAULT CHARSET=utf8";
    $db->query({ query => $createTable });
}

# Prepares the batched INSERT statement for the temporary table and opens the
# first transaction. The duration column is computed server-side from the two
# extra unix-timestamp placeholders (see bindParam).
sub prepareTempQuery {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "INSERT INTO `".$self->{'tmp_name'}."`".
        " (`host_id`, `modbiliveservice_id`,".
        " `state`, `start_time`, `sla_duration`,".
        " `end_time`, `ack_time`, `last_update`, `duration`) ".
        " VALUES (?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))";
    $self->{'statement'} = $db->prepare($query);
    $self->{'dbinstance'} = $db->getInstance;
    ($self->{'dbinstance'})->begin_work;
}

# Same as prepareTempQuery() but targets the final table, whose host column
# is named modbihost_id.
sub prepareQuery {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "INSERT INTO `".$self->{'name'}."`".
        " (`modbihost_id`, `modbiliveservice_id`,".
        " `state`, `start_time`, `sla_duration`,".
        " `end_time`, `ack_time`, `last_update`, `duration`) ".
        " VALUES (?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))";
    $self->{'statement'} = $db->prepare($query);
    $self->{'dbinstance'} = $db->getInstance;
    ($self->{'dbinstance'})->begin_work;
}

# Binds one event row (array ref) to the prepared statement and executes it,
# committing in batches of ~1000 rows. Must be called after prepareQuery()
# or prepareTempQuery(). DB errors are logged as FATAL.
sub bindParam {
    my ($self, $row) = @_;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    my $size = scalar(@$row);
    my $sth = $self->{'statement'};
    for (my $i = 0; $i < $size; $i++) {
        $sth->bind_param($i + 1, $row->[$i]);
    }
    # Extra placeholders feed TIMESTAMPDIFF(): row indexes 3 and 5 are the
    # start_time and end_time unix timestamps.
    $sth->bind_param($size+1, $row->[3]);
    $sth->bind_param($size+2, $row->[5]);
    ($self->{'statement'})->execute;
    if (defined(($self->{'dbinstance'})->errstr)) {
        $logger->writeLog("FATAL", $self->{'name'}." insertion execute error : ".($self->{'dbinstance'})->errstr);
    }
    if ($self->{'bind_counter'} >= 1000) {
        $self->{'bind_counter'} = 0;
        ($self->{'dbinstance'})->commit;
        if (defined(($self->{'dbinstance'})->errstr)) {
            $logger->writeLog("FATAL", $self->{'name'}." insertion commit error : ".($self->{'dbinstance'})->errstr);
        }
        ($self->{'dbinstance'})->begin_work;
    }
    $self->{'bind_counter'} += 1;

}

# Computes per-host availability statistics for the events overlapping
# [$start, $end) for one liveservice, clipped to the given time $ranges
# (array ref of [rangeStart, rangeEnd] pairs).
# Returns a hash ref keyed by modbihost_id; see the stats layout below.
# NOTE(review): $start/$end/$liveserviceId are interpolated directly into
# SQL — assumed to be trusted integers supplied by the caller; confirm.
sub getDayEvents {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $timeperiod = $self->{'timeperiod'};
    my ($start, $end, $liveserviceId, $ranges) = @_;
    my %results = ();

    my $query = "SELECT start_time, end_time, state, modbihost_id";
    $query .= " FROM `" . $self->{name} . "`";
    $query .= " WHERE `start_time` < ".$end."";
    $query .= " AND `end_time` > ".$start."";
    $query .= " AND `state` in (0,1,2)";
    $query .= " AND modbiliveservice_id = ".$liveserviceId;
    my $sth = $db->query({ query => $query });

    # For each event of the current day, compute that day's statistics.
    # Rows are fetched in chunks of 10 000 to bound memory usage.
    my $rows = [];
    while (my $row = (
        shift(@$rows) ||
        shift(@{$rows = $sth->fetchall_arrayref(undef,10_000) || []}) )
    ) {
        my $entryID = $row->[3];

        my ($started, $ended) = (0, 0);
        my $rangeSize = scalar(@$ranges);
        my $eventDuration = 0;
        # Clip the event to each range and accumulate the overlapping time;
        # $started/$ended flag whether the event opens/closes inside the day.
        for(my $count = 0; $count < $rangeSize; $count++) {
            my $currentStart = $row->[0];
            my $currentEnd = $row->[1];

            my $range = $ranges->[$count];
            my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]);
            if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) {
                if ($currentStart < $rangeStart) {
                    $currentStart = $rangeStart;
                }elsif ($count == 0) {
                    $started = 1;
                }
                if ($currentEnd > $rangeEnd) {
                    $currentEnd = $rangeEnd;
                }elsif ($count == $rangeSize - 1) {
                    $ended = 1;
                }
                $eventDuration += $currentEnd - $currentStart;
            }
        }
        if (!defined($results{$entryID})) {
            my @tab = (0, 0, 0, 0, 0, 0, 0);

            #New version - sync with tables in database
            # 0: UP, 1: DOWN time, 2: Unreachable time , 3 : DOWN alerts opened
            # 4: Down time alerts closed, 5: unreachable alerts started, 6 : unreachable alerts ended
            $results{$entryID} = \@tab;
        }

        my $stats = $results{$entryID};
        my $state = $row->[2];

        if ($state == 0) {
            $stats->[0] += $eventDuration;
        }elsif ($state == 1) {
            $stats->[1] += $eventDuration;
            $stats->[3] += $started;
            $stats->[4] += $ended;
        }elsif ($state == 2) {
            $stats->[2] += $eventDuration;
            $stats->[5] += $started;
            $stats->[6] += $ended;
        }

        $results{$entryID} = $stats;
    }

    return (\%results);
}

# Deprecated
# Counts DOWN (state 1) and UNREACHABLE (state 2) events for a
# hostgroup/hostcategory/liveservice within [$start, $end).
sub getNbEvents {
    my ($self, $start, $end, $groupId, $catId, $liveServiceID) = @_;
    my $db = $self->{"centstorage"};

    my $query = "SELECT count(state) as nbEvents, state";
    $query .= " FROM mod_bi_hosts h, ".$self->{'name'}." e";
    $query .= " WHERE h.hg_id = ".$groupId." AND h.hc_id=".$catId;
    $query .= " AND h.id = e.modbihost_id";
    $query .= " AND e.modbiliveservice_id=".$liveServiceID;
    $query .= " AND start_time < UNIX_TIMESTAMP('".$end."')";
    $query .= " AND end_time > UNIX_TIMESTAMP('".$start."')";
    $query .= " AND state in (1,2)";
    $query .= " GROUP BY state";
    my $sth = $db->query({ query => $query });

    my ($downEvents, $unrEvents) = (undef, undef);
    while (my $row = $sth->fetchrow_hashref()) {
        if ($row->{'state'} == 1) {
            $downEvents = $row->{'nbEvents'};
        }else {
            $unrEvents = $row->{'nbEvents'};
        }
    }
    return ($downEvents, $unrEvents);
}

# Removes events still open (no end_time) or flagged as partial (last_update = 1).
sub deleteUnfinishedEvents {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "DELETE FROM `mod_bi_hoststateevents`";
    $query .= " WHERE last_update = 1 OR end_time is null";
    $db->query({ query => $query });
}

1;
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::BIMetric;

# Maintains the BI service-metric dimension (mod_bi_servicemetrics) and its
# temporary companions; new entries are detected with a CRC32 comparison
# table instead of a wide multi-column join.

# Constructor
# parameters:
# $logger: instance of class CentreonLogger
# $centstorage: instance of centreonDB class for connection to Centstorage database
# $centreon: (optional) instance of centreonDB class for connection to Centreon database
sub new {
    my $class = shift;
    my $self = {};

    $self->{logger} = shift;
    $self->{centstorage} = shift;
    if (@_) {
        $self->{centreon} = shift;
    }
    # Final dimension table and its temporary working copies.
    $self->{today_table} = "mod_bi_tmp_today_servicemetrics";
    $self->{tmpTable} = "mod_bi_tmp_servicemetrics";
    $self->{CRC32} = "mod_bi_tmp_servicemetrics_crc32";
    $self->{table} = "mod_bi_servicemetrics";

    bless $self, $class;
    return $self;
}

# Initial load: fills the final table from today's services/metrics, then
# recreates the "today" table as a projection of the final table.
sub insert {
    my $self = shift;
    my $db = $self->{centstorage};

    $self->insertMetricsIntoTable("mod_bi_servicemetrics");
    $self->createTodayTable("false");
    my $query = "INSERT INTO ".$self->{today_table}. " (id, metric_id, metric_name, sc_id,hg_id,hc_id)";
    $query .= " SELECT id, metric_id, metric_name,sc_id,hg_id,hc_id FROM " . $self->{table} . " ";
    $db->query({ query => $query });
}

# Incremental update: loads current metrics into a temporary table, detects
# and inserts the new ones via the CRC32 table, then rebuilds the "today"
# table. $useMemory ("true"/"false") selects the MEMORY engine.
sub update {
    my ($self,$useMemory) = @_;

    my $db = $self->{centstorage};

    $self->createTempTable($useMemory);
    $self->insertMetricsIntoTable($self->{tmpTable});
    $self->createCRC32Table();
    $self->insertNewEntries();
    # CRC32 table is rebuilt so it reflects the rows just inserted.
    $self->createCRC32Table();
    $self->createTodayTable("false");
    $self->insertTodayEntries();
    $db->query({ query => "DROP TABLE `".$self->{"tmpTable"}."`" });
    $db->query({ query => "DROP TABLE `".$self->{"CRC32"}."`" });
}

# Fills $table with one row per distinct hostgroup/hostcategory/
# servicecategory/metric combination, joining today's services against
# metrics and index_data. Returns the statement handle.
sub insertMetricsIntoTable {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $table = shift;
    my $query = "INSERT INTO `".$table."` (`metric_id`, `metric_name`, `metric_unit`, `service_id`, `service_description`,";
    $query .= " `sc_id`, `sc_name`, `host_id`, `host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`)";
    $query .= " SELECT `metric_id`, `metric_name`, `unit_name`, s.`service_id`, s.`service_description`, ";
    $query .= " s.`sc_id`, s.`sc_name`, s.`host_id`, s.`host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`";
    $query .= " FROM `mod_bi_tmp_today_services` s, `metrics` m, `index_data` i";
    $query .= " WHERE i.id = m.index_id and i.host_id=s.host_id and i.service_id=s.service_id";
    $query .= " group by s.hg_id, s.hc_id, s.sc_id, m.index_id, m.metric_id";
    my $sth = $db->query({ query => $query });
    return $sth;
}

# Drops and recreates the temporary metrics table. MEMORY engine when
# $useMemory is the string "true", InnoDB otherwise.
sub createTempTable {
    my ($self, $useMemory) = @_;

    my $db = $self->{"centstorage"};
    $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"tmpTable"}."`" });
    my $query = "CREATE TABLE `".$self->{"tmpTable"}."` (";
    $query .= "`metric_id` int(11) NOT NULL,`metric_name` varchar(255) NOT NULL,`metric_unit` char(32) DEFAULT NULL,";
    $query .= "`service_id` int(11) NOT NULL,`service_description` varchar(255) DEFAULT NULL,";
    $query .= "`sc_id` int(11) DEFAULT NULL,`sc_name` varchar(255) DEFAULT NULL,";
    $query .= "`host_id` int(11) DEFAULT NULL,`host_name` varchar(255) DEFAULT NULL,";
    $query .= "`hc_id` int(11) DEFAULT NULL,`hc_name` varchar(255) DEFAULT NULL,";
    $query .= "`hg_id` int(11) DEFAULT NULL,`hg_name` varchar(255) DEFAULT NULL";
    if (defined($useMemory) && $useMemory eq "true") {
        $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }else {
        $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }
    $db->query({ query => $query });
}

# Drops and recreates the CRC32 table: one row per final-table entry with a
# CRC32 checksum of all its identifying columns, indexed for fast matching.
sub createCRC32Table {
    my ($self) = @_;
    my $db = $self->{"centstorage"};

    $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"CRC32"}."`" });
    my $query = "CREATE TABLE `".$self->{"CRC32"}."` CHARSET=utf8 COLLATE=utf8_general_ci";
    $query .= " SELECT `id`, CRC32(CONCAT_WS('-', COALESCE(metric_id, '?'),";
    $query .= " COALESCE(service_id, '?'),COALESCE(service_description, '?'),";
    $query .= " COALESCE(host_id, '?'),COALESCE(host_name, '?'), COALESCE(sc_id, '?'),COALESCE(sc_name, '?'),";
    $query .= " COALESCE(hc_id, '?'),COALESCE(hc_name, '?'), COALESCE(hg_id, '?'),COALESCE(hg_name, '?'))) as mycrc";
    $query .= " FROM ".$self->{"table"};
    $db->query({ query => $query });
    $query = "ALTER TABLE `".$self->{"CRC32"}."` ADD INDEX (`mycrc`)";
    $db->query({ query => $query });
}

# Inserts into the final table the temporary-table rows that have no match:
# the CRC32 pre-filter narrows candidates, the full column comparison
# confirms; unmatched rows (finalTable.id IS NULL) are new.
sub insertNewEntries {
    my ($self) = @_;
    my $db = $self->{"centstorage"};
    my $fields = "metric_id, metric_name, metric_unit, service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name";
    my $tmpTableFields = "tmpTable.metric_id, tmpTable.metric_name,tmpTable.metric_unit,";
    $tmpTableFields .= " tmpTable.service_id, tmpTable.service_description, tmpTable.host_name, tmpTable.host_id, tmpTable.sc_id,";
    $tmpTableFields .= "tmpTable.sc_name, tmpTable.hc_id, tmpTable.hc_name, tmpTable.hg_id, tmpTable.hg_name";
    my $query = " INSERT INTO `".$self->{"table"}."` (".$fields.") ";
    $query .= " SELECT ".$tmpTableFields." FROM ".$self->{"tmpTable"}." as tmpTable";
    $query .= " LEFT JOIN (".$self->{"CRC32"}. " INNER JOIN ".$self->{"table"}." as finalTable using (id))";
    $query .= " ON CRC32(CONCAT_WS('-', COALESCE(tmpTable.metric_id, '?'), COALESCE(tmpTable.service_id, '?'),COALESCE(tmpTable.service_description, '?'),";
    $query .= " COALESCE(tmpTable.host_id, '?'),COALESCE(tmpTable.host_name, '?'), COALESCE(tmpTable.sc_id, '?'),COALESCE(tmpTable.sc_name, '?'),";
    $query .= " COALESCE(tmpTable.hc_id, '?'),COALESCE(tmpTable.hc_name, '?'), COALESCE(tmpTable.hg_id, '?'),COALESCE(tmpTable.hg_name, '?'))) = mycrc";
    $query .= " AND tmpTable.metric_id=finalTable.metric_id";
    $query .= " AND tmpTable.service_id=finalTable.service_id AND tmpTable.service_description=finalTable.service_description";
    $query .= " AND tmpTable.host_id=finalTable.host_id AND tmpTable.host_name=finalTable.host_name";
    $query .= " AND tmpTable.sc_id=finalTable.sc_id AND tmpTable.sc_name=finalTable.sc_name";
    $query .= " AND tmpTable.hc_id=finalTable.hc_id AND tmpTable.hc_name=finalTable.hc_name";
    $query .= " AND tmpTable.hg_id=finalTable.hg_id AND tmpTable.hg_name=finalTable.hg_name";
    $query .= " WHERE finalTable.id is null";
    $db->query({ query => $query });
}

# Drops and recreates the "today" projection table (id + metric and
# classification ids only).
sub createTodayTable {
    my ($self,$useMemory) = @_;
    my $db = $self->{"centstorage"};

    $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"today_table"}."`" });
    my $query = "CREATE TABLE `" . $self->{"today_table"} . "` (";
    $query .= "`id` BIGINT(20) UNSIGNED NOT NULL,";
    $query .= "`metric_id` BIGINT(20) UNSIGNED NOT NULL,";
    $query .= "`metric_name` varchar(255) NOT NULL,";
    $query .= "`sc_id` int(11) NOT NULL,";
    $query .= "`hg_id` int(11) NOT NULL,";
    $query .= "`hc_id` int(11) NOT NULL,";
    $query .= " KEY `metric_id` (`metric_id`),";
    $query .= " KEY `schghc_id` (`sc_id`,`hg_id`,`hc_id`)";
    if (defined($useMemory) && $useMemory eq "true") {
        $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }else {
        $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;";
    }
    $db->query({ query => $query });
}

# Fills the "today" table with final-table ids for every temporary-table
# entry, matched through the CRC32 pre-filter plus full column comparison.
sub insertTodayEntries {
    my ($self) = @_;
    my $db = $self->{"centstorage"};
    my $query = "INSERT INTO ".$self->{"today_table"}. " (id, metric_id, metric_name, sc_id,hg_id,hc_id)";
    $query .= " SELECT finalTable.id, finalTable.metric_id, finalTable.metric_name, finalTable.sc_id, finalTable.hg_id, finalTable.hc_id FROM ".$self->{"tmpTable"}." t";
    $query .= " LEFT JOIN (".$self->{"CRC32"}." INNER JOIN ".$self->{"table"}." finalTable USING (id))";
    $query .= " ON CRC32(CONCAT_WS('-', COALESCE(t.metric_id, '?'), COALESCE(t.service_id, '?'),COALESCE(t.service_description, '?'),";
    $query .= " COALESCE(t.host_id, '?'),COALESCE(t.host_name, '?'), COALESCE(t.sc_id, '?'),COALESCE(t.sc_name, '?'),";
    $query .= " COALESCE(t.hc_id, '?'),COALESCE(t.hc_name, '?'), COALESCE(t.hg_id, '?'),COALESCE(t.hg_name, '?'))) = mycrc";
    $query .= " AND finalTable.metric_id=t.metric_id";
    $query .= " AND finalTable.service_id=t.service_id AND finalTable.service_description=t.service_description ";
    $query .= " AND finalTable.host_id=t.host_id AND finalTable.host_name=t.host_name ";
    $query .= " AND finalTable.sc_id=t.sc_id AND finalTable.sc_name=t.sc_name ";
    $query .= " AND finalTable.hc_id=t.hc_id AND finalTable.hc_name=t.hc_name ";
    $query .= " AND finalTable.hg_id=t.hg_id AND finalTable.hg_name=t.hg_name ";
    $db->query({ query => $query });
}

# Empties the final table and resets its auto-increment counter.
sub truncateTable {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "TRUNCATE TABLE `".$self->{"table"}."`";
    $db->query({ query => $query });
    $db->query({ query => "ALTER TABLE `".$self->{"table"}."` AUTO_INCREMENT=1" });
}

1;
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIService; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{"today_table"} = "mod_bi_tmp_today_services"; + $self->{"tmpTable"} = "mod_bi_tmp_services"; + $self->{"CRC32"} = "mod_bi_tmp_services_crc32"; + $self->{"table"} = "mod_bi_services"; + + bless $self, $class; + return $self; +} + +sub insert { + my $self = shift; + my $data = shift; + my $db = $self->{"centstorage"}; + $self->insertIntoTable($self->{"table"}, $data); + $self->createTodayTable("false"); + my $fields = "id, service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name"; + my $query = "INSERT INTO ".$self->{"today_table"}. "(".$fields.")"; + $query .= " SELECT ".$fields." 
FROM ".$self->{"table"}; + $db->query({ query => $query }); +} + +sub update { + my ($self, $data, $useMemory) = @_; + my $db = $self->{"centstorage"}; + + $self->createTempTable($useMemory); + $self->insertIntoTable($self->{"tmpTable"}, $data); + $self->createCRC32Table(); + $self->insertNewEntries(); + $self->createCRC32Table(); + $self->createTodayTable("false"); + $self->insertTodayEntries(); + $db->query({ query => "DROP TABLE `".$self->{"tmpTable"}."`" }); + $db->query({ query => "DROP TABLE `".$self->{"CRC32"}."`" }); +} + +sub insertIntoTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $table = shift; + my $data = shift; + my $name = shift; + my $id = shift; + my $query = "INSERT INTO `".$table."`". + " (`service_id`, `service_description`, `sc_id`, `sc_name`,". + " `host_id`, `host_name`,`hg_id`, `hg_name`, `hc_id`, `hc_name`)". + " VALUES (?,?,?,?,?,?,?,?,?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance; + $inst->begin_work; + my $counter = 0; + + foreach (@$data) { + my ($service_id, $service_description, $sc_id, $sc_name, $host_id, $host_name, $hg_id, $hg_name, $hc_id, $hc_name) = split(";", $_); + $sth->bind_param(1, $service_id); + $sth->bind_param(2, $service_description); + $sth->bind_param(3, $sc_id); + $sth->bind_param(4, $sc_name); + $sth->bind_param(5, $host_id); + $sth->bind_param(6, $host_name); + $sth->bind_param(7, $hg_id); + $sth->bind_param(8, $hg_name); + $sth->bind_param(9, $hc_id); + $sth->bind_param(10, $hc_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", $table." insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", $table." 
insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + $inst->commit; +} +sub createTempTable { + my ($self, $useMemory) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"tmpTable"}."`" }); + my $query = "CREATE TABLE `".$self->{"tmpTable"}."` ("; + $query .= "`service_id` int(11) NOT NULL,`service_description` varchar(255) NOT NULL,"; + $query .= "`sc_id` int(11) NOT NULL,`sc_name` varchar(255) NOT NULL,"; + $query .= "`host_id` int(11) DEFAULT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL,`hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL,`hg_name` varchar(255) NOT NULL"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub createCRC32Table { + my ($self) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"CRC32"}."`" }); + my $query = "CREATE TABLE `".$self->{"CRC32"}."` CHARSET=utf8 COLLATE=utf8_general_ci"; + $query .= " SELECT `id`, CRC32(CONCAT_WS('-', COALESCE(service_id, '?'),COALESCE(service_description, '?'),"; + $query .= " COALESCE(host_id, '?'),COALESCE(host_name, '?'), COALESCE(sc_id, '?'),COALESCE(sc_name, '?'),"; + $query .= " COALESCE(hc_id, '?'),COALESCE(hc_name, '?'), COALESCE(hg_id, '?'),COALESCE(hg_name, '?'))) as mycrc"; + $query .= " FROM ".$self->{"table"}; + $db->query({ query => $query }); + $query = "ALTER TABLE `".$self->{"CRC32"}."` ADD INDEX (`mycrc`)"; + $db->query({ query => $query }); +} + +sub insertNewEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $fields = "service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name"; + my $tmpTableFields = "tmpTable.service_id, 
tmpTable.service_description, tmpTable.host_name, tmpTable.host_id, tmpTable.sc_id,"; + $tmpTableFields .= "tmpTable.sc_name, tmpTable.hc_id, tmpTable.hc_name, tmpTable.hg_id, tmpTable.hg_name"; + my $query = " INSERT INTO `".$self->{"table"}."` (".$fields.") "; + $query .= " SELECT ".$tmpTableFields." FROM ".$self->{"tmpTable"}." as tmpTable"; + $query .= " LEFT JOIN (".$self->{"CRC32"}. " INNER JOIN ".$self->{"table"}." as finalTable using (id))"; + $query .= " ON CRC32(CONCAT_WS('-', COALESCE(tmpTable.service_id, '?'),COALESCE(tmpTable.service_description, '?'),"; + $query .= " COALESCE(tmpTable.host_id, '?'),COALESCE(tmpTable.host_name, '?'), COALESCE(tmpTable.sc_id, '?'),COALESCE(tmpTable.sc_name, '?'),"; + $query .= " COALESCE(tmpTable.hc_id, '?'),COALESCE(tmpTable.hc_name, '?'), COALESCE(tmpTable.hg_id, '?'),COALESCE(tmpTable.hg_name, '?'))) = mycrc"; + $query .= " AND tmpTable.service_id=finalTable.service_id AND tmpTable.service_description=finalTable.service_description"; + $query .= " AND tmpTable.host_id=finalTable.host_id AND tmpTable.host_name=finalTable.host_name"; + $query .= " AND tmpTable.sc_id=finalTable.sc_id AND tmpTable.sc_name=finalTable.sc_name"; + $query .= " AND tmpTable.hc_id=finalTable.hc_id AND tmpTable.hc_name=finalTable.hc_name"; + $query .= " AND tmpTable.hg_id=finalTable.hg_id AND tmpTable.hg_name=finalTable.hg_name"; + $query .= " WHERE finalTable.id is null"; + $db->query({ query => $query }); +} + +sub createTodayTable { + my ($self,$useMemory) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"today_table"}."`" }); + my $query = "CREATE TABLE `".$self->{"today_table"}."` ("; + $query .= "`id` INT NOT NULL,"; + $query .= "`service_id` int(11) NOT NULL,`service_description` varchar(255) NOT NULL,"; + $query .= "`sc_id` int(11) NOT NULL,`sc_name` varchar(255) NOT NULL,"; + $query .= "`host_id` int(11) DEFAULT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) 
DEFAULT NULL,`hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL,`hg_name` varchar(255) NOT NULL,"; + $query .= " KEY `host_service` (`host_id`, `service_id`)"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub insertTodayEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $query = "INSERT INTO ".$self->{"today_table"}. " (id, service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name)"; + $query .= " SELECT s.id, t.service_id, t.service_description, t.host_name, t.host_id, t.sc_id, t.sc_name, t.hc_id, t.hc_name, t.hg_id, t.hg_name FROM ".$self->{"tmpTable"}." t"; + $query .= " LEFT JOIN (".$self->{"CRC32"}." INNER JOIN ".$self->{"table"}." s USING (id))"; + $query .= " ON CRC32(CONCAT_WS('-', COALESCE(t.service_id, '?'),COALESCE(t.service_description, '?'),"; + $query .= " COALESCE(t.host_id, '?'),COALESCE(t.host_name, '?'), COALESCE(t.sc_id, '?'),COALESCE(t.sc_name, '?'),"; + $query .= " COALESCE(t.hc_id, '?'),COALESCE(t.hc_name, '?'), COALESCE(t.hg_id, '?'),COALESCE(t.hg_name, '?'))) = mycrc"; + $query .= " AND s.service_id=t.service_id AND s.service_description=t.service_description "; + $query .= " AND s.host_id=t.host_id AND s.host_name=t.host_name "; + $query .= " AND s.sc_id=t.sc_id AND s.sc_name=t.sc_name "; + $query .= " AND s.hc_id=t.hc_id AND s.hc_name=t.hc_name "; + $query .= " AND s.hg_id=t.hg_id AND s.hg_name=t.hg_name "; + $db->query({ query => $query }); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `".$self->{"table"}."`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `".$self->{"table"}."` AUTO_INCREMENT=1" }); +} + +1; diff --git 
a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceCategory.pm new file mode 100644 index 00000000000..74e5d7f2e3f --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceCategory.pm @@ -0,0 +1,128 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIServiceCategory; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + bless $self, $class; + return $self; +} + + +sub getAllEntries { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `sc_id`, `sc_name`"; + $query .= " FROM `mod_bi_servicecategories`"; + my $sth = $db->query({ query => $query }); + my @entries = (); + while (my $row = $sth->fetchrow_hashref()) { + push @entries, $row->{"sc_id"}.";".$row->{"sc_name"}; + } + return (\@entries); +} + +sub getEntryIds { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `id`, `sc_id`, `sc_name`"; + $query .= " FROM `mod_bi_servicecategories`"; + my $sth = $db->query({ query => $query }); + my %entries = (); + while (my $row = $sth->fetchrow_hashref()) { + $entries{$row->{"sc_id"}.";".$row->{"sc_name"}} = $row->{"id"}; + } + return (\%entries); +} + +sub entryExists { + my $self = shift; + my ($value, $entries) = (shift, shift); + foreach(@$entries) { + if ($value eq $_) { + return 1; + } + } + return 0; +} +sub insert { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $data = shift; + my $query = "INSERT INTO `mod_bi_servicecategories`". + " (`sc_id`, `sc_name`)". 
+ " VALUES (?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance; + $inst->begin_work; + my $counter = 0; + + my $existingEntries = $self->getAllEntries; + foreach (@$data) { + if (!$self->entryExists($_, $existingEntries)) { + my ($sc_id, $sc_name) = split(";", $_); + $sth->bind_param(1, $sc_id); + $sth->bind_param(2, $sc_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "servicecategories insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "servicecategories insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + } + $inst->commit; +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `mod_bi_servicecategories`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `mod_bi_servicecategories` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceStateEvents.pm new file mode 100644 index 00000000000..567634b680a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceStateEvents.pm @@ -0,0 +1,251 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIServiceStateEvents; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + $self->{'timeperiod'} = shift; + $self->{'bind_counter'} = 0; + $self->{'name'} = "mod_bi_servicestateevents"; + $self->{'tmp_name'} = "mod_bi_servicestateevents_tmp"; + $self->{'timeColumn'} = "end_time"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub prepareQuery { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO `".$self->{'name'}."`". + " (`modbiservice_id`, `modbiliveservice_id`,". + " `state`, `start_time`, `sla_duration`,". + " `end_time`, `ack_time`, `last_update`, `duration`) ". 
+ " VALUES (?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))"; + $self->{'statement'} = $db->prepare($query); + $self->{'dbinstance'} = $db->getInstance; + ($self->{'dbinstance'})->begin_work; +} + +sub createTempBIEventsTable { + my ($self) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `mod_bi_servicestateevents_tmp`" }); + my $createTable = " CREATE TABLE `mod_bi_servicestateevents_tmp` ("; + $createTable .= " `host_id` int(11) NOT NULL,"; + $createTable .= " `service_id` int(11) NOT NULL,"; + $createTable .= " `modbiliveservice_id` tinyint(4) NOT NULL,"; + $createTable .= " `state` tinyint(4) NOT NULL,"; + $createTable .= " `start_time` int(11) NOT NULL,"; + $createTable .= " `end_time` int(11) DEFAULT NULL,"; + $createTable .= " `duration` int(11) NOT NULL,"; + $createTable .= " `sla_duration` int(11) NOT NULL,"; + $createTable .= " `ack_time` int(11) DEFAULT NULL,"; + $createTable .= " `last_update` tinyint(4) DEFAULT '0',"; + $createTable .= " KEY `modbiservice_id` (`host_id`,`service_id`)"; + $createTable .= " ) ENGINE=InnoDB DEFAULT CHARSET=utf8"; + $db->query({ query => $createTable }); +} + +sub prepareTempQuery { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO `".$self->{'tmp_name'}."`". + " (`host_id`,`service_id`,`modbiliveservice_id`,". + " `state`, `start_time`, `sla_duration`,". + " `end_time`, `ack_time`, `last_update`, `duration`) ". 
+ " VALUES (?,?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))"; + $self->{'statement'} = $db->prepare($query); + $self->{'dbinstance'} = $db->getInstance; + ($self->{'dbinstance'})->begin_work; +} + +sub bindParam { + my ($self, $row) = @_; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my $size = scalar(@$row); + my $sth = $self->{'statement'}; + for (my $i = 0; $i < $size; $i++) { + $sth->bind_param($i + 1, $row->[$i]); + } + $sth->bind_param($size + 1, $row->[4]); + $sth->bind_param($size + 2, $row->[6]); + + ($self->{'statement'})->execute; + if (defined(($self->{'dbinstance'})->errstr)) { + $logger->writeLog("FATAL", $self->{'name'}." insertion execute error : ".($self->{'dbinstance'})->errstr); + } + if ($self->{'bind_counter'} >= 1000) { + $self->{'bind_counter'} = 0; + ($self->{'dbinstance'})->commit; + if (defined(($self->{'dbinstance'})->errstr)) { + $logger->writeLog("FATAL", $self->{'name'}." insertion commit error : ".($self->{'dbinstance'})->errstr); + } + ($self->{'dbinstance'})->begin_work; + } + $self->{'bind_counter'} += 1; + +} + +sub getDayEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + my $timeperiod = $self->{'timeperiod'}; + my ($start, $end, $liveserviceId, $ranges) = @_; + my $liveServiceList = shift; + my %results = (); + + my $query = "SELECT start_time, end_time, state, modbiservice_id"; + $query .= " FROM `" . $self->{'name'} . "`"; + $query .= " WHERE `start_time` < " . $end; + $query .= " AND `end_time` > " . $start; + $query .= " AND `state` IN (0,1,2,3)"; + $query .= " AND modbiliveservice_id=" . 
$liveserviceId; + my $sth = $db->query({ query => $query }); + + if (!scalar(@$ranges)) { + return \%results; + } + + my $rows = []; + while (my $row = ( + shift(@$rows) || + shift(@{$rows = $sth->fetchall_arrayref(undef,10_000) || []}) ) + ) { + my $entryID = $row->[3]; + + my ($started, $ended) = (0,0); + my $rangeSize = scalar(@$ranges); + my $eventDuration = 0; + for (my $count = 0; $count < $rangeSize; $count++) { + my $currentStart = $row->[0]; + my $currentEnd = $row->[1]; + + my $range = $ranges->[$count]; + my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]); + if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) { + if ($currentStart < $rangeStart) { + $currentStart = $rangeStart; + } elsif ($count == 0) { + $started = 1; + } + if ($currentEnd > $rangeEnd) { + $currentEnd = $rangeEnd; + } elsif ($count == $rangeSize - 1) { + $ended = 1; + } + $eventDuration += $currentEnd - $currentStart; + } + } + if (!defined($results{$entryID})) { + my @tab = (0, 0, 0, 0, 0, 0, 0, 0, 0); + + #New table - sync with the real table in centreon_storage database + # 0: OK time , 1: CRITICAL time, 2 : DEGRADED time 3 : alert_unavailable_opened + # 4: alert unavailable_closed 5 : alert_degraded_opened 6 : alertes_degraded_closed + # 7 : alert_unknown_opened 8 : alert_unknown_closed + $results{$entryID} = \@tab; + } + my $stats = $results{$entryID}; + my $state = $row->[2]; + if ($state == 0) { + $stats->[0] += $eventDuration; + } elsif ($state == 1) { + $stats->[2] += $eventDuration; + $stats->[5] += $started; + $stats->[6] += $ended; + } elsif ($state == 2) { + $stats->[1] += $eventDuration; + $stats->[3] += $started; + $stats->[4] += $ended; + } else { + $stats->[7] += $started; + $stats->[8] += $ended; + } + $results{$entryID} = $stats; + } + + return (\%results); +} + +#Deprecated +sub getNbEvents { + my ($self, $start, $end, $groupId, $hcatId, $scatId, $liveServiceID) = @_; + my $db = $self->{"centstorage"}; + + my $query = "SELECT count(state) as nbEvents, 
state"; + $query .= " FROM mod_bi_services s, ".$self->{'name'}." e"; + $query .= " WHERE s.hg_id = ".$groupId." AND s.hc_id=".$hcatId." AND s.sc_id=".$scatId; + $query .= " AND s.id = e.modbiservice_id"; + $query .= " AND start_time < UNIX_TIMESTAMP('".$end."')"; + $query .= " AND end_time > UNIX_TIMESTAMP('".$start."')"; + $query .= " AND e.modbiliveservice_id=".$liveServiceID; + $query .= " AND e.state in (1,2,3)"; + $query .= " GROUP BY e.state"; + my $sth = $db->query({ query => $query }); + + my ($warnEvents, $criticalEvents, $otherEvents) = (undef, undef, undef); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{'state'} == 1) { + $warnEvents = $row->{'nbEvents'}; + }elsif ($row->{'state'} == 2) { + $criticalEvents = $row->{'nbEvents'}; + }else { + $otherEvents = $row->{'nbEvents'}; + } + } + return ($warnEvents, $criticalEvents, $otherEvents); +} + +sub deleteUnfinishedEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "DELETE FROM `".$self->{'name'}."`"; + $query .= " WHERE last_update = 1 OR end_time is null"; + $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/DBConfigParser.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DBConfigParser.pm new file mode 100644 index 00000000000..6c5571b4807 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DBConfigParser.pm @@ -0,0 +1,85 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; +use POSIX; +use XML::LibXML; +use Data::Dumper; + +package gorgone::modules::centreon::mbi::libs::bi::DBConfigParser; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database + +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + bless $self, $class; + return $self; +} + +sub parseFile { + my $self = shift; + my $logger = $self->{"logger"}; + my $file = shift; + + my %connProfiles = (); + if (! 
-r $file) { + $logger->writeLog("ERROR", "Cannot read file ".$file); + } + my $parser = XML::LibXML->new(); + my $root = $parser->parse_file($file); + foreach my $profile ($root->findnodes('/DataTools.ServerProfiles/profile')) { + my $base = $profile->findnodes('@name'); + + foreach my $property ($profile->findnodes('./baseproperties/property')) { + my $name = $property->findnodes('@name')->to_literal; + my $value = $property->findnodes('@value')->to_literal; + if ($name eq 'odaURL') { + if ($value =~ /jdbc\:[a-z]+\:\/\/([^:]*)(\:\d+)?\/(.*)/) { + $connProfiles{$base."_host"} = $1; + if(defined($2) && $2 ne ''){ + $connProfiles{$base."_port"} = $2; + $connProfiles{$base."_port"} =~ s/\://; + }else{ + $connProfiles{$base."_port"} = '3306'; + } + $connProfiles{$base."_db"} = $3; + $connProfiles{$base."_db"} =~ s/\?autoReconnect\=true//; + } + } + if ($name eq 'odaUser') { + $connProfiles{$base."_user"} = sprintf('%s',$value); + } + if ($name eq 'odaPassword') { + $connProfiles{$base."_pass"} = sprintf('%s', $value); + } + } + } + + return (\%connProfiles); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/DataQuality.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DataQuality.pm new file mode 100644 index 00000000000..f9f87a36728 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DataQuality.pm @@ -0,0 +1,99 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::DataQuality; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database + +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centreon} = shift; + bless $self, $class; + return $self; +} + +sub searchAndDeleteDuplicateEntries { + my $self = shift; + + $self->{logger}->writeLog("INFO", "Searching for duplicate host/service entries"); + my $relationIDS = $self->getDuplicateRelations(); + if (@$relationIDS) { + $self->deleteDuplicateEntries($relationIDS); + } +} + +# return table of IDs to delete +sub getDuplicateRelations { + my $self = shift; + + my @relationIDS; + #Get duplicated relations and exclude BAM or Metaservices data + my $duplicateEntriesQuery = "SELECT host_host_id, service_service_id, count(*) as nbRelations ". + "FROM host_service_relation t1, host t2 WHERE t1.host_host_id = t2.host_id ". + "AND t2.host_name not like '_Module%' group by host_host_id, service_service_id HAVING COUNT(*) > 1"; + + my $sth = $self->{centreon}->query({ query => $duplicateEntriesQuery }); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($row->{host_host_id})) { + $self->{logger}->writeLog( + "WARNING", + "Found the following duplicate data (host-service) : " . $row->{host_host_id}." - ".$row->{service_service_id}." 
- Cleaning data" + ); + #Get all relation IDs related to duplicated data + my $relationIdQuery = "SELECT hsr_id from host_service_relation ". + "WHERE host_host_id = ".$row->{host_host_id}." AND service_service_id = ".$row->{service_service_id}; + my $sth2 = $self->{centreon}->query({ query => $relationIdQuery }); + while (my $hsr = $sth2->fetchrow_hashref()) { + if (defined($hsr->{hsr_id})) { + push(@relationIDS,$hsr->{hsr_id}); + } + } + $self->deleteDuplicateEntries(\@relationIDS); + @relationIDS = (); + } + } + return (\@relationIDS); +} + +# Delete N-1 duplicate entry +sub deleteDuplicateEntries { + my $self = shift; + + my @relationIDS = @{$_[0]}; + #WARNING : very important so at least 1 relation is kept + pop @relationIDS; + foreach (@relationIDS) { + my $idToDelete = $_; + my $deleteQuery = "DELETE FROM host_service_relation WHERE hsr_id = ".$idToDelete; + $self->{centreon}->query({ query => $deleteQuery }) + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/Dumper.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Dumper.pm new file mode 100644 index 00000000000..198c52cc99e --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Dumper.pm @@ -0,0 +1,132 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::Dumper; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{'tempFolder'} = "/tmp/"; + bless $self, $class; + return $self; +} + +sub setStorageDir { + my $self = shift; + my $logger = $self->{'logger'}; + my $tempFolder = shift; + + if (!defined($tempFolder)) { + $logger->writeLog("ERROR", "Temporary storage folder is not defined"); + } + if (! -d $tempFolder && ! -w $tempFolder) { + $logger->writeLog("ERROR", "Cannot write into directory ".$tempFolder); + } + if ($tempFolder !~ /\/$/) { + $tempFolder .= "/"; + } + $self->{'tempFolder'} = $tempFolder; +} + +# Dump data in a MySQL table. (db connection,table name, [not mandatory] start column, end column,start date,end date,exclude end time?) +# and return the file name created +# Ex $file = $dumper->dumpData($hostCentreon, 'toto', 'data_start', 'date_end', '2015-01-02', '2015-02-01', 0); +sub dumpData { + my $self = shift; + my $db = $self->{"centstorage"}; + my ($hostCentreon, $tableName) = (shift, shift); + my ($day,$month,$year,$hour,$min) = (localtime(time))[3,4,5,2,1]; + my $fileName = $self->{'tempFolder'}.$tableName; + my $query = "SELECT * FROM ".$tableName." "; + my $logger = $self->{'logger'}; + if (@_) { + my ($startColumn, $endColumn, $startTime, $endTime, $excludeEndTime) = @_; + $query .= " WHERE ".$startColumn." >= UNIX_TIMESTAMP('".$startTime."') "; + if ($excludeEndTime == 0) { + $query .= "AND ".$endColumn." <= UNIX_TIMESTAMP('".$endTime."')"; + }else { + $query .= "AND ".$endColumn." 
< UNIX_TIMESTAMP('".$endTime."')"; + } + } + my @loadCmdArgs = ('mysql', "-q", "-u", $hostCentreon->{'Censtorage_user'}, "-p".$hostCentreon->{'Censtorage_pass'}, + "-h", $hostCentreon->{'Censtorage_host'}, $hostCentreon->{'Censtorage_db'}, + "-e", $query.">".$fileName); + system("mysql -q -u".$hostCentreon->{'Censtorage_user'}." -p".$hostCentreon->{'Censtorage_pass'}." -P".$hostCentreon->{'Censtorage_port'}." -h".$hostCentreon->{'Censtorage_host'}. + " ".$hostCentreon->{'Censtorage_db'}." -e \"".$query."\" > ".$fileName); + $logger->writeLog("DEBUG","mysql -q -u".$hostCentreon->{'Censtorage_user'}." -p".$hostCentreon->{'Censtorage_pass'}." -P".$hostCentreon->{'Censtorage_port'}." -h".$hostCentreon->{'Censtorage_host'}. + " ".$hostCentreon->{'Censtorage_db'}." -e \"".$query."\" > ".$fileName); + return ($fileName); +} + +sub dumpRequest{ + my $self = shift; + my $db = $self->{"centstorage"}; + my ($hostCentreon, $requestName,$query) = (shift, shift,shift); + my $fileName = $self->{'tempFolder'}.$requestName; + my $logger = $self->{'logger'}; + system("mysql -q -u".$hostCentreon->{'Censtorage_user'}." -p".$hostCentreon->{'Censtorage_pass'}." -h".$hostCentreon->{'Censtorage_host'}. " -P".$hostCentreon->{'Censtorage_port'}. + " ".$hostCentreon->{'Censtorage_db'}." 
-e \"".$query."\" > ".$fileName); + return ($fileName); +} + +sub dumpTableStructure { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{'logger'}; + my ($tableName) = (shift); + + my $sql = ""; + my $sth = $db->query({ query => "SHOW CREATE TABLE ".$tableName }); + if (my $row = $sth->fetchrow_hashref()) { + $sql = $row->{'Create Table'}; + $sql =~ s/(CONSTRAINT.*\n)//g; + $sql =~ s/(\,\n\s+\))/\)/g; + }else { + $logger->writeLog("WARNING", "Cannot get structure for table : ".$tableName); + return (undef); + } + $sth->finish; + return ($sql); +} + +sub insertData { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my ($tableName, $inFile) = (shift, shift); + my $query = "LOAD DATA INFILE '".$inFile."' INTO TABLE `".$tableName."`"; + my $sth = $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGMonthAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGMonthAvailability.pm new file mode 100644 index 00000000000..e0f3d1a82de --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGMonthAvailability.pm @@ -0,0 +1,92 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Monthly host-group availability statistics (table mod_bi_hgmonthavailability).
# Part of the Centreon MBI ETL: rows produced by the aggregation step are
# bulk-inserted through insertStats().

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::HGMonthAvailability;

# new($logger, $centstorage [, $centreon])
# Store the logger and database handles; the centreon handle is optional.
sub new {
    my ($class, @deps) = @_;

    my $self = {
        logger      => $deps[0],
        centstorage => $deps[1],
        name        => 'mod_bi_hgmonthavailability',
        timeColumn  => 'time_id'
    };
    $self->{centreon} = $deps[2] if (scalar(@deps) > 2);

    return bless $self, $class;
}

# Target table name.
sub getName {
    my ($self) = @_;
    return $self->{name};
}

# Name of the time dimension column.
sub getTimeColumn {
    my ($self) = @_;
    return $self->{timeColumn};
}

# insertStats($time_id, \@rows)
# Bulk-insert availability rows, flushing every 1000 tuples. Each row is an
# arrayref matching the column list below (time_id excluded); undef entries
# are emitted as SQL NULL. Values are interpolated raw, so they are expected
# to be numeric output of the internal aggregation queries.
sub insertStats {
    my ($self, $time_id, $data) = @_;

    my $flush_threshold = 1000;
    my $base_query = "INSERT INTO `".$self->{'name'}."`".
        " (`time_id`, `modbihg_id`, `modbihc_id`, `liveservice_id`, `available`, `unavailable_time`,".
        " `alert_unavailable_opened`, `alert_unavailable_closed`, ".
        " `alert_unreachable_opened`, `alert_unreachable_closed`,".
        " `alert_unavailable_total`, `alert_unreachable_total`,".
        " `mtrs`, `mtbf`, `mtbsi`)".
        " VALUES ";

    my $pending = 0;
    my $query = $base_query;
    my $separator = '';

    foreach my $row (@$data) {
        $query .= $separator
            . '(' . join(', ', $time_id, map { defined($_) ? $_ : 'NULL' } @$row) . ')';
        $separator = ',';

        if (++$pending >= $flush_threshold) {
            $self->{centstorage}->query({ query => $query });
            ($query, $pending, $separator) = ($base_query, 0, '');
        }
    }
    $self->{centstorage}->query({ query => $query }) if ($pending > 0);
}

1;
# Monthly service availability statistics per host group / host category /
# service category couple (table mod_bi_hgservicemonthavailability).

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::HGServiceMonthAvailability;

# new($logger, $centstorage [, $centreon])
# Store the logger and database handles; the centreon handle is optional.
sub new {
    my ($class, @deps) = @_;

    my $self = {
        logger      => $deps[0],
        centstorage => $deps[1],
        name        => 'mod_bi_hgservicemonthavailability',
        timeColumn  => 'time_id'
    };
    $self->{centreon} = $deps[2] if (scalar(@deps) > 2);

    return bless $self, $class;
}

# Target table name.
sub getName {
    my ($self) = @_;
    return $self->{name};
}

# Name of the time dimension column.
sub getTimeColumn {
    my ($self) = @_;
    return $self->{timeColumn};
}

# insertStats($time_id, \@rows)
# Bulk-insert availability rows, flushing every 1000 tuples. Each row is an
# arrayref matching the column list below (time_id excluded); undef entries
# are emitted as SQL NULL.
sub insertStats {
    my ($self, $time_id, $data) = @_;

    my $flush_threshold = 1000;
    my $base_query = "INSERT INTO `".$self->{'name'}."`".
        " (`time_id`, `modbihg_id`, `modbihc_id`, `modbisc_id`, `liveservice_id`, `available`,".
        " `unavailable_time`, `degraded_time`, `alert_unavailable_opened`, `alert_unavailable_closed`, ".
        " `alert_degraded_opened`, `alert_degraded_closed`, ".
        " `alert_other_opened`, `alert_other_closed`, ".
        " `alert_degraded_total`, `alert_unavailable_total`,".
        " `alert_other_total`, `mtrs`, `mtbf`, `mtbsi`)".
        " VALUES ";

    my $pending = 0;
    my $query = $base_query;
    my $separator = '';

    foreach my $row (@$data) {
        $query .= $separator
            . '(' . join(', ', $time_id, map { defined($_) ? $_ : 'NULL' } @$row) . ')';
        $separator = ',';

        if (++$pending >= $flush_threshold) {
            $self->{centstorage}->query({ query => $query });
            ($query, $pending, $separator) = ($base_query, 0, '');
        }
    }
    $self->{centstorage}->query({ query => $query }) if ($pending > 0);
}

1;
# Host availability facts (table mod_bi_hostavailability): direct inserts,
# dump-file generation for LOAD DATA imports, and the monthly aggregation
# that feeds mod_bi_hgmonthavailability.

use strict;
use warnings;
use POSIX;
use Time::Local;

package gorgone::modules::centreon::mbi::libs::bi::HostAvailability;

# new($logger, $centstorage [, $centreon])
# Store the logger and database handles; the centreon handle is optional.
sub new {
    my ($class, @deps) = @_;

    my $self = {
        logger        => $deps[0],
        centstorage   => $deps[1],
        name          => 'mod_bi_hostavailability',
        timeColumn    => 'time_id',
        nbLinesInFile => 0,      # rows written by saveStatsInFile so far
        commitParam   => 500000  # dump-file row threshold before a load
    };
    $self->{centreon} = $deps[2] if (scalar(@deps) > 2);

    return bless $self, $class;
}

# Target table name.
sub getName {
    my ($self) = @_;
    return $self->{name};
}

# Name of the time dimension column.
sub getTimeColumn {
    my ($self) = @_;
    return $self->{timeColumn};
}

# insertStats(\%data, $time_id, $liveserviceId)   -- daily mode only
# %data maps modbihost_id => arrayref of the 7 stat columns; hosts whose
# first three counters sum to zero are skipped. Flushes every 10000 tuples.
sub insertStats {
    my ($self, $data, $time_id, $liveserviceId) = @_;

    my $flush_threshold = 10000;
    my $base_query = "INSERT INTO `" . $self->{name} . "`".
        " (`modbihost_id`, `time_id`, `liveservice_id`, `available`, ".
        " `unavailable`,`unreachable`, `alert_unavailable_opened`, `alert_unavailable_closed`, ".
        " `alert_unreachable_opened`, `alert_unreachable_closed`) ".
        " VALUES ";

    my $pending = 0;
    my $query = $base_query;
    my $separator = '';

    while (my ($modBiHostId, $stats) = each %$data) {
        # Skip hosts with no recorded time in the first three counters.
        next if ($stats->[0] + $stats->[1] + $stats->[2] == 0);

        $query .= $separator
            . '(' . join(', ', $modBiHostId, $time_id, $liveserviceId, @$stats) . ')';
        $separator = ',';

        if (++$pending >= $flush_threshold) {
            $self->{centstorage}->query({ query => $query });
            ($query, $pending, $separator) = ($base_query, 0, '');
        }
    }
    $self->{centstorage}->query({ query => $query }) if ($pending > 0);
}

# saveStatsInFile(\%data, $time_id, $liveserviceId, $fh)
# Append one tab-separated row per host to the dump file handle $fh and
# keep nbLinesInFile up to date.
sub saveStatsInFile {
    my ($self, $data, $time_id, $liveserviceId, $fh) = @_;

    while (my ($modBiHostId, $stats) = each %$data) {
        # NOTE(review): this filter sums indexes 0,1,4 while insertStats sums
        # 0,1,2 — presumably the two paths receive differently shaped stat
        # arrays; confirm against the callers.
        next if ($stats->[0] + $stats->[1] + $stats->[4] == 0);

        print $fh join("\t", $modBiHostId, $time_id, $liveserviceId, @$stats) . "\n";
        $self->{nbLinesInFile} += 1;
    }
}

# Number of rows written to the current dump file.
sub getCurrentNbLines {
    my ($self) = @_;
    return $self->{nbLinesInFile};
}

# Row threshold after which the dump file should be committed.
sub getCommitParam {
    my ($self) = @_;
    return $self->{commitParam};
}

# Reset/overwrite the dump-file row counter.
sub setCurrentNbLines {
    my ($self, $count) = @_;
    $self->{nbLinesInFile} = $count;
}

# getHGMonthAvailability($start, $end, $eventObj)
# Aggregate the month starting at $start per (host group, host category,
# liveservice), then derive MTRS/MTBF/MTBSI from the event counts supplied
# by $eventObj. Returns an arrayref of rows ready for
# HGMonthAvailability::insertStats.
sub getHGMonthAvailability {
    my ($self, $start, $end, $eventObj) = @_;
    my $db = $self->{"centstorage"};

    $self->{"logger"}->writeLog("DEBUG","[HOST] Calculating availability for hosts");
    my $query = "SELECT h.hg_id, h.hc_id, hc.id as cat_id, hg.id as group_id, ha.liveservice_id, avg(available/(available+unavailable+unreachable)) as av_percent,";
    $query .= " sum(available) as av_time, sum(unavailable) as unav_time, sum(alert_unavailable_opened) as unav_opened, sum(alert_unavailable_closed) as unav_closed,";
    $query .= " sum(alert_unreachable_opened) as unr_opened, sum(alert_unreachable_closed) as unr_closed";
    $query .= " FROM ".$self->{"name"}." ha";
    $query .= " STRAIGHT_JOIN mod_bi_time t ON (t.id = ha.time_id )";
    $query .= " STRAIGHT_JOIN mod_bi_hosts h ON (ha.modbihost_id = h.id)";
    $query .= " STRAIGHT_JOIN mod_bi_hostgroups hg ON (h.hg_name=hg.hg_name AND h.hg_id=hg.hg_id)";
    $query .= " STRAIGHT_JOIN mod_bi_hostcategories hc ON (h.hc_name=hc.hc_name AND h.hc_id=hc.hc_id)";
    $query .= " WHERE t.year = YEAR('".$start."') AND t.month = MONTH('".$start."') and t.hour=0";
    $query .= " GROUP BY h.hg_id, h.hc_id, ha.liveservice_id";
    my $sth = $db->query({ query => $query });

    $self->{"logger"}->writeLog("DEBUG","[HOST] Calculating MTBF/MTRS/MTBSI for Host");
    my @data = ();
    while (my $row = $sth->fetchrow_hashref()) {
        my ($downEvents, $unreachableEvents) = $eventObj->getNbEvents(
            $start, $end, $row->{hg_id}, $row->{hc_id}, $row->{liveservice_id}
        );
        # Mean time to restore / between failures / between service incidents;
        # left undefined when no DOWN event occurred in the period.
        my ($mtrs, $mtbf, $mtbsi);
        if (defined($downEvents) && $downEvents != 0) {
            $mtrs  = $row->{unav_time} / $downEvents;
            $mtbf  = $row->{av_time} / $downEvents;
            $mtbsi = ($row->{unav_time} + $row->{av_time}) / $downEvents;
        }
        push @data, [
            $row->{group_id}, $row->{cat_id}, $row->{liveservice_id}, $row->{av_percent}, $row->{unav_time},
            $row->{unav_opened}, $row->{unav_closed}, $row->{unr_opened}, $row->{unr_closed},
            $downEvents, $unreachableEvents, $mtrs, $mtbf, $mtbsi
        ];
    }

    return \@data;
}
1;
# Accessors for the mod_bi_liveservice dimension table: each row binds a
# Centreon timeperiod to a BI "liveservice" used by the availability ETL.

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::LiveService;

# new($logger, $centstorage [, $centreon])
sub new {
    my ($class, @deps) = @_;

    my $self = {
        logger      => $deps[0],
        centstorage => $deps[1]
    };
    $self->{centreon} = $deps[2] if (scalar(@deps) > 2);

    return bless $self, $class;
}

# getLiveServicesByName($name [, $interval])
# Return { name => id } for liveservices whose name starts with $name.
# ($interval is accepted for call compatibility but unused here.)
sub getLiveServicesByName {
    my ($self, $name, $interval) = @_;
    my $db = $self->{centstorage};

    my $query = "SELECT `id`, `name`";
    $query .= " FROM `mod_bi_liveservice`";
    $query .= " WHERE `name` like '".$name."%'";
    my $sth = $db->query({ query => $query });

    my %byName;
    while (my $row = $sth->fetchrow_hashref()) {
        $byName{ $row->{name} } = $row->{id};
    }
    return (\%byName);
}

# getLiveServicesByTpId()
# Return { timeperiod_id => liveservice id } for every row of the table.
# (Both parameters are accepted for call compatibility but unused.)
sub getLiveServicesByTpId {
    my ($self, $name, $interval) = @_;
    my $db = $self->{centstorage};

    my $query = "SELECT `id`, `timeperiod_id`";
    $query .= " FROM `mod_bi_liveservice` ";
    my $sth = $db->query({ query => $query });

    my %byTpId;
    while (my $row = $sth->fetchrow_hashref()) {
        $byTpId{ $row->{timeperiod_id} } = $row->{id};
    }
    return (\%byTpId);
}

# getLiveServicesByNameForTpId($tpId)
# Return (name, id) of the liveservice bound to timeperiod $tpId; when
# several rows match, the last fetched one wins (plain fetch loop).
sub getLiveServicesByNameForTpId {
    my ($self, $tpId) = @_;
    my $db = $self->{centstorage};

    my $query = "SELECT `id`, `name`";
    $query .= " FROM `mod_bi_liveservice` ";
    $query .= "WHERE timeperiod_id = ".$tpId;
    my $sth = $db->query({ query => $query });

    my ($name, $id);
    while (my $row = $sth->fetchrow_hashref()) {
        ($name, $id) = ($row->{name}, $row->{id});
    }
    return ($name, $id);
}

# getLiveServiceIdsInString($ids)
# $ids is an already comma-separated list of timeperiod ids; return the
# matching liveservice ids as a comma-separated string.
sub getLiveServiceIdsInString {
    my ($self, $ids) = @_;
    my $db = $self->{centstorage};

    my $query = "SELECT `id`";
    $query .= " FROM mod_bi_liveservice";
    $query .= " WHERE timeperiod_id IN (".$ids.")";
    my $sth = $db->query({ query => $query });

    my @found;
    while (my $row = $sth->fetchrow_hashref()) {
        push @found, $row->{id};
    }
    return join(',', @found);
}

# getLiveServicesByNameForTpIds(\%ids)
# Return { name => id } for liveservices whose timeperiod_id is a key of
# %$ids; logs an error when the hash is empty.
sub getLiveServicesByNameForTpIds {
    my ($self, $ids) = @_;
    my $db = $self->{centstorage};

    my $idStr = join(',', keys %$ids);
    if ($idStr eq "") {
        $self->{logger}->writeLog("ERROR", "Select a timeperiod in the ETL configuration menu");
    }

    my $query = "SELECT `id`, `name`";
    $query .= " FROM mod_bi_liveservice";
    $query .= " WHERE timeperiod_id IN (".$idStr.")";
    my $sth = $db->query({ query => $query });

    my %byName;
    while (my $row = $sth->fetchrow_hashref()) {
        $byName{ $row->{name} } = $row->{id};
    }
    return \%byName;
}

# Name of the liveservice bound to timeperiod $id ("" when absent).
sub getTimeperiodName {
    my ($self, $id) = @_;
    my $db = $self->{centstorage};

    my $query = "SELECT name FROM mod_bi_liveservice WHERE timeperiod_id=".$id;
    my $sth = $db->query({ query => $query });
    my $name = "";
    if (my $row = $sth->fetchrow_hashref()) {
        $name = $row->{name};
    }
    return ($name);
}

# Timeperiod id of the liveservice named $name (0 when absent).
sub getTimeperiodId {
    my ($self, $name) = @_;
    my $db = $self->{centstorage};

    my $query = "SELECT timeperiod_id FROM mod_bi_liveservice WHERE name='".$name."'";
    my $sth = $db->query({ query => $query });
    my $id = 0;
    if (my $row = $sth->fetchrow_hashref()) {
        $id = $row->{timeperiod_id};
    }
    return ($id);
}

# Insert one (name, timeperiod_id) row.
sub insert {
    my ($self, $name, $id) = @_;
    my $db = $self->{centstorage};

    my $query = "INSERT INTO `mod_bi_liveservice` (`name`, `timeperiod_id`) VALUES ('".$name."', ".$id.")";
    $db->query({ query => $query });
}

# insertList(\%list) where %list maps timeperiod_id => name.
# Synchronize the table with the given mapping:
#   - id known under another name  -> rename the row (updateById)
#   - name known under another id  -> repoint the row (update)
#   - neither known                -> insert a new row
sub insertList {
    my ($self, $list) = @_;

    while (my ($id, $name) = each %$list) {
        my $tpName = $self->getTimeperiodName($id);
        my $tpId = $self->getTimeperiodId($name);
        if ($tpName ne "" && $name ne $tpName) {
            $self->updateById($id, $name);
        } elsif ($tpId > 0 && $tpId != $id) {
            $self->update($name, $id);
        } elsif ($tpId == 0 && $tpName eq "") {
            $self->insert($name, $id);
        }
    }
}

# Point the row named $name at timeperiod $id.
sub update {
    my ($self, $name, $id) = @_;
    my $db = $self->{centstorage};

    my $query = "UPDATE `mod_bi_liveservice` SET `timeperiod_id`=".$id." WHERE name='".$name."'";
    $db->query({ query => $query });
}

# Rename the row bound to timeperiod $id.
sub updateById {
    my ($self, $id, $name) = @_;
    my $db = $self->{centstorage};

    my $query = "UPDATE `mod_bi_liveservice` SET `name`='".$name."' WHERE timeperiod_id=".$id;
    $db->query({ query => $query });
}

# Empty the table and reset its auto-increment counter.
sub truncateTable {
    my ($self) = @_;
    my $db = $self->{centstorage};

    $db->query({ query => "TRUNCATE TABLE `mod_bi_liveservice`" });
    $db->query({ query => "ALTER TABLE `mod_bi_liveservice` AUTO_INCREMENT=1" });
}

1;
# Bulk-load helper around the centstorage connection: LOAD DATA imports,
# index toggling, table-structure dumps and truncate/drop maintenance.

use strict;
use warnings;
use POSIX;

package gorgone::modules::centreon::mbi::libs::bi::Loader;

# new($logger, $centstorage [, $centreon])
# The temporary dump folder defaults to /tmp/.
sub new {
    my $class = shift;
    my $self = {};
    $self->{"logger"} = shift;
    $self->{"centstorage"} = shift;
    if (@_) {
        $self->{"centreon"} = shift;
    }
    $self->{'tempFolder'} = "/tmp/";
    bless $self, $class;
    return $self;
}

# setStorageDir($path)
# Validate and record the temporary storage folder; a trailing "/" is added
# when missing. Validation failures are reported through the logger.
sub setStorageDir {
    my $self = shift;
    my $logger = $self->{'logger'};
    my $tempFolder = shift;

    if (!defined($tempFolder)) {
        $logger->writeLog("ERROR", "Temporary storage folder is not defined");
    }
    # Bug fix: the original condition was (! -d && ! -w), which only reported
    # an error when the path was BOTH not a directory AND not writable — an
    # existing directory without write permission slipped through silently.
    # Either failure alone must be reported.
    if (! -d $tempFolder || ! -w $tempFolder) {
        $logger->writeLog("ERROR", "Cannot write into directory ".$tempFolder);
    }
    if ($tempFolder !~ /\/$/) {
        $tempFolder .= "/";
    }
    $self->{'tempFolder'} = $tempFolder;
}

# Current temporary storage folder (with trailing slash).
sub getStorageDir {
    my $self = shift;
    return $self->{'tempFolder'};
}

# loadData($tableName, $inFile)
# Import a UTF-8 tab dump into $tableName, skipping the first (header) line.
sub loadData {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my ($tableName, $inFile) = (shift, shift);

    my $query = "LOAD DATA LOCAL INFILE '".$inFile."' INTO TABLE `".$tableName."` CHARACTER SET UTF8 IGNORE 1 LINES";
    $db->query({ query => $query });
}

# Disable index maintenance on $tableName before a bulk load.
sub disableKeys {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $tableName = shift;

    $db->query({ query => "ALTER TABLE `".$tableName."` DISABLE KEYS" });
}

# Re-enable (and rebuild) indexes on $tableName after a bulk load.
sub enableKeys {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $tableName = shift;

    $db->query({ query => "ALTER TABLE `".$tableName."` ENABLE KEYS" });
}

# dumpTableStructure($tableName)
# Return the CREATE TABLE statement for $tableName, or undef (after logging
# a warning) when the table cannot be described.
sub dumpTableStructure {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{'logger'};
    my ($tableName) = (shift);

    my $sql = "";
    my $sth = $db->query({ query => "SHOW CREATE TABLE ".$tableName });
    if (my $row = $sth->fetchrow_hashref()) {
        $sql = $row->{'Create Table'};
    } else {
        $logger->writeLog("WARNING", "Cannot get structure for table : ".$tableName);
        return (undef);
    }
    $sth->finish;
    return ($sql);
}

# Remove every row of $tableName.
sub truncateTable {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $tableName = shift;

    $db->query({ query => "TRUNCATE TABLE `".$tableName."`" });
}

# Drop $tableName when it exists.
sub dropTable {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $tableName = shift;

    $db->query({ query => "DROP TABLE IF EXISTS `".$tableName."`" });
}

1;
# Centile values per metric (daily/weekly/monthly tables
# mod_bi_metriccentile*value), driven by the (centile, timeperiod) couples
# declared in mod_bi_centiles.

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::MetricCentileValue;

# new(%options) with keys: logger, centstorage, centreon, time,
# centileProperties, timePeriod, liveService.
sub new {
    my ($class, %options) = (shift, @_);

    my $self = {
        logger            => $options{logger},
        centstorage       => $options{centstorage},
        centreon          => $options{centreon},
        time              => $options{time},
        centileProperties => $options{centileProperties},
        timePeriod        => $options{timePeriod},
        liveService       => $options{liveService},

        # Populated by BIMetric -> createTodayTable
        today_servicemetrics => 'mod_bi_tmp_today_servicemetrics',

        name       => 'mod_bi_metriccentiledailyvalue',   # daily values
        name_week  => 'mod_bi_metriccentileweeklyvalue',  # weekly values
        name_month => 'mod_bi_metriccentilemonthlyvalue', # monthly values

        timeColumn => 'time_id'
    };

    return bless $self, $class;
}

# getName([$granularity]) with $granularity "week" or "month"; any other
# value (or none) selects the daily table.
sub getName {
    my ($self, $granularity) = @_;

    if (defined($granularity) && ($granularity eq "month" || $granularity eq "week")) {
        return $self->{'name_' . $granularity};
    }
    return $self->{name};
}

# getTmpName([$granularity])
# NOTE(review): neither tmp_name nor tmp_name_week/tmp_name_month is ever
# initialized in new(), so this returns undef today — looks like legacy or
# dead code; kept unchanged for compatibility.
sub getTmpName {
    my ($self, $granularity) = @_;

    if (defined($granularity) && ($granularity eq "month" || $granularity eq "week")) {
        return $self->{'tmp_name_' . $granularity};
    }
    return $self->{tmp_name};
}

# Name of the time dimension column.
sub getTimeColumn {
    my ($self) = @_;

    return $self->{timeColumn};
}

# getMetricsCentile(etlProperties => ...)
# Return { metric_id => [ servicemetric ids ] } restricted to the service
# categories listed in the "centile.include.servicecategories" property.
sub getMetricsCentile {
    my ($self, %options) = @_;

    my $includedCategories = $options{etlProperties}->{'centile.include.servicecategories'};
    my $query = 'SELECT id, metric_id FROM ' . $self->{today_servicemetrics} . ' sm ' .
        ' WHERE sm.sc_id IN (' . $includedCategories . ')';
    my $sth = $self->{centstorage}->query({ query => $query });

    my $results = {};
    while (my $row = $sth->fetchrow_arrayref()) {
        push @{$results->{ $row->[1] }}, $row->[0];
    }

    return $results;
}

# getTimePeriodQuery(start => ..., end => ..., liveServiceName => ...)
# Build an OR-ed list of "(ctime >= UNIX_TIMESTAMP(..) AND ctime < UNIX_TIMESTAMP(..))"
# clauses covering the timeperiod ranges of every day between start and end.
sub getTimePeriodQuery {
    my ($self, %options) = @_;

    my $subQuery = '';
    # +1 because getTotalDaysInPeriod returns the number of days between
    # start 00:00 and end 00:00
    my $totalDays = $self->{time}->getTotalDaysInPeriod($options{start}, $options{end}) + 1;
    my $currentStart = $options{start};
    my $append = '';
    for (my $day = 1; $day <= $totalDays; $day++) {
        my $rangeDay = $self->{timePeriod}->getTimeRangesForDayByDateTime(
            $options{liveServiceName}, $currentStart, $self->{time}->getDayOfWeek($currentStart)
        );
        if ($rangeDay) {
            foreach my $range (@$rangeDay) {
                $subQuery .= $append . "(ctime >= UNIX_TIMESTAMP(" . ($range->[0]) . ") AND ctime < UNIX_TIMESTAMP(" . ($range->[1]) . "))";
                $append = ' OR ';
            }
        }
        $currentStart = $self->{time}->addDateInterval($currentStart, 1, "DAY");
    }

    return $subQuery;
}

# calcMetricsCentileValueMultipleDays(metricsId => ..., granularity => ...,
#   start => ..., end => ..., timeId => ...)
# For every configured (centile, timeperiod) couple, compute each metric's
# centile over data_bin and insert one row per attached servicemetric.
sub calcMetricsCentileValueMultipleDays {
    my ($self, %options) = @_;

    my $centileParam = $self->{centileProperties}->getCentileParams();
    foreach (@$centileParam) {
        my ($centile, $timeperiodId) = ($_->{centile_param}, $_->{timeperiod_id});
        my ($liveServiceName, $liveServiceId) = $self->{liveService}->getLiveServicesByNameForTpId($timeperiodId);

        # Resolve the id of this centile/timeperiod couple; skip unknown ones.
        my $centileId;
        my $query = "SELECT id FROM mod_bi_centiles WHERE centile_param = " . $centile . " AND liveservice_id = (SELECT id FROM mod_bi_liveservice WHERE timeperiod_id = " . $timeperiodId . ")";
        my $sth = $self->{centstorage}->query({ query => $query });
        while (my $row = $sth->fetchrow_hashref()) {
            $centileId = $row->{id} if (defined($row->{id}));
        }
        next if (!defined($centileId));

        my $total = scalar(keys %{$options{metricsId}});
        $self->{logger}->writeLog("INFO", "Processing " . $options{granularity} . " for Centile: [" . $options{start} . "] to [" . $options{end} . "] - " . $liveServiceName . " - " . $centile . ' (' . $total . ' metrics)');
        my $sub_query_timeperiod = $self->getTimePeriodQuery(start => $options{start}, end => $options{end}, liveServiceName => $liveServiceName);
        # The @counter variable ranks values ascending; the row whose rank is
        # ceil(centile% of the total count) is the centile value.
        $query = 'SELECT value FROM (SELECT value, @counter := @counter + 1 AS counter FROM (select @counter := 0) AS initvar, data_bin WHERE id_metric = ? AND (' . $sub_query_timeperiod . ') ORDER BY value ASC) AS X where counter = ceil(' . $centile . ' * @counter / 100)';
        my $sth_centile = $self->{centstorage}->prepare($query);

        my $current = 1;
        foreach my $metricId (keys %{$options{metricsId}}) {
            $self->{logger}->writeLog("DEBUG", "Processing metric id for Centile: " . $metricId . " ($current/$total)");
            $sth_centile->execute($metricId);
            my $row = $sth_centile->fetchrow_arrayref();
            $current++;
            next if (!defined($row));

            foreach (@{$options{metricsId}->{$metricId}}) {
                my $query_insert = 'INSERT INTO ' . $self->getName($options{granularity}) .
                    '(servicemetric_id, time_id, liveservice_id, centile_value, centile_param, centile_id, total, warning_treshold, critical_treshold)' .
                    "SELECT '" . $_ . "', '" . $options{timeId} . "', '" . $liveServiceId . "', '" . $row->[0] . "', '" . $centile . "', '" . $centileId . "', " .
                    'm.max, m.warn, m.crit FROM metrics m WHERE m.metric_id = ' . $metricId;
                $self->{centstorage}->query({ query => $query_insert });
            }
        }
    }
}

1;
# Daily aggregated metric values (table mod_bi_metricdailyvalue), fed from
# the temporary min/max/avg and first/last tables built earlier in the run.

use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::MetricDailyValue;

# new($logger, $centstorage [, $tmpSuffix])
# The optional suffix is appended to both temporary table names so that
# concurrent runs do not collide.
sub new {
    my ($class, @args) = @_;

    my $self = {
        logger      => $args[0],
        centstorage => $args[1],

        name_minmaxavg_tmp => 'mod_bi_tmp_minmaxavgvalue',
        name_firstlast_tmp => 'mod_bi_tmp_firstlastvalues',

        today_servicemetrics => 'mod_bi_tmp_today_servicemetrics',
        name       => 'mod_bi_metricdailyvalue',
        timeColumn => 'time_id'
    };
    if (scalar(@args) > 2) {
        $self->{name_minmaxavg_tmp} .= $args[2];
        $self->{name_firstlast_tmp} .= $args[2];
    }

    return bless $self, $class;
}

# Target table name.
sub getName {
    my ($self) = @_;
    return $self->{name};
}

# Name of the time dimension column.
sub getTimeColumn {
    my ($self) = @_;
    return $self->{timeColumn};
}

# Drop both temporary source tables.
sub dropTempTables {
    my ($self) = @_;
    my $db = $self->{centstorage};

    $db->query({ query => "DROP TABLE `" . $self->{name_minmaxavg_tmp} . "`" });
    $db->query({ query => "DROP TABLE `" . $self->{name_firstlast_tmp} . "`" });
}

# insertValues($liveServiceId, $timeId)
# Join the temporary tables with metrics/today_servicemetrics, insert the
# day's rows, then drop the temporary tables.
sub insertValues {
    my ($self, $liveServiceId, $timeId) = @_;
    my $db = $self->{centstorage};

    my $query = "INSERT INTO " . $self->{"name"};
    $query .= " SELECT sm.id as servicemetric_id, '".$timeId."', ".$liveServiceId." as liveservice_id,";
    $query .= " mmavt.avg_value, mmavt.min_value, mmavt.max_value, flvt.`first_value`, flvt.`last_value`, m.max,";
    $query .= " m.warn, m.crit";
    $query .= " FROM " . $self->{name_minmaxavg_tmp} . " mmavt";
    $query .= " JOIN (metrics m, " . $self->{'today_servicemetrics'} . " sm)";
    $query .= " ON (mmavt.id_metric = m.metric_id and mmavt.id_metric = sm.metric_id)";
    $query .= " LEFT JOIN " . $self->{name_firstlast_tmp} . " flvt ON (mmavt.id_metric = flvt.id_metric)";
    $db->query({ query => $query });

    $self->dropTempTables();
}

# getMetricCapacityValuesOnPeriod($start_time_id, $end_time_id, $etlProperties)
# Return { "servicemetric_id;liveservice_id" => arrayref } where indexes
# 0,1 identify the couple, 2,3 hold first_value/total at the period start
# and 4,5 hold last_value/total at the period end (either side may be
# absent, leaving its slots undef).
sub getMetricCapacityValuesOnPeriod {
    my ($self, $start_time_id, $end_time_id, $etlProperties) = @_;
    my $db = $self->{centstorage};

    my $query = " SELECT servicemetric_id, liveservice_id, ";
    $query .= " `first_value`, total";
    $query .= " FROM mod_bi_liveservice l, mod_bi_servicemetrics m, ".$self->{"name"}." v";
    $query .= " WHERE timeperiod_id IN (".$etlProperties->{'capacity.include.liveservices'}.")";
    $query .= " AND l.id = v.liveservice_id";
    $query .= " AND time_id = ".$start_time_id;
    if (defined($etlProperties->{'capacity.exclude.metrics'}) && $etlProperties->{'capacity.exclude.metrics'} ne "") {
        $query .= " AND metric_name NOT IN (".$etlProperties->{'capacity.exclude.metrics'}.")";
    }
    $query .= " AND sc_id IN (".$etlProperties->{'capacity.include.servicecategories'}.")";
    $query .= " AND v.servicemetric_id = m.id";
    $query .= " GROUP BY servicemetric_id, liveservice_id";
    my $sth = $db->query({ query => $query });

    my %data = ();
    while (my $row = $sth->fetchrow_hashref()) {
        $data{ $row->{servicemetric_id} . ';' . $row->{liveservice_id} } =
            [ $row->{servicemetric_id}, $row->{liveservice_id}, $row->{first_value}, $row->{total} ];
    }

    $query = " SELECT servicemetric_id, liveservice_id, ";
    $query .= "`last_value`, total";
    $query .= " FROM mod_bi_liveservice l, mod_bi_servicemetrics m, ".$self->{"name"}." v";
    $query .= " WHERE timeperiod_id IN (".$etlProperties->{'capacity.include.liveservices'}.")";
    $query .= " AND l.id = v.liveservice_id";
    $query .= " AND time_id = ".$end_time_id;
    if (defined($etlProperties->{'capacity.exclude.metrics'}) && $etlProperties->{'capacity.exclude.metrics'} ne "") {
        $query .= " AND metric_name NOT IN (".$etlProperties->{'capacity.exclude.metrics'}.")";
    }
    $query .= " AND sc_id IN (".$etlProperties->{'capacity.include.servicecategories'}.")";
    $query .= " AND v.servicemetric_id = m.id";
    $query .= " GROUP BY servicemetric_id, liveservice_id";
    $sth = $db->query({ query => $query });

    while (my $row = $sth->fetchrow_hashref()) {
        my $key = $row->{servicemetric_id} . ';' . $row->{liveservice_id};
        if (defined($data{$key})) {
            $data{$key}->[4] = $row->{last_value};
            $data{$key}->[5] = $row->{total};
        } else {
            # No row at period start: slots 2,3 stay undef.
            my $entry = [ $row->{servicemetric_id}, $row->{liveservice_id} ];
            $entry->[4] = $row->{last_value};
            $entry->[5] = $row->{total};
            $data{$key} = $entry;
        }
    }
    return \%data;
}

1;
use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue;

# Feeds the mod_bi_metrichourlyvalue reporting table: hourly min/max/avg
# metric values joined with the metric thresholds (max/warn/crit).

# Constructor.
# parameters:
#   $logger:     instance of class CentreonLogger
#   $centstorage: instance of centreonDB class (Centstorage database)
#   optional third argument: suffix appended to the temporary
#     min/max/avg table name (allows per-run temporary tables)
sub new {
    my $class = shift;
    my $self = {};
    $self->{logger} = shift;
    $self->{centstorage} = shift;

    $self->{name_minmaxavg_tmp} = 'mod_bi_tmp_minmaxavgvalue';
    if (@_) {
        $self->{name_minmaxavg_tmp} .= $_[0];
    }

    $self->{servicemetrics} = "mod_bi_tmp_today_servicemetrics";
    $self->{name} = "mod_bi_metrichourlyvalue";
    $self->{timeColumn} = "time_id";
    bless $self, $class;
    return $self;
}

# Accessor for the destination table name.
# FIX: the empty prototype "()" was removed from both accessors — prototypes
# are ignored on method calls and only mislead readers (see perlsub).
sub getName {
    my $self = shift;
    return $self->{'name'};
}

# Accessor for the time column used for purge/rebuild operations.
sub getTimeColumn {
    my $self = shift;
    return $self->{'timeColumn'};
}

# Copies the temporary min/max/avg values into the hourly table, resolving
# the servicemetric id and the mod_bi_time id via a three-way join.
sub insertValues {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    my $query = "INSERT INTO ".$self->{"name"};
    $query .= " SELECT sm.id as servicemetric_id, t.id as time_id, mmavt.avg_value, mmavt.min_value, mmavt.max_value, m.max , m.warn, m.crit";
    $query .= " FROM " . $self->{name_minmaxavg_tmp} . " mmavt";
    $query .= " JOIN (metrics m, " . $self->{servicemetrics} . " sm, mod_bi_time t)";
    $query .= " ON (mmavt.id_metric = m.metric_id and mmavt.id_metric = sm.metric_id AND mmavt.valueTime = t.dtime)";
    $db->query({ query => $query });
}

1;
use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::MetricMonthCapacity;

# Feeds the mod_bi_metricmonthcapacity reporting table with the
# first/last capacity values computed per (servicemetric, liveservice).

# Constructor.
# parameters:
#   $logger:      instance of class CentreonLogger
#   $centstorage: instance of centreonDB class (Centstorage database)
#   $centreon:    (optional) instance of centreonDB class (Centreon database)
sub new {
    my $class = shift;
    my $self = {};
    $self->{logger} = shift;
    $self->{centstorage} = shift;
    if (@_) {
        $self->{centreon} = shift;
    }
    $self->{name} = "mod_bi_metricmonthcapacity";
    $self->{timeColumn} = "time_id";
    bless $self, $class;
    return $self;
}

# Accessor for the destination table name.
# FIX: the empty prototype "()" was removed from both accessors — prototypes
# are ignored on method calls and only mislead readers (see perlsub).
sub getName {
    my $self = shift;
    return $self->{'name'};
}

# Accessor for the time column used for purge/rebuild operations.
sub getTimeColumn {
    my $self = shift;
    return $self->{'timeColumn'};
}

# Bulk-inserts capacity stats for one time id.
# parameters:
#   $time_id: mod_bi_time id the stats are attached to
#   $data:    hash ref as produced by getMetricCapacityValuesOnPeriod —
#     each value is an array ref whose slots 0..5 map to
#     (servicemetric_id, liveservice_id, first_value, first_total,
#      last_value, last_total); undef slots become SQL NULL.
# Rows are flushed in batches of $insertParam to bound statement size.
sub insertStats {
    my $self = shift;
    my $db = $self->{centstorage};
    my ($time_id, $data) = @_;
    my $insertParam = 5000;

    my $query_start = "INSERT INTO `" . $self->{name} . "`".
        "(`time_id`, `servicemetric_id`, `liveservice_id`,".
        " `first_value`, `first_total`, `last_value`, `last_total`)".
        " VALUES ";
    my $counter = 0;
    my $query = $query_start;
    my $append = '';

    while (my ($key, $entry) = each %$data) {
        $query .= $append . "($time_id";

        for (my $i = 0; $i <= 5; $i++) {
            $query .= ', ' . (defined($entry->[$i]) ? $entry->[$i] : 'NULL');
        }
        $query .= ')';

        $append = ',';
        $counter++;
        if ($counter >= $insertParam) {
            $db->query({ query => $query });
            $query = $query_start;
            $counter = 0;
            $append = '';
        }
    }
    # Flush the last, partially filled batch (skip if everything was flushed).
    $db->query({ query => $query }) if ($counter > 0);
}

1;
package gorgone::modules::centreon::mbi::libs::bi::MySQLTables;

use strict;
use warnings;
use POSIX;

# Utility class handling the lifecycle of the MBI reporting tables:
# creation, daily partitioning (RANGE on a unix-timestamp column),
# rebuild truncation, retention purge and partition sanity checks.

# Constructor.
# parameters:
#   $logger:      instance of class CentreonLogger
#   $centstorage: instance of centreonDB class (Centstorage database)
#   $centreon:    (optional) instance of centreonDB class (Centreon database)
sub new {
    my $class = shift;
    my $self = {};

    $self->{logger} = shift;
    $self->{centstorage} = shift;
    if (@_) {
        $self->{centreon} = shift;
    }
    bless $self, $class;
    return $self;
}

# Returns 1 if the table exists in the centstorage database, 0 otherwise.
sub tableExists {
    my $self = shift;

    my ($name) = (shift);
    my $statement = $self->{centstorage}->query({ query => "SHOW TABLES LIKE '".$name."'" });

    if (!(my @row = $statement->fetchrow_array())) {
        return 0;
    } else {
        return 1;
    }
}

# Creates the table from the provided CREATE TABLE statement if it does not
# already exist. Returns 0 when the table was created, 1 when it already
# existed; dies (FATAL log) when no structure is provided.
# FIX: removed a redundant "SHOW TABLES LIKE" query whose statement handle
# was never read — tableExists() below performs the exact same check, so the
# extra query was a wasted database round-trip.
sub createTable {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($name, $structure, $mode) = @_;

    if (!$self->tableExists($name)) {
        if (defined($structure)) {
            $logger->writeLog("DEBUG", "[CREATE] table [".$name."]");
            $db->query({ query => $structure });
            return 0;
        } else {
            $logger->writeLog("FATAL", "[CREATE] Cannot find table [".$name."] structure");
        }
    }
    return 1;
}

# Returns the CREATE TABLE statement for $tableName, with foreign-key
# CONSTRAINT lines stripped (so the dump can be replayed standalone).
sub dumpTableStructure {
    my $self = shift;
    my ($tableName) = (shift);

    my $sql = "";
    my $sth = $self->{centstorage}->query({ query => "SHOW CREATE TABLE " . $tableName });
    if (my $row = $sth->fetchrow_hashref()) {
        $sql = $row->{'Create Table'};
        $sql =~ s/(CONSTRAINT.*\n)//g;        # drop FK constraints
        $sql =~ s/(\,\n\s+\))/\)/g;           # repair trailing comma left behind
    } else {
        die "Cannot get structure for table : ".$tableName;
    }
    return ($sql);
}

# Creates $tableName from $tableStructure with one RANGE partition per day
# between $start and $end (exclusive upper bound handled by the final
# partition). Returns 1 if the table already exists, 0 after creation.
# NOTE(review): "Time->new" relies on the short package name `Time` being
# resolvable at runtime (presumably loaded by the caller) — confirm.
sub createParts {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    my ($start, $end, $tableStructure, $tableName, $column) = @_;
    if (!defined($tableStructure)) {
        $logger->writeLog("FATAL", "[CREATE] Cannot find table [".$tableName."] structure");
    }
    if ($self->tableExists($tableName)) {
        return 1;
    }
    # Strip any partition clause already present in the dumped structure
    # before appending our own daily RANGE partitioning.
    $tableStructure =~ s/\n.*PARTITION.*//g;
    $tableStructure =~ s/\,[\n\s]+\)/\)/;
    $tableStructure .= " PARTITION BY RANGE(`".$column."`) (";
    my $timeObj = Time->new($logger,$db);
    my $runningStart = $timeObj->addDateInterval($start, 1, "DAY");
    while ($timeObj->compareDates($end, $runningStart) > 0) {
        # Partition pYYYYMMDD holds rows strictly before that day's timestamp.
        my @partName = split (/\-/, $runningStart);
        $tableStructure .= "PARTITION p" . $partName[0] . $partName[1] . $partName[2] . " VALUES LESS THAN (FLOOR(UNIX_TIMESTAMP('".$runningStart."'))),";
        $runningStart = $timeObj->addDateInterval($runningStart, 1, "DAY");
    }
    my @partName = split (/\-/, $runningStart);
    $tableStructure .= "PARTITION p".$partName[0].$partName[1].$partName[2]." VALUES LESS THAN (FLOOR(UNIX_TIMESTAMP('".$runningStart."'))));";
    $logger->writeLog("DEBUG", "[CREATE] table partitionned [".$tableName."] min value: ".$start.", max value: ".$runningStart.", range: 1 DAY\n");
    $db->query({ query => $tableStructure });
    return 0;
}

# Adds missing daily partitions to $tableName up to (and including) $rangeEnd.
# Logs a warning and does nothing when the table is not partitioned.
sub updateParts {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($rangeEnd, $tableName) = @_;
    my $timeObj = Time->new($logger,$db);

    my $isPartitioned = $self->isTablePartitioned($tableName);
    if (!$isPartitioned) {
        $logger->writeLog("WARNING", "[UPDATE PARTS] partitioning is not activated for table [".$tableName."]");
    } else {
        my $range = $self->getLastPartRange($tableName);
        $range = $timeObj->addDateInterval($range, 1, "DAY");
        while ($timeObj->compareDates($rangeEnd, $range) >= 0) {
            $logger->writeLog("DEBUG", "[UPDATE PARTS] Updating partitions for table [".$tableName."] (last range : ".$range.")");
            my @partName = split (/\-/, $range);
            my $query = "ALTER TABLE `".$tableName."` ADD PARTITION (PARTITION `p".$partName[0].$partName[1].$partName[2]."` VALUES LESS THAN(FLOOR(UNIX_TIMESTAMP('".$range."'))))";
            $db->query({ query => $query });
            $range = $timeObj->addDateInterval($range, 1, "DAY");
        }
    }
}

# Returns 1 when $tableName is partitioned (per SHOW TABLE STATUS
# Create_options), 0 when not; dies if the status row cannot be read.
sub isTablePartitioned {
    my $self = shift;
    my $tableName = shift;
    my $db = $self->{"centstorage"};

    my $sth = $db->query({ query => "SHOW TABLE STATUS LIKE '".$tableName."'" });
    if (my $row = $sth->fetchrow_hashref()) {
        my $createOptions = $row->{"Create_options"};
        if (defined($createOptions) && $createOptions =~ m/partitioned/i) {
            return 1;
        } elsif (!defined($createOptions) || $createOptions !~ m/partitioned/i) {
            return 0;
        }
    }
    die "[TABLE STATUS CHECK] Cannot check if table is partitioned [".$tableName."]";
}

# Returns the date ("YYYY-MM-DD") encoded in the name of the last
# partition of $tableName, parsed from SHOW CREATE TABLE output.
# FIX: the failure message hard-coded "[data_bin]" although the sub takes an
# arbitrary table name — it now reports the actual table.
sub getLastPartRange {
    my $self = shift;
    my $tableName = shift;

    my $query = "SHOW CREATE TABLE $tableName";

    my $partName;
    my $sth = $self->{centstorage}->query({ query => $query });
    if (my $row = $sth->fetchrow_hashref()) {
        while ($row->{'Create Table'} =~ /PARTITION.*?p(\d{4})(\d{2})(\d{2}).*?VALUES LESS THAN \([0-9]+?\)/g) {
            $partName = "$1-$2-$3";
        }
    }

    if (!defined($partName)) {
        die "[UPDATE PARTS] Cannot find table [".$tableName."] in database";
    }

    return $partName;
}

# Removes all rows of $tableName in [$start, $end) before a rebuild:
# plain DELETE on non-partitioned tables, TRUNCATE PARTITION on
# partitioned ones (then re-creates any missing partitions up to $end).
sub deleteEntriesForRebuild {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($start, $end, $tableName) = @_;

    if (!$self->isTablePartitioned($tableName)) {
        $db->query({ query => "DELETE FROM ".$tableName." WHERE time_id >= UNIX_TIMESTAMP('".$start."') AND time_id < UNIX_TIMESTAMP('".$end."')" });
    } else {
        my $query = "SELECT partition_name FROM information_schema.partitions ";
        $query .= "WHERE table_name='".$tableName."' AND table_schema='".$db->db."'";
        $query .= " AND CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER) > UNIX_TIMESTAMP('".$start."')";
        $query .= " AND CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER) <= UNIX_TIMESTAMP('".$end."')";
        my $sth = $db->query({ query => $query });
        while (my $row = $sth->fetchrow_hashref()) {
            $db->query({ query => "ALTER TABLE ".$tableName." TRUNCATE PARTITION ".$row->{'partition_name'} });
        }
        $self->updateParts($end, $tableName);
    }
}

# Drops and re-creates $tableName for a full rebuild, removing the index on
# $column from the structure and re-adding it afterwards (faster bulk load).
# With partitioning enabled and a ($start, $end) pair in @_, the table is
# re-created with daily partitions via createParts().
sub emptyTableForRebuild {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my $tableName = shift;
    my $structure = shift;
    my $column = shift;

    $structure =~ s/KEY.*\(\`$column\`\)\,//g;
    $structure =~ s/KEY.*\(\`$column\`\)//g;
    $structure =~ s/\,[\n\s+]+\)/\n\)/g;
    if (!defined($_[0]) || !$self->isPartitionEnabled()) {
        $db->query({ query => "DROP TABLE IF EXISTS ".$tableName });
        $db->query({ query => $structure });
    } else {
        my ($start, $end) = @_;
        $db->query({ query => "DROP TABLE IF EXISTS ".$tableName });
        $self->createParts($start, $end, $structure, $tableName, $column);
    }
    $db->query({ query => "ALTER TABLE `".$tableName."` ADD INDEX `idx_".$tableName."_".$column."` (`".$column."`)" });
}

# Applies the retention policy: deletes rows (or drops whole partitions)
# older than $retentionDate.
sub dailyPurge {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    my ($retentionDate, $tableName, $column) = @_;
    if (!$self->isTablePartitioned($tableName)) {
        $db->query({ query => "DELETE FROM `".$tableName."` WHERE ".$column." < UNIX_TIMESTAMP('".$retentionDate."')" });
    } else {
        my $query = "SELECT GROUP_CONCAT(partition_name SEPARATOR ',') as partition_names FROM information_schema.partitions ";
        $query .= "WHERE table_name='".$tableName."' AND table_schema='".$db->db."'";
        $query .= " AND CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER) < UNIX_TIMESTAMP('".$retentionDate."')";
        my $sth = $db->query({ query => $query });
        if (my $row = $sth->fetchrow_hashref()) {
            if (defined($row->{'partition_names'}) && $row->{'partition_names'} ne "") {
                $db->query({ query => "ALTER TABLE ".$tableName." DROP PARTITION ".$row->{'partition_names'} });
            }
        }
    }
}

# Compares the number of days covered since the first partition with the
# number of partitions; returns a non-empty message when partitions are
# missing (gap in the daily sequence), empty string otherwise.
sub checkPartitionContinuity {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($table) = @_;
    my $message = "";
    my $query = "select CONVERT(1+datediff(curdate(),(select from_unixtime(PARTITION_DESCRIPTION) from information_schema.partitions";
    $query .= " where table_schema = '".$db->{"db"}."' and table_name = '".$table."' and PARTITION_ORDINAL_POSITION=1)), SIGNED INTEGER) as nbDays,";
    $query .= " CONVERT(PARTITION_ORDINAL_POSITION, SIGNED INTEGER) as ordinalPosition ";
    $query .= " from information_schema.partitions where table_schema = '".$db->{"db"}."' and table_name = '".$table."' order by PARTITION_ORDINAL_POSITION desc limit 1 ";
    my $sth = $db->query({ query => $query });
    while (my $row = $sth->fetchrow_hashref()) {
        my $nbDays = int($row->{'nbDays'});
        my $ordinalPosition = int($row->{'ordinalPosition'});
        my $dif = int($nbDays - $ordinalPosition);
        if ($dif > 0) {
            $message .= "[".$table.", last partition:".$self->checkLastTablePartition($table)." missing ".$dif." part.]";
        }
    }
    $sth->finish;
    return($message);
}

# Returns the date of the last partition of $table when it is NOT today's
# 00:00 partition (i.e. the table is late), empty string when up to date.
# NOTE(review): the concatenated SQL lacks spaces around "'...'and" /
# "'...'order by"; the MySQL lexer still tokenizes this correctly, so the
# strings are kept byte-identical here.
sub checkLastTablePartition {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($table) = @_;
    my $message = "";
    my $query = "select from_unixtime(PARTITION_DESCRIPTION) as last_partition, IF(from_unixtime(PARTITION_DESCRIPTION)=CURDATE() AND HOUR(from_unixtime(PARTITION_DESCRIPTION))=0,1,0) as partition_uptodate ";
    $query .= "from information_schema.partitions where table_schema = '".$db->{"db"}."'";
    $query .= "and table_name = '".$table."'order by PARTITION_ORDINAL_POSITION desc limit 1";
    my $sth = $db->query({ query => $query });
    while (my $row = $sth->fetchrow_hashref()) {
        if ($row->{'partition_uptodate'} == 0) {
            $message = $row->{'last_partition'};
        }
    }
    $sth->finish;
    return($message);
}

# Drops every index (including the primary key) from a reporting table,
# typically before a bulk load. Deduplicates multi-column index rows by
# comparing consecutive Key_name values from SHOW INDEX.
sub dropIndexesFromReportingTable {
    my $self = shift;
    my $table = shift;
    my $db = $self->{"centstorage"};
    my $indexes = $db->query({ query => "SHOW INDEX FROM ".$table });
    my $previous = "";
    while (my $row = $indexes->fetchrow_hashref()) {
        if ($row->{"Key_name"} ne $previous) {
            if (lc($row->{"Key_name"}) eq lc("PRIMARY")) {
                $db->query({ query => "ALTER TABLE `".$table."` DROP PRIMARY KEY" });
            } else {
                $db->query({ query => "ALTER TABLE `".$table."` DROP INDEX ".$row->{"Key_name"} });
            }
        }
        $previous = $row->{"Key_name"};
    }
}

1;
use strict;
use warnings;

package gorgone::modules::centreon::mbi::libs::bi::ServiceAvailability;

# Feeds and aggregates the mod_bi_serviceavailability reporting table
# (per-service availability per liveservice and time id).

# Constructor.
# parameters:
#   $logger:      instance of class CentreonLogger
#   $centstorage: instance of centreonDB class (Centstorage database)
#   $centreon:    (optional) instance of centreonDB class (Centreon database)
sub new {
    my $class = shift;
    my $self = {};
    $self->{"logger"} = shift;
    $self->{"centstorage"} = shift;
    if (@_) {
        $self->{"centreon"} = shift;
    }
    $self->{"name"} = "mod_bi_serviceavailability";
    $self->{"timeColumn"} = "time_id";
    $self->{"nbLinesInFile"} = 0;     # rows written so far to the current dump file
    $self->{"commitParam"} = 500000;  # dump-file flush threshold used by callers
    bless $self, $class;
    return $self;
}

# Accessor for the destination table name.
sub getName {
    my $self = shift;
    return $self->{'name'};
}

# Accessor for the time column used for purge/rebuild operations.
sub getTimeColumn {
    my $self = shift;
    return $self->{'timeColumn'};
}

# Writes one tab-separated dump-file row per service:
# modbiservice_id, time_id, liveservice_id, then every stat value.
# Services whose first three stats (available/unavailable/degraded) sum to 0
# are skipped. Increments nbLinesInFile per row written.
# FIX: removed dead code — "my $query;" was never used and
# "my @tab = @$stats;" copied the whole stats array on every iteration
# without ever being read.
sub saveStatsInFile {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($data, $time_id, $liveserviceId, $fh) = @_;
    my $row;

    while (my ($modBiServiceId, $stats) = each %$data) {
        if ($stats->[0]+$stats->[1]+$stats->[2] == 0) {
            next;
        }

        # Filling the dump file with data
        $row = $modBiServiceId."\t".$time_id."\t".$liveserviceId;
        for (my $i = 0; $i < scalar(@$stats); $i++) {
            $row .= "\t".$stats->[$i];
        }
        $row .= "\n";

        # Write row into file
        print $fh $row;
        $self->{"nbLinesInFile"}++;
    }
}

# Bulk-inserts the same per-service stats directly into the table, in
# batches of $insertParam rows. Same zero-stats skip rule as saveStatsInFile.
# FIX: removed the unused "my @tab = @$stats;" copy (dead code).
sub insertStats {
    my $self = shift;
    my ($data, $time_id, $liveserviceId) = @_;
    my $insertParam = 10000;
    my $query_start = "INSERT INTO `" . $self->{name} . "`".
        " (`modbiservice_id`, `time_id`, `liveservice_id`, `available`, ".
        " `unavailable`, `degraded`, `alert_unavailable_opened`, `alert_unavailable_closed`, ".
        " `alert_degraded_opened`, `alert_degraded_closed`, ".
        " `alert_other_opened`, `alert_other_closed`)".
        " VALUES ";

    my $counter = 0;
    my $query = $query_start;
    my $append = '';
    while (my ($modBiServiceId, $stats) = each %$data) {
        if ($stats->[0] + $stats->[1] + $stats->[2] == 0) {
            next;
        }

        $query .= $append . "($modBiServiceId, $time_id, $liveserviceId";
        for (my $i = 0; $i < scalar(@$stats); $i++) {
            $query .= ', ' . $stats->[$i];
        }
        $query .= ')';
        $append = ',';
        $counter++;

        if ($counter >= $insertParam) {
            $self->{centstorage}->query({ query => $query });
            $query = $query_start;
            $counter = 0;
            $append = '';
        }
    }

    # Flush the last, partially filled batch.
    $self->{centstorage}->query({ query => $query }) if ($counter > 0);
}

# Returns the number of rows written to the current dump file.
sub getCurrentNbLines {
    my $self = shift;
    return $self->{"nbLinesInFile"};
}

# Returns the dump-file flush threshold.
sub getCommitParam {
    my $self = shift;
    return $self->{"commitParam"};
}

# Resets/overrides the dump-file row counter.
sub setCurrentNbLines {
    my $self = shift;
    my $nbLines = shift;
    $self->{"nbLinesInFile"} = $nbLines;
}

# Monthly availability aggregation per (hostgroup, hostcategory,
# servicecategory, liveservice), using $eventObj->getNbEvents for the
# warning/critical/other event counts; computes MTRS/MTBF/MTBSI when
# critical events occurred. Returns an array ref of row arrays.
sub getHGMonthAvailability {
    my ($self, $start, $end, $eventObj) = @_;
    my $db = $self->{"centstorage"};

    my $query = "SELECT s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id,";
    $query .= " hc.id as hcat_id, hg.id as group_id, sc.id as scat_id,";
    $query .= " avg((available+degraded)/(available+unavailable+degraded)) as av_percent,";
    $query .= " sum(available) as av_time, sum(unavailable) as unav_time, sum(degraded) as degraded_time,";
    $query .= " sum(alert_unavailable_opened) as unav_opened,sum(alert_unavailable_closed) as unav_closed,";
    $query .= " sum(alert_degraded_opened) as deg_opened,sum(alert_degraded_closed) as deg_closed,";
    $query .= " sum(alert_other_opened) as other_opened,sum(alert_other_closed) as other_closed ";
    $query .= " FROM ".$self->{'name'}." sa";
    $query .= " STRAIGHT_JOIN mod_bi_time t ON (t.id = sa.time_id )";
    $query .= " STRAIGHT_JOIN mod_bi_services s ON (sa.modbiservice_id = s.id)";
    $query .= " STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id)";
    $query .= " STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id)";
    $query .= " STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name)";
    $query .= " WHERE t.year = YEAR('".$start."') AND t.month = MONTH('".$start."') and t.hour=0";
    $query .= " GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id";
    my $sth = $db->query({ query => $query });

    my @data = ();
    while (my $row = $sth->fetchrow_hashref()) {
        my ($totalwarnEvents, $totalCritEvents, $totalOtherEvents) = $eventObj->getNbEvents($start, $end, $row->{'hg_id'}, $row->{'hc_id'}, $row->{'sc_id'}, $row->{'liveservice_id'});

        my ($mtrs, $mtbf, $mtbsi) = (undef, undef, undef);
        if (defined($totalCritEvents) && $totalCritEvents != 0) {
            $mtrs = $row->{'unav_time'}/$totalCritEvents;
            $mtbf = $row->{'av_time'}/$totalCritEvents;
            $mtbsi = ($row->{'unav_time'}+$row->{'av_time'})/$totalCritEvents;
        }
        my @tab = ($row->{'group_id'}, $row->{'hcat_id'}, $row->{'scat_id'}, $row->{'liveservice_id'},
            $row->{'av_percent'}, $row->{'unav_time'}, $row->{'degraded_time'},
            $row->{'unav_opened'}, $row->{'unav_closed'}, $row->{'deg_opened'}, $row->{'deg_closed'}, $row->{'other_opened'}, $row->{'other_closed'},
            $totalwarnEvents, $totalCritEvents, $totalOtherEvents, $mtrs, $mtbf, $mtbsi);
        push @data, \@tab;
    }
    return \@data;
}

# Same aggregation as getHGMonthAvailability but computed in a single SQL
# statement: the availability subquery is LEFT JOINed with an event-count
# subquery over mod_bi_servicestateevents, instead of calling
# $eventObj->getNbEvents per row. $eventObj is kept for signature
# compatibility but unused here.
sub getHGMonthAvailability_optimised {
    my ($self, $start, $end, $eventObj) = @_;
    my $db = $self->{"centstorage"};

    my $query = "SELECT * from ( SELECT s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id, hc.id as hcat_id, hg.id as group_id, sc.id as scat_id,";
    $query .= "avg((available+degraded)/(available+unavailable+degraded)) as av_percent, ";
    $query .= "sum(available) as av_time, sum(unavailable) as unav_time, sum(degraded) as degraded_time, ";
    $query .= "sum(alert_unavailable_opened) as unav_opened,sum(alert_unavailable_closed) as unav_closed, ";
    $query .= "sum(alert_degraded_opened) as deg_opened,sum(alert_degraded_closed) as deg_closed, ";
    $query .= "sum(alert_other_opened) as other_opened,sum(alert_other_closed) as other_closed ";
    $query .= "FROM mod_bi_serviceavailability sa ";
    $query .= "STRAIGHT_JOIN mod_bi_services s ON (sa.modbiservice_id = s.id) ";
    $query .= "STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id) ";
    $query .= "STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id) ";
    $query .= "STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name)";
    $query .= " WHERE YEAR(from_unixtime(time_id)) = YEAR('".$start."') AND MONTH(from_unixtime(time_id)) = MONTH('".$start."') and hour(from_unixtime(time_id)) = 0 ";
    $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id ) availability ";
    $query .= "LEFT JOIN ( SELECT s.hg_id,s.hc_id,s.sc_id,e.modbiliveservice_id, ";
    $query .= "SUM(IF(state=1,1,0)) as warningEvents, SUM(IF(state=2,1,0)) as criticalEvents, ";
    $query .= "SUM(IF(state=3,1,0)) as unknownEvents FROM mod_bi_servicestateevents e ";
    $query .= "STRAIGHT_JOIN mod_bi_services s ON (e.modbiservice_id = s.id) ";
    $query .= "STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id) ";
    $query .= "STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id) ";
    $query .= "STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name) ";
    $query .= "AND s.id = e.modbiservice_id AND start_time < UNIX_TIMESTAMP('".$end."') ";
    $query .= "AND end_time > UNIX_TIMESTAMP('".$start."') AND e.state in (1,2,3) ";
    $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, e.modbiliveservice_id ) events ";
    $query .= "ON availability.hg_id = events.hg_id AND availability.hc_id = events.hc_id ";
    $query .= "AND availability.sc_id = events.sc_id ";
    $query .= "AND availability.liveservice_id = events.modbiliveservice_id";

    # Fields returned :
    # hg_id | hc_id | sc_id | liveservice_id | hcat_id | group_id | scat_id | av_percent | av_time | unav_time | degraded_time |
    # unav_opened | unav_closed | deg_opened | deg_closed | other_opened | other_closed | hg_id | hc_id | sc_id |
    # modbiliveservice_id | warningEvents | criticalEvents | unknownEvents
    my $sth = $db->query({ query => $query });

    my @data = ();
    while (my $row = $sth->fetchrow_hashref()) {
        my ($totalwarnEvents, $totalCritEvents, $totalUnknownEvents) = ($row->{'warningEvents'},$row->{'criticalEvents'},$row->{'unknownEvents'});

        my ($mtrs, $mtbf, $mtbsi) = (undef, undef, undef);
        if (defined($totalCritEvents) && $totalCritEvents != 0) {
            $mtrs = $row->{'unav_time'}/$totalCritEvents;
            $mtbf = $row->{'av_time'}/$totalCritEvents;
            $mtbsi = ($row->{'unav_time'}+$row->{'av_time'})/$totalCritEvents;
        }
        my @tab = ($row->{'group_id'}, $row->{'hcat_id'}, $row->{'scat_id'}, $row->{'liveservice_id'},
            $row->{'av_percent'}, $row->{'unav_time'}, $row->{'degraded_time'},
            $row->{'unav_opened'}, $row->{'unav_closed'}, $row->{'deg_opened'}, $row->{'deg_closed'}, $row->{'other_opened'}, $row->{'other_closed'},
            $totalwarnEvents, $totalCritEvents, $totalUnknownEvents, $mtrs, $mtbf, $mtbsi);
        push @data, \@tab;
    }
    return \@data;
}

1;
package gorgone::modules::centreon::mbi::libs::bi::Time;

use strict;
use warnings;

# Manages the mod_bi_time dimension table (one row per hour) and provides
# date arithmetic helpers, all delegated to MySQL date functions.

# Constructor.
# parameters:
#   $logger:      instance of class CentreonLogger
#   $centstorage: instance of centreonDB class (Centstorage database)
#   $centreon:    (optional) instance of centreonDB class (Centreon database)
sub new {
    my $class = shift;
    my $self = {};
    $self->{logger} = shift;
    $self->{centstorage} = shift;
    if (@_) {
        $self->{centreon} = shift;
    }
    $self->{insertQuery} = "INSERT IGNORE INTO `mod_bi_time` (id, hour, day, month_label, month, year, week, dayofweek, utime, dtime) VALUES ";
    bless $self, $class;
    return $self;
}

# Returns the list of dtime values ("YYYY-MM-DD ..." formatted) in
# [$start, $end).
# BUG FIX 1: the original called date_format('%Y-%m-%d', dtime) — MySQL's
# signature is DATE_FORMAT(date, format), so the arguments were reversed.
# BUG FIX 2: rows were fetched with "if" instead of "while", so only the
# first matching row was ever returned.
sub getEntriesDtime {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    my ($start, $end) = @_;
    my $query = "SELECT date_format(dtime, '%Y-%m-%d') as dtime";
    $query .= " FROM `mod_bi_time`";
    $query .= " WHERE dtime >= '".$start."' AND dtime <'".$end."'";

    my $sth = $db->query({ query => $query });
    my @results = ();
    while (my $row = $sth->fetchrow_hashref()) {
        push @results, $row->{dtime};
    }
    $sth->finish();
    return (@results);
}

# Returns (id, utime) of the mod_bi_time entry matching $dtime, optionally
# shifted by ($interval, $type) via DATE_ADD (e.g. 1, "DAY").
# Logs an ERROR when no entry is found.
sub getEntryID {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};

    my $dtime = shift;
    my ($interval, $type);
    if (@_) {
        $interval = shift;
        $type = shift;
    }
    my $query = "SELECT `id`, `utime`, date_format(dtime,'%Y-%m-%d') as dtime";
    $query .= " FROM `mod_bi_time`";
    if (!defined($interval)) {
        $query .= " WHERE dtime = '".$dtime."'";
    } else {
        $query .= " WHERE dtime = DATE_ADD('".$dtime."', INTERVAL ".$interval." ".$type.")";
    }
    my $sth = $db->query({ query => $query });
    my @results = ();
    if (my $row = $sth->fetchrow_hashref()) {
        $results[0] = $row->{'id'};
        $results[1] = $row->{'utime'};
    }
    $sth->finish();
    if (!scalar(@results)) {
        $logger->writeLog("ERROR", "Cannot get time ID for date:".$dtime);
    }
    return (@results);
}

# Returns the lowercase English day name of $date (MySQL DAYNAME).
sub getDayOfWeek {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my $date = shift;

    my $sth = $db->query({ query => "SELECT LOWER(DAYNAME('".$date."')) as dayOfWeek" });
    my $dayofweek;
    if (my $row = $sth->fetchrow_hashref()) {
        $dayofweek = $row->{"dayOfWeek"};
    } else {
        $logger->writeLog("ERROR", "TIME: Cannot get day of week for date :".$date);
    }
    if (!defined($dayofweek)) {
        $logger->writeLog("ERROR", "TIME: day of week for date ".$date." is null");
    }
    return $dayofweek;
}

# Returns (yesterday, today) as "YYYY-MM-DD" strings, computed by the
# database server (so its clock/timezone is authoritative).
sub getYesterdayTodayDate {
    my $self = shift;

    my $sth = $self->{centstorage}->query({ query => "SELECT CURRENT_DATE() as today, DATE_SUB(CURRENT_DATE(), INTERVAL 1 DAY) as yesterday" });

    my $yesterday;
    my $today;
    if (my $row = $sth->fetchrow_hashref()) {
        $yesterday = $row->{yesterday};
        $today = $row->{today};
    } else {
        $self->{logger}->writeLog('ERROR', "TIME: cannot get yesterday date");
    }
    if (!defined($yesterday)) {
        $self->{logger}->writeLog('ERROR', "TIME: Yesterday start date is null");
    }
    if (!defined($today)) {
        $self->{logger}->writeLog('ERROR', "TIME: today start date is null");
    }
    return ($yesterday, $today);
}

# Returns $date shifted by $interval $intervalType (e.g. 1, "DAY"),
# computed via MySQL DATE_ADD. Format: "YYYY-MM-DD".
sub addDateInterval {
    my $self = shift;
    my ($date, $interval, $intervalType) = @_;

    my $sth = $self->{centstorage}->query({ query => "SELECT DATE_ADD('".$date."', INTERVAL ".$interval." ".$intervalType.") as newDate" });

    my $newDate;
    if (my $row = $sth->fetchrow_hashref()) {
        $newDate = $row->{newDate};
    }
    if (!defined($newDate)) {
        $self->{logger}->writeLog('ERROR', "TIME: DATE_ADD('".$date."', INTERVAL ".$interval." ".$intervalType.") returns null value");
    }
    return $newDate;
}

# Returns DATEDIFF($date1, $date2): positive when $date1 is later.
sub compareDates {
    my $self = shift;
    my ($date1, $date2) = @_;

    my $sth = $self->{centstorage}->query({ query => "SELECT DATEDIFF('".$date1."','".$date2."') as nbDays" });
    if (my $row = $sth->fetchrow_hashref()) {
        return $row->{nbDays};
    }

    $self->{logger}->writeLog('ERROR', "TIME: Cannot compare two dates : ".$date1." and ".$date2);
}

# Inserts one mod_bi_time row per hour in [$start, $end], letting MySQL
# compute every derived field (hour, day, week, ...) via ADDDATE offsets.
# Rows are flushed in batches of 30 to keep statements small; INSERT IGNORE
# makes re-runs idempotent.
sub insertTimeEntriesForPeriod {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my ($start, $end) = @_;

    my $interval = $self->getTotalDaysInPeriod($start, $end) * 24;
    my $counter = 0;
    my $date = "ADDDATE('".$start."',INTERVAL ".$counter." HOUR)";
    my $query_suffix = "";
    while ($counter <= $interval) {
        $query_suffix .= "(UNIX_TIMESTAMP(".$date."),";
        $query_suffix .= "HOUR(".$date."),";
        $query_suffix .= "DAYOFMONTH(".$date."),";
        $query_suffix .= "LOWER(DATE_FORMAT(".$date.",'%M')),";
        $query_suffix .= "MONTH(".$date."),";
        $query_suffix .= "YEAR(".$date."),";
        $query_suffix .= "WEEK(".$date.", 3),";
        $query_suffix .= "LOWER(DAYNAME(".$date.")),";
        $query_suffix .= "UNIX_TIMESTAMP(".$date."),";
        $query_suffix .= "".$date."),";
        $counter++;
        $date = "ADDDATE('".$start."',INTERVAL ".$counter." HOUR)";
        if ($counter % 30 == 0) {
            chop($query_suffix);    # drop trailing comma before flushing
            $db->query({ query => $self->{insertQuery} . $query_suffix });
            $query_suffix = "";
        }
    }
    chop($query_suffix);
    if ($query_suffix ne "") {
        $db->query({ query => $self->{insertQuery} . $query_suffix });
    }
}

# Delete duplicated entries inserted on winter/summer time change
# (same timestamp for 02:00 and 03:00): keeps the lowest id per utime.
sub deleteDuplicateEntries {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my ($start, $end) = @_;
    my $query = "SELECT max(id) as id";
    $query .= " FROM mod_bi_time";
    $query .= " WHERE dtime >='".$start."'";
    $query .= " AND dtime <= '".$end."'";
    $query .= " GROUP BY utime";
    $query .= " HAVING COUNT(utime) > 1";
    my $sth = $db->query({ query => $query });
    my $ids_to_delete = "";
    while (my $row = $sth->fetchrow_hashref()) {
        $ids_to_delete .= $row->{'id'}.",";
    }
    if ($ids_to_delete ne "") {
        chop ($ids_to_delete);    # drop trailing comma
        $db->query({ query => "DELETE FROM mod_bi_time WHERE id IN (".$ids_to_delete.")" });
    }
}

# Returns DATEDIFF($end, $start); logs an ERROR when the difference is
# null, zero or negative.
sub getTotalDaysInPeriod {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my $logger = $self->{"logger"};
    my ($start, $end) = @_;

    my $query = "SELECT DATEDIFF('".$end."', '".$start."') diff";
    my $sth = $db->query({ query => $query });
    my $diff;
    if (my $row = $sth->fetchrow_hashref()) {
        $diff = $row->{'diff'};
    } else {
        $logger->writeLog("ERROR", "TIME : Cannot get difference between period start and end");
    }
    if (!defined($diff)) {
        $logger->writeLog("ERROR", "TIME : Cannot get difference between period start and end");
    }
    if ($diff == 0) {
        $logger->writeLog("ERROR", "TIME : start date is equal to end date");
    } elsif ($diff < 0) {
        $logger->writeLog("ERROR", "TIME : start date is greater than end date");
    }
    return $diff;
}

# Empties mod_bi_time and resets its auto-increment counter.
sub truncateTable {
    my $self = shift;
    my $db = $self->{"centstorage"};

    my $query = "TRUNCATE TABLE `mod_bi_time`";
    $db->query({ query => $query });
    $db->query({ query => "ALTER TABLE `mod_bi_time` AUTO_INCREMENT=1" });
}

# Deletes mod_bi_time rows in [$start, $end).
sub deleteEntriesForPeriod {
    my $self = shift;
    my $db = $self->{"centstorage"};
    my ($start, $end) = @_;

    my $query = "DELETE FROM `mod_bi_time` WHERE dtime >= '".$start."' AND dtime < '".$end."'";
    $db->query({ query => $query });
}

1;
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::CentileProperties; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centreon} = shift; + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +sub getCentileParams { + my $self = shift; + my $centreon = $self->{centreon}; + my $logger = $self->{logger}; + + my $centileParams = []; + my $query = "SELECT `centile_param`, `timeperiod_id` FROM `mod_bi_options_centiles`"; + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($row->{centile_param}) && $row->{centile_param} ne '0' && defined($row->{timeperiod_id}) && $row->{timeperiod_id} ne '0'){ + push @{$centileParams}, { centile_param => $row->{centile_param}, timeperiod_id => $row->{timeperiod_id} }; + } + } + + return $centileParams; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ETLProperties.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ETLProperties.pm new file mode 100644 index 00000000000..b196b8dd6e5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ETLProperties.pm @@ -0,0 +1,119 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::ETLProperties; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centreon} = shift; + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +# returns two references to two hash tables => hosts indexed by id and hosts indexed by name +sub getProperties { + my $self = shift; + + my $activated = 1; + if (@_) { + $activated = 0; + } + my (%etlProperties, %dataRetention); + + my $query = "SELECT `opt_key`, `opt_value` FROM `mod_bi_options` WHERE `opt_key` like 'etl.%'"; + my $sth = $self->{centreon}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{opt_key} =~ /etl.retention.(.*)/) { + $dataRetention{$1} = $row->{opt_value}; + } elsif ($row->{opt_key} =~ /etl.list.(.*)/) { + my @tab = split (/,/, $row->{opt_value}); + my %hashtab = (); + foreach(@tab) { + $hashtab{$_} = 1; + } + $etlProperties{$1} = \%hashtab; + } elsif ($row->{opt_key} =~ /etl.(.*)/) { + $etlProperties{$1} = $row->{opt_value}; + } + } + if (defined($etlProperties{'capacity.exclude.metrics'})) { + $etlProperties{'capacity.exclude.metrics'} =~ s/^/\'/; + $etlProperties{'capacity.exclude.metrics'} =~ s/$/\'/; + 
$etlProperties{'capacity.exclude.metrics'} =~ s/,/\',\'/g;
+    }
+
+    return (\%etlProperties, \%dataRetention);
+}
+
+# returns the max retention period defined by type of statistics, monthly stats are excluded
+sub getMaxRetentionPeriodFor {
+    my $self = shift;
+    my $logger = $self->{'logger'};
+
+    my $type = shift;
+    my $query = "SELECT date_format(NOW(), '%Y-%m-%d') as period_end,";
+    $query .= " date_format(DATE_ADD(NOW(), INTERVAL MAX(CAST(`opt_value` as SIGNED INTEGER))*-1 DAY), '%Y-%m-%d') as period_start";
+    $query .= " FROM `mod_bi_options` ";
+    $query .= " WHERE `opt_key` IN ('etl.retention.".$type.".hourly','etl.retention.".$type.".daily', 'etl.retention.".$type.".raw')";
+    my $sth = $self->{centreon}->query({ query => $query });
+
+    if (my $row = $sth->fetchrow_hashref()) {
+        return ($row->{period_start}, $row->{period_end});
+    }
+
+    die 'Cannot get max perfdata retention period. Verify your data retention options';
+}
+
+# Returns a start and a end date for each retention period
+sub getRetentionPeriods {
+    my $self = shift;
+    my $logger = $self->{'logger'};
+
+    my $query = "SELECT date_format(NOW(), '%Y-%m-%d') as period_end,";
+    $query .= " date_format(DATE_ADD(NOW(), INTERVAL (`opt_value`)*-1 DAY), '%Y-%m-%d') as period_start,";
+    $query .= " opt_key ";
+    $query .= " FROM `mod_bi_options` ";
+    $query .= " WHERE `opt_key` like ('etl.retention.%')";
+    my $sth = $self->{centreon}->query({ query => $query });
+    my %periods = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        $row->{'opt_key'} =~ s/etl.retention.//;
+        $periods{$row->{'opt_key'}} = { start => $row->{period_start}, end => $row->{period_end}} ;
+    }
+    if (!scalar(keys %periods)){
+        $logger->writeLog("FATAL", "Cannot get retention periods information. 
Verify your data retention options"); + } + return (\%periods); +} +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Host.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Host.pm new file mode 100644 index 00000000000..6211e306f28 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Host.pm @@ -0,0 +1,381 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::libs::centreon::Host; + +use strict; +use warnings; +use Data::Dumper; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centreon} = shift; + $self->{etlProperties} = undef; + + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +#Set the etl properties as a variable of the class +sub setEtlProperties{ + my $self = shift; + $self->{etlProperties} = shift; +} + +# returns two references to two hash tables => hosts indexed by id and hosts indexed by name +sub getAllHosts { + my $self = shift; + my $centreon = $self->{centreon}; + my $activated = 1; + if (@_) { + $activated = 0; + } + my (%host_ids, %host_names); + + my $query = "SELECT `host_id`, `host_name`" . + " FROM `host`" . 
+ " WHERE `host_register`='1'"; + if ($activated == 1) { + $query .= " AND `host_activate` ='1'"; + } + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + $host_ids{ $row->{host_name} } = $row->{host_id}; + $host_names{ $row->{host_id} } = $row->{host_name}; + } + return (\%host_ids, \%host_names); +} + +# Get all hosts, keys are IDs +sub getAllHostsByID { + my $self = shift; + my ($host_ids, $host_names) = $self->getAllHosts(); + return ($host_ids); +} + +# Get all hosts, keys are names +sub getAllHostsByName { + my $self = shift; + my ($host_ids, $host_names) = $self->getAllHosts(); + return ($host_names); +} + +sub loadAllCategories { + my $self = shift; + + $self->{hc} = {}; + $self->{host_hc_relations} = {}; + my $query = "SELECT hc.hc_id as category_id, hc.hc_name as category_name, host_host_id + FROM hostcategories hc, hostcategories_relation hr + WHERE hc.hc_activate = '1' AND hc.hc_id = hr.hostcategories_hc_id"; + my $sth = $self->{centreon}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + $self->{hc}->{ $row->{category_id} } = $row->{category_name} if (!defined($self->{hc}->{ $row + ->{category_id} })); + $self->{host_hc_relations}->{ $row->{host_host_id} } = [] if (!defined($self->{host_hc_relations}->{ $row + ->{host_host_id} })); + push @{$self->{host_hc_relations}->{ $row->{host_host_id} }}, $row->{category_id}; + } +} + +sub loadAllHosts { + my $self = shift; + + $self->{hosts} = {}; + $self->{host_htpl_relations} = {}; + my $query = "SELECT h.host_id, h.host_name, host_tpl_id + FROM host h, host_template_relation htr + WHERE h.host_activate = '1' AND h.host_id = htr.host_host_id"; + my $sth = $self->{centreon}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + $self->{hosts}->{ $row->{host_id} } = $row->{host_name} if (!defined($self->{hosts}->{ $row + ->{host_id} })); + $self->{host_htpl_relations}->{ $row->{host_id} } = [] if 
(!defined($self->{host_htpl_relations}->{ $row + ->{host_id} })); + push @{$self->{host_htpl_relations}->{ $row->{host_id} }}, $row->{host_tpl_id}; + } +} + +# returns host groups linked to hosts +# all hosts will be stored in a hash table +# each key of the hash table is a host id +# each key is linked to a table containing entries like : "hostgroup_id;hostgroup_name" +sub getHostGroups { + my $self = shift; + my $centreon = $self->{"centreon"}; + my $activated = 1; + my $etlProperties = $self->{'etlProperties'}; + if (@_) { + $activated = 0; + } + my %result = (); + + my $query = "SELECT `host_id`, `host_name`, `hg_id`, `hg_name`" . + " FROM `host`, `hostgroup_relation`, `hostgroup`" . + " WHERE `host_register`='1'" . + " AND `hostgroup_hg_id` = `hg_id`" . + " AND `host_id`= `host_host_id`"; + if ($activated == 1) { + $query .= " AND `host_activate` ='1'"; + } + if (!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne '') { + $query .= " AND `hg_id` IN (" . $etlProperties->{'dimension.hostgroups'} . ")"; + } + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my $new_entry = $row->{"hg_id"} . ";" . 
$row->{"hg_name"}; + if (defined($result{$row->{"host_id"}})) { + my $tab_ref = $result{$row->{"host_id"}}; + my @tab = @$tab_ref; + my $exists = 0; + foreach (@tab) { + if ($_ eq $new_entry) { + $exists = 1; + last; + } + } + if (!$exists) { + push @tab, $new_entry; + } + $result{$row->{"host_id"}} = \@tab; + } else { + my @tab = ($new_entry); + $result{$row->{"host_id"}} = \@tab; + } + } + $sth->finish(); + return (\%result); +} + +#Fill a class Hash table that contains the relation between host_id and table[hc_id,hc_name] +sub getHostCategoriesWithTemplate { + my $self = shift; + my $centreon = $self->{"centreon"}; + my $activated = 1; + + #Hash : each key of the hash table is a host id + #each key is linked to a table containing entries like : "hc_id,hc_name" + my $hostCategoriesWithTemplate = $self->{'hostCategoriesWithTemplates'}; + if (@_) { + $activated = 0; + } + + my $query = "SELECT `host_id` FROM `host` WHERE `host_activate` ='1' AND `host_register` ='1'"; + + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my @tab = (); + my $host_id = $row->{"host_id"}; + $self->getRecursiveCategoriesForOneHost($host_id, \@tab); + $self->getDirectLinkedCategories($host_id, \@tab); + $hostCategoriesWithTemplate->{$row->{"host_id"}} = [@tab]; + undef @tab; + } + $self->{'hostCategoriesWithTemplates'} = $hostCategoriesWithTemplate; + $sth->finish(); +} + +#Get the link between host and categories using direct link hc <> host +sub getDirectLinkedCategories { + my $self = shift; + my $host_id = shift; + my $ref_hostCat = shift; + my $centreon = $self->{"centreon"}; + my $etlProperties = $self->{"etlProperties"}; + my @tab = (); + + my $query = "SELECT `host_id`, `host_name`, `hc_id`, `hc_name`" . + " FROM `host`, `hostcategories_relation`, `hostcategories`" . + " WHERE `host_register`='1'" . + " AND `hostcategories_hc_id` = `hc_id`" . + " AND `host_id`= `host_host_id`" . + " AND `host_id`= " . $host_id . " " . 
+ " AND `host_activate` ='1' AND hostcategories.hc_activate = '1' "; + + if (!defined($etlProperties->{'dimension.all.hostcategories'}) && $etlProperties->{'dimension.hostcategories'} + ne '') { + $query .= " AND `hc_id` IN (" . $etlProperties->{'dimension.hostcategories'} . ")"; + } + + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my $new_entry = $row->{"hc_id"} . ";" . $row->{"hc_name"}; + if (!scalar(@$ref_hostCat)) { + @$ref_hostCat = ($new_entry); + } else { + @tab = @$ref_hostCat; + my $exists = 0; + foreach (@$ref_hostCat) { + if ($_ eq $new_entry) { + $exists = 1; + last; + } + } + if (!$exists) { + push @$ref_hostCat, $new_entry; + } + } + } + $sth->finish(); +} + +sub GetHostTemplateAndCategoryForOneHost { + my $self = shift; + my $host_id = shift; + + my $query = << "EOQ"; +SELECT + hhtemplates.host_id, + hhtemplates.host_name, + hhtemplates.template_id, + hhtemplates.template_name, + categories.hc_id as category_id, + categories.hc_activate as hc_activate, + categories.hc_name as category_name +FROM ( + SELECT + hst.host_id, + hst.host_name, + htpls.host_id as template_id, + htpls.host_name as template_name + FROM + host hst + JOIN + host_template_relation hst_htpl_rel + ON + hst.host_id = hst_htpl_rel.host_host_id + JOIN + host htpls + ON + hst_htpl_rel.host_tpl_id = htpls.host_id + WHERE + hst.host_activate ='1' + AND hst.host_id = $host_id +) hhtemplates +LEFT JOIN + hostcategories_relation hcs_rel + ON + hcs_rel.host_host_id = hhtemplates.template_id +LEFT JOIN + hostcategories categories + ON + hcs_rel.hostcategories_hc_id = categories.hc_id +EOQ + + return $self->{centreon}->query({ query => $query }); + +} + +#Get the link between host and categories using templates +sub getRecursiveCategoriesForOneHost { + my $self = shift; + my $host_id = shift; + my $ref_hostCat = shift; + my $etlProperties = $self->{"etlProperties"}; + + #Get all categories linked to the templates associated with the host 
or just template associated with host to be able to call the method recursively + my $sth = $self->GetHostTemplateAndCategoryForOneHost($host_id); + + my @hostCategoriesAllowed = split /,/, $etlProperties->{'dimension.hostcategories'}; + while (my $row = $sth->fetchrow_hashref()) { + my $new_entry; + my @tab = (); + my $categoryId = $row->{"category_id"}; + my $categoryName = $row->{"category_name"}; + my $categoryActivate = $row->{"hc_activate"}; + + #If current category is in allowed categories in ETL configuration + #add it to the categories link to the host, + #Then check for templates categories recursively + if (defined($categoryId) && defined($categoryName) && $categoryActivate == '1') { + if ((grep {$_ eq $categoryId} @hostCategoriesAllowed) + || (defined($etlProperties->{'dimension.all.hostcategories'}) + && $etlProperties->{'dimension.all.hostcategories'} ne '')) { + $new_entry = $categoryId . ";" . $categoryName; + #If no hostcat has been found for the host, create the line + if (!scalar(@$ref_hostCat)) { + @$ref_hostCat = ($new_entry); + } else { + #If the tab is not empty, check wether the combination already exists in the tab + @tab = @$ref_hostCat; + my $exists = 0; + foreach (@$ref_hostCat) { + if ($_ eq $new_entry) { + $exists = 1; + last; + } + } + #If the host category did not exist, add it to the table @$ref_hostCat + if (!$exists) { + push @$ref_hostCat, $new_entry; + } + } + } + } + $self->getRecursiveCategoriesForOneHost($row->{"template_id"}, $ref_hostCat); + } + $sth->finish(); +} + +sub getHostGroupAndCategories { + my $self = shift; + + my $hostGroups = $self->getHostGroups(); + + $self->loadAllCategories(); + $self->loadAllHosts(); + $self->getHostCategoriesWithTemplate(); + my $hostCategories = $self->{"hostCategoriesWithTemplates"}; + my @results; + + while (my ($hostId, $groups) = each(%$hostGroups)) { + my $categories_ref = $hostCategories->{$hostId}; + my @categoriesTab = (); + if (defined($categories_ref) && 
scalar(@$categories_ref)) { + @categoriesTab = @$categories_ref; + } + my $hostName = $self->{hosts}->{$hostId}; + foreach (@$groups) { + my $group = $_; + if (scalar(@categoriesTab)) { + foreach (@categoriesTab) { + push @results, $hostId . ';' . $hostName . ';' . $group . ';' . $_; + } + } else { + #If there is no category + push @results, $hostId . ";" . $hostName . ";" . $group . ";0;NoCategory"; + } + } + } + + return \@results; +} + +1; \ No newline at end of file diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostCategory.pm new file mode 100644 index 00000000000..8751350f763 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostCategory.pm @@ -0,0 +1,70 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::HostCategory; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centreon"} = shift; + $self->{'etlProperties'} = undef; + if (@_) { + $self->{"centstorage"} = shift; + } + bless $self, $class; + return $self; +} + +#Set the etl properties as a variable of the class +sub setEtlProperties{ + my $self = shift; + $self->{'etlProperties'} = shift; +} + + +sub getAllEntries { + my $self = shift; + my $db = $self->{"centreon"}; + my $etlProperties = $self->{'etlProperties'}; + + my $query = "SELECT `hc_id`, `hc_name`"; + $query .= " FROM `hostcategories`"; + if(!defined($etlProperties->{'dimension.all.hostcategories'}) && $etlProperties->{'dimension.hostcategories'} ne ''){ + $query .= " WHERE `hc_id` IN (".$etlProperties->{'dimension.hostcategories'}.")"; + } + my $sth = $db->query({ query => $query }); + my @entries = (); + while (my $row = $sth->fetchrow_hashref()) { + push @entries, $row->{"hc_id"}.";".$row->{"hc_name"}; + } + $sth->finish(); + return (\@entries); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostGroup.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostGroup.pm new file mode 100644 index 00000000000..e11579f363e --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostGroup.pm @@ -0,0 +1,136 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::HostGroup; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centreon"} = shift; + $self->{'etlProperties'} = undef; + if (@_) { + $self->{"centstorage"} = shift; + } + bless $self, $class; + return $self; +} + +#Set the etl properties as a variable of the class +sub setEtlProperties{ + my $self = shift; + $self->{'etlProperties'} = shift; +} + +# returns in a table all host/service of a group of host +sub getHostgroupServices { + my $self = shift; + my $db = $self->{"centreon"}; + my $etlProperties = $self->{'etlProperties'}; + my $hgId = 0; + if (@_) { + $hgId = shift; + } + my %result = (); + my $query = "SELECT h.`host_id`, h.`host_name`, s.`service_id`, s.`service_description`"; + $query .= " FROM `hostgroup` hg, `host_service_relation` hsr, `service` s, `hostgroup_relation` hgr, `host` h"; + $query .= " WHERE hg.`hg_id` = ".$hgId; + $query .= " AND hg.`hg_id` = hsr.`hostgroup_hg_id`"; + $query .= " AND hsr.`service_service_id` = s.`service_id`"; + $query .= " AND s.`service_activate` = '1'"; + $query .= " AND s.`service_register` = '1'"; 
+    $query .= " AND hg.hg_id = hgr.`hostgroup_hg_id`";
+    $query .= " AND hgr.`host_host_id` = h.`host_id`";
+    $query .= " AND h.`host_activate` = '1'";
+    $query .= " AND h.`host_register` = '1'";
+    if(!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne ''){
+        $query .= " AND hg.`hg_id` IN (".$etlProperties->{'dimension.hostgroups'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{"host_id"}.";".$row->{"service_id"}} = 1;
+    }
+    $sth->finish();
+    return (\%result);
+}
+
+
+# returns in a table all host/service of a group of host
+sub getHostgroupHostServices {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+
+    my $hgId = 0;
+    if (@_) {
+        $hgId = shift;
+    }
+    my %result = ();
+    my $query = "SELECT h.`host_id`, s.`service_id`";
+    $query .= " FROM `host` h, `hostgroup` hg, `hostgroup_relation` hgr, `host_service_relation` hsr, `service` s";
+    $query .= " WHERE hg.`hg_id` = ".$hgId;
+    $query .= " AND hgr.`hostgroup_hg_id` = hg.`hg_id`";
+    $query .= " AND hgr.`host_host_id` = h.`host_id`";
+    $query .= " AND h.`host_activate` = '1'";
+    $query .= " AND h.`host_register` = '1'";
+    $query .= " AND h.`host_id` = hsr.`host_host_id`";
+    $query .= " AND hsr.`service_service_id` = s.`service_id`";
+    $query .= " AND s.`service_activate` = '1'";
+    $query .= " AND s.`service_register` = '1'";
+    if(!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne ''){
+        $query .= " AND hg.`hg_id` IN (".$etlProperties->{'dimension.hostgroups'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{"host_id"}.";".$row->{"service_id"}} = 1;
+    }
+    %result = (%result, %{ $self->getHostgroupServices($hgId) });
+    return (\%result);
+}
+
+sub getAllEntries {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my 
$etlProperties = $self->{'etlProperties'}; + + my $query = "SELECT `hg_id`, `hg_name`"; + $query .= " FROM `hostgroup`"; + if(!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne ''){ + $query .= " WHERE `hg_id` IN (".$etlProperties->{'dimension.hostgroups'}.")"; + } + my $sth = $db->query({ query => $query }); + my @entries = (); + while (my $row = $sth->fetchrow_hashref()) { + push @entries, $row->{"hg_id"}.";".$row->{"hg_name"}; + } + $sth->finish(); + return (\@entries); +} + + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Service.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Service.pm new file mode 100644 index 00000000000..fc2f3138149 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Service.pm @@ -0,0 +1,213 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::Service; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centreon"} = shift; + $self->{'etlProperties'} = undef; + + if (@_) { + $self->{"centstorage"} = shift; + } + bless $self, $class; + return $self; +} + +sub setEtlProperties{ + my $self = shift; + $self->{'etlProperties'} = shift; +} + +# returns two references to two hash tables => services indexed by id and services indexed by name +sub getServicesWithHostAndCategory { + my $self = shift; + my $centreon = $self->{"centreon"}; + my $serviceId = ""; + my $hosts = shift; + if (@_) { + $serviceId = shift; + } + my $templateCategories = $self->getServicesTemplatesCategories; + + my (@results); + # getting services linked to hosts + my $query = "SELECT service_description, service_id, host_id, service_template_model_stm_id as tpl". + " FROM host, service, host_service_relation". + " WHERE host_id = host_host_id and service_service_id = service_id". + " AND service_register = '1'". + " AND host_activate = '1'". 
+ " AND service_activate = '1'"; + + my $sth = $centreon->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + # getting all host entries + my $serviceHostTable = $hosts->{$row->{"host_id"}}; + # getting all Categories entries + my @categoriesTable = (); + # getting categories directly linked to service + my $categories = $self->getServiceCategories($row->{"service_id"}); + while(my ($sc_id, $sc_name) = each(%$categories)) { + push @categoriesTable, $sc_id.";".$sc_name; + } + # getting categories linked to template + if (defined($row->{"tpl"}) && defined($templateCategories->{$row->{"tpl"}})) { + my $tplCategories = $templateCategories->{$row->{"tpl"}}; + while(my ($sc_id, $sc_name) = each(%$tplCategories)) { + if(!defined($categories->{$sc_id})) { + push @categoriesTable, $sc_id.";".$sc_name; + } + } + } + if (!scalar(@categoriesTable)) { + #ToDo push @categoriesTable, "0;NULL"; + } + if (defined($serviceHostTable)) { + foreach(@$serviceHostTable) { + my $hostInfos = $_; + foreach(@categoriesTable) { + push @results, $row->{"service_id"}.";".$row->{"service_description"}.";".$_.";".$hostInfos; + } + } + } + } + #getting services linked to hostgroup + $query = "SELECT DISTINCT service_description, service_id, host_id, service_template_model_stm_id as tpl". + " FROM host, service, host_service_relation hr, hostgroup_relation hgr". + " WHERE hr.hostgroup_hg_id is not null". + " AND hr.service_service_id = service_id". + " AND hr.hostgroup_hg_id = hgr.hostgroup_hg_id". + " AND hgr.host_host_id = host_id". + " AND service_register = '1'". + " AND host_activate = '1'". 
+ " AND service_activate = '1'"; + + $sth = $centreon->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + # getting all host entries + my $serviceHostTable = $hosts->{$row->{"host_id"}}; + # getting all Categories entries + my @categoriesTable = (); + # getting categories directly linked to service + my $categories = $self->getServiceCategories($row->{"service_id"}); + while(my ($sc_id, $sc_name) = each(%$categories)) { + push @categoriesTable, $sc_id.";".$sc_name; + } + # getting categories linked to template + if (defined($row->{"tpl"}) && defined($templateCategories->{$row->{"tpl"}})) { + my $tplCategories = $templateCategories->{$row->{"tpl"}}; + while(my ($sc_id, $sc_name) = each(%$tplCategories)) { + if(!defined($categories->{$sc_id})) { + push @categoriesTable, $sc_id.";".$sc_name; + } + } + } + if (!scalar(@categoriesTable)) { + push @categoriesTable, "0;NULL"; + } + if (defined($serviceHostTable)) { + foreach(@$serviceHostTable) { + my $hostInfos = $_; + foreach(@categoriesTable) { + push @results, $row->{"service_id"}.";".$row->{"service_description"}.";".$_.";".$hostInfos; + } + } + } + } + $sth->finish(); + return (\@results); +} + +sub getServicesTemplatesCategories { + my $self = shift; + my $db = $self->{"centreon"}; + my %results = (); + + my $query = "SELECT service_id, service_description, service_template_model_stm_id FROM service WHERE service_register = '0'"; + my $sth = $db->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + my $currentTemplate = $row->{"service_id"}; + my $categories = $self->getServiceCategories($row->{"service_id"}); + my $parentId = $row->{"service_template_model_stm_id"}; + if (defined($parentId)) { + my $hasParent = 1; + # getting all parent templates category relations + while ($hasParent) { + my $parentQuery = "SELECT service_id, service_template_model_stm_id "; + $parentQuery .= "FROM service "; + $parentQuery .= "WHERE service_register = '0' and 
service_id=".$parentId; + my $sthparentQuery = $db->query({ query => $parentQuery }); + if(my $parentQueryRow = $sthparentQuery->fetchrow_hashref()) { + my $newCategories = $self->getServiceCategories($parentQueryRow->{"service_id"}); + while(my ($sc_id, $sc_name) = each(%$newCategories)) { + if (!defined($categories->{$sc_id})) { + $categories->{$sc_id} = $sc_name; + } + } + if (!defined($parentQueryRow->{'service_template_model_stm_id'})) { + $hasParent = 0; + last; + } + $parentId = $parentQueryRow->{'service_template_model_stm_id'}; + $sthparentQuery->finish(); + }else { + $hasParent = 0; + } + } + } + $results{$currentTemplate} = $categories; + } + $sth->finish(); + return \%results; +} + +sub getServiceCategories { + my $self = shift; + my $db = $self->{"centreon"}; + my $id = shift; + my %results = (); + my $etlProperties = $self->{'etlProperties'}; + + my $query = "SELECT sc.sc_id, sc_name "; + $query .= " FROM service_categories sc, service_categories_relation scr"; + $query .= " WHERE service_service_id = ".$id; + $query .= " AND sc.sc_id = scr.sc_id"; + if(!defined($etlProperties->{'dimension.all.servicecategories'}) && $etlProperties->{'dimension.servicecategories'} ne ''){ + $query .= " AND sc.sc_id IN (".$etlProperties->{'dimension.servicecategories'}.")"; + } + my $sth = $db->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + $results{$row->{"sc_id"}} = $row->{"sc_name"}; + } + return (\%results); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm new file mode 100644 index 00000000000..637ec4fb08a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm @@ -0,0 +1,96 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service 
performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centreon::ServiceCategory;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centreon"} = shift;
+    $self->{'etlProperties'} = undef;
+    if (@_) {
+        $self->{"centstorage"} = shift;
+    }
+    bless $self, $class;
+    return $self;
+}
+
+# Set the etl properties as a variable of the class
+sub setEtlProperties{
+    my $self = shift;
+    $self->{'etlProperties'} = shift;
+}
+
+# Returns the sc_id of the service category named $scName (empty string + error log when not found)
+sub getCategory {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+    my $scName = "";
+    if (@_) {
+        $scName = shift;
+    }
+
+    my $result = "";
+    # NOTE(review): $scName is interpolated into SQL unescaped — assumed to come from trusted ETL configuration
+    my $query = "SELECT sc_id from service_categories WHERE sc_name='".$scName."'";
+    if(!defined($etlProperties->{'dimension.all.servicecategories'}) && $etlProperties->{'dimension.servicecategories'} ne ''){
+        $query .= " AND `sc_id` IN (".$etlProperties->{'dimension.servicecategories'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    if(my $row = $sth->fetchrow_hashref()) {
+        $result = $row->{"sc_id"};
+    }else {
+        ($self->{"logger"})->writeLog("error", "Cannot find service category '" . $scName . "' in database");
+    }
+    $sth->finish();
+
+    return ($result);
+}
+
+sub getAllEntries { # returns \@ of "sc_id;sc_name" strings, optionally restricted by the ETL properties
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+
+    my $query = "SELECT `sc_id`, `sc_name`";
+    $query .= " FROM `service_categories`";
+    if(!defined($etlProperties->{'dimension.all.servicecategories'}) && $etlProperties->{'dimension.servicecategories'} ne ''){
+        $query .= " WHERE `sc_id` IN (".$etlProperties->{'dimension.servicecategories'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    my @entries = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        push @entries, $row->{"sc_id"}.";".$row->{"sc_name"};
+    }
+    $sth->finish();
+    return (\@entries);
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Timeperiod.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Timeperiod.pm
new file mode 100644
index 00000000000..2dab0526666
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Timeperiod.pm
@@ -0,0 +1,247 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+use Time::Local;
+use gorgone::modules::centreon::mbi::libs::Utils;
+
+package gorgone::modules::centreon::mbi::libs::centreon::Timeperiod;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centreon"} = shift;
+    if (@_) {
+        $self->{"centstorage"} = shift;
+    }
+    bless $self, $class;
+    return $self;
+}
+
+sub getTimeRangesForDay { # returns \@ of [start,end] unix-time pairs for one day of a named time period
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my ($weekDay, $name, $unixtime) = @_;
+    my @results = ();
+
+    my @weekDays = ("sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"); # NOTE(review): unused — tp_* column is addressed directly through $weekDay
+    my $query = "SELECT tp_" . $weekDay;
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_name = '" . $name . "'";
+    my $sth = $db->query({ query => $query });
+    if (my $row = $sth->fetchrow_hashref()) {
+        if (defined($row->{'tp_'.$weekDay})) {
+            my @ranges = split(",", $row->{'tp_' . $weekDay});
+            foreach (@ranges) {
+                my ($start, $end) = split("-", $_);
+                my ($start_hour, $start_min) = split(':', $start);
+                my ($end_hour, $end_min) = split(':', $end);
+                my @range = ($unixtime+ $start_hour * 60 * 60 + $start_min * 60, $unixtime + $end_hour * 60 * 60 + $end_min * 60);
+                push @results, \@range;
+            }
+        }
+    }
+
+    return (\@results);
+}
+
+sub getTimeRangesForDayByDateTime { # returns \@ of [start,end] SQL datetime-literal pairs for one day
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my ($name, $dateTime, $weekDay) = @_;
+    my @results = ();
+
+    my $query = "SELECT tp_".$weekDay;
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_name='".$name."'";
+    my $sth = $db->query({ query => $query });
+    if(my $row = $sth->fetchrow_hashref()) {
+        if (defined($row->{'tp_'.$weekDay})) {
+            my @ranges = split(",", $row->{'tp_'.$weekDay});
+            foreach(@ranges) {
+                my ($start, $end) = split("-", $_);
+                my $range_end = "'".$dateTime." ".$end.":00'";
+                if ($end eq '24:00') { # 24:00 is not a valid TIME literal — roll over to the next day
+                    $range_end = "DATE_ADD('".$dateTime."', INTERVAL 1 DAY)";
+                }
+                my @range = ("'".$dateTime." ".$start.":00'", $range_end);
+                push @results, \@range;
+            }
+        }
+    }
+    $sth->finish();
+
+    return (\@results);
+}
+
+sub getRangeTable { # "HH:MM-HH:MM,..." -> [[start_sec, end_sec], ...] (seconds since midnight)
+    my ($self, $rangeStr) = @_;
+    if (!defined($rangeStr)) {
+        $rangeStr = "";
+    }
+    my @ranges = split(",", $rangeStr);
+
+    my @results = ();
+    foreach(@ranges) {
+        my ($start, $end) = split("-", $_);
+        my ($start_hour, $start_min) = split(":", $start);
+        my ($end_hour, $end_min) = split(":", $end);
+        push @results, [$start_hour * 60 * 60 + $start_min * 60, $end_hour * 60 * 60 + $end_min * 60];
+    }
+    return [@results];
+}
+
+sub getAllRangesForTpId { # returns 7 per-day range tables, indexed to match (localtime)[6]
+    my ($self, $timeperiod_id) = @_;
+    my $db = $self->{"centreon"};
+    my $logger = $self->{"logger"};
+    my $query = "SELECT tp_monday, tp_tuesday, tp_wednesday, tp_thursday, tp_friday, tp_saturday, tp_sunday";
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_id='".$timeperiod_id."'";
+    my $sth = $db->query({ query => $query });
+
+    my @results = ();
+    if(my $row = $sth->fetchrow_hashref()) {
+        $results[0] = $self->getRangeTable($row->{'tp_sunday'}); # index 0 = Sunday, as in (localtime)[6]
+        $results[1] = $self->getRangeTable($row->{'tp_monday'});
+        $results[2] = $self->getRangeTable($row->{'tp_tuesday'});
+        $results[3] = $self->getRangeTable($row->{'tp_wednesday'});
+        $results[4] = $self->getRangeTable($row->{'tp_thursday'});
+        $results[5] = $self->getRangeTable($row->{'tp_friday'});
+        $results[6] = $self->getRangeTable($row->{'tp_saturday'});
+    }else {
+        $logger->writeLog("ERROR", "Cannot find time period with id '".$timeperiod_id."' in Centreon Database");
+    }
+    return [@results];
+}
+
+sub getTimeRangesForPeriod { # expands a time period over [start,end) into absolute [start,end] unix-time pairs
+    my $self = shift;
+    my ($timeperiodId, $start, $end) = @_;
+    my @results = ();
+    my @weekDays = ("sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"); # NOTE(review): unused — day of week comes from (localtime)[6]
+    my $days = gorgone::modules::centreon::mbi::libs::Utils->getRebuildPeriods($start, $end);
+    my $weekRanges = $self->getAllRangesForTpId($timeperiodId);
+    foreach (@$days) {
+        my $dayStart = $_->{'start'};
+        my $dayRanges = $weekRanges->[(localtime($dayStart))[6]];
+        foreach(@$dayRanges) {
+            push @results, [$dayStart+$_->[0], $dayStart+$_->[1]];
+        }
+    }
+    return [@results];
+}
+
+sub getTimeRangesForPeriodAndTpList { # same expansion, keyed by time period id
+    my $self = shift;
+    my ($timeperiodList, $start, $end) = @_;
+
+    my %rangesByTP = ();
+    while (my ($key, $value) = each %$timeperiodList) {
+        $rangesByTP{$key} = $self->getTimeRangesForPeriod($key, $start, $end);
+    }
+    return \%rangesByTP;
+}
+
+sub getId {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $name = shift;
+
+    my $query = "SELECT tp_id";
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_name = '".$name."'";
+    my $sth = $db->query({ query => $query });
+    my $result = -1; # -1 signals "not found" to callers
+    if(my $row = $sth->fetchrow_hashref()) {
+        $result = $row->{'tp_id'};
+    }
+    return $result;
+}
+
+sub getPeriodsLike { # returns \%{tp_id => tp_name} for names starting with $name
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $name = shift;
+
+    my $query = "SELECT tp_id, tp_name";
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_name like '".$name."%'";
+    my $sth = $db->query({ query => $query });
+    my %result = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{'tp_id'}} = $row->{'tp_name'};
+    }
+    return \%result;
+}
+
+sub getPeriods { # returns \%{tp_id => tp_name} for the given id set
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $logger = $self->{'logger'};
+    my $ids = shift;
+
+    my $idStr = "";
+
+    foreach my $key (keys %$ids) {
+        if ($idStr eq "") {
+            $idStr .= $key;
+        }else {
+            $idStr .= ",".$key;
+        }
+    }
+    if ($idStr eq "") { # NOTE(review): the query below would run with an invalid "IN ()" — presumably writeLog("ERROR") aborts; confirm
+        $logger->writeLog("ERROR", "Select a timeperiod in the ETL configuration menu");
+    }
+    my $query = "SELECT tp_id, tp_name";
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_id IN (".$idStr.")";
+    my $sth = $db->query({ query => $query });
+    my %result = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{'tp_id'}} = $row->{'tp_name'};
+    }
+    return \%result;
+}
+
+sub getCentilePeriods { # returns \%{tp_id => tp_name} of periods selected for centile computation
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $logger = $self->{'logger'};
+
+    my $query = "SELECT tp_id, tp_name";
+    $query .= " FROM timeperiod";
+    $query .= " WHERE tp_id IN (select timeperiod_id from mod_bi_options_centiles)";
+    my $sth = $db->query({ query => $query });
+    my %result = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{'tp_id'}} = $row->{'tp_name'};
+    }
+    return \%result;
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/HostStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/HostStateEvents.pm
new file mode 100644
index 00000000000..55adc8324b1
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/HostStateEvents.pm
@@ -0,0 +1,183 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centstorage::HostStateEvents;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centstorage"} = shift;
+    $self->{"biHostStateEventsObj"} = shift;
+    $self->{"timePeriodObj"} = shift;
+    if (@_) {
+        $self->{"centreon"} = shift;
+    }
+    $self->{"name"} = "hoststateevents";
+    $self->{"timeColumn"} = "end_time";
+    bless $self, $class;
+    return $self;
+}
+
+sub getName() {
+    my $self = shift;
+    return $self->{'name'};
+}
+
+sub getTimeColumn() {
+    my $self = shift;
+    return $self->{'timeColumn'};
+}
+sub agreggateEventsByTimePeriod { # splits host state events over each time period and binds them to the BI temp table
+    my ($self, $timeperiodList, $start, $end, $liveServiceByTpId, $mode) = @_;
+    my $logger = $self->{"logger"};
+    my $nbEvents; # NOTE(review): unused
+    my $db = $self->{"centstorage"};
+
+    my $rangesByTP = ($self->{"timePeriodObj"})->getTimeRangesForPeriodAndTpList($timeperiodList, $start, $end);
+    my $query = " SELECT e.host_id, start_time, end_time, ack_time, state, last_update";
+    $query .= " FROM `hoststateevents` e";
+    $query .= " RIGHT JOIN (select host_id from mod_bi_tmp_today_hosts group by host_id) t2";
+    $query .= " ON e.host_id = t2.host_id";
+    $query .= " WHERE start_time < ".$end."";
+    $query .= " AND end_time > ".$start."";
+    $query .= " AND in_downtime = 0 ";
+    $query .= " ORDER BY start_time ";
+
+
+    my $hostEventObjects = $self->{"biHostStateEventsObj"};
+    my $sth = $db->query({ query => $query });
+    $hostEventObjects->createTempBIEventsTable();
+    $hostEventObjects->prepareTempQuery();
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        if (!defined($row->{'end_time'})) {
+            $row->{'end_time'} = $end; # still-open event: clamp to the end of the processed window
+        }
+        while (my ($timeperiodID, $timeRanges) = each %$rangesByTP) {
+            my @tab = ();
+            $tab[0] = $row->{'host_id'};
+            $tab[1] = $liveServiceByTpId->{$timeperiodID};
+            $tab[2] = $row->{'state'};
+            if ($mode eq "daily") {
+                $timeRanges = ($self->{"timePeriodObj"})->getTimeRangesForPeriod($timeperiodID, $row->{'start_time'}, $row->{'end_time'});
+            }
+            ($tab[3], $tab[4]) = $self->processIncidentForTp($timeRanges,$row->{'start_time'}, $row->{'end_time'});
+            $tab[5] = $row->{'end_time'};
+            $tab[6] = defined($row->{ack_time}) ? $row->{ack_time} : 0;
+            $tab[7] = $row->{'last_update'};
+            if (defined($tab[3]) && $tab[3] != -1) {
+                $hostEventObjects->bindParam(\@tab);
+            }
+
+        }
+    }
+    ($db->getInstance)->commit;
+}
+
+sub processIncidentForTp { # returns (sla_start, sla_duration) for [$start,$end] clipped to the ranges, or (-1, -1)
+    my ($self, $timeRanges, $start, $end) = @_;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my $rangeSize = scalar(@$timeRanges); # NOTE(review): $rangeSize, $duration and $i are unused
+    my $duration = 0;
+    my $slaDuration = 0;
+    my $range = 0;
+    my $i = 0;
+    my $processed = 0;
+    my $slaStart = $start;
+    my $slaStartModified = 0;
+
+    foreach(@$timeRanges) {
+        my $currentStart = $start;
+        my $currentEnd = $end;
+
+        $range = $_;
+        my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]);
+
+        if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) {
+            $processed = 1;
+            if ($currentStart > $rangeStart) {
+                $slaStartModified = 1;
+            } elsif ($currentStart < $rangeStart) {
+                $currentStart = $rangeStart;
+                if (!$slaStartModified) {
+                    $slaStart = $currentStart;
+                    $slaStartModified = 1;
+                }
+            }
+            if ($currentEnd > $rangeEnd) {
+                $currentEnd = $rangeEnd;
+            }
+            $slaDuration += $currentEnd - $currentStart;
+        }
+    }
+    if (!$processed) {
+        return (-1, -1); # was (-1, -1, -1): success path returns 2 values and callers consume exactly 2
+    }
+
+    return ($slaStart, $slaDuration);
+}
+
+
+sub dailyPurge {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+    my ($end) = @_;
+
+    $logger->writeLog("DEBUG", "[PURGE] [hoststateevents] purging data older than ".$end);
+    my $query = "DELETE FROM `hoststateevents` where end_time < UNIX_TIMESTAMP('".$end."')";
+    $db->query({ query => $query });
+}
+
+sub getNbEvents{ # counts events of today's hosts overlapping [$start,$end], downtime excluded
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my ($start, $end) = @_;
+    my $logger = $self->{"logger"};
+    my $nbEvents = 0;
+
+    my $query = "SELECT count(*) as nbEvents";
+    $query .= " FROM `hoststateevents` e";
+    $query .= " RIGHT JOIN (select host_id from mod_bi_tmp_today_hosts group by host_id) t2";
+    $query .= " ON e.host_id = t2.host_id";
+    $query .= " WHERE start_time < ".$end."";
+    $query .= " AND end_time > ".$start."";
+    $query .= " AND in_downtime = 0 ";
+
+    my $sth = $db->query({ query => $query });
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        $nbEvents = $row->{'nbEvents'};
+    }
+    return $nbEvents;
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm
new file mode 100644
index 00000000000..93f6b2df733
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm
@@ -0,0 +1,230 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centstorage::Metrics;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{logger} = shift;
+    $self->{centstorage} = shift;
+
+    $self->{metrics} = ();
+    $self->{name} = 'data_bin';
+    $self->{timeColumn} = 'ctime';
+    $self->{name_minmaxavg_tmp} = 'mod_bi_tmp_minmaxavgvalue';
+    $self->{name_firstlast_tmp} = 'mod_bi_tmp_firstlastvalues';
+    $self->{name_minmaxctime_tmp} = 'mod_bi_tmp_minmaxctime';
+    if (@_) { # optional suffix keeps temp table names unique per run
+        $self->{name_minmaxavg_tmp} .= $_[0];
+        $self->{name_firstlast_tmp} .= $_[0];
+        $self->{name_minmaxctime_tmp} .= $_[0];
+    }
+    bless $self, $class;
+    return $self;
+}
+
+sub getName() {
+    my $self = shift;
+    return $self->{'name'};
+}
+
+sub getTimeColumn() {
+    my $self = shift;
+    return $self->{'timeColumn'};
+}
+
+sub createTempTableMetricMinMaxAvgValues { # (re)creates the min/max/avg temp table; hourly granularity adds a valueTime column
+    my ($self, $useMemory, $granularity) = @_;
+    my $db = $self->{"centstorage"};
+    $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{name_minmaxavg_tmp} . "`" });
+    my $createTable = " CREATE TABLE `" . $self->{name_minmaxavg_tmp} . "` (";
+    $createTable .= " id_metric INT NULL,";
+    $createTable .= " avg_value FLOAT NULL,";
+    $createTable .= " min_value FLOAT NULL,";
+    $createTable .= " max_value FLOAT NULL";
+    if ($granularity eq "hour") {
+        $createTable .= ", valueTime DATETIME NULL";
+    }
+    if (defined($useMemory) && $useMemory eq "true") {
+        $createTable .= ") ENGINE=MEMORY CHARSET=utf8 COLLATE=utf8_general_ci;";
+    }else {
+        $createTable .= ") ENGINE=INNODB CHARSET=utf8 COLLATE=utf8_general_ci;";
+    }
+    $db->query({ query => $createTable });
+}
+
+sub getMetricValueByHour { # aggregates data_bin per metric and per hour into the temp table
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my ($start, $end, $useMemory) = @_;
+    my $dateFormat = "%Y-%c-%e %k:00:00";
+
+    # Getting min, max, average
+    $self->createTempTableMetricMinMaxAvgValues($useMemory, "hour");
+    my $query = "INSERT INTO `" . $self->{name_minmaxavg_tmp} . "` SELECT id_metric, avg(value) as avg_value, min(value) as min_value, max(value) as max_value, ";
+    $query .= " date_format(FROM_UNIXTIME(ctime), '".$dateFormat."') as valueTime ";
+    $query .= "FROM data_bin ";
+    $query .= "WHERE ";
+    $query .= "ctime >=UNIX_TIMESTAMP('".$start."') AND ctime < UNIX_TIMESTAMP('".$end."') ";
+    $query .= "GROUP BY id_metric, date_format(FROM_UNIXTIME(ctime), '".$dateFormat."')";
+
+    $db->query({ query => $query });
+    $self->addIndexTempTableMetricMinMaxAvgValues("hour");
+}
+
+sub getMetricsValueByDay { # aggregates data_bin per metric over the union of the given time ranges
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my ($period, $useMemory) = @_;
+    my $dateFormat = "%Y-%c-%e";
+
+    # Getting min, max, average
+    $self->createTempTableMetricMinMaxAvgValues($useMemory, "day");
+    my $query = "INSERT INTO `" . $self->{name_minmaxavg_tmp} . "` SELECT id_metric, avg(value) as avg_value, min(value) as min_value, max(value) as max_value ";
+    #$query .= " date_format(FROM_UNIXTIME(ctime), '".$dateFormat."') as valueTime ";
+    $query .= "FROM data_bin ";
+    $query .= "WHERE ";
+    my @tabPeriod = @$period;
+    my ($start_date, $end_date);
+    my $tabSize = scalar(@tabPeriod);
+    for (my $count = 0; $count < $tabSize; $count++) {
+        my $range = $tabPeriod[$count];
+        if ($count == 0) {
+            $start_date = $range->[0]; # overall window = first range start .. last range end
+        }
+        if ($count == $tabSize - 1) {
+            $end_date = $range->[1];
+        }
+        $query .= "(ctime >= UNIX_TIMESTAMP(".($range->[0]). ") AND ctime < UNIX_TIMESTAMP(".($range->[1]) .")) OR ";
+    }
+
+    $query =~ s/OR $//; # strip the trailing OR left by the loop
+    $query .= "GROUP BY id_metric";
+
+    $db->query({ query => $query });
+    $self->addIndexTempTableMetricMinMaxAvgValues("day");
+    $self->getFirstAndLastValues($start_date, $end_date, $useMemory);
+}
+
+sub createTempTableMetricDayFirstLastValues {
+    my ($self, $useMemory) = @_;
+    my $db = $self->{"centstorage"};
+    $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{name_firstlast_tmp} . "`" });
+    my $createTable = " CREATE TABLE `" . $self->{name_firstlast_tmp} . "` (";
+    $createTable .= " `first_value` FLOAT NULL,";
+    $createTable .= " `last_value` FLOAT NULL,";
+    $createTable .= " id_metric INT NULL";
+    if (defined($useMemory) && $useMemory eq "true") {
+        $createTable .= ") ENGINE=MEMORY CHARSET=utf8 COLLATE=utf8_general_ci;";
+    } else {
+        $createTable .= ") ENGINE=INNODB CHARSET=utf8 COLLATE=utf8_general_ci;";
+    }
+    $db->query({ query => $createTable });
+}
+
+sub addIndexTempTableMetricDayFirstLastValues {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    $db->query({ query => "ALTER TABLE " . $self->{name_firstlast_tmp} . " ADD INDEX (`id_metric`)" });
+}
+
+sub addIndexTempTableMetricMinMaxAvgValues {
+    my $self = shift;
+    my $granularity = shift;
+    my $db = $self->{"centstorage"};
+    my $index = "id_metric";
+    if ($granularity eq "hour") {
+        $index .= ", valueTime";
+    }
+    my $query = "ALTER TABLE " . $self->{name_minmaxavg_tmp} . " ADD INDEX (" . $index . ")";
+    $db->query({ query => $query });
+}
+
+sub createTempTableCtimeMinMaxValues {
+    my ($self, $useMemory) = @_;
+    my $db = $self->{"centstorage"};
+    $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{name_minmaxctime_tmp} . "`" });
+    my $createTable = " CREATE TABLE `" . $self->{name_minmaxctime_tmp} . "` (";
+    $createTable .= " min_val INT NULL,";
+    $createTable .= " max_val INT NULL,";
+    $createTable .= " id_metric INT NULL";
+    if (defined($useMemory) && $useMemory eq "true") {
+        $createTable .= ") ENGINE=MEMORY CHARSET=utf8 COLLATE=utf8_general_ci;";
+    } else {
+        $createTable .= ") ENGINE=INNODB CHARSET=utf8 COLLATE=utf8_general_ci;";
+    }
+    $db->query({ query => $createTable });
+}
+
+sub dropTempTableCtimeMinMaxValues {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    $db->query({ query => "DROP TABLE `" . $self->{name_minmaxctime_tmp} . "`" });
+}
+
+sub getFirstAndLastValues { # records each metric's first and last value over [$start_date,$end_date)
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+
+    my ($start_date, $end_date, $useMemory) = @_;
+
+    $self->createTempTableCtimeMinMaxValues($useMemory);
+    my $query = "INSERT INTO `" . $self->{name_minmaxctime_tmp} . "` SELECT min(ctime) as min_val, max(ctime) as max_val, id_metric ";
+    $query .= " FROM `data_bin`";
+    $query .= " WHERE ctime >= UNIX_TIMESTAMP(" . $start_date . ") AND ctime < UNIX_TIMESTAMP(" . $end_date . ")";
+    $query .= " GROUP BY id_metric";
+    $db->query({ query => $query });
+
+    $self->createTempTableMetricDayFirstLastValues($useMemory);
+    $query = "INSERT INTO " . $self->{name_firstlast_tmp} . " SELECT d.value as `first_value`, d2.value as `last_value`, d.id_metric";
+    $query .= " FROM data_bin as d, data_bin as d2, " . $self->{name_minmaxctime_tmp} . " as db";
+    $query .= " WHERE db.id_metric=d.id_metric AND db.min_val=d.ctime";
+    $query .= " AND db.id_metric=d2.id_metric AND db.max_val=d2.ctime";
+    $query .= " GROUP BY db.id_metric";
+    $db->query({ query => $query }); # was bound to an unused $sth — INSERT returns no result set
+    $self->addIndexTempTableMetricDayFirstLastValues();
+    $self->dropTempTableCtimeMinMaxValues();
+}
+
+sub dailyPurge {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+    my ($end) = @_;
+
+    my $query = "DELETE FROM `data_bin` where ctime < UNIX_TIMESTAMP('" . $end . "')";
+    $logger->writeLog("DEBUG", "[PURGE] [data_bin] purging data older than " . $end);
+    $db->query({ query => $query });
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm
new file mode 100644
index 00000000000..b70130d1d1d
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm
@@ -0,0 +1,179 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centstorage::ServiceStateEvents;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centstorage"} = shift;
+    $self->{"biServiceStateEventsObj"} = shift;
+    $self->{"timePeriodObj"} = shift;
+    if (@_) {
+        $self->{"centreon"} = shift;
+    }
+
+    $self->{"name"} = "servicestateevents";
+    $self->{"timeColumn"} = "end_time";
+    bless $self, $class;
+    return $self;
+}
+
+sub getName() {
+    my $self = shift;
+    return $self->{'name'};
+}
+
+sub getTimeColumn() {
+    my $self = shift;
+    return $self->{'timeColumn'};
+}
+
+sub agreggateEventsByTimePeriod { # splits service state events over each time period and binds them to the BI temp table
+    my ($self, $timeperiodList, $start, $end, $liveServiceByTpId, $mode) = @_;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my $rangesByTP = ($self->{"timePeriodObj"})->getTimeRangesForPeriodAndTpList($timeperiodList, $start, $end);
+    my $query = "SELECT e.host_id,e.service_id, start_time, end_time, ack_time, state, last_update";
+    $query .= " FROM `servicestateevents` e";
+    $query .= " RIGHT JOIN (select host_id,service_id from mod_bi_tmp_today_services group by host_id,service_id) t2";
+    $query .= " ON e.host_id = t2.host_id AND e.service_id = t2.service_id";
+    $query .= " WHERE start_time < ".$end."";
+    $query .= " AND end_time > ".$start."";
+    $query .= " AND in_downtime = 0 ";
+    $query .= " ORDER BY start_time ";
+
+    my $serviceEventObjects = $self->{"biServiceStateEventsObj"};
+    my $sth = $db->query({ query => $query });
+    $serviceEventObjects->createTempBIEventsTable();
+    $serviceEventObjects->prepareTempQuery();
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        if (!defined($row->{'end_time'})) {
+            $row->{'end_time'} = $end; # still-open event: clamp to the end of the processed window
+        }
+        while (my ($timeperiodID, $timeRanges) = each %$rangesByTP) {
+            my @tab = ();
+            $tab[0] = $row->{'host_id'};
+            $tab[1] = $row->{'service_id'};
+            $tab[2] = $liveServiceByTpId->{$timeperiodID};
+            $tab[3] = $row->{'state'};
+            if ($mode eq 'daily') {
+                $timeRanges = ($self->{"timePeriodObj"})->getTimeRangesForPeriod($timeperiodID, $row->{'start_time'}, $row->{'end_time'});
+            }
+            ($tab[4], $tab[5]) = $self->processIncidentForTp($timeRanges,$row->{'start_time'}, $row->{'end_time'});
+            $tab[6] = $row->{'end_time'};
+            $tab[7] = defined($row->{ack_time}) ? $row->{ack_time} : 0;
+            $tab[8] = $row->{last_update};
+            if (defined($tab[4]) && $tab[4] != -1) {
+                $serviceEventObjects->bindParam(\@tab);
+            }
+        }
+    }
+    ($db->getInstance)->commit;
+}
+
+sub processIncidentForTp { # returns (sla_start, sla_duration) for [$start,$end] clipped to the ranges, or (-1, -1)
+    my ($self, $timeRanges, $start, $end) = @_;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my $rangeSize = scalar(@$timeRanges); # NOTE(review): $rangeSize, $duration and $i are unused
+    my $duration = 0;
+    my $slaDuration = 0;
+    my $range = 0;
+    my $i = 0;
+    my $processed = 0;
+    my $slaStart = $start;
+    my $slaStartModified = 0;
+
+    foreach(@$timeRanges) {
+        my $currentStart = $start;
+        my $currentEnd = $end;
+        $range = $_;
+        my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]);
+        if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) {
+            $processed = 1;
+            if ($currentStart > $rangeStart) {
+                $slaStartModified = 1;
+            } elsif ($currentStart < $rangeStart) {
+                $currentStart = $rangeStart;
+                if (!$slaStartModified) {
+                    $slaStart = $currentStart;
+                    $slaStartModified = 1;
+                }
+            }
+            if ($currentEnd > $rangeEnd) {
+                $currentEnd = $rangeEnd;
+            }
+            $slaDuration += $currentEnd - $currentStart;
+        }
+    }
+    if (!$processed) {
+        return (-1, -1); # was (-1, -1, -1): success path returns 2 values and callers consume exactly 2
+    }
+    return ($slaStart, $slaDuration);
+}
+
+sub dailyPurge {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+    my ($end) = @_;
+
+    $logger->writeLog("DEBUG", "[PURGE] [servicestateevents] purging data older than ".$end);
+    my $query = "DELETE FROM `servicestateevents` where end_time < UNIX_TIMESTAMP('".$end."')";
+    $db->query({ query => $query });
+}
+
+sub getNbEvents { # counts events of today's services overlapping [$start,$end], downtime excluded
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my ($start, $end) = @_;
+    my $nbEvents = 0;
+    my $logger = $self->{"logger"};
+
+    my $query = "SELECT count(*) as nbEvents";
+    $query .= " FROM `servicestateevents` e";
+    $query .= " RIGHT JOIN (select host_id,service_id from mod_bi_tmp_today_services group by host_id,service_id) t2";
+    $query .= " ON e.host_id = t2.host_id AND e.service_id = t2.service_id";
+    $query .= " WHERE start_time < ".$end."";
+    $query .= " AND end_time > ".$start."";
+    $query .= " AND in_downtime = 0 ";
+
+    my $sth = $db->query({ query => $query });
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        $nbEvents = $row->{'nbEvents'};
+    }
+    return $nbEvents;
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/nodes/class.pm b/gorgone/gorgone/modules/centreon/nodes/class.pm
new file mode 100644
index 00000000000..d9deecb5ff6
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/nodes/class.pm
@@ -0,0 +1,256 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#

package gorgone::modules::centreon::nodes::class;

use base qw(gorgone::class::module);

use strict;
use warnings;
use gorgone::standard::library;
use gorgone::standard::constants qw(:all);
use gorgone::class::sqlquery;
use gorgone::class::http::http;
use MIME::Base64;
use JSON::XS;
use EV;

# Per-class signal dispatch tables and the singleton connector instance.
my %handlers = (TERM => {}, HUP => {});
my ($connector);

# Constructor: wraps the base module constructor, initializes the known-nodes
# registry and the resync timing state (resync_time defaults to 600 s unless
# a numeric resync_time is given in the config).
sub new {
    my ($class, %options) = @_;
    $connector = $class->SUPER::new(%options);
    bless $connector, $class;

    $connector->{register_nodes} = {};

    $connector->{default_resync_time} = (defined($options{config}->{resync_time}) && $options{config}->{resync_time} =~ /(\d+)/) ? $1 : 600;
    $connector->{resync_time} = $connector->{default_resync_time};
    $connector->{last_resync_time} = -1;

    $connector->set_signal_handlers();
    return $connector;
}

# Install TERM/HUP handlers routed through the class-level dispatch tables.
sub set_signal_handlers {
    my $self = shift;

    $SIG{TERM} = \&class_handle_TERM;
    $handlers{TERM}->{$self} = sub { $self->handle_TERM() };
    $SIG{HUP} = \&class_handle_HUP;
    $handlers{HUP}->{$self} = sub { $self->handle_HUP() };
}

# HUP: clear the reload flag (no actual reload performed here).
sub handle_HUP {
    my $self = shift;
    $self->{reload} = 0;
}

# TERM: request a clean stop; periodic_exec() exits on the next tick.
sub handle_TERM {
    my $self = shift;
    $self->{logger}->writeLogDebug("[nodes] $$ Receiving order to stop...");
    $self->{stop} = 1;
}

sub class_handle_TERM {
    foreach (keys %{$handlers{TERM}}) {
        &{$handlers{TERM}->{$_}}();
    }
}

sub class_handle_HUP {
    foreach (keys %{$handlers{HUP}}) {
        &{$handlers{HUP}->{$_}}();
    }
}

# Read the 'debug_gorgone' option from the database and, when it changed
# since the last check, broadcast the new logger severity to all modules.
# Returns 0 on success, 1 on SQL failure.
sub check_debug {
    my ($self, %options) = @_;

    my $request = "SELECT `value` FROM options WHERE `key` = 'debug_gorgone'";
    my ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2);
    if ($status == -1) {
        $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find debug configuration' });
        $self->{logger}->writeLogError('[nodes] -class- cannot find debug configuration');
        return 1;
    }

    my $map_values = { 0 => 'default', 1 => 'debug' };
    my $debug_gorgone = 0;
    $debug_gorgone = $datas->[0]->[0] if (defined($datas->[0]->[0]));
    if (!defined($self->{debug_gorgone}) || $self->{debug_gorgone} != $debug_gorgone) {
        $self->send_internal_action({ action => 'BCASTLOGGER', data => { content => { severity => $map_values->{$debug_gorgone} } } });
    }

    $self->{debug_gorgone} = $debug_gorgone;
    return 0;
}

# Resynchronize the node topology from the Centreon database: build the
# direct-node and subnode (remote-proxied) lists and push REGISTERNODES /
# UNREGISTERNODES / SETCOREID internal actions. On any SQL failure the next
# retry is scheduled in 10 s. Returns 0 on success, 1 on failure.
sub action_centreonnodessync {
    my ($self, %options) = @_;

    $options{token} = $self->generate_token() if (!defined($options{token}));

    $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action nodesresync proceed' });

    # If we have a SQL issue: resync = 10 sec
    if ($self->check_debug()) {
        $self->{resync_time} = 10;
        return 1;
    }

    my $request = 'SELECT remote_server_id, poller_server_id FROM rs_poller_relation';
    my ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2);
    if ($status == -1) {
        $self->{resync_time} = 10;
        $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find nodes remote configuration' });
        $self->{logger}->writeLogError('[nodes] Cannot find nodes remote configuration');
        return 1;
    }

    # we set a pathscore of 100 because it's "slave"
    my $register_subnodes = {};
    foreach (@$datas) {
        $register_subnodes->{$_->[0]} = [] if (!defined($register_subnodes->{$_->[0]}));
        unshift @{$register_subnodes->{$_->[0]}}, { id => $_->[1], pathscore => 100 };
    }

    $request = "
        SELECT id, name, localhost, ns_ip_address, gorgone_port, remote_id, remote_server_use_as_proxy, gorgone_communication_type
        FROM nagios_server
        WHERE ns_activate = '1'
    ";
    ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2);
    if ($status == -1) {
        $self->{resync_time} = 10;
        $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find nodes configuration' });
        $self->{logger}->writeLogError('[nodes] Cannot find nodes configuration');
        return 1;
    }

    my $core_id;
    my $register_temp = {};
    my $register_nodes = [];
    foreach (@$datas) {
        # localhost row identifies the central (core) server.
        if ($_->[2] == 1) {
            $core_id = $_->[0];
            next;
        }

        # remote_server_use_as_proxy = 1 means: pass through the remote. otherwise directly.
        if (defined($_->[5]) && $_->[5] =~ /\d+/ && $_->[6] == 1) {
            $register_subnodes->{$_->[5]} = [] if (!defined($register_subnodes->{$_->[5]}));
            unshift @{$register_subnodes->{$_->[5]}}, { id => $_->[0], pathscore => 1 };
            next;
        }
        $self->{register_nodes}->{$_->[0]} = 1;
        $register_temp->{$_->[0]} = 1;
        # gorgone_communication_type 2 = SSH push, otherwise ZMQ push.
        if ($_->[7] == 2) {
            push @$register_nodes, {
                id => $_->[0],
                type => 'push_ssh',
                address => $_->[3],
                ssh_port => $_->[4],
                ssh_username => $self->{config}->{ssh_username}
            };
        } else {
            push @$register_nodes, {
                id => $_->[0],
                type => 'push_zmq',
                address => $_->[3],
                port => $_->[4]
            };
        }
    }

    # Nodes seen previously but absent from this sync get unregistered.
    my $unregister_nodes = [];
    foreach (keys %{$self->{register_nodes}}) {
        if (!defined($register_temp->{$_})) {
            push @$unregister_nodes, { id => $_ };
            delete $self->{register_nodes}->{$_};
        }
    }

    # We add subnodes
    foreach (@$register_nodes) {
        if (defined($register_subnodes->{ $_->{id} })) {
            $_->{nodes} = $register_subnodes->{ $_->{id} };
        }
    }

    $self->send_internal_action({ action => 'SETCOREID', data => { id => $core_id } }) if (defined($core_id));
    $self->send_internal_action({ action => 'REGISTERNODES', data => { nodes => $register_nodes } });
    $self->send_internal_action({ action => 'UNREGISTERNODES', data => { nodes => $unregister_nodes } });

    $self->{logger}->writeLogDebug("[nodes] Finish resync");
    $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action nodesresync finished' });

    $self->{resync_time} = $self->{default_resync_time};
    return 0;
}

# Timer callback: exit on stop request, otherwise resync once resync_time
# seconds elapsed since the last resync.
sub periodic_exec {
    my ($self, %options) = @_;

    if ($self->{stop} == 1) {
        $self->{logger}->writeLogInfo("[nodes] -class- $$ has quit");
        exit(0);
    }

    if (time() - $self->{resync_time} > $self->{last_resync_time}) {
        $self->{last_resync_time} = time();
        $self->action_centreonnodessync();
    }
}

# Main loop: connect to the Centreon DB and the internal ZMQ dealer socket,
# announce readiness, then run the EV loop with a 5 s periodic timer and an
# IO watcher on the internal socket.
sub run {
    my ($self, %options) = @_;

    $self->{db_centreon} = gorgone::class::db->new(
        dsn => $self->{config_db_centreon}->{dsn},
        user => $self->{config_db_centreon}->{username},
        password => $self->{config_db_centreon}->{password},
        force => 0,
        logger => $self->{logger}
    );
    $self->{class_object} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon});

    $self->{internal_socket} = gorgone::standard::library::connect_com(
        context => $self->{zmq_context},
        zmq_type => 'ZMQ_DEALER',
        name => 'gorgone-nodes',
        logger => $self->{logger},
        type => $self->get_core_config(name => 'internal_com_type'),
        path => $self->get_core_config(name => 'internal_com_path')
    );
    $self->send_internal_action({
        action => 'CENTREONNODESREADY',
        data => {}
    });

    $self->periodic_exec();

    my $watcher_timer = $self->{loop}->timer(5, 5, sub { $self->periodic_exec() } );
    my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } );
    $self->{loop}->run();
}

1;
diff --git a/gorgone/gorgone/modules/centreon/nodes/hooks.pm b/gorgone/gorgone/modules/centreon/nodes/hooks.pm
new file mode 100644
index 00000000000..f7806358d3a
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/nodes/hooks.pm
@@ -0,0 +1,158 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::modules::centreon::nodes::hooks;

use warnings;
use strict;
use gorgone::class::core;
use gorgone::modules::centreon::nodes::class;
use gorgone::standard::constants qw(:all);

use constant NAMESPACE => 'centreon';
use constant NAME => 'nodes';
use constant EVENTS => [
    { event => 'CENTREONNODESSYNC', uri => '/sync', method => 'POST' },
    { event => 'CENTREONNODESREADY' }
];

# Module-level state shared between hook callbacks: configs passed at
# registration, the child process bookkeeping hash and the stop flag.
my $config_core;
my $config;
my ($config_db_centreon);
my $nodes = {};
my $stop = 0;

# Hook called by the core at startup: store configs, apply defaults
# (resync_time 600 s, ssh_username 'centreon') and declare handled events.
sub register {
    my (%options) = @_;

    $config = $options{config};
    $config_core = $options{config_core};
    $config_db_centreon = $options{config_db_centreon};
    $config->{resync_time} = defined($config->{resync_time}) && $config->{resync_time} =~ /(\d+)/ ? $1 : 600;
    $config->{ssh_username} = defined($config->{ssh_username}) ? $config->{ssh_username} : 'centreon';
    return (1, NAMESPACE, NAME, EVENTS);
}

# Spawn the module child process.
sub init {
    my (%options) = @_;

    create_child(logger => $options{logger});
}

# Route an event: mark readiness on CENTREONNODESREADY, otherwise forward the
# frame to the child over the internal socket (logging a failure to history
# while the child is not ready yet).
sub routing {
    my (%options) = @_;

    if ($options{action} eq 'CENTREONNODESREADY') {
        $nodes->{ready} = 1;
        return undef;
    }

    if (gorgone::class::core::waiting_ready(ready => \$nodes->{ready}) == 0) {
        gorgone::standard::library::add_history({
            dbh => $options{dbh},
            code => GORGONE_ACTION_FINISH_KO,
            token => $options{token},
            data => { message => 'gorgonenodes: still no ready' },
            json_encode => 1
        });
        return undef;
    }

    $options{gorgone}->send_internal_message(
        identity => 'gorgone-nodes',
        action => $options{action},
        raw_data_ref => $options{frame}->getRawData(),
        token => $options{token}
    );
}

# Graceful shutdown: set the stop flag and TERM the child if running.
sub gently {
    my (%options) = @_;

    $stop = 1;
    if (defined($nodes->{running}) && $nodes->{running} == 1) {
        $options{logger}->writeLogDebug("[nodes] Send TERM signal $nodes->{pid}");
        CORE::kill('TERM', $nodes->{pid});
    }
}

# Hard shutdown: KILL the child if running.
sub kill {
    my (%options) = @_;

    if ($nodes->{running} == 1) {
        $options{logger}->writeLogDebug("[nodes] Send KILL signal for pool");
        CORE::kill('KILL', $nodes->{pid});
    }
}

# No internal kill handling needed for this module.
sub kill_internal {
    my (%options) = @_;

}

# Reap dead children: restart the child unless a stop was requested; return
# the number of children still running (0 or 1).
sub check {
    my (%options) = @_;

    my $count = 0;
    foreach my $pid (keys %{$options{dead_childs}}) {
        # Not me
        next if (!defined($nodes->{pid}) || $nodes->{pid} != $pid);

        $nodes = {};
        delete $options{dead_childs}->{$pid};
        if ($stop == 0) {
            create_child(logger => $options{logger});
        }
    }

    $count++ if (defined($nodes->{running}) && $nodes->{running} == 1);

    return $count;
}

# Broadcast events are routed like regular ones.
sub broadcast {
    my (%options) = @_;

    routing(%options);
}

# Specific functions
# Fork the gorgone-nodes worker process and record its pid/state.
sub create_child {
    my (%options) = @_;

    $options{logger}->writeLogInfo("[nodes] Create module 'nodes' process");
    my $child_pid = fork();
    if ($child_pid == 0) {
        $0 = 'gorgone-nodes';
        my $module = gorgone::modules::centreon::nodes::class->new(
            logger => $options{logger},
            module_id => NAME,
            config_core => $config_core,
            config => $config,
            config_db_centreon => $config_db_centreon,
        );
        $module->run();
        exit(0);
    }
    $options{logger}->writeLogDebug("[nodes] PID $child_pid (gorgone-nodes)");
    $nodes = { pid => $child_pid, ready => 0, running => 1 };
}

1;
diff --git a/gorgone/gorgone/modules/centreon/statistics/class.pm b/gorgone/gorgone/modules/centreon/statistics/class.pm
new file mode 100644
index 00000000000..92aa001f7e3
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/statistics/class.pm
@@ -0,0 +1,672 @@
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package gorgone::modules::centreon::statistics::class;

use base qw(gorgone::class::module);

use strict;
use warnings;
use gorgone::standard::library;
use gorgone::standard::constants qw(:all);
use gorgone::class::sqlquery;
use File::Path qw(make_path);
use JSON::XS;
use Time::HiRes;
use RRDs;
use EV;

# Per-class signal dispatch tables and the singleton connector instance.
my %handlers = (TERM => {}, HUP => {});
my ($connector);

# Constructor: log_pace (seconds) is the listener polling pace used when
# registering STATISTICSLISTENER listeners.
sub new {
    my ($class, %options) = @_;
    $connector = $class->SUPER::new(%options);
    bless $connector, $class;

    $connector->{log_pace} = 3;

    $connector->set_signal_handlers();
    return $connector;
}

# Install TERM/HUP handlers routed through the class-level dispatch tables.
sub set_signal_handlers {
    my $self = shift;

    $SIG{TERM} = \&class_handle_TERM;
    $handlers{TERM}->{$self} = sub { $self->handle_TERM() };
    $SIG{HUP} = \&class_handle_HUP;
    $handlers{HUP}->{$self} = sub { $self->handle_HUP() };
}

# HUP: clear the reload flag (no actual reload performed here).
sub handle_HUP {
    my $self = shift;
    $self->{reload} = 0;
}

# TERM: request a clean stop; periodic_exec() exits on the next tick.
sub handle_TERM {
    my $self = shift;
    $self->{logger}->writeLogInfo("[statistics] $$ Receiving order to stop...");
    $self->{stop} = 1;
}

sub class_handle_TERM {
    foreach (keys %{$handlers{TERM}}) {
        &{$handlers{TERM}->{$_}}();
    }
}

sub class_handle_HUP {
    foreach (keys %{$handlers{HUP}}) {
        &{$handlers{HUP}->{$_}}();
    }
}

# Fetch each active poller's id, nagiostats binary and engine config path,
# keyed by poller id. Returns the hashref, or -1 on SQL failure.
sub get_pollers_config {
    my ($self, %options) = @_;

    my ($status, $data) = $self->{class_object_centreon}->custom_execute(
        request => "SELECT id, nagiostats_bin, cfg_dir, cfg_file FROM nagios_server, cfg_nagios WHERE ns_activate = '1' AND cfg_nagios.nagios_server_id = nagios_server.id",
        mode => 1,
        keys => 'id'
    );
    if ($status == -1) {
        $self->{logger}->writeLogError('[engine] Cannot get Pollers configuration');
        return -1;
    }

    return $data;
}

# Read the 'enable_broker_stats' option; returns its value, or -1 when the
# query fails or the option is absent.
sub get_broker_stats_collection_flag {
    my ($self, %options) = @_;

    my ($status, $data) = $self->{class_object_centreon}->custom_execute(
        request => "SELECT `value` FROM options WHERE `key` = 'enable_broker_stats'",
        mode => 2
    );
    if ($status == -1 || !defined($data->[0][0])) {
        $self->{logger}->writeLogError('[statistics] Cannot get Broker statistics collection flag');
        return -1;
    }

    return $data->[0]->[0];
}

# For each activated broker config (optionally restricted to one poller id
# via URI variable), register a result listener and send a remote COMMAND
# that cats the broker stats JSON file; write_broker_stats() consumes the
# results. Returns 0 on success/no-op, 1 on SQL failure.
sub action_brokerstats {
    my ($self, %options) = @_;

    $options{token} = 'broker_stats' if (!defined($options{token}));

    $self->{logger}->writeLogDebug("[statistics] No Broker statistics collection configured");

    $self->send_log(
        code => GORGONE_ACTION_BEGIN,
        token => $options{token},
        instant => 1,
        data => {
            message => 'action brokerstats starting'
        }
    );

    # Collection disabled (or flag unreadable): finish successfully, no work.
    if ($self->get_broker_stats_collection_flag() < 1) {
        $self->send_log(
            code => GORGONE_ACTION_FINISH_OK,
            token => $options{token},
            instant => 1,
            data => {
                message => 'no collection configured'
            }
        );

        return 0;
    }

    my $request = "SELECT id, cache_directory, config_name FROM cfg_centreonbroker " .
        "JOIN nagios_server " .
        "WHERE ns_activate = '1' AND stats_activate = '1' AND ns_nagios_server = id";

    if (defined($options{data}->{variables}[0]) && $options{data}->{variables}[0] =~ /\d+/) {
        $request .= " AND id = '" . $options{data}->{variables}[0] . "'";
    }

    if (!defined($options{data}->{content}->{collect_localhost}) ||
        $options{data}->{content}->{collect_localhost} eq 'false') {
        $request .= " AND localhost = '0'";
    }

    my ($status, $data) = $self->{class_object_centreon}->custom_execute(request => $request, mode => 2);
    if ($status == -1) {
        $self->send_log(
            code => GORGONE_ACTION_FINISH_KO,
            token => $options{token},
            instant => 1,
            data => {
                message => 'cannot find configuration'
            }
        );
        $self->{logger}->writeLogError("[statistics] Cannot find configuration");
        return 1;
    }

    foreach (@{$data}) {
        my $target = $_->[0];
        my $statistics_file = $_->[1] . "/" . $_->[2] . "-stats.json";
        $self->{logger}->writeLogInfo(
            "[statistics] Collecting Broker statistics file '" . $statistics_file . "' from target '" . $target . "'"
        );

        $self->send_internal_action({
            action => 'ADDLISTENER',
            data => [
                {
                    identity => 'gorgonestatistics',
                    event => 'STATISTICSLISTENER',
                    target => $target,
                    token => $options{token} . '-' . $target,
                    timeout => defined($options{data}->{content}->{timeout}) && $options{data}->{content}->{timeout} =~ /(\d+)/ ?
                        $1 + $self->{log_pace} + 5: undef,
                    log_pace => $self->{log_pace}
                }
            ]
        });

        $self->send_internal_action({
            target => $target,
            action => 'COMMAND',
            token => $options{token} . '-' . $target,
            data => {
                instant => 1,
                content => [
                    {
                        command => 'cat ' . $statistics_file,
                        timeout => $options{data}->{content}->{timeout},
                        metadata => {
                            poller_id => $target,
                            config_name => $_->[2],
                            source => 'brokerstats'
                        }
                    }
                ]
            }
        });
    }

    $self->send_log(
        code => GORGONE_ACTION_FINISH_OK,
        token => $options{token},
        instant => 1,
        data => {
            message => 'action brokerstats finished'
        }
    );

    return 0;
}

# For each active poller, register a result listener and send a remote
# COMMAND running `nagiostats -c <cfg>`; write_engine_stats() consumes the
# results. Returns 0.
sub action_enginestats {
    my ($self, %options) = @_;

    $options{token} = 'engine_stats' if (!defined($options{token}));

    $self->send_log(
        code => GORGONE_ACTION_BEGIN,
        token => $options{token},
        instant => 1,
        data => {
            message => 'action enginestats starting'
        }
    );

    my $pollers = $self->get_pollers_config();

    foreach (keys %$pollers) {
        my $target = $_;
        my $enginestats_file = $pollers->{$_}->{nagiostats_bin};
        my $config_file = $pollers->{$_}->{cfg_dir} . '/' . $pollers->{$_}->{cfg_file};
        $self->{logger}->writeLogInfo(
            "[statistics] Collecting Engine statistics from target '" . $target . "'"
        );

        $self->send_internal_action({
            action => 'ADDLISTENER',
            data => [
                {
                    identity => 'gorgonestatistics',
                    event => 'STATISTICSLISTENER',
                    target => $target,
                    token => $options{token} . '-' . $target,
                    timeout => defined($options{data}->{content}->{timeout}) && $options{data}->{content}->{timeout} =~ /(\d+)/ ?
                        $1 + $self->{log_pace} + 5: undef,
                    log_pace => $self->{log_pace}
                }
            ]
        });

        $self->send_internal_action({
            target => $target,
            action => 'COMMAND',
            token => $options{token} . '-' . $target,
            data => {
                instant => 1,
                content => [
                    {
                        command => $enginestats_file . ' -c ' . $config_file,
                        timeout => $options{data}->{content}->{timeout},
                        metadata => {
                            poller_id => $target,
                            source => 'enginestats'
                        }
                    }
                ]
            }
        });
    }

    $self->send_log(
        code => GORGONE_ACTION_FINISH_OK,
        token => $options{token},
        instant => 1,
        data => {
            message => 'action enginestats finished'
        }
    );

    return 0;
}

# Listener callback: dispatch COMMAND results to the broker or engine stats
# writer based on the metadata source tag.
sub action_statisticslistener {
    my ($self, %options) = @_;

    return 0 if (!defined($options{token}));
    return 0 if ($options{data}->{code} != GORGONE_MODULE_ACTION_COMMAND_RESULT);

    if ($options{data}->{data}->{metadata}->{source} eq "brokerstats") {
        $self->write_broker_stats(data => $options{data}->{data});
    } elsif ($options{data}->{data}->{metadata}->{source} eq "enginestats") {
        $self->write_engine_stats(data => $options{data}->{data});
    }
}

# Persist a broker stats command result (JSON on stdout) into the per-poller
# broker cache directory, creating the directory on demand.
sub write_broker_stats {
    my ($self, %options) = @_;

    return if (!defined($options{data}->{result}->{exit_code}) || $options{data}->{result}->{exit_code} != 0 ||
        !defined($options{data}->{metadata}->{poller_id}) || !defined($options{data}->{metadata}->{config_name}));

    my $broker_cache_dir = $self->{config}->{broker_cache_dir} . '/' . $options{data}->{metadata}->{poller_id};

    if (! -d $broker_cache_dir ) {
        if (make_path($broker_cache_dir) == 0) {
            $self->{logger}->writeLogError("[statistics] Cannot create directory '" . $broker_cache_dir . "': $!");
            return 1;
        }
    }

    my $dest_file = $broker_cache_dir . '/' . $options{data}->{metadata}->{config_name} . '.json';
    $self->{logger}->writeLogDebug("[statistics] Writing file '" . $dest_file .
"'");
    # NOTE(review): open() result is unchecked and a bareword filehandle is
    # used — a write failure here is silent.
    open(FH, '>', $dest_file);
    print FH $options{data}->{result}->{stdout};
    close(FH);

    return 0
}

# Parse `nagiostats` output line by line; for every recognized metric update
# the matching per-poller RRD file (created on first use) and, for latency /
# execution-time metrics, refresh rows in the `nagios_stats` SQL table.
sub write_engine_stats {
    my ($self, %options) = @_;

    return if (!defined($options{data}->{result}->{exit_code}) || $options{data}->{result}->{exit_code} != 0 ||
        !defined($options{data}->{metadata}->{poller_id}));

    my $engine_stats_dir = $self->{config}->{engine_stats_dir} . '/perfmon-' . $options{data}->{metadata}->{poller_id};

    if (! -d $engine_stats_dir ) {
        if (make_path($engine_stats_dir) == 0) {
            $self->{logger}->writeLogError("[statistics] Cannot create directory '" . $engine_stats_dir . "': $!");
            return 1;
        }
    }

    foreach (split(/\n/, $options{data}->{result}->{stdout})) {
        if ($_ =~ /Used\/High\/Total Command Buffers:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) {
            my $dest_file = $engine_stats_dir . '/nagios_cmd_buffer.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                # rrd_create returning non-zero skips this metric line.
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "In_Use", "Max_Used", "Total_Available" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "In_Use", "Max_Used", "Total_Available" ],
                values => [ $1, $2 , $3 ]
            );
        } elsif ($_ =~ /Active Service Latency:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) {
            # First latency line seen: wipe the poller's previous nagios_stats
            # rows before re-inserting.
            my $status = $self->{class_object_centstorage}->custom_execute(
                request => "DELETE FROM `nagios_stats` WHERE instance_id = '" . $options{data}->{metadata}->{poller_id} . "'"
            );
            if ($status == -1) {
                $self->{logger}->writeLogError("[statistics] Failed to delete statistics in 'nagios_stats table'");
            } else {
                my $status = $self->{class_object_centstorage}->custom_execute(
                    request => "INSERT INTO `nagios_stats` (instance_id, stat_label, stat_key, stat_value) VALUES " .
                        "('$options{data}->{metadata}->{poller_id}', 'Service Check Latency', 'Min', '$1'), " .
                        "('$options{data}->{metadata}->{poller_id}', 'Service Check Latency', 'Max', '$2'), " .
                        "('$options{data}->{metadata}->{poller_id}', 'Service Check Latency', 'Average', '$3')"
                );
                if ($status == -1) {
                    $self->{logger}->writeLogError("[statistics] Failed to add statistics Service Check Latency in 'nagios_stats table'");
                }
            }

            my $dest_file = $engine_stats_dir . '/nagios_active_service_latency.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Min", "Max", "Average" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Min", "Max", "Average" ],
                values => [ $1, $2 , $3 ]
            );
        } elsif ($_ =~ /Active Service Execution Time:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) {
            my $status = $self->{class_object_centstorage}->custom_execute(
                request => "INSERT INTO `nagios_stats` (instance_id, stat_label, stat_key, stat_value) VALUES " .
                    "('$options{data}->{metadata}->{poller_id}', 'Service Check Execution Time', 'Min', '$1'), " .
                    "('$options{data}->{metadata}->{poller_id}', 'Service Check Execution Time', 'Max', '$2'), " .
                    "('$options{data}->{metadata}->{poller_id}', 'Service Check Execution Time', 'Average', '$3')"
            );
            if ($status == -1) {
                $self->{logger}->writeLogError("[statistics] Failed to add statistics Service Check Execution Time in 'nagios_stats table'");
            }
            my $dest_file = $engine_stats_dir . '/nagios_active_service_execution.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Min", "Max", "Average" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Min", "Max", "Average" ],
                values => [ $1, $2 , $3 ]
            );
        } elsif ($_ =~ /Active Services Last 1\/5\/15\/60 min:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) {
            my $dest_file = $engine_stats_dir . '/nagios_active_service_last.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ],
                values => [ $1, $2 , $3, $4 ]
            );
        } elsif ($_ =~ /Services Ok\/Warn\/Unk\/Crit:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) {
            my $dest_file = $engine_stats_dir . '/nagios_services_states.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Ok", "Warn", "Unk", "Crit" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Ok", "Warn", "Unk", "Crit" ],
                values => [ $1, $2 , $3, $4 ]
            );
        } elsif ($_ =~ /Active Host Latency:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) {
            my $status = $self->{class_object_centstorage}->custom_execute(
                request => "INSERT INTO `nagios_stats` (instance_id, stat_label, stat_key, stat_value) VALUES " .
                    "('$options{data}->{metadata}->{poller_id}', 'Host Check Latency ', 'Min', '$1'), " .
                    "('$options{data}->{metadata}->{poller_id}', 'Host Check Latency ', 'Max', '$2'), " .
                    "('$options{data}->{metadata}->{poller_id}', 'Host Check Latency ', 'Average', '$3')"
            );
            if ($status == -1) {
                $self->{logger}->writeLogError("[statistics] Failed to add statistics Host Check Latency in 'nagios_stats table'");
            }
            my $dest_file = $engine_stats_dir . '/nagios_active_host_latency.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Min", "Max", "Average" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Min", "Max", "Average" ],
                values => [ $1, $2 , $3 ]
            );
        } elsif ($_ =~ /Active Host Execution Time:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) {
            my $status = $self->{class_object_centstorage}->custom_execute(
                request => "INSERT INTO `nagios_stats` (instance_id, stat_label, stat_key, stat_value) VALUES " .
                    "('$options{data}->{metadata}->{poller_id}', 'Host Check Execution Time', 'Min', '$1'), " .
                    "('$options{data}->{metadata}->{poller_id}', 'Host Check Execution Time', 'Max', '$2'), " .
                    "('$options{data}->{metadata}->{poller_id}', 'Host Check Execution Time', 'Average', '$3')"
            );
            if ($status == -1) {
                $self->{logger}->writeLogError("[statistics] Failed to add statistics Host Check Execution Time in 'nagios_stats table'");
            }
            my $dest_file = $engine_stats_dir . '/nagios_active_host_execution.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file .
"'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Min", "Max", "Average" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Min", "Max", "Average" ],
                values => [ $1, $2 , $3 ]
            );
        } elsif ($_ =~ /Active Hosts Last 1\/5\/15\/60 min:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) {
            my $dest_file = $engine_stats_dir . '/nagios_active_host_last.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ],
                values => [ $1, $2 , $3, $4 ]
            );
        } elsif ($_ =~ /Hosts Up\/Down\/Unreach:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) {
            my $dest_file = $engine_stats_dir . '/nagios_hosts_states.rrd';
            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
            if (!-e $dest_file) {
                next if ($self->rrd_create(
                    file => $dest_file,
                    heartbeat => $self->{config}->{heartbeat},
                    interval => $self->{config}->{interval},
                    number => $self->{config}->{number},
                    ds => [ "Up", "Down", "Unreach" ]
                ));
            }
            $self->rrd_update(
                file => $dest_file,
                ds => [ "Up", "Down", "Unreach" ],
                values => [ $1, $2 , $3 ]
            );
        }
    }
    $self->{logger}->writeLogInfo("[statistics] poller $options{data}->{metadata}->{poller_id} engine data was integrated in rrd and sql database.");
}

# Create an RRD file with the given GAUGE data sources, step (interval) and
# two AVERAGE RRAs, then tune each DS heartbeat. Returns 0 on success, 1 on
# any RRDs error.
sub rrd_create {
    my ($self, %options) = @_;

    my @ds;
    foreach my $ds (@{$options{ds}}) {
        push @ds, "DS:" . $ds . ":GAUGE:" . $options{interval} . ":0:U";
    }

    RRDs::create(
        $options{file},
        "-s" . $options{interval},
        @ds,
        "RRA:AVERAGE:0.5:1:" . $options{number},
        "RRA:AVERAGE:0.5:12:" . $options{number}
    );
    if (RRDs::error()) {
        my $error = RRDs::error();
        $self->{logger}->writeLogError("[statistics] Error creating RRD file '" . $options{file} . "': " . $error);
        return 1
    }

    foreach my $ds (@{$options{ds}}) {
        RRDs::tune($options{file}, "-h", $ds . ":" . $options{heartbeat});
        if (RRDs::error()) {
            my $error = RRDs::error();
            $self->{logger}->writeLogError("[statistics] Error tuning RRD file '" . $options{file} . "': " . $error);
            return 1
        }
    }

    return 0;
}

# Push values into an RRD using a DS template at the current time ("N").
# Returns 0 on success, 1 on RRDs error.
sub rrd_update {
    my ($self, %options) = @_;

    my $append = '';
    my $ds;
    foreach (@{$options{ds}}) {
        $ds .= $append . $_;
        $append = ':';
    }
    my $values;
    # NOTE(review): this loop deliberately reuses $append, which is ':' after
    # the ds loop above, so every value (including the first) gets a ':'
    # prefix — combined with the leading "N" below this builds "N:v1:v2:...".
    # Assumes ds is non-empty; verify callers.
    foreach (@{$options{values}}) {
        $values .= $append . $_;
    }
    RRDs::update(
        $options{file},
        "--template",
        $ds,
        "N" . $values
    );
    if (RRDs::error()) {
        my $error = RRDs::error();
        $self->{logger}->writeLogError("[statistics] Error updating RRD file '" . $options{file} . "': " . $error);
        return 1
    }

    return 0;
}

# Timer callback: exit cleanly once a stop was requested.
sub periodic_exec {
    if ($connector->{stop} == 1) {
        $connector->{logger}->writeLogInfo("[statistics] $$ has quit");
        exit(0);
    }
}

# Main loop: connect the internal ZMQ dealer socket, announce readiness,
# open the Centreon and Centstorage databases, register the optional cron
# entries, then run the EV loop with a 5 s stop-check timer and an IO
# watcher on the internal socket.
sub run {
    my ($self, %options) = @_;

    $self->{internal_socket} = gorgone::standard::library::connect_com(
        context => $self->{zmq_context},
        zmq_type => 'ZMQ_DEALER',
        name => 'gorgone-statistics',
        logger => $self->{logger},
        type => $self->get_core_config(name => 'internal_com_type'),
        path => $self->get_core_config(name => 'internal_com_path')
    );
    $self->send_internal_action({
        action => 'STATISTICSREADY',
        data => {}
    });

    $self->{db_centreon} = gorgone::class::db->new(
        dsn => $self->{config_db_centreon}->{dsn},
        user => $self->{config_db_centreon}->{username},
        password => $self->{config_db_centreon}->{password},
        force => 2,
        logger => $self->{logger}
    );
    $self->{class_object_centreon} = gorgone::class::sqlquery->new(
        logger => $self->{logger},
        db_centreon => $self->{db_centreon}
    );

    $self->{db_centstorage} = gorgone::class::db->new(
        dsn => $self->{config_db_centstorage}->{dsn},
        user => $self->{config_db_centstorage}->{username},
        password => $self->{config_db_centstorage}->{password},
        force => 2,
        logger => $self->{logger}
    );
    $self->{class_object_centstorage} = gorgone::class::sqlquery->new(
        logger => $self->{logger},
        db_centreon => $self->{db_centstorage}
    );

    if (defined($self->{config}->{cron})) {
        $self->send_internal_action({
            action => 'ADDCRON',
            data => {
                content => $self->{config}->{cron}
            }
        });
    }

    my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec);
    my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } );
    $self->{loop}->run();
}

1;
diff --git a/gorgone/gorgone/modules/centreon/statistics/hooks.pm b/gorgone/gorgone/modules/centreon/statistics/hooks.pm
new file mode 100644
index 00000000000..8d13dd0837f
--- /dev/null
+++
b/gorgone/gorgone/modules/centreon/statistics/hooks.pm @@ -0,0 +1,172 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::statistics::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::statistics::class; + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'statistics'; +use constant EVENTS => [ + { event => 'STATISTICSREADY' }, + { event => 'STATISTICSLISTENER' }, + { event => 'BROKERSTATS', uri => '/broker', method => 'GET' }, + { event => 'ENGINESTATS', uri => '/engine', method => 'GET' } +]; + +my $config_core; +my $config; +my $config_db_centreon; +my $config_db_centstorage; +my $statistics = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config_db_centstorage = $options{config_db_centstorage}; + $config->{broker_cache_dir} = defined($config->{broker_cache_dir}) ? + $config->{broker_cache_dir} : '/var/cache/centreon/broker-stats/'; + $config->{engine_stats_dir} = defined($config->{engine_stats_dir}) ?
+ $config->{engine_stats_dir} : "/var/lib/centreon/nagios-perf/"; + + $config->{interval} = defined($config->{interval}) ? $config->{interval} : 300; + $config->{length} = defined($config->{length}) ? $config->{length} : 365; + $config->{number} = $config->{length} * 24 * 60 * 60 / $config->{interval}; + $config->{heartbeat_factor} = defined($config->{heartbeat_factor}) ? $config->{heartbeat_factor} : 10; + $config->{heartbeat} = $config->{interval} * $config->{heartbeat_factor}; + + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'STATISTICSREADY') { + $statistics->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$statistics->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { msg => 'gorgonestatistics: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-statistics', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($statistics->{running}) && $statistics->{running} == 1) { + $options{logger}->writeLogDebug("[statistics] Send TERM signal $statistics->{pid}"); + CORE::kill('TERM', $statistics->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($statistics->{running} == 1) { + $options{logger}->writeLogDebug("[statistics] Send KILL signal for pool"); + CORE::kill('KILL', $statistics->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($statistics->{pid}) || $statistics->{pid} != $pid); + + $statistics = {};
+ delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($statistics->{running}) && $statistics->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[statistics] Create module 'statistics' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-statistics'; + my $module = gorgone::modules::centreon::statistics::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[statistics] PID $child_pid (gorgone-statistics)"); + $statistics = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/action/class.pm b/gorgone/gorgone/modules/core/action/class.pm new file mode 100644 index 00000000000..aa2a0a84aec --- /dev/null +++ b/gorgone/gorgone/modules/core/action/class.pm @@ -0,0 +1,896 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::action::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use JSON::XS; +use File::Basename; +use File::Copy; +use File::Path qw(make_path); +use POSIX ":sys_wait_h"; +use MIME::Base64; +use Digest::MD5::File qw(file_md5_hex); +use Archive::Tar; +use Fcntl; +use Try::Tiny; +use EV; + +$Archive::Tar::SAME_PERMISSIONS = 1; +$Archive::Tar::WARN = 0; +$Digest::MD5::File::NOFATALS = 1; +my %handlers = (TERM => {}, HUP => {}, CHLD => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{process_copy_files_error} = {}; + + $connector->{command_timeout} = defined($connector->{config}->{command_timeout}) ? + $connector->{config}->{command_timeout} : 30; + $connector->{whitelist_cmds} = defined($connector->{config}->{whitelist_cmds}) && $connector->{config}->{whitelist_cmds} =~ /true|1/i ? + 1 : 0; + $connector->{allowed_cmds} = []; + $connector->{allowed_cmds} = $connector->{config}->{allowed_cmds} + if (defined($connector->{config}->{allowed_cmds}) && ref($connector->{config}->{allowed_cmds}) eq 'ARRAY'); + + if (defined($connector->{config}->{tar_insecure_extra_mode}) && $connector->{config}->{tar_insecure_extra_mode} =~ /^(?:1|true)$/) { + $Archive::Tar::INSECURE_EXTRACT_MODE = 1; + } + + $connector->{paranoid_plugins} = defined($connector->{config}->{paranoid_plugins}) && $connector->{config}->{paranoid_plugins} =~ /true|1/i ? + 1 : 0; + + $connector->{return_childs} = {}; + $connector->{engine_childs} = {}; + $connector->{max_concurrent_engine} = defined($connector->{config}->{max_concurrent_engine}) ? 
+ $connector->{config}->{max_concurrent_engine} : 3; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; + $SIG{CHLD} = \&class_handle_CHLD; + $handlers{CHLD}->{$self} = sub { $self->handle_CHLD() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[action] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub handle_CHLD { + my $self = shift; + my $child_pid; + + while (($child_pid = waitpid(-1, &WNOHANG)) > 0) { + $self->{logger}->writeLogDebug("[action] Received SIGCLD signal (pid: $child_pid)"); + $self->{return_child}->{$child_pid} = 1; + } + + $SIG{CHLD} = \&class_handle_CHLD; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub class_handle_CHLD { + foreach (keys %{$handlers{CHLD}}) { + &{$handlers{CHLD}->{$_}}(); + } +} + +sub check_childs { + my ($self, %options) = @_; + + foreach (keys %{$self->{return_child}}) { + delete $self->{engine_childs}->{$_} if (defined($self->{engine_childs}->{$_})); + } + + $self->{return_child} = {}; +} + +sub get_package_manager { + my ($self, %options) = @_; + + my $os = 'unknown'; + my ($rv, $message, $content) = gorgone::standard::misc::slurp(file => '/etc/os-release'); + if ($rv && $content =~ /^ID="(.*?)"/mi) { + $os = $1; + } else { + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'lsb_release -a', + timeout => 5, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error == 0 && $stdout =~ /^Description:\s+(.*)$/mi) { + $os = $1; + } + } + + 
$self->{package_manager} = 'unknown'; + if ($os =~ /Debian|Ubuntu/i) { + $self->{package_manager} = 'deb'; + } elsif ($os =~ /CentOS|Redhat|rhel|almalinux|rocky/i) { + $self->{package_manager} = 'rpm'; + } elsif ($os eq 'ol' || $os =~ /Oracle Linux/i) { + $self->{package_manager} = 'rpm'; + } +} + +sub check_plugins_rpm { + my ($self, %options) = @_; + + #rpm -q centreon-plugin-Network-Microsens-G6-Snmp test centreon-plugin-Network-Generic-Bluecoat-Snmp + #centreon-plugin-Network-Microsens-G6-Snmp-20211228-150846.el7.centos.noarch + #package test is not installed + #centreon-plugin-Network-Generic-Bluecoat-Snmp-20211102-130335.el7.centos.noarch + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'rpm', + arguments => ['-q', keys %{$options{plugins}}], + timeout => 60, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + if ($error != 0) { + return (-1, 'check rpm plugins command issue: ' . $stdout); + } + + my $installed = []; + foreach my $package_name (keys %{$options{plugins}}) { + if ($stdout =~ /^$package_name-(\d+)-/m) { + my $current_version = $1; + if ($current_version < $options{plugins}->{$package_name}) { + push @$installed, $package_name . '-' . $options{plugins}->{$package_name}; + } + } else { + push @$installed, $package_name . '-' . 
$options{plugins}->{$package_name}; + } + } + + if (scalar(@$installed) > 0) { + return (1, 'install', $installed); + } + + $self->{logger}->writeLogInfo("[action] validate plugins - nothing to install"); + return 0; +} + +sub check_plugins_deb { + my ($self, %options) = @_; + + #dpkg -l centreon-plugin-* + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'dpkg', + arguments => ['-l', 'centreon-plugin-*'], + timeout => 60, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + + my $installed = []; + foreach my $package_name (keys %{$options{plugins}}) { + if ($stdout =~ /\s+$package_name\s+(\d+)-/m) { + my $current_version = $1; + if ($current_version < $options{plugins}->{$package_name}) { + push @$installed, $package_name . '=' . $options{plugins}->{$package_name}; + } + } else { + push @$installed, $package_name . '=' . $options{plugins}->{$package_name}; + } + } + + if (scalar(@$installed) > 0) { + return (1, 'install', $installed); + } + + $self->{logger}->writeLogInfo("[action] validate plugins - nothing to install"); + return 0; +} + +sub install_plugins { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[action] validate plugins - install " . join(' ', @{$options{installed}})); + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'sudo', + arguments => ['/usr/local/bin/gorgone_install_plugins.pl', '--type=' . $options{type}, @{$options{installed}}], + timeout => 300, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + $self->{logger}->writeLogDebug("[action] install plugins. Command output: [\"$stdout\"]"); + if ($error != 0) { + return (-1, 'install plugins command issue: ' . 
$stdout); + } + + return 0; +} + +sub validate_plugins_rpm { + my ($self, %options) = @_; + + my ($rv, $message, $installed) = $self->check_plugins_rpm(%options); + return (1, $message) if ($rv == -1); + return 0 if ($rv == 0); + + if ($rv == 1) { + ($rv, $message) = $self->install_plugins(type => 'rpm', installed => $installed); + return (1, $message) if ($rv == -1); + } + + ($rv, $message, $installed) = $self->check_plugins_rpm(%options); + return (1, $message) if ($rv == -1); + if ($rv == 1) { + $message = 'validate plugins - still some to install: ' . join(' ', @$installed); + $self->{logger}->writeLogError("[action] $message"); + return (1, $message); + } + + return 0; +} + +sub validate_plugins_deb { + my ($self, %options) = @_; + + my $plugins = {}; + foreach (keys %{$options{plugins}}) { + $plugins->{ lc($_) } = $options{plugins}->{$_}; + } + + my ($rv, $message, $installed) = $self->check_plugins_deb(plugins => $plugins); + return (1, $message) if ($rv == -1); + return 0 if ($rv == 0); + + if ($rv == 1) { + ($rv, $message) = $self->install_plugins(type => 'deb', installed => $installed); + return (1, $message) if ($rv == -1); + } + + ($rv, $message, $installed) = $self->check_plugins_deb(plugins => $plugins); + return (1, $message) if ($rv == -1); + if ($rv == 1) { + $message = 'validate plugins - still some to install: ' . join(' ', @$installed); + $self->{logger}->writeLogError("[action] $message"); + return (1, $message); + } + + return 0; +} + +sub validate_plugins { + my ($self, %options) = @_; + + my ($rv, $message, $content); + my $plugins = $options{plugins}; + if (!defined($plugins)) { + ($rv, $message, $content) = gorgone::standard::misc::slurp(file => $options{file}); + return (1, $message) if (!$rv); + + try { + $plugins = JSON::XS->new->decode($content); + } catch { + return (1, 'cannot decode json'); + }; + } + + # nothing to validate. so it's ok, show must go on!! 
:) + if (ref($plugins) ne 'HASH' || scalar(keys %$plugins) <= 0) { + return 0; + } + + if ($self->{package_manager} eq 'rpm') { + ($rv, $message) = $self->validate_plugins_rpm(plugins => $plugins); + } elsif ($self->{package_manager} eq 'deb') { + ($rv, $message) = $self->validate_plugins_deb(plugins => $plugins); + } else { + ($rv, $message) = (1, 'validate plugins - unsupported operating system'); + } + + return ($rv, $message); +} + +sub is_command_authorized { + my ($self, %options) = @_; + + return 0 if ($self->{whitelist_cmds} == 0); + + foreach my $regexp (@{$self->{allowed_cmds}}) { + return 0 if ($options{command} =~ /$regexp/); + } + + return 1; +} + +sub action_command { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}) || ref($options{data}->{content}) ne 'ARRAY') { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "expected array, found '" . ref($options{data}->{content}) . "'" + } + ); + return -1; + } + + my $index = 0; + foreach my $command (@{$options{data}->{content}}) { + if (!defined($command->{command}) || $command->{command} eq '') { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "need command argument at array index '" . $index . "'" + } + ); + return -1; + } + + if ($self->is_command_authorized(command => $command->{command})) { + $self->{logger}->writeLogError("[action] command not allowed (whitelist): " . 
$command->{command}); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command not allowed (whitelist) at array index '$index' : $command->{command}" + } + ); + return -1; + } + + $index++; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + ); + + my $errors = 0; + foreach my $command (@{$options{data}->{content}}) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command has started", + command => $command->{command}, + metadata => $command->{metadata} + } + ); + + # check install pkg + if (defined($command->{metadata}) && defined($command->{metadata}->{pkg_install})) { + my ($rv, $message) = $self->validate_plugins(plugins => $command->{metadata}->{pkg_install}); + if ($rv && $self->{paranoid_plugins} == 1) { + $self->{logger}->writeLogError("[action] $message"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command execution issue", + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $rv, + stdout => $message + } + } + ); + next; + } + } + + my $start = time(); + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => $command->{command}, + timeout => (defined($command->{timeout})) ? 
$command->{timeout} : $self->{command_timeout}, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + my $end = time(); + if ($error <= -1000) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command execution issue", + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $return_code, + stdout => $stdout + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + ); + + if (defined($command->{continue_on_error}) && $command->{continue_on_error} == 0) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has been interrupted because of error" + } + ); + return -1; + } + + $errors = 1; + } else { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_MODULE_ACTION_COMMAND_RESULT, + token => $options{token}, + logging => $options{data}->{logging}, + instant => $options{data}->{instant}, + data => { + message => "command has finished successfully", + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $return_code, + stdout => $stdout + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + ); + } + } + + if ($errors) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has finished with errors" + } + ); + return -1; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has finished 
successfully" + } + ); + + return 0; +} + +sub action_processcopy { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}) || $options{data}->{content} eq '') { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => 'no content' } + ); + return -1; + } + + my $cache_file = $options{data}->{content}->{cache_dir} . '/copy_' . $options{token}; + if ($options{data}->{content}->{status} eq 'inprogress' && defined($options{data}->{content}->{chunk}->{data})) { + my $fh; + if (!sysopen($fh, $cache_file, O_RDWR|O_APPEND|O_CREAT, 0660)) { + # no need to insert too many logs + return -1 if (defined($self->{process_copy_files_error}->{$cache_file})); + $self->{process_copy_files_error}->{$cache_file} = 1; + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => "file '$cache_file' open failed: $!" } + ); + + $self->{logger}->writeLogError("[action] file '$cache_file' open failed: $!"); + return -1; + } + delete $self->{process_copy_files_error}->{$cache_file} if (defined($self->{process_copy_files_error}->{$cache_file})); + binmode($fh); + syswrite( + $fh, + MIME::Base64::decode_base64($options{data}->{content}->{chunk}->{data}), + $options{data}->{content}->{chunk}->{size} + ); + close $fh; + + $self->send_log( + code => GORGONE_MODULE_ACTION_PROCESSCOPY_INPROGRESS, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'process copy inprogress', + } + ); + $self->{logger}->writeLogInfo("[action] Copy processing - Received chunk for '" . $options{data}->{content}->{destination} . 
"'"); + return 0; + } elsif ($options{data}->{content}->{status} eq 'end' && defined($options{data}->{content}->{md5})) { + delete $self->{process_copy_files_error}->{$cache_file} if (defined($self->{process_copy_files_error}->{$cache_file})); + my $local_md5_hex = file_md5_hex($cache_file); + if (defined($local_md5_hex) && $options{data}->{content}->{md5} eq $local_md5_hex) { + if ($options{data}->{content}->{type} eq "archive") { + if (! -d $options{data}->{content}->{destination}) { + make_path($options{data}->{content}->{destination}); + } + + my $tar = Archive::Tar->new(); + $tar->setcwd($options{data}->{content}->{destination}); + unless ($tar->read($cache_file, undef, { extract => 1 })) { + my $tar_error = $tar->error(); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => "untar failed: $tar_error" } + ); + $self->{logger}->writeLogError("[action] Copy processing - Untar failed: $tar_error"); + return -1; + } + } elsif ($options{data}->{content}->{type} eq 'regular') { + copy($cache_file, $options{data}->{content}->{destination}); + my $uid = getpwnam($options{data}->{content}->{owner}); + my $gid = getgrnam($options{data}->{content}->{group}); + chown($uid, $gid, $options{data}->{content}->{destination}); + } + } else { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => 'md5 does not match' } + ); + $self->{logger}->writeLogError('[action] Copy processing - MD5 does not match'); + return -1; + } + } + + unlink($cache_file); + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "process copy finished successfully", + } + ); + $self->{logger}->writeLogInfo("[action] Copy processing - Copy to '" . $options{data}->{content}->{destination} . 
"' finished successfully"); + return 0; +} + +sub action_actionengine { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}) || $options{data}->{content} eq '') { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => 'no content' } + ); + return -1; + } + + if (!defined($options{data}->{content}->{command})) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "need valid command argument" + } + ); + return -1; + } + + if ($self->is_command_authorized(command => $options{data}->{content}->{command})) { + $self->{logger}->writeLogError("[action] command not allowed (whitelist): " . $options{data}->{content}->{command}); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'command not allowed (whitelist): ' . 
$options{data}->{content}->{command} + } + ); + return -1; + } + + if (defined($options{data}->{content}->{plugins}) && $options{data}->{content}->{plugins} ne '') { + my ($rv, $message) = $self->validate_plugins(file => $options{data}->{content}->{plugins}); + if ($rv && $self->{paranoid_plugins} == 1) { + $self->{logger}->writeLogError("[action] $message"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => $message + } + ); + return -1; + } + } + + my $start = time(); + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => $options{data}->{content}->{command}, + timeout => $self->{command_timeout}, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + my $end = time(); + if ($error != 0) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command execution issue", + command => $options{data}->{content}->{command}, + result => { + exit_code => $return_code, + stdout => $stdout + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + ); + return -1; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'actionengine has finished successfully' + } + ); + + return 0; +} + +sub action_run { + my ($self, %options) = @_; + + my $context; + { + local $SIG{__DIE__}; + $context = ZMQ::FFI->new(); + } + + my $socket_log = gorgone::standard::library::connect_com( + context => $context, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-action-'. 
$$, + logger => $self->{logger}, + zmq_linger => 60000, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + + if ($options{action} eq 'COMMAND') { + $self->action_command(%options, socket_log => $socket_log); + } elsif ($options{action} eq 'ACTIONENGINE') { + $self->action_actionengine(%options, socket_log => $socket_log); + } else { + $self->send_log( + socket => $socket_log, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => "action unknown" } + ); + return -1; + } +} + +sub create_child { + my ($self, %options) = @_; + + if ($options{action} =~ /^BCAST.*/) { + if ((my $method = $self->can('action_' . lc($options{action})))) { + $method->($self, token => $options{token}, data => $options{data}); + } + return undef; + } + + if ($options{action} eq 'ACTIONENGINE') { + my $num = scalar(keys %{$self->{engine_childs}}); + if ($num > $self->{max_concurrent_engine}) { + $self->{logger}->writeLogInfo("[action] max_concurrent_engine limit reached ($num/$self->{max_concurrent_engine})"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "max_concurrent_engine limit reached ($num/$self->{max_concurrent_engine})" } + ); + return undef; + } + } + + $self->{logger}->writeLogDebug("[action] Create sub-process"); + my $child_pid = fork(); + if (!defined($child_pid)) { + $self->{logger}->writeLogError("[action] Cannot fork process: $!"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "cannot fork: $!" 
} + ); + return undef; + } + + if ($child_pid == 0) { + $self->set_fork(); + $self->action_run(action => $options{action}, token => $options{token}, data => $options{data}); + exit(0); + } else { + if ($options{action} eq 'ACTIONENGINE') { + $self->{engine_childs}->{$child_pid} = 1; + } + } +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[action] Event: $message"); + + if ($message !~ /^\[ACK\]/) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + next if ($rv); + + if (defined($data->{parameters}->{no_fork})) { + if ((my $method = $self->can('action_' . lc($action)))) { + $method->($self, token => $token, data => $data); + } + } else { + $self->create_child(action => $action, token => $token, data => $data); + } + } + } +} + +sub periodic_exec { + $connector->check_childs(); + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[action] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-action', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'ACTIONREADY', + data => {} + }); + + $self->get_package_manager(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/action/hooks.pm b/gorgone/gorgone/modules/core/action/hooks.pm new file mode 
100644 index 00000000000..4adaf195c7f --- /dev/null +++ b/gorgone/gorgone/modules/core/action/hooks.pm @@ -0,0 +1,155 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::action::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::action::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'action'; +use constant EVENTS => [ + { event => 'ACTIONREADY' }, + { event => 'PROCESSCOPY' }, + { event => 'COMMAND', uri => '/command', method => 'POST' }, + { event => 'ACTIONENGINE', uri => '/engine', method => 'POST' } +]; + +my $config_core; +my $config; +my $action = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'ACTIONREADY') { + $action->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$action->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + 
token => $options{token}, + data => { msg => 'gorgoneaction: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-action', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($action->{running}) && $action->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send TERM signal $action->{pid}"); + CORE::kill('TERM', $action->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($action->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send KILL signal for pool"); + CORE::kill('KILL', $action->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($action->{pid}) || $action->{pid} != $pid); + + $action = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($action->{running}) && $action->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[action] Create module 'action' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-action'; + my $module = gorgone::modules::core::action::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[action] PID $child_pid (gorgone-action)"); + $action = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/cron/class.pm b/gorgone/gorgone/modules/core/cron/class.pm new file mode 100644 index 00000000000..275760b88f7 
--- /dev/null +++ b/gorgone/gorgone/modules/core/cron/class.pm @@ -0,0 +1,500 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::cron::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Schedule::Cron; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[cron] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + 
+sub action_getcron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + my $data; + my $id = $options{data}->{variables}[0]; + my $parameter = $options{data}->{variables}[1]; + if (defined($id) && $id ne '') { + if (defined($parameter) && $parameter =~ /^status$/) { + $self->{logger}->writeLogInfo("[cron] Get logs results for definition '" . $id . "'"); + $self->send_internal_action({ + action => 'GETLOG', + token => $options{token}, + data => { + token => $id, + ctime => $options{data}->{parameters}->{ctime}, + etime => $options{data}->{parameters}->{etime}, + limit => $options{data}->{parameters}->{limit}, + code => $options{data}->{parameters}->{code} + } + }); + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $self->{loop}->timer(1, 0, \&stop_ev); + $self->{loop}->run(); + last if (time() > ($ctime + $timeout)); + } + + $data = $connector->{ack}->{data}->{data}->{result}; + } else { + my $idx; + eval { + $idx = $self->{cron}->check_entry($id); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron get failed to retrieve entry index"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'failed to retrieve entry index' } + ); + return 1; + } + if (!defined($idx)) { + $self->{logger}->writeLogError("[cron] Cron get failed no entry found for id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'no entry found for id' } + ); + return 1; + } + + eval { + my $result = $self->{cron}->get_entry($idx); + push @{$data}, { %{$result->{args}[1]->{definition}} } if (defined($result->{args}[1]->{definition})); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron get failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'get failed:' . 
$@ } + ); + return 1; + } + } + } else { + eval { + my @results = $self->{cron}->list_entries(); + foreach my $cron (@results) { + push @{$data}, { %{$cron->{args}[1]->{definition}} }; + } + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron get failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'get failed:' . $@ } + ); + return 1; + } + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => $data + ); + return 0; +} + +sub action_addcron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("[cron] Cron add start"); + + foreach my $definition (@{$options{data}->{content}}) { + if (!defined($definition->{timespec}) || $definition->{timespec} eq '' || + !defined($definition->{action}) || $definition->{action} eq '' || + !defined($definition->{id}) || $definition->{id} eq '') { + $self->{logger}->writeLogError("[cron] Cron add missing arguments"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing arguments' } + ); + return 1; + } + } + + eval { + foreach my $definition (@{$options{data}->{content}}) { + my $idx = $self->{cron}->check_entry($definition->{id}); + if (defined($idx)) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "id '" . $definition->{id} . "' already exists" } + ); + next; + } + $self->{logger}->writeLogInfo("[cron] Adding cron definition '" . $definition->{id} . 
"'"); + $self->{cron}->add_entry( + $definition->{timespec}, + $definition->{id}, + { + connector => $connector, + socket => $connector->{internal_socket}, + logger => $self->{logger}, + definition => $definition + } + ); + } + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron add failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'add failed:' . $@ } + ); + return 1; + } + + $self->{logger}->writeLogDebug("[cron] Cron add finish"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'add succeed' } + ); + return 0; +} + +sub action_updatecron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("[cron] Cron update start"); + + my $id = $options{data}->{variables}[0]; + if (!defined($id)) { + $self->{logger}->writeLogError("[cron] Cron update missing id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing id' } + ); + return 1; + } + + my $idx; + eval { + $idx = $self->{cron}->check_entry($id); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron update failed to retrieve entry index"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'failed to retrieve entry index' } + ); + return 1; + } + if (!defined($idx)) { + $self->{logger}->writeLogError("[cron] Cron update failed no entry found for id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'no entry found for id' } + ); + return 1; + } + + my $definition = $options{data}->{content}; + if ((!defined($definition->{timespec}) || $definition->{timespec} eq '') && + (!defined($definition->{command_line}) || $definition->{command_line} eq '')) { + $self->{logger}->writeLogError("[cron] Cron update missing 
arguments"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing arguments' } + ); + return 1; + } + + eval { + my $entry = $self->{cron}->get_entry($idx); + $entry->{time} = $definition->{timespec}; + $entry->{args}[1]->{definition}->{timespec} = $definition->{timespec} + if (defined($definition->{timespec})); + $entry->{args}[1]->{definition}->{command_line} = $definition->{command_line} + if (defined($definition->{command_line})); + $self->{cron}->update_entry($idx, $entry); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron update failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'update failed:' . $@ } + ); + return 1; + } + + $self->{logger}->writeLogDebug("[cron] Cron update succeed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'update succeed' } + ); + return 0; +} + +sub action_deletecron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("[cron] Cron delete start"); + + my $id = $options{data}->{variables}->[0]; + if (!defined($id) || $id eq '') { + $self->{logger}->writeLogError("[cron] Cron delete missing id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing id' } + ); + return 1; + } + + my $idx; + eval { + $idx = $self->{cron}->check_entry($id); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron delete failed to retrieve entry index"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'failed to retrieve entry index' } + ); + return 1; + } + if (!defined($idx)) { + $self->{logger}->writeLogError("[cron] Cron delete failed no entry found for id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, 
+ data => { message => 'no entry found for id' } + ); + return 1; + } + + eval { + $self->{cron}->delete_entry($idx); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron delete failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'delete failed:' . $@ } + ); + return 1; + } + + $self->{logger}->writeLogDebug("[cron] Cron delete finish"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'delete succeed' } + ); + return 0; +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[cron] Event: $message"); + if ($message =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)$/m) { + my $token = $1; + my ($rv, $data) = $self->json_decode(argument => $2, token => $token); + next if ($rv); + + $self->{ack} = { + token => $token, + data => $data + }; + } else { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + if ((my $method = $self->can('action_' . lc($1)))) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + next if ($rv); + + $method->($self, token => $token, data => $data); + } + } + } +} + +sub stop_ev { + $connector->{loop}->break(); +} + +sub cron_sleep { + my $watcher_timer = $connector->{loop}->timer(1, 0, \&stop_ev); + $connector->{loop}->run(); + + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[cron] $$ has quit"); + exit(0); + } +} + +sub dispatcher { + my ($id, $options) = @_; + + $options->{logger}->writeLogInfo("[cron] Launching job '" . $id . "'"); + + my $token = (defined($options->{definition}->{keep_token})) && $options->{definition}->{keep_token} =~ /true|1/i + ? 
$options->{definition}->{id} : undef; + + $options->{connector}->send_internal_action({ + socket => $options->{socket}, + token => $token, + action => $options->{definition}->{action}, + target => $options->{definition}->{target}, + data => { + content => $options->{definition}->{parameters} + }, + json_encode => 1 + }); + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $options->{connector}->{loop}->timer(1, 0, \&stop_ev); + $options->{connector}->{loop}->run(); + last if (time() > ($ctime + $timeout)); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-cron', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CRONREADY', + data => {} + }); + + # need at least one cron to get sleep working + push @{$self->{config}->{cron}}, { + id => "default", + timespec => "0 0 * * *", + action => "INFORMATION", + parameters => {} + }; + + $self->{cron} = new Schedule::Cron(\&dispatcher, nostatus => 1, nofork => 1, catch => 1); + + foreach my $definition (@{$self->{config}->{cron}}) { + $self->{cron}->add_entry( + $definition->{timespec}, + $definition->{id}, + { + connector => $connector, + socket => $connector->{internal_socket}, + logger => $self->{logger}, + definition => $definition + } + ); + } + + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + + $self->{cron}->run(sleep => \&cron_sleep); + + exit(0); +} + +1; diff --git a/gorgone/gorgone/modules/core/cron/hooks.pm b/gorgone/gorgone/modules/core/cron/hooks.pm new file mode 100644 index 00000000000..f2aaa00711c --- /dev/null +++ b/gorgone/gorgone/modules/core/cron/hooks.pm @@ -0,0 +1,156 @@ +# +# Copyright 2019 Centreon 
(http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::cron::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::cron::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'cron'; +use constant EVENTS => [ + { event => 'CRONREADY' }, + { event => 'GETCRON', uri => '/definitions', method => 'GET' }, + { event => 'ADDCRON', uri => '/definitions', method => 'POST' }, + { event => 'DELETECRON', uri => '/definitions', method => 'DELETE' }, + { event => 'UPDATECRON', uri => '/definitions', method => 'PATCH' }, +]; + +my $config_core; +my $config; +my $cron = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CRONREADY') { + $cron->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$cron->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 
'gorgonecron: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-cron', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($cron->{running}) && $cron->{running} == 1) { + $options{logger}->writeLogDebug("[cron] Send TERM signal $cron->{pid}"); + CORE::kill('TERM', $cron->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($cron->{running} == 1) { + $options{logger}->writeLogDebug("[cron] Send KILL signal for pool"); + CORE::kill('KILL', $cron->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($cron->{pid}) || $cron->{pid} != $pid); + + $cron = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($cron->{running}) && $cron->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[cron] Create module 'cron' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-cron'; + my $module = gorgone::modules::core::cron::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[cron] PID $child_pid (gorgone-cron)"); + $cron = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/dbcleaner/class.pm b/gorgone/gorgone/modules/core/dbcleaner/class.pm new file mode 100644 index 00000000000..8e4c8350aeb --- /dev/null +++ b/gorgone/gorgone/modules/core/dbcleaner/class.pm @@ -0,0 +1,195 
@@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::dbcleaner::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}, DIE => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{purge_timer} = time(); + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; + $SIG{__DIE__} = \&class_handle_DIE; + $handlers{DIE}->{$self} = sub { $self->handle_DIE($_[0]) }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[dbcleaner] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub handle_DIE { + my $self = shift; + my $msg = shift; + + $self->{logger}->writeLogError("[dbcleaner] Receiving DIE: $msg"); + 
$self->exit_process(); +} + +sub class_handle_DIE { + my ($msg) = @_; + + foreach (keys %{$handlers{DIE}}) { + &{$handlers{DIE}->{$_}}($msg); + } +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub exit_process { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[dbcleaner] $$ has quit"); + exit(0); +} + +sub action_dbclean { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + if (defined($options{cycle})) { + return 0 if ((time() - $self->{purge_timer}) < 3600); + } + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { + message => 'action dbclean proceed' + } + ) if (!defined($options{cycle})); + + $self->{logger}->writeLogDebug("[dbcleaner] Purge database in progress..."); + my ($status) = $self->{db_gorgone}->query({ + query => 'DELETE FROM gorgone_identity WHERE `mtime` < ?', + bind_values => [time() - $self->{config}->{purge_sessions_time}] + }); + my ($status2) = $self->{db_gorgone}->query({ + query => "DELETE FROM gorgone_history WHERE (instant = 1 AND `ctime` < " . (time() - 86400) . 
") OR `ctime` < ?", + bind_values => [time() - $self->{config}->{purge_history_time}] + }); + $self->{purge_timer} = time(); + + $self->{logger}->writeLogDebug("[dbcleaner] Purge finished"); + + if ($status == -1 || $status2 == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => 'action dbclean finished' + } + ) if (!defined($options{cycle})); + return 0; + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + message => 'action dbclean finished' + } + ) if (!defined($options{cycle})); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->exit_process(); + } + + $connector->action_dbclean(cycle => 1); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-dbcleaner', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'DBCLEANERREADY', + data => {} + }); + + $self->{db_gorgone} = gorgone::class::db->new( + type => $self->get_core_config(name => 'gorgone_db_type'), + db => $self->get_core_config(name => 'gorgone_db_name'), + host => $self->get_core_config(name => 'gorgone_db_host'), + port => $self->get_core_config(name => 'gorgone_db_port'), + user => $self->get_core_config(name => 'gorgone_db_user'), + password => $self->get_core_config(name => 'gorgone_db_password'), + force => 2, + logger => $self->{logger} + ); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/dbcleaner/hooks.pm 
b/gorgone/gorgone/modules/core/dbcleaner/hooks.pm new file mode 100644 index 00000000000..dba893cb3a4 --- /dev/null +++ b/gorgone/gorgone/modules/core/dbcleaner/hooks.pm @@ -0,0 +1,163 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::dbcleaner::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::dbcleaner::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'dbcleaner'; +use constant EVENTS => [ + { event => 'DBCLEANERREADY' } +]; + +my $config_core; +my $config; +my ($config_db_centreon); +my $dbcleaner = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config->{purge_sessions_time} = + defined($config->{purge_sessions_time}) && $config->{purge_sessions_time} =~ /(\d+)/ ? + $1 : + 3600 + ; + $config->{purge_history_time} = + defined($config->{purge_history_time}) && $config->{purge_history_time} =~ /(\d+)/ ? 
+ $1 : + 604800 + ; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'DBCLEANERREADY') { + $dbcleaner->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$dbcleaner->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgonedbcleaner: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-dbcleaner', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($dbcleaner->{running}) && $dbcleaner->{running} == 1) { + $options{logger}->writeLogDebug("[dbcleaner] Send TERM signal $dbcleaner->{pid}"); + CORE::kill('TERM', $dbcleaner->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($dbcleaner->{running} == 1) { + $options{logger}->writeLogDebug("[dbcleaner] Send KILL signal for pool"); + CORE::kill('KILL', $dbcleaner->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($dbcleaner->{pid}) || $dbcleaner->{pid} != $pid); + + $dbcleaner = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($dbcleaner->{running}) && $dbcleaner->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[dbcleaner] Create module 'dbcleaner' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 
= 'gorgone-dbcleaner'; + my $module = gorgone::modules::core::dbcleaner::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[dbcleaner] PID $child_pid (gorgone-dbcleaner)"); + $dbcleaner = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserver/class.pm b/gorgone/gorgone/modules/core/httpserver/class.pm new file mode 100644 index 00000000000..10b41e56d2b --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserver/class.pm @@ -0,0 +1,407 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::httpserver::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::misc; +use gorgone::standard::api; +use HTTP::Daemon; +use HTTP::Status; +use MIME::Base64; +use JSON::XS; +use Socket; +use EV; + +my $time = time(); + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +my %dispatch; + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{api_endpoints} = $options{api_endpoints}; + + if ($connector->{config}->{ssl} eq 'true') { + exit(1) if (gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => 'HTTP::Daemon::SSL', + error_msg => "[httpserver] -class- cannot load module 'HTTP::Daemon::SSL'") + ); + } + + $connector->{auth_enabled} = (defined($connector->{config}->{auth}->{enabled}) && $connector->{config}->{auth}->{enabled} eq 'true') ? 1 : 0; + + $connector->{allowed_hosts_enabled} = (defined($connector->{config}->{allowed_hosts}->{enabled}) && $connector->{config}->{allowed_hosts}->{enabled} eq 'true') ? 1 : 0; + if (gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => 'NetAddr::IP', + error_msg => "[httpserver] -class- cannot load module 'NetAddr::IP'. 
Cannot use allowed_hosts configuration.") + ) { + $connector->{allowed_hosts_enabled} = 0; + } + + $connector->{tokens} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[httpserver] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub init_dispatch { + my ($self, $config_dispatch) = @_; + + $self->{dispatch} = { %{$self->{config}->{dispatch}} } + if (defined($self->{config}->{dispatch}) && $self->{config}->{dispatch} ne ''); +} + +sub check_allowed_host { + my ($self, %options) = @_; + + my $subnet = NetAddr::IP->new($options{peer_addr} . 
'/32');
+    foreach (@{$self->{peer_subnets}}) {
+        return 1 if ($_->contains($subnet));
+    }
+
+    return 0;
+}
+
+sub load_peer_subnets { # parse allowed_hosts subnets into NetAddr::IP objects (once, at startup)
+    my ($self, %options) = @_;
+
+    return if ($self->{allowed_hosts_enabled} == 0);
+
+    $self->{peer_subnets} = [];
+    return if (!defined($self->{config}->{allowed_hosts}->{subnets})); # use $self like the rest of the method (was the package-global $connector)
+
+    foreach (@{$self->{config}->{allowed_hosts}->{subnets}}) {
+        my $subnet = NetAddr::IP->new($_);
+        if (!defined($subnet)) {
+            $self->{logger}->writeLogError("[httpserver] Cannot load subnet: $_");
+            next;
+        }
+
+        push @{$self->{peer_subnets}}, $subnet;
+    }
+}
+
+sub stop_ev { # EV timer callback: exit the event loop started in run()
+    $connector->{loop}->break();
+}
+
+sub event { # drain pending ZMQ messages from gorgone core and dispatch action_* handlers
+    my ($self, %options) = @_;
+
+    while ($self->{internal_socket}->has_pollin()) {
+        my ($message) = $self->read_message();
+        next if (!defined($message));
+
+        if ($message =~ /^\[(.*?)\]\s+\[([a-zA-Z0-9:\-_]*?)\]\s+\[.*?\]\s+(.*)$/m ||
+            $message =~ /^\[(.*?)\]\s+\[([a-zA-Z0-9:\-_]*?)\]\s+(.*)$/m) {
+            my ($action, $token, $data) = ($1, $2, $3);
+            $self->{tokens}->{$token} = {
+                action => $action,
+                token => $token,
+                data => $data
+            };
+            if ((my $method = $self->can('action_' . 
lc($action)))) { + my ($rv, $decoded) = $self->json_decode(argument => $data, token => $token); + next if ($rv); + $method->($self, token => $token, data => $decoded); + } + } + } + + if (defined($self->{break_token}) && defined($self->{tokens}->{ $self->{break_token} })) { + $self->{loop}->break(); + } +} + +sub run { + my ($self, %options) = @_; + + $self->load_peer_subnets(); + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $connector->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-httpserver', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'HTTPSERVERREADY', + data => {} + }); + + gorgone::standard::api::set_module($self); + + my $watcher_timer = $self->{loop}->timer(4, 0, \&stop_ev); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() }); + $self->{loop}->run(); + + $self->init_dispatch(); + + # HTTP daemon + my ($daemon, $message_error); + if ($self->{config}->{ssl} eq 'false') { + $message_error = '$@'; + $daemon = HTTP::Daemon->new( + LocalAddr => $self->{config}->{address} . ':' . $self->{config}->{port}, + ReusePort => 1, + Timeout => 5 + ); + } elsif ($self->{config}->{ssl} eq 'true') { + $message_error = '$!, ssl_error=$IO::Socket::SSL::SSL_ERROR'; + $daemon = HTTP::Daemon::SSL->new( + LocalAddr => $self->{config}->{address} . ':' . 
$self->{config}->{port}, + SSL_cert_file => $self->{config}->{ssl_cert_file}, + SSL_key_file => $self->{config}->{ssl_key_file}, + SSL_error_trap => \&ssl_error, + ReusePort => 1, + Timeout => 5 + ); + } + + if (!defined($daemon)) { + eval "\$message_error = \"$message_error\""; + $connector->{logger}->writeLogError("[httpserver] can't construct socket: $message_error"); + exit(1); + } + + while (1) { + my ($connection) = $daemon->accept(); + + if ($self->{stop} == 1) { + $self->{logger}->writeLogInfo("[httpserver] $$ has quit"); + $connection->close() if (defined($connection)); + exit(0); + } + + if (!defined($connection)) { + $self->event(); + next; + } + + while (my $request = $connection->get_request) { + if ($connection->antique_client eq '1') { + $connection->force_last_request; + next; + } + + my $msg = "[httpserver] " . $connection->peerhost() . " " . $request->method . " '" . $request->uri->path . "'"; + $msg .= " '" . $request->header("User-Agent") . "'" if (defined($request->header("User-Agent")) && $request->header("User-Agent") ne ''); + $connector->{logger}->writeLogInfo($msg); + + if ($connector->{allowed_hosts_enabled} == 1) { + if ($connector->check_allowed_host(peer_addr => inet_ntoa($connection->peeraddr())) == 0) { + $connector->{logger}->writeLogError("[httpserver] " . $connection->peerhost() . 
" Unauthorized"); + $self->send_error( + connection => $connection, + code => "401", + response => '{"error":"http_error_401","message":"unauthorized"}' + ); + next; + } + } + + if ($self->authentication($request->header('Authorization'))) { # Check Basic authentication + my ($root) = ($request->uri->path =~ /^(\/\w+)/); + + if ($root eq "/api") { # API + $self->send_response(connection => $connection, response => $self->api_call($request)); + } elsif (defined($self->{dispatch}->{$root})) { # Other dispatch definition + $self->send_response(connection => $connection, response => $self->dispatch_call(root => $root, request => $request)); + } else { # Forbidden + $connector->{logger}->writeLogError("[httpserver] " . $connection->peerhost() . " '" . $request->uri->path . "' Forbidden"); + $self->send_error( + connection => $connection, + code => "403", + response => '{"error":"http_error_403","message":"forbidden"}' + ); + } + } else { # Authen error + $connector->{logger}->writeLogError("[httpserver] " . $connection->peerhost() . 
" Unauthorized");
+                $self->send_error(
+                    connection => $connection,
+                    code => "401",
+                    response => '{"error":"http_error_401","message":"unauthorized"}'
+                );
+            }
+            $connection->force_last_request;
+        }
+        $connection->close;
+        undef($connection);
+    }
+}
+
+sub ssl_error { # HTTP::Daemon::SSL error trap: log, answer 400 and drop the client socket
+    my ($self, $error) = @_;
+
+    chomp $error;
+    $connector->{logger}->writeLogError("[httpserver] ssl error: $error");
+    ${*$self}{httpd_client_proto} = 1000;
+    ${*$self}{httpd_daemon} = HTTP::Daemon::SSL::DummyDaemon->new();
+    $self->send_error(RC_BAD_REQUEST);
+    $self->close();
+}
+
+sub authentication { # validate the HTTP Basic Authorization header; returns 1 = allowed, 0 = denied
+    my ($self, $header) = @_;
+
+    return 1 if ($self->{auth_enabled} == 0);
+
+    return 0 if (!defined($header) || $header eq '');
+
+    return 0 if ($header !~ /Basic\s(.*)$/); # reject non-Basic schemes: never use a stale/undef $1 from a previous match
+    my ($user, $password) = split(/:/, MIME::Base64::decode($1), 2);
+    return 1 if (defined($self->{config}->{auth}->{user}) && defined($user) && $user eq $self->{config}->{auth}->{user} &&
+        defined($self->{config}->{auth}->{password}) && defined($password) && $password eq $self->{config}->{auth}->{password});
+
+    return 0;
+}
+
+sub send_response { # render JSON payload; honors an embedded http_response_code, else 200 (or 204 when empty)
+    my ($self, %options) = @_;
+
+    if (defined($options{response}) && $options{response} ne '') {
+        my $http_code = 200;
+        eval {
+            # we don't want to raise an error if we can't find an http code or if we don't send back
+            # something else than a json, so we don't check $@ variable
+            my $content = JSON::XS->new->decode($options{response});
+            if ($content->{http_response_code}){
+                $http_code = $content->{http_response_code};
+                delete($content->{http_response_code});
+                $options{response} = JSON::XS->new->encode($content);
+            }
+            elsif ($content->{error}){
+                $http_code = 400;
+            }
+        };
+
+
+        my $response = HTTP::Response->new($http_code);
+        $response->header('Content-Type' => 'application/json');
+        $response->content($options{response} . 
"\n"); + $options{connection}->send_response($response); + } else { + my $response = HTTP::Response->new(204); + $options{connection}->send_response($response); + } +} + +sub send_error { + my ($self, %options) = @_; + + my $response = HTTP::Response->new($options{code}); + $response->header('Content-Type' => 'application/json'); + $response->content($options{response} . "\n"); + $options{connection}->send_response($response); +} + +sub api_call { + my ($self, $request) = @_; + + my $content; + eval { + $content = JSON::XS->new->decode($request->content) + if ($request->method =~ /POST|PATCH/ && defined($request->content)); + }; + if ($@) { + return '{"error":"decode_error","message":"POST content must be JSON-formated","http_response_code":"400"}'; + } + + my %parameters = $request->uri->query_form; + my $response = gorgone::standard::api::root( + method => $request->method, + uri => $request->uri->path, + parameters => \%parameters, + content => $content, + socket => $connector->{internal_socket}, + logger => $self->{logger}, + api_endpoints => $self->{api_endpoints}, + module => $self + ); + + return $response; +} + +sub dispatch_call { + my ($self, %options) = @_; + + my $class = $self->{dispatch}->{$options{root}}->{class}; + my $method = $self->{dispatch}->{$options{root}}->{method}; + my $response; + eval { + (my $file = "$class.pm") =~ s|::|/|g; + require $file; + $response = $class->$method(request => $options{request}); + }; + if ($@) { + $response = $@; + }; + + return $response; +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserver/hooks.pm b/gorgone/gorgone/modules/core/httpserver/hooks.pm new file mode 100644 index 00000000000..9f751180f67 --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserver/hooks.pm @@ -0,0 +1,169 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserver::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::httpserver::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'httpserver'; +use constant EVENTS => [ + { event => 'HTTPSERVERREADY' }, +]; + +my $config_core; +my $config; +my $httpserver = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + $config->{address} = defined($config->{address}) && $config->{address} ne '' ? $config->{address} : '0.0.0.0'; + $config->{port} = defined($config->{port}) && $config->{port} =~ /(\d+)/ ? 
$1 : 8080;
+    if (defined($config->{auth}->{enabled}) && $config->{auth}->{enabled} eq 'true') {
+        if (!defined($config->{auth}->{user}) || $config->{auth}->{user} =~ /^\s*$/) {
+            $options{logger}->writeLogError('[httpserver] User option mandatory if authentication is enabled');
+            $loaded = 0;
+        }
+        if (!defined($config->{auth}->{password}) || $config->{auth}->{password} =~ /^\s*$/) {
+            $options{logger}->writeLogError('[httpserver] Password option mandatory if authentication is enabled');
+            $loaded = 0;
+        }
+    }
+
+    return ($loaded, NAMESPACE, NAME, EVENTS);
+}
+
+sub init { # module hook: spawn the gorgone-httpserver child process
+    my (%options) = @_;
+
+    create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints});
+}
+
+sub routing { # forward core events to the child once it has reported HTTPSERVERREADY
+    my (%options) = @_;
+
+    if ($options{action} eq 'HTTPSERVERREADY') {
+        $httpserver->{ready} = 1;
+        return undef;
+    }
+
+    if (gorgone::class::core::waiting_ready(ready => \$httpserver->{ready}) == 0) {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => { message => 'gorgonehttpserver: still no ready' },
+            json_encode => 1
+        });
+        return undef;
+    }
+
+    $options{gorgone}->send_internal_message(
+        identity => 'gorgone-httpserver',
+        action => $options{action},
+        raw_data_ref => $options{frame}->getRawData(),
+        token => $options{token}
+    );
+}
+
+sub gently { # graceful shutdown: ask the child to stop with SIGTERM
+    my (%options) = @_;
+
+    $stop = 1;
+    if (defined($httpserver->{running}) && $httpserver->{running} == 1) {
+        $options{logger}->writeLogDebug("[httpserver] Send TERM signal $httpserver->{pid}");
+        CORE::kill('TERM', $httpserver->{pid});
+    }
+}
+
+sub kill { # forced shutdown: SIGKILL the child if it is still running
+    my (%options) = @_;
+
+    if (defined($httpserver->{running}) && $httpserver->{running} == 1) { # guard like gently(): check() may have reset $httpserver to {}
+        $options{logger}->writeLogDebug("[httpserver] Send KILL signal for pool");
+        CORE::kill('KILL', $httpserver->{pid});
+    }
+}
+
+sub kill_internal { # nothing to clean up in-process for this module
+    my (%options) = @_;
+
+}
+
+sub check { # reap dead child; respawn it unless a stop was requested; returns number of children still running
+    my (%options) = @_;
+
+    my $count = 0;
+    foreach my $pid (keys %{$options{dead_childs}}) {
+        # Not me
+        next if 
(!defined($httpserver->{pid}) || $httpserver->{pid} != $pid); + + $httpserver = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints}); + } + + last; + } + + $count++ if (defined($httpserver->{running}) && $httpserver->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[httpserver] Create module 'httpserver' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-httpserver'; + my $module = gorgone::modules::core::httpserver::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + api_endpoints => $options{api_endpoints} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[httpserver] PID $child_pid (gorgone-httpserver)"); + $httpserver = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserverng/class.pm b/gorgone/gorgone/modules/core/httpserverng/class.pm new file mode 100644 index 00000000000..57ef32290ad --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserverng/class.pm @@ -0,0 +1,726 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserverng::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Mojolicious::Lite; +use Mojo::Server::Daemon; +use Authen::Simple::Password; +use IO::Socket::SSL; +use IO::Handle; +use JSON::XS; +use IO::Poll qw(POLLIN POLLPRI); +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +websocket '/' => sub { + my $mojo = shift; + + $connector->{logger}->writeLogDebug('[httpserverng] websocket client connected: ' . $mojo->tx->connection); + + if ($connector->{allowed_hosts_enabled} == 1) { + if ($connector->check_allowed_host(peer_addr => $mojo->tx->remote_address) == 0) { + $connector->{logger}->writeLogError("[httpserverng] " . $mojo->tx->remote_address . " Unauthorized"); + $mojo->tx->send({json => { + code => 401, + message => 'unauthorized', + }}); + return ; + } + } + + $connector->{ws_clients}->{ $mojo->tx->connection } = { + tx => $mojo->tx, + logged => 0, + last_update => time(), + tokens => {} + }; + + $mojo->on(message => sub { + my ($mojo, $msg) = @_; + + $connector->{ws_clients}->{ $mojo->tx->connection }->{last_update} = time(); + + my $content; + eval { + $content = JSON::XS->new->decode($msg); + }; + if ($@) { + $connector->close_websocket( + code => 500, + message => 'decode error: unsupported format', + ws_id => $mojo->tx->connection + ); + return ; + } + + my $rv = $connector->is_logged_websocket(ws_id => $mojo->tx->connection, content => $content); + return if ($rv != 1); + + $connector->api_root_ws(ws_id => $mojo->tx->connection, content => $content); + }); + + $mojo->on(finish => sub { + my ($mojo, $code, $reason) = @_; + + $connector->{logger}->writeLogDebug('[httpserverng] websocket client disconnected: ' . 
$mojo->tx->connection); + $connector->clean_websocket(ws_id => $mojo->tx->connection, finish => 1); + }); +}; + +patch '/*' => sub { + my $mojo = shift; + + $connector->api_call( + mojo => $mojo, + method => 'PATCH' + ); +}; + +post '/*' => sub { + my $mojo = shift; + + $connector->api_call( + mojo => $mojo, + method => 'POST' + ); +}; + +get '/*' => sub { + my $mojo = shift; + + $connector->api_call( + mojo => $mojo, + method => 'GET' + ); +}; + +sub construct { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{api_endpoints} = $options{api_endpoints}; + $connector->{auth_enabled} = (defined($connector->{config}->{auth}->{enabled}) && $connector->{config}->{auth}->{enabled} eq 'true') ? 1 : 0; + $connector->{allowed_hosts_enabled} = (defined($connector->{config}->{allowed_hosts}->{enabled}) && $connector->{config}->{allowed_hosts}->{enabled} eq 'true') ? 1 : 0; + $connector->{clients} = {}; + $connector->{token_watch} = {}; + $connector->{ws_clients} = {}; + + if (gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => 'NetAddr::IP', + error_msg => "[httpserverng] -class- cannot load module 'NetAddr::IP'. 
Cannot use allowed_hosts configuration.") + ) { + $connector->{allowed_hosts_enabled} = 0; + } + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[httpserverng] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub check_allowed_host { + my ($self, %options) = @_; + + my $subnet = NetAddr::IP->new($options{peer_addr} . '/32'); + foreach (@{$self->{peer_subnets}}) { + return 1 if ($_->contains($subnet)); + } + + return 0; +} + +sub load_peer_subnets { + my ($self, %options) = @_; + + return if ($self->{allowed_hosts_enabled} == 0); + + $self->{peer_subnets} = []; + return if (!defined($connector->{config}->{allowed_hosts}->{subnets})); + + foreach (@{$self->{config}->{allowed_hosts}->{subnets}}) { + my $subnet = NetAddr::IP->new($_); + if (!defined($subnet)) { + $self->{logger}->writeLogError("[httpserverng] Cannot load subnet: $_"); + next; + } + + push @{$self->{peer_subnets}}, $subnet; + } +} + +sub run { + my ($self, %options) = @_; + + $self->load_peer_subnets(); + + my $listen = 'reuse=1'; + if ($self->{config}->{ssl} eq 'true') { + if (!defined($self->{config}->{ssl_cert_file}) || $self->{config}->{ssl_cert_file} eq '' || + ! -r "$self->{config}->{ssl_cert_file}") { + $connector->{logger}->writeLogError("[httpserverng] cannot read/find ssl-cert-file"); + exit(1); + } + if (!defined($self->{config}->{ssl_key_file}) || $self->{config}->{ssl_key_file} eq '' || + ! 
-r "$self->{config}->{ssl_key_file}") { + $connector->{logger}->writeLogError("[httpserverng] cannot read/find ssl-key-file"); + exit(1); + } + $listen .= '&cert=' . $self->{config}->{ssl_cert_file} . '&key=' . $self->{config}->{ssl_key_file}; + } + my $proto = 'http'; + if ($self->{config}->{ssl} eq 'true') { + $proto = 'https'; + if (defined($self->{config}->{passphrase}) && $self->{config}->{passphrase} ne '') { + IO::Socket::SSL::set_defaults(SSL_passwd_cb => sub { return $connector->{config}->{passphrase} } ); + } + } + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $connector->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-httpserverng', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'HTTPSERVERNGREADY', + data => {} + }); + $self->read_zmq_events(); + + my $type = ref(Mojo::IOLoop->singleton->reactor); + my $watcher_io; + if ($type eq 'Mojo::Reactor::Poll') { + Mojo::IOLoop->singleton->reactor->{io}{ $self->{internal_socket}->get_fd()} = { + cb => sub { $connector->read_zmq_events(); }, + mode => POLLIN | POLLPRI + }; + } else { + # need EV version 4.32 + $watcher_io = EV::io( + $self->{internal_socket}->get_fd(), + EV::READ, + sub { + $connector->read_zmq_events(); + } + ); + } + + #my $socket_fd = gorgone::standard::library::zmq_getfd(socket => $self->{internal_socket}); + #my $socket = IO::Handle->new_from_fd($socket_fd, 'r'); + #Mojo::IOLoop->singleton->reactor->io($socket => sub { + # $connector->read_zmq_events(); + #}); + #Mojo::IOLoop->singleton->reactor->watch($socket, 1, 0); + + Mojo::IOLoop->singleton->recurring(60 => sub { + $connector->{logger}->writeLogDebug('[httpserverng] recurring timeout loop'); + my $ctime = time(); + foreach my $ws_id (keys %{$connector->{ws_clients}}) { + if (scalar(keys 
%{$connector->{ws_clients}->{$ws_id}->{tokens}}) <= 0 && ($ctime - $connector->{ws_clients}->{$ws_id}->{last_update}) > 300) { + $connector->{logger}->writeLogDebug('[httpserverng] websocket client timeout reached: ' . $ws_id); + $connector->close_websocket( + code => 500, + message => 'timeout reached', + ws_id => $ws_id + ); + } + } + }); + + $self->{basic_auth_plus} = 1; + eval { + local $SIG{__DIE__} = 'IGNORE'; + + app->plugin('basic_auth_plus'); + }; + if ($@) { + $self->{basic_auth_plus} = 0; + } + if ($self->{auth_enabled} == 1 && $self->{basic_auth_plus} == 0 && $self->{allowed_hosts_enabled} == 0) { + $connector->{logger}->writeLogError("[httpserverng] need to install the module basic_auth_plus"); + exit(1); + } + + app->mode('production'); + my $daemon = Mojo::Server::Daemon->new( + app => app, + listen => [$proto . '://' . $self->{config}->{address} . ':' . $self->{config}->{port} . '?' . $listen] + ); + # more than 2 minutes, need to use async system + $daemon->inactivity_timeout(120); + + #my $loop = Mojo::IOLoop->new(); + #my $reactor = Mojo::Reactor::EV->new(); + #$reactor->io($socket => sub { + # my $message = gorgone::standard::library::zmq_dealer_read_message(socket => $connector->{internal_socket}); + #}); + #$reactor->watch($socket, 1, 0); + #$loop->reactor($reactor); + #$daemon->ioloop($loop); + + $daemon->run(); + + exit(0); +} + +sub read_log_event { + my ($self, %options) = @_; + + my $token = $options{token}; + $token =~ s/-log$//; + my $response = { error => 'no_log', message => 'No log found for token', data => [], token => $token }; + if (defined($options{data})) { + my $content; + eval { + $content = JSON::XS->new->decode($options{data}); + }; + if ($@) { + $response = { error => 'decode_error', message => 'Cannot decode response' }; + } elsif (defined($content->{data}->{result}) && scalar(@{$content->{data}->{result}}) > 0) { + $response = { + message => 'Logs found', + token => $token, + data => $content->{data}->{result} + }; + } + 
} + + if (defined($self->{token_watch}->{ $options{token} }->{ws_id})) { + $response->{userdata} = $self->{token_watch}->{ $options{token} }->{userdata}; + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{last_update} = time(); + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tx}->send({json => $response }); + delete $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tokens}->{ $options{token} }; + } else { + $self->{token_watch}->{ $options{token} }->{mojo}->render(json => $response); + } + delete $self->{token_watch}->{ $options{token} }; +} + +sub read_listener { + my ($self, %options) = @_; + + my $content; + eval { + $content = JSON::XS->new->decode($options{data}); + }; + if ($@) { + $self->{token_watch}->{ $options{token} }->{mojo}->render(json => { error => 'decode_error', message => 'Cannot decode response' }); + delete $self->{token_watch}->{ $options{token} }; + return ; + } + + push @{$self->{token_watch}->{ $options{token} }->{results}}, $content; + if (defined($self->{token_watch}->{ $options{token} }->{ws_id})) { + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{last_update} = time(); + } + + if ($content->{code} == GORGONE_ACTION_FINISH_KO || $content->{code} == GORGONE_ACTION_FINISH_OK) { + my $json = { data => $self->{token_watch}->{ $options{token} }->{results} }; + if (defined($self->{token_watch}->{ $options{token} }->{internal}) && $content->{code} == GORGONE_ACTION_FINISH_OK) { + $json = $content->{data}; + } + + if (defined($self->{token_watch}->{ $options{token} }->{ws_id})) { + $json->{userdata} = $self->{token_watch}->{ $options{token} }->{userdata}; + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tx}->send({json => $json }); + delete $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tokens}->{ $options{token} }; + } else { + $self->{token_watch}->{ $options{token} 
}->{mojo}->render(json => $json); + } + delete $self->{token_watch}->{ $options{token} }; + } +} + +sub read_zmq_events { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $connector->read_message(); + $connector->{logger}->writeLogDebug('[httpserverng] zmq message received: ' . $message); + if ($message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m || + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/m) { + my ($action, $token, $data) = ($1, $2, $3); + if (defined($connector->{token_watch}->{$token})) { + if ($action eq 'HTTPSERVERNGLISTENER') { + $connector->read_listener(token => $token, data => $data); + } elsif ($token =~ /-log$/) { + $connector->read_log_event(token => $token, data => $data); + } + } + if ((my $method = $connector->can('action_' . lc($action)))) { + my ($rv, $decoded) = $connector->json_decode(argument => $data, token => $token); + if (!$rv) { + $method->($connector, token => $token, data => $decoded); + } + } + } + } +} + +sub api_call { + my ($self, %options) = @_; + + if ($self->{allowed_hosts_enabled} == 1) { + if ($self->check_allowed_host(peer_addr => $options{mojo}->tx->remote_address) == 0) { + $connector->{logger}->writeLogError("[httpserverng] " . $options{mojo}->tx->remote_address . 
" Unauthorized"); + return $options{mojo}->render(json => { message => 'unauthorized' }, status => 401); + } + } + + if ($self->{auth_enabled} == 1 && $self->{basic_auth_plus} == 1) { + my ($hash_ref, $auth_ok) = $options{mojo}->basic_auth( + 'Realm Name' => { + username => $self->{config}->{auth}->{user}, + password => $self->{config}->{auth}->{password} + } + ); + if (!$auth_ok) { + return $options{mojo}->render(json => { message => 'unauthorized' }, status => 401); + } + } + + my $path = $options{mojo}->tx->req->url->path; + my $names = $options{mojo}->req->params->names(); + my $params = {}; + foreach (@$names) { + $params->{$_} = $options{mojo}->param($_); + } + + my $content = $options{mojo}->req->json(); + + $self->api_root( + mojo => $options{mojo}, + method => $options{method}, + uri => $path, + parameters => $params, + content => $content + ); +} + +sub get_log { + my ($self, %options) = @_; + + if (defined($options{target}) && $options{target} ne '') { + $self->send_internal_action({ + target => $options{target}, + action => 'GETLOG', + data => {} + }); + $self->read_zmq_events(); + } + + my $token_log = $options{token} . 
'-log'; + + if (defined($options{ws_id})) { + $self->{ws_clients}->{ $options{ws_id} }->{tokens}->{$token_log} = 1; + } + $self->{token_watch}->{$token_log} = { + ws_id => $options{ws_id}, + userdata => $options{userdata}, + mojo => $options{mojo} + }; + + $self->send_internal_action({ + action => 'GETLOG', + token => $token_log, + data => { + token => $options{token}, + %{$options{parameters}} + } + }); + + $self->read_zmq_events(); + + # keep reference tx to avoid "Transaction already destroyed" + $self->{token_watch}->{$token_log}->{tx} = $options{mojo}->render_later()->tx if (!defined($options{ws_id})); +} + +sub call_action { + my ($self, %options) = @_; + + my $action_token = gorgone::standard::library::generate_token(); + + if ($options{async} == 0) { + if (defined($options{ws_id})) { + $self->{ws_clients}->{ $options{ws_id} }->{tokens}->{$action_token} = 1; + } + $self->{token_watch}->{$action_token} = { + ws_id => $options{ws_id}, + userdata => $options{userdata}, + mojo => $options{mojo}, + internal => $options{internal}, + results => [] + }; + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgone-httpserverng', + event => 'HTTPSERVERNGLISTENER', + token => $action_token, + target => $options{target}, + log_pace => 5, + timeout => 110 + } + ] + }); + $self->read_zmq_events(); + } + + $self->send_internal_action({ + action => $options{action}, + target => $options{target}, + token => $action_token, + data => $options{data} + }); + $self->read_zmq_events(); + + if ($options{async} == 1) { + $options{mojo}->render(json => { token => $action_token }, status => 200); + } else { + # keep reference tx to avoid "Transaction already destroyed" + $self->{token_watch}->{$action_token}->{tx} = $options{mojo}->render_later()->tx if (!defined($options{ws_id})); + } +} + +sub is_logged_websocket { + my ($self, %options) = @_; + + return 1 if ($self->{ws_clients}->{ $options{ws_id} }->{logged} == 1); + + if 
($self->{auth_enabled} == 1) { + if (!defined($options{content}->{username}) || $options{content}->{username} eq '' || + !defined($options{content}->{password}) || $options{content}->{password} eq '') { + $self->close_websocket( + code => 500, + message => 'please set username/password', + ws_id => $options{ws_id} + ); + return 0; + } + + unless ($options{content}->{username} eq $self->{config}->{auth}->{user} && + Authen::Simple::Password->check($options{content}->{password}, $self->{config}->{auth}->{password})) { + $self->close_websocket( + code => 401, + message => 'unauthorized user', + ws_id => $options{ws_id} + ); + return 0; + } + } + + $self->{ws_clients}->{ $options{ws_id} }->{logged} = 1; + return 2; +} + +sub clean_websocket { + my ($self, %options) = @_; + + return if (!defined($self->{ws_clients}->{ $options{ws_id} })); + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->finish() if (!defined($options{finish})); + foreach (keys %{$self->{ws_clients}->{ $options{ws_id} }->{tokens}}) { + delete $self->{token_watch}->{$_}; + } + delete $self->{ws_clients}->{ $options{ws_id} }; +} + +sub close_websocket { + my ($self, %options) = @_; + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => $options{code}, + message => $options{message} + }}); + $self->clean_websocket(ws_id => $options{ws_id}); +} + +sub api_root_ws { + my ($self, %options) = @_; + + if (!defined($options{content}->{method})) { + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => 500, + message => 'unknown method', + userdata => $options{content}->{userdata} + }}); + return ; + } + if (!defined($options{content}->{uri})) { + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => 500, + message => 'unknown uri', + userdata => $options{content}->{userdata} + }}); + return ; + } + + $self->{logger}->writeLogInfo("[api] Requesting '" . $options{content}->{uri} . "' [" . $options{content}->{method} . 
"]"); + + if ($options{content}->{method} eq 'GET' && $options{content}->{uri} =~ /^\/api\/log\/?$/) { + $self->get_log( + ws_id => $options{ws_id}, + userdata => $options{content}->{userdata}, + target => $options{target}, + token => $options{content}->{token}, + parameters => $options{content}->{parameters} + ); + } elsif ($options{content}->{uri} =~ /^\/internal\/(\w+)\/?$/ + && defined($self->{api_endpoints}->{ $options{content}->{method} . '_/internal/' . $1 })) { + $self->call_action( + ws_id => $options{ws_id}, + userdata => $options{content}->{userdata}, + async => 0, + action => $self->{api_endpoints}->{ $options{content}->{method} . '_/internal/' . $1 }, + internal => $1, + target => $options{target}, + data => { + content => $options{content}->{data}, + parameters => $options{content}->{parameters}, + variables => $options{content}->{variable} + } + ); + } elsif ($options{content}->{uri} =~ /^\/(\w+)\/(\w+)\/(\w+)\/?$/ + && defined($self->{api_endpoints}->{ $options{content}->{method} . '_/' . $1 . '/' . $2 . '/' . $3 })) { + $self->call_action( + ws_id => $options{ws_id}, + userdata => $options{content}->{userdata}, + async => 0, + action => $self->{api_endpoints}->{ $options{content}->{method} . '_/' . $1 . '/' . $2 . '/' . $3 }, + target => $options{target}, + data => { + content => $options{content}->{data}, + parameters => $options{content}->{parameters}, + variables => $options{content}->{variable} + } + ); + } else { + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => 500, + message => 'method not implemented', + userdata => $options{userdata} + }}); + } +} + +sub api_root { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[api] Requesting '" . $options{uri} . "' [" . $options{method} . "]"); + + my $async = 0; + $async = 1 if (defined($options{parameters}->{async}) && $options{parameters}->{async} == 1); + + # async mode: + # provide the token directly and close the connection. 
need to call GETLOG on the token + # not working with GETLOG + + # listener is used for other case. + + if ($options{method} eq 'GET' && $options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?log\/(.*)$/) { + $self->get_log( + mojo => $options{mojo}, + target => $2, + token => $3, + parameters => $options{parameters} + ); + } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?internal\/(\w+)\/?([\w\/]*?)$/ + && defined($self->{api_endpoints}->{ $options{method} . '_/internal/' . $3 })) { + my @variables = split(/\//, $4); + $self->call_action( + mojo => $options{mojo}, + async => $async, + action => $self->{api_endpoints}->{ $options{method} . '_/internal/' . $3 }, + internal => $3, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + } + ); + } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?(\w+)\/(\w+)\/(\w+)\/?([\w\/]*?)$/ + && defined($self->{api_endpoints}->{ $options{method} . '_/' . $3 . '/' . $4 . '/' . $5 })) { + my @variables = split(/\//, $6); + $self->call_action( + mojo => $options{mojo}, + async => $async, + action => $self->{api_endpoints}->{ $options{method} . '_/' . $3 . '/' . $4 . '/' . $5 }, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + } + ); + } else { + $options{mojo}->render(json => { error => 'method_unknown', message => 'Method not implemented' }, status => 200); + return ; + } +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserverng/hooks.pm b/gorgone/gorgone/modules/core/httpserverng/hooks.pm new file mode 100644 index 00000000000..14525e1c747 --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserverng/hooks.pm @@ -0,0 +1,170 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserverng::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::httpserverng::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'httpserverng'; +use constant EVENTS => [ + { event => 'HTTPSERVERNGLISTENER' }, + { event => 'HTTPSERVERNGREADY' } +]; + +my $config_core; +my $config; +my $httpserverng = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + $config->{address} = defined($config->{address}) && $config->{address} ne '' ? $config->{address} : '0.0.0.0'; + $config->{port} = defined($config->{port}) && $config->{port} =~ /(\d+)/ ? 
$1 : 8080; + if (defined($config->{auth}->{enabled}) && $config->{auth}->{enabled} eq 'true') { + if (!defined($config->{auth}->{user}) || $config->{auth}->{user} =~ /^\s*$/) { + $options{logger}->writeLogError('[httpserverng] User option mandatory if authentication is enabled'); + $loaded = 0; + } + if (!defined($config->{auth}->{password}) || $config->{auth}->{password} =~ /^\s*$/) { + $options{logger}->writeLogError('[httpserverng] Password option mandatory if authentication is enabled'); + $loaded = 0; + } + } + + return ($loaded, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'HTTPSERVERNGREADY') { + $httpserverng->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$httpserverng->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-httpserverng: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-httpserverng', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($httpserverng->{running}) && $httpserverng->{running} == 1) { + $options{logger}->writeLogDebug("[httpserverng] Send TERM signal $httpserverng->{pid}"); + CORE::kill('TERM', $httpserverng->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($httpserverng->{running} == 1) { + $options{logger}->writeLogDebug("[httpserverng] Send KILL signal for pool"); + CORE::kill('KILL', $httpserverng->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { 
+ # Not me + next if (!defined($httpserverng->{pid}) || $httpserverng->{pid} != $pid); + + $httpserverng = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints}); + } + + last; + } + + $count++ if (defined($httpserverng->{running}) && $httpserverng->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[httpserverng] Create module 'httpserverng' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-httpserverng'; + my $module = gorgone::modules::core::httpserverng::class->construct( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + api_endpoints => $options{api_endpoints} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[httpserverng] PID $child_pid (gorgone-httpserverng)"); + $httpserverng = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/pipeline/class.pm b/gorgone/gorgone/modules/core/pipeline/class.pm new file mode 100644 index 00000000000..bb80a24b0c0 --- /dev/null +++ b/gorgone/gorgone/modules/core/pipeline/class.pm @@ -0,0 +1,244 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::pipeline::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{timeout} = 600; + $connector->{pipelines} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[pipeline] -class- $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub send_listener { + my ($self, %options) = @_; + + my $current = $self->{pipelines}->{ $options{token} }->{current}; + + $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{created} = time(); + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonepipeline', + event => 'PIPELINELISTENER', + target => 
$self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{target}, + token => $options{token} . '-' . $current, + timeout => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{timeout}, + log_pace => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{log_pace} + } + ] + }); + + $self->send_internal_action({ + action => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{action}, + target => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{target}, + token => $options{token} . '-' . $current, + data => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{data} + }); + + $self->{logger}->writeLogDebug("[pipeline] -class- pipeline '$options{token}' run $current"); + $self->send_log( + code => GORGONE_MODULE_PIPELINE_RUN_ACTION, + token => $options{token}, + data => { message => 'proceed action ' . ($current + 1), token => $options{token} . '-' . $current } + ); +} + +sub action_addpipeline { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + #[ + # { "action": "COMMAND", "data": { "content": [ { "command": "ls" } ] }, "continue": "ok", "continue_custom": "%{last_exit_code} == 1" }, // By default for COMMAND: "continue": "%{last_exit_code} == 0" + # { "action": "COMMAND", "target": 10, "timeout": 60, "log_pace": 10, "data": { "content": [ { "command": "ls /tmp" } ] } } + #] + + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action addpipeline proceed' }); + + $self->{pipelines}->{$options{token}} = { current => 0, pipe => $options{data} }; + $self->send_listener(token => $options{token}); + + return 0; +} + +sub action_pipelinelistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token}) || $options{token} !~ /^(.*)-(\d+)$/); + my ($token, $current_event) = ($1, $2); + + return 0 if (!defined($self->{pipelines}->{ $token })); + my $current = $self->{pipelines}->{$token}->{current}; + 
return 0 if ($current != $current_event); + + if ($self->{pipelines}->{$token}->{pipe}->[$current]->{action} eq 'COMMAND') { + # we want to catch exit_code for command results + if ($options{data}->{code} == GORGONE_MODULE_ACTION_COMMAND_RESULT) { + $self->{pipelines}->{$token}->{pipe}->[$current]->{last_exit_code} = $options{data}->{data}->{result}->{exit_code}; + $self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code} = 0 + if (!defined($self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code})); + $self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code} += $options{data}->{data}->{result}->{exit_code}; + return 0; + } + } + + return 0 if ($options{data}->{code} != GORGONE_ACTION_FINISH_OK && $options{data}->{code} != GORGONE_ACTION_FINISH_KO); + + my $continue = GORGONE_ACTION_FINISH_OK; + if (defined($self->{pipelines}->{$token}->{pipe}->[$current]->{continue}) && + $self->{pipelines}->{$token}->{pipe}->[$current]->{continue} eq 'ko') { + $continue = GORGONE_ACTION_FINISH_KO; + } + + my $success = 1; + if ($options{data}->{code} != $continue) { + $success = 0; + } + if ($self->{pipelines}->{$token}->{pipe}->[$current]->{action} eq 'COMMAND') { + my $eval = '%{last_exit_code} == 0'; + $eval = $self->{pipelines}->{$token}->{pipe}->[$current]->{continue_custom} + if (defined($self->{pipelines}->{$token}->{pipe}->[$current]->{continue_custom})); + $eval = $self->change_macros( + template => $eval, + macros => { + total_exit_code => '$self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code}', + last_exit_code => '$self->{pipelines}->{$token}->{pipe}->[$current]->{last_exit_code}' + } + ); + if (!
eval "$eval") { + $success = 0; + } + } + + if ($success == 0) { + $self->{logger}->writeLogDebug("[pipeline] -class- pipeline '$token' failed at $current"); + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $token, data => { message => 'action pipeline failed' }); + delete $self->{pipelines}->{$token}; + } else { + if (defined($self->{pipelines}->{$token}->{pipe}->[$current + 1])) { + $self->{pipelines}->{$token}->{current}++; + $self->send_listener(token => $token); + } else { + $self->{logger}->writeLogDebug("[pipeline] -class- pipeline '$token' finished successfully"); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $token, data => { message => 'action pipeline finished successfully' }); + delete $self->{pipelines}->{$token}; + } + } + + return 0; +} + +sub check_timeout { + my ($self, %options) = @_; + + foreach (keys %{$self->{pipelines}}) { + my $current = $self->{pipelines}->{$_}->{current}; + my $timeout = defined($self->{pipelines}->{$_}->{pipe}->[$current]->{timeout}) && $self->{pipelines}->{$_}->{pipe}->[$current]->{timeout} =~ /(\d+)/ ? 
+ $1 : $self->{timeout}; + + if ((time() - $self->{pipelines}->{$_}->{pipe}->[$current]->{created}) > $timeout) { + $self->{logger}->writeLogDebug("[pipeline] -class- delete pipeline '$_' timeout"); + delete $self->{pipelines}->{$_}; + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $_, data => { message => 'pipeline timeout reached' }); + } + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[pipeline] -class- $$ has quit"); + exit(0); + } + + $connector->check_timeout(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-pipeline', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PIPELINEREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/pipeline/hooks.pm b/gorgone/gorgone/modules/core/pipeline/hooks.pm new file mode 100644 index 00000000000..83aed872a2c --- /dev/null +++ b/gorgone/gorgone/modules/core/pipeline/hooks.pm @@ -0,0 +1,164 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::pipeline::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::pipeline::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'pipeline'; +use constant EVENTS => [ + { event => 'PIPELINEREADY' }, + { event => 'PIPELINELISTENER' }, + { event => 'ADDPIPELINE', uri => '/definitions', method => 'POST' }, +]; + +my $config_core; +my $config; +my $pipeline = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config->{purge_sessions_time} = + defined($config->{purge_sessions_time}) && $config->{purge_sessions_time} =~ /(\d+)/ ? + $1 : + 3600 + ; + $config->{purge_history_time} = + defined($config->{purge_history_time}) && $config->{purge_history_time} =~ /(\d+)/ ? 
+ $1 : + 604800 + ; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'PIPELINEREADY') { + $pipeline->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$pipeline->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-pipeline: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-pipeline', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($pipeline->{running}) && $pipeline->{running} == 1) { + $options{logger}->writeLogDebug("[pipeline] Send TERM signal $pipeline->{pid}"); + CORE::kill('TERM', $pipeline->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($pipeline->{running} == 1) { + $options{logger}->writeLogDebug('[pipeline] Send KILL signal for subprocess'); + CORE::kill('KILL', $pipeline->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pipeline->{pid}) || $pipeline->{pid} != $pid); + + $pipeline = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($pipeline->{running}) && $pipeline->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[pipeline] Create module 'pipeline' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 
'gorgone-pipeline'; + my $module = gorgone::modules::core::pipeline::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[pipeline] PID $child_pid (gorgone-pipeline)"); + $pipeline = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/proxy/class.pm b/gorgone/gorgone/modules/core/proxy/class.pm new file mode 100644 index 00000000000..7c5172b159b --- /dev/null +++ b/gorgone/gorgone/modules/core/proxy/class.pm @@ -0,0 +1,563 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::proxy::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::clientzmq; +use gorgone::modules::core::proxy::sshclient; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{pool_id} = $options{pool_id}; + $connector->{clients} = {}; + $connector->{internal_channels} = {}; + $connector->{watchers} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[proxy] $$ Receiving order to stop..."); + $self->{stop} = 1; + $self->{stop_time} = time(); +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub exit_process { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[proxy] $$ has quit"); + $self->close_connections(); + foreach (keys %{$self->{internal_channels}}) { + $self->{logger}->writeLogInfo("[proxy] Close internal connection for $_"); + $self->{internal_channels}->{$_}->close(); + } + $self->{logger}->writeLogInfo("[proxy] Close control connection"); + $self->{internal_socket}->close(); + exit(0); +} + +sub read_message_client { + my (%options) = @_; + + return undef if (!defined($options{identity}) || $options{identity} !~ /^gorgone-proxy-(.*?)-(.*?)$/); + + my ($client_identity) = ($2); + if 
($options{data} =~ /^\[PONG\]/) { + if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/m) { + return undef; + } + my ($action, $token) = ($1, $2); + my ($code, $data) = $connector->json_decode(argument => $3); + return undef if ($code == 1); + + $data->{data}->{id} = $client_identity; + + # if we get a pong response, we can open the internal com read + $connector->{clients}->{ $client_identity }->{com_read_internal} = 1; + $connector->send_internal_action({ + action => 'PONG', + data => $data, + token => $token, + target => '' + }); + } elsif ($options{data} =~ /^\[(?:REGISTERNODES|UNREGISTERNODES|SYNCLOGS)\]/) { + if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/ms) { + return undef; + } + my ($action, $token, $data) = ($1, $2, $3); + + $connector->send_internal_action({ + action => $action, + data => $data, + data_noencode => 1, + token => $token, + target => '' + }); + } elsif ($options{data} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/ms) { + my ($code, $data) = $connector->json_decode(argument => $2); + return undef if ($code == 1); + + # we set the id (distant node can not have id in configuration) + $data->{data}->{id} = $client_identity; + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + $connector->send_internal_action({ + action => 'SETLOGS', + data => $data, + token => $1, + target => '' + }); + } + } +} + +sub connect { + my ($self, %options) = @_; + + if ($self->{clients}->{$options{id}}->{type} eq 'push_zmq') { + $self->{clients}->{$options{id}}->{class} = gorgone::class::clientzmq->new( + context => $self->{zmq_context}, + core_loop => $self->{loop}, + identity => 'gorgone-proxy-' . $self->{core_id} . '-' . $options{id}, + cipher => $self->{clients}->{ $options{id} }->{cipher}, + vector => $self->{clients}->{ $options{id} }->{vector}, + client_pubkey => defined($self->{clients}->{ $options{id} }->{client_pubkey}) + && $self->{clients}->{ $options{id} }->{client_pubkey} ne '' + ? 
$self->{clients}->{ $options{id} }->{client_pubkey} + : $self->get_core_config(name => 'pubkey'), + client_privkey => defined($self->{clients}->{ $options{id} }->{client_privkey}) + && $self->{clients}->{ $options{id} }->{client_privkey} ne '' + ? $self->{clients}->{ $options{id} }->{client_privkey} + : $self->get_core_config(name => 'privkey'), + target_type => defined($self->{clients}->{ $options{id} }->{target_type}) ? + $self->{clients}->{ $options{id} }->{target_type} : + 'tcp', + target_path => defined($self->{clients}->{ $options{id} }->{target_path}) + ? $self->{clients}->{ $options{id} }->{target_path} + : $self->{clients}->{ $options{id} }->{address} . ':' . $self->{clients}->{ $options{id} }->{port}, + config_core => $self->get_core_config(), + logger => $self->{logger} + ); + $self->{clients}->{ $options{id} }->{class}->init(callback => \&read_message_client); + } elsif ($self->{clients}->{ $options{id} }->{type} eq 'push_ssh') { + $self->{clients}->{$options{id}}->{class} = gorgone::modules::core::proxy::sshclient->new(logger =>$self->{logger}); + my $code = $self->{clients}->{$options{id}}->{class}->open_session( + ssh_host => $self->{clients}->{$options{id}}->{address}, + ssh_port => $self->{clients}->{$options{id}}->{ssh_port}, + ssh_username => $self->{clients}->{$options{id}}->{ssh_username}, + ssh_password => $self->{clients}->{$options{id}}->{ssh_password}, + ssh_directory => $self->{clients}->{$options{id}}->{ssh_directory}, + ssh_known_hosts => $self->{clients}->{$options{id}}->{ssh_known_hosts}, + ssh_identity => $self->{clients}->{$options{id}}->{ssh_identity}, + strict_serverkey_check => $self->{clients}->{$options{id}}->{strict_serverkey_check}, + ssh_connect_timeout => $self->{clients}->{$options{id}}->{ssh_connect_timeout} + ); + if ($code != 0) { + $self->{clients}->{ $options{id} }->{delete} = 1; + return -1; + } + } + + return 0; +} + +sub action_proxyaddnode { + my ($self, %options) = @_; + + my ($code, $data) = 
$self->json_decode(argument => $options{data}); + return if ($code == 1); + + if (defined($self->{clients}->{ $data->{id} }->{class})) { + # test if a connection parameter changed + my $changed = 0; + foreach (keys %$data) { + if (ref($data->{$_}) eq '' + && (!defined($self->{clients}->{ $data->{id} }->{$_}) + || $data->{$_} ne $self->{clients}->{ $data->{id} }->{$_})) { + $changed = 1; + last; + } + } + + if ($changed == 0) { + $self->{logger}->writeLogInfo("[proxy] Session not changed $data->{id}"); + return; + } + + $self->{logger}->writeLogInfo("[proxy] Recreate session for $data->{id}"); + # we send a pong reset. because the ping can be lost + $self->send_internal_action({ + action => 'PONGRESET', + data => '{ "data": { "id": ' . $data->{id} . ' } }', + data_noencode => 1, + token => $self->generate_token(), + target => '' + }); + + $self->{clients}->{ $data->{id} }->{class}->close(); + $self->{clients}->{ $data->{id} }->{class}->cleanup(); + } else { + $self->{internal_channels}->{ $data->{id} } = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-proxy-channel-' . 
$data->{id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PROXYREADY', + data => { + node_id => $data->{id} + } + }); + $self->{watchers}->{ $data->{id} } = $self->{loop}->io( + $self->{internal_channels}->{ $data->{id} }->get_fd(), + EV::READ, + sub { + $connector->event(channel => $data->{id}); + } + ); + } + + $self->{clients}->{ $data->{id} } = $data; + $self->{clients}->{ $data->{id} }->{delete} = 0; + $self->{clients}->{ $data->{id} }->{class} = undef; + $self->{clients}->{ $data->{id} }->{com_read_internal} = 1; +} + +sub action_proxydelnode { + my ($self, %options) = @_; + + my ($code, $data) = $self->json_decode(argument => $options{data}); + return if ($code == 1); + + if (defined($self->{clients}->{$data->{id}})) { + $self->{clients}->{ $data->{id} }->{delete} = 1; + } +} + +sub action_proxycloseconnection { + my ($self, %options) = @_; + + my ($code, $data) = $self->json_decode(argument => $options{data}); + return if ($code == 1); + + return if (!defined($self->{clients}->{ $data->{id} })); + + $self->{logger}->writeLogInfo("[proxy] Close connectionn for $data->{id}"); + + $self->{clients}->{ $data->{id} }->{class}->close(); + $self->{clients}->{ $data->{id} }->{class}->cleanup(); + $self->{clients}->{ $data->{id} }->{delete} = 0; + $self->{clients}->{ $data->{id} }->{class} = undef; +} + +sub close_connections { + my ($self, %options) = @_; + + foreach (keys %{$self->{clients}}) { + if (defined($self->{clients}->{$_}->{class}) && $self->{clients}->{$_}->{type} eq 'push_zmq') { + $self->{logger}->writeLogInfo("[proxy] Close connection for $_"); + $self->{clients}->{$_}->{class}->close(); + $self->{clients}->{$_}->{class}->cleanup(); + } + } +} + +sub proxy_ssh { + my ($self, %options) = @_; + + my ($code, $decoded_data) = $self->json_decode(argument => $options{data}); + return if ($code == 1); + 
+ if ($options{action} eq 'PING') { + if ($self->{clients}->{ $options{target_client} }->{class}->ping() == -1) { + $self->{clients}->{ $options{target_client} }->{delete} = 1; + } else { + $self->{clients}->{ $options{target_client} }->{com_read_internal} = 1; + $self->send_internal_action({ + action => 'PONG', + data => { data => { id => $options{target_client} } }, + token => $options{token}, + target => '' + }); + } + return; + } + + my $retry = 1; # manage server disconnected + while ($retry >= 0) { + my ($status, $data_ret) = $self->{clients}->{ $options{target_client} }->{class}->action( + action => $options{action}, + data => $decoded_data, + target_direct => $options{target_direct}, + target => $options{target}, + token => $options{token} + ); + + if (ref($data_ret) eq 'ARRAY') { + foreach (@{$data_ret}) { + $self->send_log( + code => $_->{code}, + token => $options{token}, + logging => $decoded_data->{logging}, + instant => $decoded_data->{instant}, + data => $_->{data} + ); + } + last; + } + + $self->{logger}->writeLogDebug("[proxy] Sshclient return: [message = $data_ret->{message}]"); + if ($status == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $decoded_data->{logging}, + data => $data_ret + ); + last; + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $decoded_data->{logging}, + data => $data_ret + ); + + # quit because it's not a ssh connection issue + last if ($self->{clients}->{ $options{target_client} }->{class}->is_connected() != 0); + $retry--; + } +} + +sub proxy { + my (%options) = @_; + + if ($options{message} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/m) { + return undef; + } + my ($action, $token, $target_complete, $data) = ($1, $2, $3, $4); + $connector->{logger}->writeLogDebug( + "[proxy] Send message: [channel = $options{channel}] [action = $action] [token = $token] [target = $target_complete] [data = $data]" + ); + + if 
($action eq 'PROXYADDNODE') { + $connector->action_proxyaddnode(data => $data); + return; + } elsif ($action eq 'PROXYDELNODE') { + $connector->action_proxydelnode(data => $data); + return; + } elsif ($action eq 'BCASTLOGGER' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastlogger(data => $data); + return; + } elsif ($action eq 'BCASTCOREKEY' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastcorekey(data => $data); + return; + } elsif ($action eq 'PROXYCLOSECONNECTION') { + $connector->action_proxycloseconnection(data => $data); + return; + } + + if ($target_complete !~ /^(.+)~~(.+)$/) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "unknown target format '$target_complete'" + } + ); + return; + } + + my ($target_client, $target, $target_direct) = ($1, $2, 1); + if ($target_client ne $target) { + $target_direct = 0; + } + if (!defined($connector->{clients}->{$target_client}->{class})) { + $connector->{logger}->writeLogInfo("[proxy] connect for $target_client"); + if ($connector->connect(id => $target_client) != 0) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "cannot connect on target node '$target_client'" + } + ); + return; + } + } + + if ($connector->{clients}->{$target_client}->{type} eq 'push_zmq') { + my ($status, $msg) = $connector->{clients}->{$target_client}->{class}->send_message( + action => $action, + token => $token, + target => $target_direct == 0 ? 
$target : undef, + data => $data + ); + if ($status != 0) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "Send message problem for '$target': $msg" + } + ); + $connector->{logger}->writeLogError("[proxy] Send message problem for '$target': $msg"); + $connector->{clients}->{$target_client}->{delete} = 1; + } + } elsif ($connector->{clients}->{$target_client}->{type} eq 'push_ssh') { + $connector->proxy_ssh( + action => $action, + data => $data, + target_client => $target_client, + target => $target, + target_direct => $target_direct, + token => $token + ); + } +} + +sub event { + my ($self, %options) = @_; + + my $socket; + if (defined($options{channel})) { + if (defined($self->{clients}->{ $options{channel} })) { + $self->{logger}->writeLogDebug("[proxy] event channel $options{channel} delete: $self->{clients}->{ $options{channel} }->{delete} com_read_internal: $self->{clients}->{ $options{channel} }->{com_read_internal}"); + } + return if (defined($self->{clients}->{ $options{channel} }) + && ( $self->{clients}->{ $options{channel} }->{com_read_internal} == 0 + || $self->{clients}->{ $options{channel} }->{delete} == 1) + ); + + $socket = $options{channel} eq 'control' ? 
$self->{internal_socket} : $self->{internal_channels}->{ $options{channel} }; + } else { + $socket = $options{socket}; + $options{channel} = 'control'; + } + + while ($socket->has_pollin()) { + my ($message) = $self->read_message(socket => $socket); + next if (!defined($message)); + + proxy(message => $message, channel => $options{channel}); + if ($self->{stop} == 1 && (time() - $self->{exit_timeout}) > $self->{stop_time}) { + $self->exit_process(); + } + return if (defined($self->{clients}->{ $options{channel} }) + &&($self->{clients}->{ $options{channel} }->{com_read_internal} == 0 + || $self->{clients}->{ $options{channel} }->{delete} == 1) + ); + } +} + +sub periodic_exec { + foreach (keys %{$connector->{clients}}) { + if (defined($connector->{clients}->{$_}->{delete}) && $connector->{clients}->{$_}->{delete} == 1) { + $connector->send_internal_action({ + action => 'PONGRESET', + data => '{ "data": { "id": ' . $_ . ' } }', + data_noencode => 1, + token => $connector->generate_token(), + target => '' + }); + if (defined($connector->{clients}->{$_}->{class})) { + $connector->{clients}->{$_}->{class}->close(); + $connector->{clients}->{$_}->{class}->cleanup(); + } + # if the connection to the node is not established, we stop listening for new event for this destination, + # so event will be stored in zmq buffer until we start processing them again (see proxy_addnode) + # zmq queues have a size limit (high water mark), so if the node never connects, we lose some messages, + # preventing us from having memory leaks or other inconvenient problems. 
+ delete $connector->{watchers}->{$_}; + $connector->{clients}->{$_}->{class} = undef; + $connector->{clients}->{$_}->{delete} = 0; + $connector->{clients}->{$_}->{com_read_internal} = 0; + $connector->{logger}->writeLogInfo("[proxy] periodic close connection for $_"); + next; + } + } + + foreach (keys %{$connector->{clients}}) { + $connector->event(channel => $_); + } + + if ($connector->{stop} == 1) { + $connector->exit_process(); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-proxy-' . $self->{pool_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PROXYREADY', + data => { + pool_id => $self->{pool_id} + } + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io( + $self->{internal_socket}->get_fd(), + EV::READ, + sub { + $connector->event(channel => 'control'); + } + ); + + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/proxy/hooks.pm b/gorgone/gorgone/modules/core/proxy/hooks.pm new file mode 100644 index 00000000000..1319abad40e --- /dev/null +++ b/gorgone/gorgone/modules/core/proxy/hooks.pm @@ -0,0 +1,1227 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::proxy::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::frame; +use gorgone::standard::misc; +use gorgone::class::core; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::core::proxy::class; +use File::Basename; +use MIME::Base64; +use Digest::MD5::File qw(file_md5_hex); +use Fcntl; +use Time::HiRes; +use Try::Tiny; +use Archive::Tar; +use File::Find; + +$Archive::Tar::SAME_PERMISSIONS = 1; +$Archive::Tar::WARN = 0; + +=begin comment +for each proxy processus, we have: + one control channel (DEALER identity: gorgone-proxy-$poolid) + one channel by client (DEALER identity: gorgone-proxy-channel-$nodeid) +=cut + +use constant NAMESPACE => 'core'; +use constant NAME => 'proxy'; +use constant EVENTS => [ + { event => 'PROXYREADY' }, + { event => 'REMOTECOPY', uri => '/remotecopy', method => 'POST' }, + { event => 'SETLOGS' }, # internal. Shouldn't be used by third party clients + { event => 'PONG' }, # internal. Shouldn't be used by third party clients + { event => 'REGISTERNODES' }, # internal. Shouldn't be used by third party clients + { event => 'UNREGISTERNODES' }, # internal. Shouldn't be used by third party clients + { event => 'PROXYADDNODE' }, # internal. Shouldn't be used by third party clients + { event => 'PROXYDELNODE' }, # internal. Shouldn't be used by third party clients + { event => 'PROXYADDSUBNODE' }, # internal. Shouldn't be used by third party clients + { event => 'PONGRESET' }, # internal. 
Shouldn't be used by third party clients + { event => 'PROXYCLOSECONNECTION' }, + { event => 'PROXYSTOPREADCHANNEL' } +]; + +my $config_core; +my $config; + +my $synctime_error = 0; +my $synctime_nodes = {}; # get last time retrieved +my $synctime_lasttime; +my $synctime_option; +my $synctimeout_option; +my $ping_interval; + +my $last_pong = {}; +my $register_nodes = {}; +# With static routes we have a pathscore. Dynamic no pathscore. +# Dynamic comes from PONG result +# algo is: we use static routes first. after we use dynamic routes +# { +# subnode_id => { +# static => { +# parent_id1 => 1, +# parent_id2 => 2, +# }, +# dynamic => { +# parent_id3 => 1, +# parent_id5 => 1, +# } +# } +# } +# +my $register_subnodes = {}; +my $constatus_ping = {}; +my $parent_ping = {}; +my $pools = {}; +my $pools_pid = {}; +my $nodes_pool = {}; +my $prevails = {}; +my $prevails_subnodes = {}; +my $rr_current = 0; +my $stop = 0; + +# httpserver is only for pull wss client +my $httpserver = {}; + +my ($external_socket, $core_id); + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + + $synctime_option = defined($config->{synchistory_time}) ? $config->{synchistory_time} : 60; + $synctimeout_option = defined($config->{synchistory_timeout}) ? $config->{synchistory_timeout} : 30; + $ping_interval = defined($config->{ping}) ? $config->{ping} : 60; + $config->{pong_discard_timeout} = defined($config->{pong_discard_timeout}) ? $config->{pong_discard_timeout} : 300; + $config->{pong_max_timeout} = defined($config->{pong_max_timeout}) ? $config->{pong_max_timeout} : 3; + $config->{pool} = defined($config->{pool}) && $config->{pool} =~ /(\d+)/ ? 
$1 : 5; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + $synctime_lasttime = Time::HiRes::time(); + $core_id = $options{id}; + $external_socket = $options{external_socket}; + for my $pool_id (1..$config->{pool}) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + if (defined($config->{httpserver}->{enable}) && $config->{httpserver}->{enable} eq 'true') { + create_httpserver_child(dbh => $options{dbh}, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[proxy] Cannot decode json data: " . $options{frame}->getLastError()); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'proxy - cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'PONG') { + return undef if (!defined($data->{data}->{id}) || $data->{data}->{id} eq ''); + $constatus_ping->{ $data->{data}->{id} }->{in_progress_ping} = 0; + $constatus_ping->{ $data->{data}->{id} }->{ping_timeout} = 0; + $last_pong->{ $data->{data}->{id} } = time(); + $constatus_ping->{ $data->{data}->{id} }->{last_ping_recv} = time(); + $constatus_ping->{ $data->{data}->{id} }->{nodes} = $data->{data}->{data}; + $constatus_ping->{ $data->{data}->{id} }->{ping_ok}++; + register_subnodes(%options, id => $data->{data}->{id}, subnodes => $data->{data}->{data}); + $options{logger}->writeLogInfo("[proxy] Pong received from '" . $data->{data}->{id} . 
"'"); + return undef; + } + + if ($options{action} eq 'PONGRESET') { + return undef if (!defined($data->{data}->{id}) || $data->{data}->{id} eq ''); + if (defined($constatus_ping->{ $data->{data}->{id} })) { + $constatus_ping->{ $data->{data}->{id} }->{in_progress_ping} = 0; + $constatus_ping->{ $data->{data}->{id} }->{ping_timeout} = 0; + $constatus_ping->{ $data->{data}->{id} }->{ping_failed}++; + } + $options{logger}->writeLogInfo("[proxy] PongReset received from '" . $data->{data}->{id} . "'"); + return undef; + } + + if ($options{action} eq 'UNREGISTERNODES') { + unregister_nodes(%options, data => $data); + return undef; + } + + if ($options{action} eq 'REGISTERNODES') { + register_nodes(%options, data => $data); + return undef; + } + + if ($options{action} eq 'PROXYREADY') { + if (defined($data->{pool_id})) { + $pools->{ $data->{pool_id} }->{ready} = 1; + # we sent proxyaddnode to sync + foreach my $node_id (keys %$nodes_pool) { + next if ($nodes_pool->{$node_id} != $data->{pool_id}); + routing( + action => 'PROXYADDNODE', + target => $node_id, + frame => gorgone::class::frame->new(data => $register_nodes->{$node_id}), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + } elsif (defined($data->{httpserver})) { + $httpserver->{ready} = 1; + } elsif (defined($data->{node_id}) && defined($synctime_nodes->{ $data->{node_id} })) { + $synctime_nodes->{ $data->{node_id} }->{channel_ready} = 1; + } + return undef; + } + + if ($options{action} eq 'SETLOGS') { + setlogs(dbh => $options{dbh}, data => $data, token => $options{token}, logger => $options{logger}); + return undef; + } + + my ($code, $is_ctrl_channel, $target_complete, $target_parent, $target) = pathway( + action => $options{action}, + target => $options{target}, + dbh => $options{dbh}, + token => $options{token}, + gorgone => $options{gorgone}, + logger => $options{logger} + ); + return if ($code == -1); + + # we check if we have all proxy connected + if 
(gorgone::class::core::waiting_ready_pool() == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'proxy - still not ready' }, + json_encode => 1 + }); + return ; + } + + if ($options{action} eq 'GETLOG') { + if (defined($register_nodes->{$target_parent}) && $register_nodes->{$target_parent}->{type} eq 'push_ssh') { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => "proxy - can't get log a ssh target or through a ssh node" }, + json_encode => 1 + }); + return undef; + } + + if (defined($register_nodes->{$target})) { + if ($synctime_nodes->{$target}->{synctime_error} == -1 && get_sync_time(dbh => $options{dbh}, node_id => $target) == -1) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - problem to getlog' }, + json_encode => 1 + }); + return undef; + } + + if ($synctime_nodes->{$target}->{in_progress} == 1) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - getlog already in progress' }, + json_encode => 1 + }); + return undef; + } + + # We put the good time to get + my $ctime = $synctime_nodes->{$target}->{ctime}; + $options{frame}->setData({ ctime => $ctime }); + $options{frame}->setRawData(); + $synctime_nodes->{$target}->{in_progress} = 1; + $synctime_nodes->{$target}->{in_progress_time} = time(); + } + } + + my $action = $options{action}; + my $bulk_actions; + push @{$bulk_actions}, $options{frame}->getRawData(); + + if ($options{action} eq 'REMOTECOPY' && defined($register_nodes->{$target_parent}) && + $register_nodes->{$target_parent}->{type} ne 'push_ssh') { + $action = 'PROCESSCOPY'; + ($code, $bulk_actions) = 
prepare_remote_copy( + dbh => $options{dbh}, + data => $data, + target => $target_parent, + token => $options{token}, + logger => $options{logger} + ); + return if ($code == -1); + } + + my $pool_id; + if (defined($nodes_pool->{$target_parent})) { + $pool_id = $nodes_pool->{$target_parent}; + } else { + $pool_id = rr_pool(); + $nodes_pool->{$target_parent} = $pool_id; + } + + my $identity = 'gorgone-proxy-' . $pool_id; + if ($is_ctrl_channel == 0 && $synctime_nodes->{$target_parent}->{channel_ready} == 1) { + $identity = 'gorgone-proxy-channel-' . $target_parent; + } + if ($register_nodes->{$target_parent}->{type} eq 'wss' || $register_nodes->{$target_parent}->{type} eq 'pullwss') { + $identity = 'gorgone-proxy-httpserver'; + } + + foreach my $raw_data_ref (@{$bulk_actions}) { + # Mode zmq pull + if ($register_nodes->{$target_parent}->{type} eq 'pull') { + pull_request( + gorgone => $options{gorgone}, + dbh => $options{dbh}, + action => $action, + raw_data_ref => $raw_data_ref, + token => $options{token}, + target_parent => $target_parent, + target => $target, + logger => $options{logger} + ); + next; + } + + $options{gorgone}->send_internal_message( + identity => $identity, + action => $action, + raw_data_ref => $raw_data_ref, + token => $options{token}, + target => $target_complete + ); + } +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $pool_id (keys %$pools) { + if (defined($pools->{$pool_id}->{running}) && $pools->{$pool_id}->{running} == 1) { + $options{logger}->writeLogDebug("[proxy] Send TERM signal for pool '" . $pool_id . 
"'"); + CORE::kill('TERM', $pools->{$pool_id}->{pid}); + } + } + + if (defined($httpserver->{running}) && $httpserver->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send TERM signal for httpserver"); + CORE::kill('TERM', $httpserver->{pid}); + } +} + +sub kill { + my (%options) = @_; + + foreach (keys %{$pools}) { + if ($pools->{$_}->{running} == 1) { + $options{logger}->writeLogDebug("[proxy] Send KILL signal for pool '" . $_ . "'"); + CORE::kill('KILL', $pools->{$_}->{pid}); + } + } + + if (defined($httpserver->{running}) && $httpserver->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send KILL signal for httpserver"); + CORE::kill('KILL', $httpserver->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check_create_child { + my (%options) = @_; + + return if ($stop == 1); + + # Check if we need to create a child + for my $pool_id (1..$config->{pool}) { + if (!defined($pools->{$pool_id})) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + if (defined($httpserver->{pid}) && $httpserver->{pid} == $pid) { + $httpserver = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_httpserver_child(logger => $options{logger}); + } + next; + } + + # Not me + next if (!defined($pools_pid->{$pid})); + + # If someone dead, we recreate + my $pool_id = $pools_pid->{$pid}; + delete $pools->{$pools_pid->{$pid}}; + delete $pools_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } + + check_create_child(dbh => $options{dbh}, logger => $options{logger}); + + $count++ if (defined($httpserver->{running}) && $httpserver->{running} == 1); + foreach (keys %$pools) { + $count++ if ($pools->{$_}->{running} == 1); + } + + # We check synclog/ping/ping 
request timeout + foreach (keys %$synctime_nodes) { + if ($register_nodes->{$_}->{type} =~ /^(?:pull|wss|pullwss)$/ && $constatus_ping->{$_}->{in_progress_ping} == 1) { + my $ping_timeout = defined($register_nodes->{$_}->{ping_timeout}) ? $register_nodes->{$_}->{ping_timeout} : 30; + if ((time() - $constatus_ping->{$_}->{in_progress_ping_pull}) > $ping_timeout) { + $constatus_ping->{$_}->{in_progress_ping} = 0; + $options{logger}->writeLogInfo("[proxy] Ping timeout from '" . $_ . "'"); + } + } + if ($register_nodes->{$_}->{type} !~ /^(?:pull|wss|pullwss)$/ && $constatus_ping->{$_}->{in_progress_ping} == 1) { + if (time() - $constatus_ping->{ $_ }->{last_ping_sent} > $config->{pong_discard_timeout}) { + $options{logger}->writeLogInfo("[proxy] Ping timeout from '" . $_ . "'"); + $constatus_ping->{$_}->{in_progress_ping} = 0; + $constatus_ping->{$_}->{ping_timeout}++; + $constatus_ping->{$_}->{ping_failed}++; + if (($constatus_ping->{$_}->{ping_timeout} % $config->{pong_max_timeout}) == 0) { + $options{logger}->writeLogInfo("[proxy] Ping max timeout reached from '" . $_ . 
"'"); + routing( + target => $_, + action => 'PROXYCLOSECONNECTION', + frame => gorgone::class::frame->new(data => { id => $_ }), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + } + } + + if ($synctime_nodes->{$_}->{in_progress} == 1 && + time() - $synctime_nodes->{$_}->{in_progress_time} > $synctimeout_option) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => "proxy - getlog in timeout for '$_'" }, + json_encode => 1 + }); + $synctime_nodes->{$_}->{in_progress} = 0; + } + } + + # We check if we need synclogs + if ($stop == 0 && + time() - $synctime_lasttime > $synctime_option) { + $synctime_lasttime = time(); + full_sync_history(gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + + if ($stop == 0) { + ping_send(gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + + # We clean all parents + foreach (keys %$parent_ping) { + if (time() - $parent_ping->{$_}->{last_time} > 1800) { # 30 minutes + delete $parent_ping->{$_}; + } + } + + return ($count, 1); +} + +sub broadcast { + my (%options) = @_; + + foreach my $pool_id (keys %$pools) { + next if ($pools->{$pool_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-proxy-' . 
$pool_id, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); + } + + if (defined($httpserver->{ready}) && $httpserver->{ready} == 1) { + $options{gorgone}->send_internal_message( + identity => 'gorgone-proxy-httpserver', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); + } +} + +# Specific functions +sub pathway { + my (%options) = @_; + + my $target = $options{target}; + if (!defined($target)) { + $options{logger}->writeLogDebug('[proxy] need a valid node id'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - need a valid node id' }, + json_encode => 1 + }); + return -1; + } + + if (!defined($register_nodes->{$target}) && !defined($register_subnodes->{$target})) { + $options{logger}->writeLogDebug("[proxy] unknown target '$target'"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - unknown target ' . $target }, + json_encode => 1 + }); + return -1; + } + + my @targets = (); + if (defined($register_nodes->{$target})) { + push @targets, $target; + } + if (defined($register_subnodes->{$target}->{static})) { + push @targets, sort { $register_subnodes->{$target}->{static}->{$a} <=> $register_subnodes->{$target}->{static}->{$b} } keys %{$register_subnodes->{$target}->{static}}; + } + if (defined($register_subnodes->{$target}->{dynamic})) { + push @targets, keys %{$register_subnodes->{$target}->{dynamic}}; + } + + my $first_target; + foreach (@targets) { + if ($register_nodes->{$_}->{type} =~ /^(?:pull|wss|pullwss)$/ && !defined($register_nodes->{$_}->{identity})) { + $options{logger}->writeLogDebug("[proxy] skip node " . $register_nodes->{$_}->{type} . 
" target '$_' for node '$target' - never connected"); + next; + } + + # we let passthrough. it's for control channel + if ($options{action} =~ /^(?:PING|PROXYADDNODE|PROXYDELNODE|PROXYADDSUBNODE|PROXYCLOSECONNECTION|PROXYSTOPREADCHANNEL)$/ && $_ eq $target) { + return (1, 1, $_ . '~~' . $target, $_, $target); + } + + if (!defined($last_pong->{$_}) || $last_pong->{$_} == 0 || (time() - $config->{pong_discard_timeout} < $last_pong->{$_})) { + $options{logger}->writeLogDebug("[proxy] choose node target '$_' for node '$target'"); + return (1, 0, $_ . '~~' . $target, $_, $target); + } + + $first_target = $_ if (!defined($first_target)); + } + + if (!defined($first_target)) { + $options{logger}->writeLogDebug("[proxy] no pathway for target '$target'"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - no pathway for target ' . $target }, + json_encode => 1 + }); + return -1; + } + + # if there are here, we use the first pathway (because all pathways had an issue) + return (1, 0, $first_target . '~~' . $target, $first_target, $target); +} + +sub setlogs { + my (%options) = @_; + + if (!defined($options{data}->{data}->{id}) || $options{data}->{data}->{id} eq '') { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - need a id to setlogs' }, + json_encode => 1 + }); + return undef; + } + if ($synctime_nodes->{ $options{data}->{data}->{id} }->{in_progress} == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - skip setlogs response. Maybe too much time to get response. 
Retry' }, + json_encode => 1 + }); + return undef; + } + + $options{logger}->writeLogInfo("[proxy] Received setlogs for '$options{data}->{data}->{id}'"); + + # we have received the setlogs (it's like a pong response. not a problem if we received the pong after) + $constatus_ping->{ $options{data}->{data}->{id} }->{in_progress_ping} = 0; + $constatus_ping->{ $options{data}->{data}->{id} }->{ping_timeout} = 0; + $constatus_ping->{ $options{data}->{data}->{id} }->{last_ping_recv} = time(); + $last_pong->{ $options{data}->{data}->{id} } = time() if (defined($last_pong->{ $options{data}->{data}->{id} })); + + $synctime_nodes->{ $options{data}->{data}->{id} }->{in_progress} = 0; + + my $ctime_recent = 0; + # Transaction. We don't use last_id (problem if it's clean the sqlite table). + my $status; + $status = $options{dbh}->transaction_mode(1); + return -1 if ($status == -1); + + foreach (@{$options{data}->{data}->{result}}) { + # wrong timestamp inserted. we skip it + if ($_->{ctime} !~ /[0-9\.]/) { + $options{logger}->writeLogDebug("[proxy] wrong ctime for '$options{data}->{data}->{id}'"); + next; + } + $status = gorgone::standard::library::add_history({ + dbh => $options{dbh}, + etime => $_->{etime}, + code => $_->{code}, + token => $_->{token}, + instant => $_->{instant}, + data => $_->{data} + }); + last if ($status == -1); + $ctime_recent = $_->{ctime} if ($ctime_recent < $_->{ctime}); + } + if ($status == 0 && update_sync_time(dbh => $options{dbh}, id => $options{data}->{data}->{id}, ctime => $ctime_recent) == 0) { + $status = $options{dbh}->commit(); + return -1 if ($status == -1); + $options{dbh}->transaction_mode(0); + + $synctime_nodes->{ $options{data}->{data}->{id} }->{ctime} = $ctime_recent if ($ctime_recent != 0); + } else { + $options{dbh}->rollback(); + $options{dbh}->transaction_mode(0); + return -1; + } + + # We try to send it to parents + foreach (keys %$parent_ping) { + gorgone::class::core::send_message_parent( + router_type => 
$parent_ping->{$_}->{router_type}, + identity => $_, + response_type => 'SYNCLOGS', + data => { id => $core_id }, + code => GORGONE_ACTION_BEGIN, + token => undef, + ); + } + + return 0; +} + +sub ping_send { + my (%options) = @_; + + my $nodes_id = [keys %$register_nodes]; + $nodes_id = [$options{node_id}] if (defined($options{node_id})); + my $current_time = time(); + foreach my $id (@$nodes_id) { + next if ($constatus_ping->{$id}->{in_progress_ping} == 1 || $current_time < $constatus_ping->{$id}->{next_ping}); + + $constatus_ping->{$id}->{last_ping_sent} = $current_time; + $constatus_ping->{$id}->{next_ping} = $current_time + $ping_interval; + if ($register_nodes->{$id}->{type} eq 'push_zmq' || $register_nodes->{$id}->{type} eq 'push_ssh') { + $constatus_ping->{$id}->{in_progress_ping} = 1; + routing(action => 'PING', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } elsif ($register_nodes->{$id}->{type} =~ /^(?:pull|wss|pullwss)$/) { + $constatus_ping->{$id}->{in_progress_ping} = 1; + $constatus_ping->{$id}->{in_progress_ping_pull} = time(); + routing(action => 'PING', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + } +} + +sub synclog { + my (%options) = @_; + + # We check if we need synclogs + if ($stop == 0) { + $synctime_lasttime = time(); + full_sync_history(gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } +} + +sub full_sync_history { + my (%options) = @_; + + foreach my $id (keys %{$register_nodes}) { + if ($register_nodes->{$id}->{type} eq 'push_zmq') { + routing(action => 'GETLOG', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } elsif ($register_nodes->{$id}->{type} =~ /^(?:pull|wss|pullwss)$/) { + routing(action => 
'GETLOG', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + } +} + +sub update_sync_time { + my (%options) = @_; + + # Nothing to update (no insert before) + return 0 if ($options{ctime} == 0); + + my ($status) = $options{dbh}->query({ + query => "REPLACE INTO gorgone_synchistory (`id`, `ctime`) VALUES (?, ?)", + bind_values => [$options{id}, $options{ctime}] + } + ); + return $status; +} + +sub get_sync_time { + my (%options) = @_; + + my ($status, $sth) = $options{dbh}->query({ query => "SELECT * FROM gorgone_synchistory WHERE id = '" . $options{node_id} . "'" }); + if ($status == -1) { + $synctime_nodes->{$options{node_id}}->{synctime_error} = -1; + return -1; + } + + $synctime_nodes->{$options{node_id}}->{synctime_error} = 0; + if (my $row = $sth->fetchrow_hashref()) { + $synctime_nodes->{ $row->{id} }->{ctime} = $row->{ctime}; + $synctime_nodes->{ $row->{id} }->{in_progress} = 0; + $synctime_nodes->{ $row->{id} }->{in_progress_time} = -1; + } + + return 0; +} + +sub is_all_proxy_ready { + my $ready = 0; + for my $pool_id (1..$config->{pool}) { + if (defined($pools->{$pool_id}) && $pools->{$pool_id}->{ready} == 1) { + $ready++; + } + } + + return ($ready * 100 / $config->{pool}); +} + +sub rr_pool { + my (%options) = @_; + + while (1) { + $rr_current = $rr_current % $config->{pool}; + if ($pools->{$rr_current + 1}->{ready} == 1) { + $rr_current++; + return $rr_current; + } + $rr_current++; + } +} + +sub create_child { + my (%options) = @_; + + if (!defined($core_id) || $core_id =~ /^\s*$/) { + $options{logger}->writeLogError("[proxy] Cannot create child, need a core id"); + return ; + } + + $options{logger}->writeLogInfo("[proxy] Create module 'proxy' child process for pool id '" . $options{pool_id} . 
"'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-proxy'; + my $module = gorgone::modules::core::proxy::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + pool_id => $options{pool_id}, + core_id => $core_id, + container_id => $options{pool_id} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[proxy] PID $child_pid (gorgone-proxy) for pool id '" . $options{pool_id} . "'"); + $pools->{$options{pool_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $pools_pid->{$child_pid} = $options{pool_id}; +} + +sub create_httpserver_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[proxy] Create module 'proxy' httpserver child process"); + + my $rv = gorgone::standard::misc::mymodule_load( + logger => $options{logger}, + module => 'gorgone::modules::core::proxy::httpserver', + error_msg => "Cannot load module 'gorgone::modules::core::proxy::httpserver'" + ); + return if ($rv != 0); + + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-proxy-httpserver'; + my $module = gorgone::modules::core::proxy::httpserver->construct( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + container_id => 'httpserver' + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[proxy] PID $child_pid (gorgone-proxy-httpserver)"); + $httpserver = { pid => $child_pid, ready => 0, running => 1 }; +} + +sub pull_request { + my (%options) = @_; + + my $message = gorgone::standard::library::build_protocol( + action => $options{action}, + raw_data_ref => $options{raw_data_ref}, + token => $options{token}, + target => $options{target} + ); + + if (!defined($register_nodes->{ $options{target_parent} }->{identity})) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "proxy - node '" . 
$options{target_parent} . "' had never been connected" }, + json_encode => 1 + }); + return undef; + } + + my $identity = unpack('H*', $register_nodes->{ $options{target_parent} }->{identity}); + my ($rv, $cipher_infos) = $options{gorgone}->is_handshake_done( + identity => $identity + ); + if ($rv == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "proxy - node '" . $options{target_parent} . "' had never been connected" }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->external_core_response( + cipher_infos => $cipher_infos, + identity => $identity, + message => $message + ); +} + +sub get_constatus_result { + my (%options) = @_; + + return $constatus_ping; +} + +sub unregister_nodes { + my (%options) = @_; + + return if (!defined($options{data}->{nodes})); + + foreach my $node (@{$options{data}->{nodes}}) { + if (defined($register_nodes->{ $node->{id} }) && $register_nodes->{ $node->{id} }->{type} !~ /^(?:pull|wss|pullwss)$/) { + routing( + action => 'PROXYDELNODE', + target => $node->{id}, + frame => gorgone::class::frame->new(data => $node), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + + my $prevail = 0; + $prevail = 1 if (defined($prevails->{ $node->{id} })); + + if (defined($register_nodes->{ $node->{id} }) && $register_nodes->{ $node->{id} }->{type} =~ /^(?:pull|wss|pullwss)$/ && $prevail == 1) { + $register_nodes->{ $node->{id} }->{identity} = undef; + } + + $options{logger}->writeLogInfo("[proxy] Node '" . $node->{id} . 
"' is unregistered"); + if (defined($register_nodes->{ $node->{id} }) && $register_nodes->{ $node->{id} }->{nodes}) { + foreach my $subnode (@{$register_nodes->{ $node->{id} }->{nodes}}) { + delete $register_subnodes->{ $subnode->{id} }->{static}->{ $node->{id} } + if (defined($register_subnodes->{ $subnode->{id} }->{static}->{ $node->{id} }) && $prevail == 0); + delete $register_subnodes->{ $subnode->{id} }->{dynamic}->{ $node->{id} } + if (defined($register_subnodes->{ $subnode->{id} }->{dynamic}->{ $node->{id} })); + } + } + + delete $nodes_pool->{ $node->{id} } if (defined($nodes_pool->{ $node->{id} })); + if (defined($register_nodes->{ $node->{id} })) { + delete $register_nodes->{ $node->{id} } if ($prevail == 0); + delete $synctime_nodes->{ $node->{id} }; + delete $constatus_ping->{ $node->{id} }; + delete $last_pong->{ $node->{id} }; + } + } +} + +# It comes from PONG result. +sub register_subnodes { + my (%options) = @_; + + # we remove dynamic values + foreach my $subnode_id (keys %$register_subnodes) { + delete $register_subnodes->{$subnode_id}->{dynamic}->{ $options{id} } + if (defined($register_subnodes->{$subnode_id}->{dynamic}->{ $options{id} })); + } + + # we can add in dynamic even if it's in static (not an issue) + my $subnodes = [$options{subnodes}]; + while (1) { + last if (scalar(@$subnodes) <= 0); + + my $entry = shift(@$subnodes); + foreach (keys %$entry) { + $register_subnodes->{$_}->{dynamic}->{ $options{id} } = 1; + } + push @$subnodes, $entry->{nodes} if (defined($entry->{nodes})); + } +} + +# 'pull' type: +# - it does a REGISTERNODES without subnodes (if it already exist, no new entry created, otherwise create an entry). 
We save the uniq identity +# - PING done by proxy and with PONG we get subnodes +sub register_nodes { + my (%options) = @_; + + return if (!defined($options{data}->{nodes})); + + foreach my $node (@{$options{data}->{nodes}}) { + my ($new_node, $prevail) = (1, 0); + + # prevail = 1 means: we cannot override the old one (if it exists) + if (defined($prevails_subnodes->{ $node->{id} })) { + $options{logger}->writeLogInfo("[proxy] cannot register node '$node->{id}': already defined as a subnode [prevails]"); + next; + } + $prevail = 1 if (defined($prevails->{ $node->{id} })); + $prevails->{ $node->{id} } = 1 if (defined($node->{prevail}) && $node->{prevail} == 1); + + if ($prevail == 1) { + $options{logger}->writeLogInfo("[proxy] cannot override node '$node->{id}' registration: prevails!!!"); + } + + if (defined($register_nodes->{ $node->{id} }) && $prevail == 0) { + # we remove subnodes before + foreach my $subnode_id (keys %$register_subnodes) { + delete $register_subnodes->{$subnode_id}->{static}->{ $node->{id} } + if (defined($register_subnodes->{$subnode_id}->{static}->{ $node->{id} })); + delete $register_subnodes->{$subnode_id}->{dynamic}->{ $node->{id} } + if (defined($register_subnodes->{$subnode_id}->{dynamic}->{ $node->{id} })); + } + } + + if (defined($register_nodes->{ $node->{id} })) { + $new_node = 0; + + if ($register_nodes->{ $node->{id} }->{type} !~ /^(?:pull|wss|pullwss)$/ && $node->{type} =~ /^(?:pull|wss|pullwss)$/) { + unregister_nodes( + data => { nodes => [ { id => $node->{id} } ] }, + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + $new_node = 1; + } + } + + if ($prevail == 0) { + $register_nodes->{ $node->{id} } = $node; + if (defined($node->{nodes})) { + foreach my $subnode (@{$node->{nodes}}) { + $register_subnodes->{ $subnode->{id} } = { static => {}, dynamic => {} } if (!defined($register_subnodes->{ $subnode->{id} })); + $register_subnodes->{ $subnode->{id} }->{static}->{ $node->{id} } = 
defined($subnode->{pathscore}) && $subnode->{pathscore} =~ /[0-9]+/ ? $subnode->{pathscore} : 1; + + # subnodes also prevails. we try to unregister it + if (defined($node->{prevail}) && $node->{prevail} == 1) { + unregister_nodes( + data => { nodes => [ { id => $subnode->{id} } ] }, + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + $prevails_subnodes->{ $subnode->{id} } = 1; + } + } + } + } + + # we update identity in all cases (already created or not) + if ($node->{type} =~ /^(?:pull|wss|pullwss)$/ && defined($node->{identity})) { + $register_nodes->{ $node->{id} }->{identity} = $node->{identity}; + $last_pong->{ $node->{id} } = time() if (defined($last_pong->{ $node->{id} })); + } + + $last_pong->{ $node->{id} } = 0 if (!defined($last_pong->{ $node->{id} })); + if (!defined($synctime_nodes->{ $node->{id} })) { + $synctime_nodes->{ $node->{id} } = { + ctime => 0, + in_progress => 0, + in_progress_time => -1, + synctime_error => 0, + channel_ready => 0 + }; + get_sync_time(node_id => $node->{id}, dbh => $options{dbh}); + } + + if ($register_nodes->{ $node->{id} }->{type} !~ /^(?:pull|wss|pullwss)$/) { + if ($prevail == 1) { + routing( + action => 'PROXYADDNODE', + target => $node->{id}, + frame => gorgone::class::frame->new(data => $register_nodes->{ $node->{id} }), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } else { + routing( + action => 'PROXYADDNODE', + target => $node->{id}, + frame => gorgone::class::frame->new(data => $node), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + } + if ($new_node == 1) { + $constatus_ping->{ $node->{id} } = { + type => $node->{type}, + in_progress_ping => 0, + ping_timeout => 0, + last_ping_sent => 0, + last_ping_recv => 0, + next_ping => time() + int(rand($ping_interval)), + ping_ok => 0, + ping_failed => 0, + nodes => {} + }; + $options{logger}->writeLogInfo("[proxy] Node '" . $node->{id} . 
"' is registered"); + } + } +} + +sub prepare_remote_copy { + my (%options) = @_; + + my @actions = (); + + if (!defined($options{data}->{content}->{source}) || $options{data}->{content}->{source} eq '') { + $options{logger}->writeLogError('[proxy] Need source for remote copy'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'remote copy failed' }, + json_encode => 1 + }); + return -1; + } + if (!defined($options{data}->{content}->{destination}) || $options{data}->{content}->{destination} eq '') { + $options{logger}->writeLogError('[proxy] Need destination for remote copy'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'remote copy failed' }, + json_encode => 1 + }); + return -1; + } + + my $type; + my $filename; + my $localsrc = $options{data}->{content}->{source}; + my $src = $options{data}->{content}->{source}; + my $dst = $options{data}->{content}->{destination}; + + if (-f $options{data}->{content}->{source}) { + $type = 'regular'; + $localsrc = $src; + $filename = File::Basename::basename($src); + $dst .= $filename if ($dst =~ /\/$/); + } elsif (-d $options{data}->{content}->{source}) { + $type = 'archive'; + $filename = (defined($options{data}->{content}->{type}) ? $options{data}->{content}->{type} : 'tmp') . '-' . $options{target} . '.tar.gz'; + $localsrc = $options{data}->{content}->{cache_dir} . '/' . $filename; + + my $tar = Archive::Tar->new(); + unless (chdir($options{data}->{content}->{source})) { + $options{logger}->writeLogError("[proxy] cannot chdir: $!"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "cannot chdir: $!" 
}, + json_encode => 1 + }); + return -1; + } + + my @inventory = (); + File::Find::find ({ wanted => sub { push @inventory, $_ }, no_chdir => 1 }, '.'); + my $owner; + $owner = $options{data}->{content}->{owner} if (defined($options{data}->{content}->{owner}) && $options{data}->{content}->{owner} ne ''); + my $group; + $group = $options{data}->{content}->{group} if (defined($options{data}->{content}->{group}) && $options{data}->{content}->{group} ne ''); + foreach my $file (@inventory) { + next if ($file eq '.'); + $tar->add_files($file); + if (defined($owner) || defined($group)) { + $tar->chown($file, $owner, $group); + } + } + + unless (chdir($options{data}->{content}->{cache_dir})) { + $options{logger}->writeLogError("[proxy] cannot chdir: $!"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "cannot chdir: $!" }, + json_encode => 1 + }); + return -1; + } + unless ($tar->write($filename, COMPRESS_GZIP)) { + $options{logger}->writeLogError("[proxy] Tar failed: " . $tar->error()); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'tar failed' }, + json_encode => 1 + }); + return -1; + } + } else { + $options{logger}->writeLogError('[proxy] Unknown source for remote copy'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'unknown source' }, + json_encode => 1 + }); + return -1; + } + + sysopen(FH, $localsrc, O_RDONLY); + binmode(FH); + my $buffer_size = (defined($config->{buffer_size})) ? 
$config->{buffer_size} : 500_000; + my $buffer; + while (my $bytes = sysread(FH, $buffer, $buffer_size)) { + my $action = JSON::XS->new->encode({ + logging => $options{data}->{logging}, + content => { + status => 'inprogress', + type => $type, + chunk => { + data => MIME::Base64::encode_base64($buffer), + size => $bytes, + }, + md5 => undef, + destination => $dst, + cache_dir => $options{data}->{content}->{cache_dir} + }, + parameters => { no_fork => 1 } + }); + push @actions, \$action; + } + close FH; + + my $action = JSON::XS->new->encode({ + logging => $options{data}->{logging}, + content => { + status => 'end', + type => $type, + chunk => undef, + md5 => file_md5_hex($localsrc), + destination => $dst, + cache_dir => $options{data}->{content}->{cache_dir}, + owner => $options{data}->{content}->{owner}, + group => $options{data}->{content}->{group} + }, + parameters => { no_fork => 1 } + }); + push @actions, \$action; + + return (0, \@actions); +} + +sub setcoreid { + my (%options) = @_; + + $core_id = $options{core_id}; + check_create_child(%options); +} + +sub add_parent_ping { + my (%options) = @_; + + $options{logger}->writeLogDebug("[proxy] Parent ping '" . $options{identity} . "' is registered"); + $parent_ping->{ $options{identity} } = { last_time => time(), router_type => $options{router_type} }; +} + +1; diff --git a/gorgone/gorgone/modules/core/proxy/httpserver.pm b/gorgone/gorgone/modules/core/proxy/httpserver.pm new file mode 100644 index 00000000000..e2ba6525a9f --- /dev/null +++ b/gorgone/gorgone/modules/core/proxy/httpserver.pm @@ -0,0 +1,381 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::proxy::httpserver; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Mojolicious::Lite; +use Mojo::Server::Daemon; +use IO::Socket::SSL; +use IO::Handle; +use JSON::XS; +use IO::Poll qw(POLLIN POLLPRI); +use EV; +use HTML::Entities; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +websocket '/' => sub { + my $mojo = shift; + + $connector->{logger}->writeLogDebug('[proxy] httpserver websocket client connected: ' . $mojo->tx->connection); + + $connector->{ws_clients}->{ $mojo->tx->connection } = { + tx => $mojo->tx, + logged => 0, + last_update => time(), + authorization => $mojo->tx->req->headers->header('authorization') + }; + + $mojo->on(message => sub { + my ($mojo, $msg) = @_; + + $msg = HTML::Entities::decode_entities($msg); + + $connector->{ws_clients}->{ $mojo->tx->connection }->{last_update} = time(); + + $connector->{logger}->writeLogDebug("[proxy] httpserver receiving message: " . $msg); + + my $rv = $connector->is_logged_websocket(ws_id => $mojo->tx->connection, data => $msg); + return if ($rv == 0); + + read_message_client(data => $msg); + }); + + $mojo->on(finish => sub { + my ($mojo, $code, $reason) = @_; + + $connector->{logger}->writeLogDebug('[proxy] httpserver websocket client disconnected: ' . 
$mojo->tx->connection); + $connector->clean_websocket(ws_id => $mojo->tx->connection, finish => 1); + }); +}; + +sub construct { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{ws_clients} = {}; + $connector->{identities} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[proxy] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub run { + my ($self, %options) = @_; + + my $listen = 'reuse=1'; + if ($self->{config}->{httpserver}->{ssl} eq 'true') { + if (!defined($self->{config}->{httpserver}->{ssl_cert_file}) || $self->{config}->{httpserver}->{ssl_cert_file} eq '' || + ! -r "$self->{config}->{httpserver}->{ssl_cert_file}") { + $connector->{logger}->writeLogError("[proxy] httpserver cannot read/find ssl-cert-file"); + exit(1); + } + if (!defined($self->{config}->{httpserver}->{ssl_key_file}) || $self->{config}->{httpserver}->{ssl_key_file} eq '' || + ! -r "$self->{config}->{httpserver}->{ssl_key_file}") { + $connector->{logger}->writeLogError("[proxy] httpserver cannot read/find ssl-key-file"); + exit(1); + } + $listen .= '&cert=' . $self->{config}->{httpserver}->{ssl_cert_file} . '&key=' . 
$self->{config}->{httpserver}->{ssl_key_file}; + } + my $proto = 'http'; + if ($self->{config}->{httpserver}->{ssl} eq 'true') { + $proto = 'https'; + if (defined($self->{config}->{httpserver}->{passphrase}) && $self->{config}->{httpserver}->{passphrase} ne '') { + IO::Socket::SSL::set_defaults(SSL_passwd_cb => sub { return $connector->{config}->{httpserver}->{passphrase} } ); + } + } + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-proxy-httpserver', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PROXYREADY', + data => { + httpserver => 1 + } + }); + $self->read_zmq_events(); + + my $type = ref(Mojo::IOLoop->singleton->reactor); + my $watcher_io; + if ($type eq 'Mojo::Reactor::Poll') { + Mojo::IOLoop->singleton->reactor->{io}{ $self->{internal_socket}->get_fd()} = { + cb => sub { $connector->read_zmq_events(); }, + mode => POLLIN | POLLPRI + }; + } else { + # need EV version 4.32 + $watcher_io = EV::io( + $self->{internal_socket}->get_fd(), + EV::READ, + sub { + $connector->read_zmq_events(); + } + ); + } + + #my $socket_fd = $self->{internal_socket}->get_fd(); + #my $socket = IO::Handle->new_from_fd($socket_fd, 'r'); + #Mojo::IOLoop->singleton->reactor->io($socket => sub { + # $connector->read_zmq_events(); + #}); + #Mojo::IOLoop->singleton->reactor->watch($socket, 1, 0); + + Mojo::IOLoop->singleton->recurring(60 => sub { + $connector->{logger}->writeLogDebug('[proxy] httpserver recurring timeout loop'); + my $ctime = time(); + foreach my $ws_id (keys %{$connector->{ws_clients}}) { + if (($ctime - $connector->{ws_clients}->{$ws_id}->{last_update}) > 300) { + $connector->{logger}->writeLogDebug('[proxy] httpserver websocket client timeout reached: ' . 
$ws_id); + $connector->close_websocket( + code => 500, + message => 'timeout reached', + ws_id => $ws_id + ); + } + } + }); + + app->mode('production'); + my $daemon = Mojo::Server::Daemon->new( + app => app, + listen => [$proto . '://' . $self->{config}->{httpserver}->{address} . ':' . $self->{config}->{httpserver}->{port} . '?' . $listen] + ); + $daemon->inactivity_timeout(180); + + $daemon->run(); + + exit(0); +} + +sub read_message_client { + my (%options) = @_; + + if ($options{data} =~ /^\[PONG\]/) { + return undef if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/m); + + my ($action, $token) = ($1, $2); + my ($rv, $data) = $connector->json_decode(argument => $3); + return undef if ($rv == 1); + + $connector->send_internal_action({ + action => 'PONG', + data => $data, + token => $token, + target => '' + }); + $connector->read_zmq_events(); + } elsif ($options{data} =~ /^\[(?:REGISTERNODES|UNREGISTERNODES|SYNCLOGS|SETLOGS)\]/) { + return undef if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/ms); + + my ($action, $token, $data) = ($1, $2, $3); + + $connector->send_internal_action({ + action => $action, + data => $data, + data_noencode => 1, + token => $token, + target => '' + }); + $connector->read_zmq_events(); + } +} + +sub proxy { + my (%options) = @_; + + return undef if ($options{message} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/m); + + my ($action, $token, $target_complete, $data) = ($1, $2, $3, $4); + $connector->{logger}->writeLogDebug( + "[proxy] httpserver send message: [action = $action] [token = $token] [target = $target_complete] [data = $data]" + ); + + if ($action eq 'BCASTLOGGER' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastlogger(data => $data); + return ; + } elsif ($action eq 'BCASTCOREKEY' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastcorekey(data => $data); + 
return ; + } + + if ($target_complete !~ /^(.+)~~(.+)$/) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "unknown target format '$target_complete'" + } + ); + $connector->read_zmq_events(); + return ; + } + + my ($target_client, $target, $target_direct) = ($1, $2, 1); + if ($target_client ne $target) { + $target_direct = 0; + } + + if (!defined($connector->{identities}->{$target_client})) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "cannot get connection from target node '$target_client'" + } + ); + $connector->read_zmq_events(); + return ; + } + + my $message = gorgone::standard::library::build_protocol( + action => $action, + token => $token, + target => $target_direct == 0 ? $target : undef, + data => $data + ); + + $connector->{ws_clients}->{ $connector->{identities}->{$target_client} }->{tx}->send({text => $message}); +} + +sub read_zmq_events { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $connector->read_message(); + proxy(message => $message); + } +} + +sub is_logged_websocket { + my ($self, %options) = @_; + + return 1 if ($self->{ws_clients}->{ $options{ws_id} }->{logged} == 1); + + if (!defined($self->{ws_clients}->{ $options{ws_id} }->{authorization}) || + $self->{ws_clients}->{ $options{ws_id} }->{authorization} !~ /^\s*Bearer\s+$self->{config}->{httpserver}->{token}\s*$/) { + $self->close_websocket( + code => 500, + message => 'token authorization unallowed', + ws_id => $options{ws_id} + ); + return 0; + } + + if ($options{data} !~ /^\[REGISTERNODES\]\s+\[(?:.*?)\]\s+\[.*?\]\s+(.*)/ms) { + $self->close_websocket( + code => 500, + message => 'please registernodes', + ws_id => $options{ws_id} + ); + return 0; + } + + my $content; + eval { + $content = JSON::XS->new->decode($1); + }; + if ($@) { + $self->close_websocket( + code => 500, + message => 'decode error: unsupported 
format', + ws_id => $options{ws_id} + ); + return 0; + } + + $self->{logger}->writeLogDebug("[proxy] httpserver client " . $content->{nodes}->[0]->{id} . " is logged"); + + $self->{ws_clients}->{ $options{ws_id} }->{identity} = $content->{nodes}->[0]->{id}; + $self->{identities}->{ $content->{nodes}->[0]->{id} } = $options{ws_id}; + $self->{ws_clients}->{ $options{ws_id} }->{logged} = 1; + return 2; +} + +sub clean_websocket { + my ($self, %options) = @_; + + return if (!defined($self->{ws_clients}->{ $options{ws_id} })); + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->finish() if (!defined($options{finish})); + delete $self->{identities}->{ $self->{ws_clients}->{ $options{ws_id} }->{identity} } + if (defined($self->{ws_clients}->{ $options{ws_id} }->{identity})); + delete $self->{ws_clients}->{ $options{ws_id} }; +} + +sub close_websocket { + my ($self, %options) = @_; + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => $options{code}, + message => $options{message} + }}); + $self->clean_websocket(ws_id => $options{ws_id}); +} + +1; diff --git a/gorgone/gorgone/modules/core/proxy/sshclient.pm b/gorgone/gorgone/modules/core/proxy/sshclient.pm new file mode 100644 index 00000000000..af81969bee0 --- /dev/null +++ b/gorgone/gorgone/modules/core/proxy/sshclient.pm @@ -0,0 +1,557 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::proxy::sshclient; + +use base qw(Libssh::Session); + +use strict; +use warnings; +use Libssh::Sftp qw(:all); +use POSIX; +use gorgone::standard::misc; +use File::Basename; +use Time::HiRes; +use gorgone::standard::constants qw(:all); +use MIME::Base64; + +sub new { + my ($class, %options) = @_; + my $self = $class->SUPER::new(%options); + bless $self, $class; + + $self->{save_options} = {}; + $self->{logger} = $options{logger}; + $self->{sftp} = undef; + return $self; +} + +sub open_session { + my ($self, %options) = @_; + + $self->{save_options} = { %options }; + my $timeout = defined($options{ssh_connect_timeout}) && $options{ssh_connect_timeout} =~ /^\d+$/ ? $options{ssh_connect_timeout} : 5; + if ($self->options( + host => $options{ssh_host}, + port => $options{ssh_port}, + user => $options{ssh_username}, + sshdir => $options{ssh_directory}, + knownhosts => $options{ssh_known_hosts}, + identity => $options{ssh_identity}, + timeout => $timeout + ) != Libssh::Session::SSH_OK) { + $self->{logger}->writeLogError('[sshclient] Options method: ' . $self->error()); + return -1; + } + + if ($self->connect(SkipKeyProblem => $options{strict_serverkey_check}) != Libssh::Session::SSH_OK) { + $self->{logger}->writeLogError('[sshclient] Connect method: ' . $self->error()); + return -1; + } + + if ($self->auth_publickey_auto() != Libssh::Session::SSH_AUTH_SUCCESS) { + $self->{logger}->writeLogInfo('[sshclient] Authentication publickey auto failure: ' . $self->error(GetErrorSession => 1)); + if (!defined($options{ssh_password}) || $options{ssh_password} eq '') { + $self->{logger}->writeLogError('[sshclient] Authentication issue: no password'); + return -1; + } + if ($self->auth_password(password => $options{ssh_password}) != Libssh::Session::SSH_AUTH_SUCCESS) { + $self->{logger}->writeLogError('[sshclient] Authentication issue: ' . 
$self->error(GetErrorSession => 1)); + return -1; + } + } + + $self->{logger}->writeLogInfo( + "[sshclient] Client authenticated successfully to 'ssh://" . $options{ssh_host} . ":" . $options{ssh_port} . "'" + ); + + $self->{sftp} = Libssh::Sftp->new(session => $self); + if (!defined($self->{sftp})) { + $self->{logger}->writeLogError('[sshclient] Cannot init sftp: ' . Libssh::Sftp::error()); + $self->disconnect(); + return -1; + } + + return 0; +} + +sub local_command { + my ($self, %options) = @_; + + my ($error, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => $options{command}, + timeout => (defined($options{timeout})) ? $options{timeout} : 120, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + if ($error <= -1000) { + return (-1, { message => "command '$options{command}' execution issue: $stdout" }); + } + if ($exit_code != 0) { + return (-1, { message => "command '$options{command}' execution issue ($exit_code): $stdout" }); + } + return 0; +} + +sub ping { + my ($self, %options) = @_; + + my $ret = $self->execute_simple( + cmd => 'hostname', + timeout => 5, + timeout_nodata => 5 + ); + if ($ret->{exit} == Libssh::Session::SSH_OK) { + return 0; + } + + return -1; +} + +sub action_centcore { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{command}) || $options{data}->{content}->{command} eq '') { + $self->{logger}->writeLogError('[sshclient] Action centcore - Need command'); + return (-1, { message => 'please set command' }); + } + if (!defined($options{data}->{content}->{target}) || $options{data}->{content}->{target} eq '') { + $self->{logger}->writeLogError('[sshclient] Action centcore - Need target'); + return (-1, { message => 'please set target' }); + } + + my $centcore_cmd = defined($options{data}->{content}->{centcore_cmd}) ? $options{data}->{content}->{centcore_dir} : '/var/lib/centreon/centcore/'; + my $time = Time::HiRes::time(); + $time =~ s/\.//g; + $centcore_cmd .= $time . 
'.cmd'; + + my $data = $options{data}->{content}->{command} . ':' . $options{data}->{content}->{target}; + $data .= ':' . $options{data}->{content}->{param} if (defined($options{data}->{content}->{param}) && $options{data}->{content}->{param} ne ''); + chomp $data; + + my $file = $self->{sftp}->open(file => $centcore_cmd, accesstype => O_WRONLY|O_CREAT|O_TRUNC, mode => 0660); + if (!defined($file)) { + return (-1, { message => "cannot open stat file '$centcore_cmd': " . $self->{sftp}->error() }); + } + if ($self->{sftp}->write(handle_file => $file, data => $data . "\n") != Libssh::Session::SSH_OK) { + return (-1, { message => "cannot write stat file '$centcore_cmd': " . $self->{sftp}->error() }); + } + + $self->{logger}->writeLogDebug("[sshclient] Action centcore - '" . $centcore_cmd . "' succeeded"); + return (0, { message => 'send action_centcore succeeded' }); +} + +sub action_actionengine { + my ($self, %options) = @_; + + # validate plugins unsupported with ssh + $self->action_command( + data => { + logging => $options{data}->{logging}, + content => [ + $options{data}->{content} + ] + }, + target_direct => $options{target_direct}, + target => $options{target}, + token => $options{token} + ); +} + +sub action_command { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}) || ref($options{data}->{content}) ne 'ARRAY') { + return (-1, { message => "expected array, found '" . ref($options{data}->{content}) . "'" }); + } + + my $index = 0; + foreach my $command (@{$options{data}->{content}}) { + if (!defined($command->{command}) || $command->{command} eq '') { + return (-1, { message => "need command argument at array index '" . $index . 
"'" }); + } + $index++; + } + + my $errors = 0; + my $results; + + push @{$results}, { + code => GORGONE_ACTION_BEGIN, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + }; + + foreach my $command (@{$options{data}->{content}}) { + my ($code, $data) = (0, {}); + + push @{$results}, { + code => GORGONE_ACTION_BEGIN, + data => { + message => "command has started", + command => $command->{command}, + metadata => $command->{metadata} + } + }; + + if (defined($command->{metadata}->{centcore_proxy}) && $options{target_direct} == 0) { + ($code, $data->{data}) = $self->action_centcore( + data => { + content => { + command => $command->{metadata}->{centcore_cmd}, + target => $options{target}, + } + } + ); + $data->{code} = ($code < 0) ? GORGONE_ACTION_FINISH_KO : GORGONE_ACTION_FINISH_OK; + } else { + my $timeout = defined($command->{timeout}) && $command->{timeout} =~ /(\d+)/ ? $1 : 60; + my $timeout_nodata = defined($command->{timeout_nodata}) && $command->{timeout_nodata} =~ /(\d+)/ ? 
$1 : 30; + + my $start = time(); + my $ret = $self->execute_simple( + cmd => $command->{command}, + timeout => $timeout, + timeout_nodata => $timeout_nodata + ); + my $end = time(); + + $data = { + data => { + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $ret->{exit_code}, + stdout => $ret->{stdout}, + stderr => $ret->{stderr}, + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + }; + + if ($ret->{exit} == Libssh::Session::SSH_OK) { + $data->{data}->{message} = "command has finished successfully"; + $data->{code} = GORGONE_MODULE_ACTION_COMMAND_RESULT; + } elsif ($ret->{exit} == Libssh::Session::SSH_AGAIN) { # AGAIN means timeout + $code = -1; + $data->{data}->{message} = "command has timed out"; + $data->{code} = GORGONE_ACTION_FINISH_KO; + } else { + $code = -1; + $data->{data}->{message} = $self->error(GetErrorSession => 1); + $data->{code} = GORGONE_ACTION_FINISH_KO; + } + } + + push @{$results}, $data; + + if ($code < 0) { + if (defined($command->{continue_on_error}) && $command->{continue_on_error} == 0) { + push @{$results}, { + code => 1, + data => { + message => "commands processing has been interrupted because of error" + } + }; + return (-1, $results); + } + + $errors = 1; + } + } + + if ($errors) { + push @{$results}, { + code => GORGONE_ACTION_FINISH_KO, + data => { + message => "commands processing has finished with errors" + } + }; + return (-1, $results); + } + + push @{$results}, { + code => GORGONE_ACTION_FINISH_OK, + data => { + message => "commands processing has finished successfully" + } + }; + + return (0, $results); +} + +sub action_enginecommand { + my ($self, %options) = @_; + + my $results; + + if ($options{target_direct} == 0) { + foreach my $command (@{$options{data}->{content}->{commands}}) { + chomp $command; + my $msg = "[sshclient] Handling command 'EXTERNALCMD'"; + $msg .= ", Target: '" . $options{target} . 
"'" if (defined($options{target})); + $msg .= ", Parameters: '" . $command . "'" if (defined($command)); + $self->{logger}->writeLogInfo($msg); + my ($code, $data) = $self->action_centcore( + data => { + content => { + command => 'EXTERNALCMD', + target => $options{target}, + param => $command, + } + } + ); + } + } else { + if (!defined($options{data}->{content}->{command_file}) || $options{data}->{content}->{command_file} eq '') { + $self->{logger}->writeLogError("[sshclient] Need command_file argument"); + return (-1, { message => "need command_file argument" }); + } + + my $command_file = $options{data}->{content}->{command_file}; + + my $ret = $self->{sftp}->stat_file(file => $command_file); + if (!defined($ret)) { + $self->{logger}->writeLogError("[sshclient] Command file '$command_file' must exist"); + return (-1, { message => "command file '$command_file' must exist", error => $self->{sftp}->get_msg_error() }); + } + + if ($ret->{type} != SSH_FILEXFER_TYPE_SPECIAL) { + $self->{logger}->writeLogError("[sshclient] Command file '$command_file' must be a pipe file"); + return (-1, { message => "command file '$command_file' must be a pipe file" }); + } + + my $file = $self->{sftp}->open(file => $command_file, accesstype => O_WRONLY|O_APPEND, mode => 0660); + if (!defined($file)) { + $self->{logger}->writeLogError("[sshclient] Cannot open command file '$command_file'"); + return (-1, { message => "cannot open command file '$command_file'", error => $self->{sftp}->error() }); + } + + push @{$results}, { + code => GORGONE_ACTION_BEGIN, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + }; + + foreach my $command (@{$options{data}->{content}->{commands}}) { + $self->{logger}->writeLogInfo("[sshclient] Processing external command '" . $command . "'"); + if ($self->{sftp}->write(handle_file => $file, data => $command . 
"\n") != Libssh::Session::SSH_OK) { + $self->{logger}->writeLogError("[sshclient] Command file '$command_file' must be writeable"); + push @{$results}, { + code => GORGONE_ACTION_FINISH_KO, + data => { + message => "command file '$command_file' must be writeable", + error => $self->{sftp}->error() + } + }; + + return (-1, $results); + } + + push @{$results}, { + code => GORGONE_ACTION_FINISH_OK, + data => { + message => "command has been submitted", + command => $command + } + }; + } + } + + push @{$results}, { + code => GORGONE_ACTION_FINISH_OK, + data => { + message => "commands processing has finished" + } + }; + + return (0, $results); +} + +sub action_processcopy { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{status}) || $options{data}->{content}->{status} !~ /^(?:inprogress|end)$/) { + $self->{logger}->writeLogError('[sshclient] Action process copy - need status'); + return (-1, { message => 'please set status' }); + } + if (!defined($options{data}->{content}->{type}) || $options{data}->{content}->{type} !~ /^(?:archive|regular)$/) { + $self->{logger}->writeLogError('[sshclient] Action process copy - need type'); + return (-1, { message => 'please set type' }); + } + if (!defined($options{data}->{content}->{cache_dir}) || $options{data}->{content}->{cache_dir} eq '') { + $self->{logger}->writeLogError('[sshclient] Action process copy - need cache_dir'); + return (-1, { message => 'please set cache_dir' }); + } + if ($options{data}->{content}->{status} eq 'end' && + (!defined($options{data}->{content}->{destination}) || $options{data}->{content}->{destination} eq '')) { + $self->{logger}->writeLogError('[sshclient] Action process copy - need destination'); + return (-1, { message => 'please set destination' }); + } + + my $copy_local_file = $options{data}->{content}->{cache_dir} . '/copy_local_' . 
$options{token}; + if ($options{data}->{content}->{status} eq 'inprogress') { + my $fh; + if (!sysopen($fh, $copy_local_file, O_RDWR|O_APPEND|O_CREAT, 0660)) { + return (-1, { message => "file '$copy_local_file' open failed: $!" }); + } + binmode($fh); + syswrite( + $fh, + MIME::Base64::decode_base64($options{data}->{content}->{chunk}->{data}), + $options{data}->{content}->{chunk}->{size} + ); + close $fh; + + return (0, [{ + code => GORGONE_MODULE_ACTION_PROCESSCOPY_INPROGRESS, + data => { + message => 'process copy inprogress' + } + }]); + } + if ($options{data}->{content}->{status} eq 'end') { + my $copy_file = $options{data}->{content}->{cache_dir} . '/copy_' . $options{token}; + my $code = $self->{sftp}->copy_file(src => $copy_local_file, dst => $copy_file); + unlink($copy_local_file); + if ($code == -1) { + return (-1, { message => "cannot sftp copy file : " . $self->{sftp}->error() }); + } + + if ($options{data}->{content}->{type} eq 'archive') { + return $self->action_command( + data => { + content => [ { command => "tar zxf $copy_file -C '" . $options{data}->{content}->{destination} . "' ." 
} ] + } + ); + } + if ($options{data}->{content}->{type} eq 'regular') { + return $self->action_command( + data => { + content => [ { command => "cp -f $copy_file '$options{data}->{content}->{destination}'" } ] + } + ); + } + } + + return (-1, { message => 'process copy unknown error' }); +} + +sub action_remotecopy { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{source}) || $options{data}->{content}->{source} eq '') { + $self->{logger}->writeLogError('[sshclient] Action remote copy - need source'); + return (-1, { message => 'please set source' }); + } + if (!defined($options{data}->{content}->{destination}) || $options{data}->{content}->{destination} eq '') { + $self->{logger}->writeLogError('[sshclient] Action remote copy - need destination'); + return (-1, { message => 'please set destination' }); + } + + my ($code, $message, $data); + + my $srcname; + my $localsrc = $options{data}->{content}->{source}; + my $src = $options{data}->{content}->{source}; + my ($dst, $dst_sftp) = ($options{data}->{content}->{destination}, $options{data}->{content}->{destination}); + if ($options{target_direct} == 0) { + $dst = $src; + $dst_sftp = $src; + } + + if (-f $options{data}->{content}->{source}) { + $localsrc = $src; + $srcname = File::Basename::basename($src); + $dst_sftp .= $srcname if ($dst =~ /\/$/); + } elsif (-d $options{data}->{content}->{source}) { + $srcname = (defined($options{data}->{content}->{type}) ? $options{data}->{content}->{type} : 'tmp') . '-' . $options{target} . '.tar.gz'; + $localsrc = $options{data}->{content}->{cache_dir} . '/' . $srcname; + $dst_sftp = $options{data}->{content}->{cache_dir} . '/' . $srcname; + + ($code, $message) = $self->local_command(command => "tar czf $localsrc -C '" . $src . 
"' ."); + return ($code, $message) if ($code == -1); + } else { + return (-1, { message => 'unknown source' }); + } + + if (($code = $self->{sftp}->copy_file(src => $localsrc, dst => $dst_sftp)) == -1) { + return (-1, { message => "cannot sftp copy file : " . $self->{sftp}->error() }); + } + + if (-d $options{data}->{content}->{source}) { + ($code, $data) = $self->action_command( + data => { + content => [ { command => "tar zxf $dst_sftp -C '" . $dst . "' ." } ] + } + ); + return ($code, $data) if ($code == -1); + } + + if (defined($options{data}->{content}->{metadata}->{centcore_proxy}) && $options{target_direct} == 0) { + $self->action_centcore( + data => { + content => { + command => $options{data}->{content}->{metadata}->{centcore_cmd}, + target => $options{target}, + } + } + ); + } + + return (0, { message => 'send remotecopy succeeded' }); +} + +sub action { + my ($self, %options) = @_; + + my $func = $self->can('action_' . lc($options{action})); + if (defined($func)) { + return $func->( + $self, + data => $options{data}, + target_direct => $options{target_direct}, + target => $options{target}, + token => $options{token} + ); + } + + $self->{logger}->writeLogError("[sshclient] Unsupported action '" . $options{action} . "'"); + return (-1, { message => 'unsupported action' }); +} + +sub close { + my ($self, %options) = @_; + + $self->disconnect(); +} + +sub cleanup {} + +1; diff --git a/gorgone/gorgone/modules/core/pull/class.pm b/gorgone/gorgone/modules/core/pull/class.pm new file mode 100644 index 00000000000..230cf96dd96 --- /dev/null +++ b/gorgone/gorgone/modules/core/pull/class.pm @@ -0,0 +1,233 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::pull::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::clientzmq; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{ping_timer} = time(); + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[pipeline] -class- $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub exit_process { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[pull] $$ has quit"); + + $self->{client}->send_message( + action => 'UNREGISTERNODES', + data => { nodes => [ { id => $self->get_core_config(name => 'id') } ] }, + json_encode => 1 + ); + $self->{client}->close(); + + exit(0); +} + +sub ping { + my ($self, %options) = @_; + + 
return if ((time() - $self->{ping_timer}) < 60); + + $self->{ping_timer} = time(); + + $self->{client}->ping( + poll => $self->{poll}, + action => 'REGISTERNODES', + data => { nodes => [ { id => $self->get_core_config(name => 'id'), type => 'pull', identity => $self->{client}->get_connect_identity() } ] }, + json_encode => 1 + ); +} + +sub transmit_back { + my (%options) = @_; + + return undef if (!defined($options{message})); + + if ($options{message} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/m) { + my $data; + eval { + $data = JSON::XS->new->decode($2); + }; + if ($@) { + return $options{message}; + } + + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + return '[SETLOGS] [' . $1 . '] [] ' . $2; + } + return undef; + } elsif ($options{message} =~ /^\[BCASTCOREKEY\]\s+\[.*?\]\s+\[.*?\]\s+(.*)/m) { + my $data; + eval { + $data = JSON::XS->new->decode($1); + }; + if ($@) { + $connector->{logger}->writeLogDebug("[pull] cannot decode BCASTCOREKEY: $@"); + return undef; + } + + $connector->action_bcastcorekey(data => $data); + return undef; + } elsif ($options{message} =~ /^\[(PONG|SYNCLOGS)\]/) { + return $options{message}; + } + return undef; +} + +sub read_message_client { + my (%options) = @_; + + # We skip. 
Dont need to send it in gorgone-core + if ($options{data} =~ /^\[ACK\]/) { + return undef; + } + + $connector->{logger}->writeLogDebug("[pull] read message from external: $options{data}"); + $connector->send_internal_action({ message => $options{data} }); +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + $message = transmit_back(message => $message); + next if (!defined($message)); + + # Only send back SETLOGS and PONG + $self->{logger}->writeLogDebug("[pull] read message from internal: $message"); + $self->{client}->send_message(message => $message); + } +} + +sub periodic_exec { + my ($self, %options) = @_; + + if ($self->{stop} == 1) { + $self->exit_process(); + } + + $self->ping(); +} + +sub run { + my ($self, %options) = @_; + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-pull', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PULLREADY', + data => {} + }); + + $self->{client} = gorgone::class::clientzmq->new( + context => $self->{zmq_context}, + core_loop => $self->{loop}, + identity => 'gorgone-' . $self->get_core_config(name => 'id'), + cipher => $self->{config}->{cipher}, + vector => $self->{config}->{vector}, + client_pubkey => + defined($self->{config}->{client_pubkey}) && $self->{config}->{client_pubkey} ne '' ? + $self->{config}->{client_pubkey} : $self->get_core_config(name => 'pubkey'), + client_privkey => + defined($self->{config}->{client_privkey}) && $self->{config}->{client_privkey} ne '' ? 
+ $self->{config}->{client_privkey} : $self->get_core_config(name => 'privkey'), + target_type => $self->{config}->{target_type}, + target_path => $self->{config}->{target_path}, + config_core => $self->get_core_config(), + logger => $self->{logger}, + ping => $self->{config}->{ping}, + ping_timeout => $self->{config}->{ping_timeout} + ); + $self->{client}->init(callback => \&read_message_client); + + $self->{client}->send_message( + action => 'REGISTERNODES', + data => { nodes => [ { id => $self->get_core_config(name => 'id'), type => 'pull', identity => $self->{client}->get_connect_identity() } ] }, + json_encode => 1 + ); + + $self->periodic_exec(); + + my $watcher_timer = $self->{loop}->timer(5, 5, sub { $connector->periodic_exec() }); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/pull/hooks.pm b/gorgone/gorgone/modules/core/pull/hooks.pm new file mode 100644 index 00000000000..eb628261a92 --- /dev/null +++ b/gorgone/gorgone/modules/core/pull/hooks.pm @@ -0,0 +1,153 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::pull::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::pull::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'pull'; +use constant EVENTS => [ + { event => 'PULLREADY' } +]; + +my $config_core; +my $config; +my $pull = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'PULLREADY') { + $pull->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$pull->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-pull: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-pull', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($pull->{running}) && $pull->{running} == 1) { + $options{logger}->writeLogDebug("[pull] Send TERM signal $pull->{pid}"); + CORE::kill('TERM', $pull->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($pull->{running} == 1) { + $options{logger}->writeLogDebug("[pull] Send KILL signal for pool"); + CORE::kill('KILL', $pull->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + + return 0; +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pull->{pid}) || $pull->{pid} != $pid); + + $pull = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger 
=> $options{logger}); + } + } + + $count++ if (defined($pull->{running}) && $pull->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[pull] Create module 'pull' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-pull'; + my $module = gorgone::modules::core::pull::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[pull] PID $child_pid (gorgone-pull)"); + $pull = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/pullwss/class.pm b/gorgone/gorgone/modules/core/pullwss/class.pm new file mode 100644 index 00000000000..80cee8b3243 --- /dev/null +++ b/gorgone/gorgone/modules/core/pullwss/class.pm @@ -0,0 +1,295 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::pullwss::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Mojo::UserAgent; +use Mojo::IOLoop::Signal; +use IO::Socket::SSL; +use IO::Handle; +use JSON::XS; +use EV; +use HTML::Entities; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{ping_timer} = -1; + $connector->{connected} = 0; + $connector->{stop} = 0; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + # see https://metacpan.org/pod/EV#PERL-SIGNALS + # EV and Mojo::IOLoop don't seem to work in this module for setting a signal handler. + Mojo::IOLoop::Signal->on(TERM => sub { $self->handle_TERM() }); + Mojo::IOLoop::Signal->on(HUP => sub { $self->handle_HUP() }); + +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[pullwss] $$ Receiving order to stop..."); + $self->{stop} = 1; + + my $message = gorgone::standard::library::build_protocol( + action => 'UNREGISTERNODES', + data => { + nodes => [ + { + id => $self->get_core_config(name => 'id'), + type => 'wss', + identity => $self->get_core_config(name => 'id') + } + ] + }, + json_encode => 1 + ); + + if ($self->{connected} == 1) { + # if the websocket is still connected, we send a message to the other end so it know we are shutting down + # And we say to mojo to stop when he don't have other message to process. 
+ $self->{logger}->writeLogDebug("[pullwss] sending UNREGISTERNODES message to central before quiting as we are still connected to them."); + $self->{tx}->send( {text => $message }); + + $self->{tx}->on(drain => sub { + $self->{logger}->writeLogDebug("[pullwss] starting the stop_gracefully mojo sub"); + Mojo::IOLoop->stop_gracefully() + }); + } + else { + # if the websocket is not connected, we simply remove zmq socket and shutdown + # we need to shutdown the zmq socket ourself or there is a c++ stack trace error in the log. + disconnect_zmq_socket_and_exit(); + } +} + +sub disconnect_zmq_socket_and_exit { + $connector->{logger}->writeLogDebug("[pullwss] removing zmq socket : $connector->{internal_socket}"); + # Following my tests we need both close() and undef to correctly close the zmq socket + # If we add only one of them the following error can arise after shutdown : + # Bad file descriptor (src/epoll.cpp:73) + $connector->{internal_socket}->close(); + undef $connector->{internal_socket}; + $connector->{logger}->writeLogInfo("[pullwss] exit now."); + exit(0); +} + +sub send_message { + my ($self, %options) = @_; + my $message = HTML::Entities::encode_entities($options{message}); + $self->{tx}->send({text => $message }); +} + +sub ping { + my ($self, %options) = @_; + + return if ($self->{ping_timer} != -1 && (time() - $self->{ping_timer}) < 30); + + $self->{ping_timer} = time(); + + my $message = gorgone::standard::library::build_protocol( + action => 'REGISTERNODES', + data => { + nodes => [ + { + id => $self->get_core_config(name => 'id'), + type => 'wss', + identity => $self->get_core_config(name => 'id') + } + ] + }, + json_encode => 1 + ); + + $self->{tx}->send({text => $message }) if ($self->{connected} == 1); +} + +sub wss_connect { + my ($self, %options) = @_; + + return if ($self->{stop} == 1 or $connector->{connected} == 1); + + $self->{ua} = Mojo::UserAgent->new(); + $self->{ua}->transactor->name('gorgone mojo'); + + if 
(defined($self->{config}->{proxy}) && $self->{config}->{proxy} ne '') { + $self->{ua}->proxy->http($self->{config}->{proxy})->https($self->{config}->{proxy}); + } + + my $proto = 'ws'; + if (defined($self->{config}->{ssl}) && $self->{config}->{ssl} eq 'true') { + $proto = 'wss'; + $self->{ua}->insecure(1); + } + + $self->{ua}->websocket( + $proto . '://' . $self->{config}->{address} . ':' . $self->{config}->{port} . '/' => { Authorization => 'Bearer ' . $self->{config}->{token} } => sub { + my ($ua, $tx) = @_; + + $connector->{tx} = $tx; + $connector->{logger}->writeLogError('[pullwss] ' . $tx->res->error->{message}) if $tx->res->error; + $connector->{logger}->writeLogError('[pullwss] webSocket handshake failed') and return unless $tx->is_websocket; + + $connector->{tx}->on( + finish => sub { + my ($tx, $code, $reason) = @_; + + $connector->{connected} = 0; + $connector->{logger}->writeLogError('[pullwss] websocket closed with status ' . $code); + } + ); + $connector->{tx}->on( + message => sub { + my ($tx, $msg) = @_; + + # We skip. Dont need to send it in gorgone-core + return undef if ($msg =~ /^\[ACK\]/); + + if ($msg =~ /^\[.*\]/) { + $connector->{logger}->writeLogDebug('[pullwss] websocket message: ' . $msg); + $connector->send_internal_action({message => $msg}); + $self->read_zmq_events(); + } else { + $connector->{logger}->writeLogInfo('[pullwss] websocket message: ' . 
$msg); + } + } + ); + + $connector->{logger}->writeLogInfo('[pullwss] websocket connected'); + $connector->{connected} = 1; + $connector->{ping_timer} = -1; + $connector->ping(); + } + ); + $self->{ua}->inactivity_timeout(120); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-pullwss', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PULLWSSREADY', + data => {} + }); + $self->read_zmq_events(); + + $self->wss_connect(); + + my $socket_fd = gorgone::standard::library::zmq_getfd(socket => $self->{internal_socket}); + my $socket = IO::Handle->new_from_fd($socket_fd, 'r'); + Mojo::IOLoop->singleton->reactor->io($socket => sub { + $connector->read_zmq_events(); + }); + Mojo::IOLoop->singleton->reactor->watch($socket, 1, 0); + + Mojo::IOLoop->singleton->recurring(60 => sub { + if (!$connector->{stop}){ + $connector->{logger}->writeLogDebug('[pullwss] recurring timeout loop'); + $connector->wss_connect(); + $connector->ping(); + } + }); + Mojo::IOLoop->start() unless (Mojo::IOLoop->is_running); + + disconnect_zmq_socket_and_exit(); + +} + +sub transmit_back { + my (%options) = @_; + + return undef if (!defined($options{message})); + + if ($options{message} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/m) { + my $data; + eval { + $data = JSON::XS->new->decode($2); + }; + if ($@) { + return $options{message}; + } + + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + return '[SETLOGS] [' . $1 . '] [] ' . 
$2; + } + return undef; + } elsif ($options{message} =~ /^\[BCASTCOREKEY\]\s+\[.*?\]\s+\[.*?\]\s+(.*)/m) { + my $data; + eval { + $data = JSON::XS->new->decode($1); + }; + if ($@) { + $connector->{logger}->writeLogDebug("[pull] cannot decode BCASTCOREKEY: $@"); + return undef; + } + + $connector->action_bcastcorekey(data => $data); + return undef; + } elsif ($options{message} =~ /^\[(PONG|SYNCLOGS)\]/) { + return $options{message}; + } + return undef; +} + +sub read_zmq_events { + my ($self, %options) = @_; + + while (!$self->{stop} and $self->{internal_socket}->has_pollin()) { + my ($message) = $connector->read_message(); + $message = transmit_back(message => $message); + next if (!defined($message)); + + # Only send back SETLOGS and PONG + $connector->{logger}->writeLogDebug("[pullwss] read message from internal: $message"); + $connector->send_message(message => $message); + } +} + +1; diff --git a/gorgone/gorgone/modules/core/pullwss/hooks.pm b/gorgone/gorgone/modules/core/pullwss/hooks.pm new file mode 100644 index 00000000000..62199d5815b --- /dev/null +++ b/gorgone/gorgone/modules/core/pullwss/hooks.pm @@ -0,0 +1,169 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::pullwss::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::pullwss::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'pullwss'; +use constant EVENTS => [ + { event => 'PULLWSSREADY' } +]; + +my $config_core; +my $config; +my $pullwss = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + + if (!defined($config->{address}) || $config->{address} =~ /^\s*$/) { + $options{logger}->writeLogError('[pullwss] address option mandatory'); + $loaded = 0; + } + if (!defined($config->{port}) || $config->{port} !~ /^\d+$/) { + $options{logger}->writeLogError('[pullwss] port option mandatory'); + $loaded = 0; + } + if (!defined($config->{token}) || $config->{token} =~ /^\s*$/) { + $options{logger}->writeLogError('[pullwss] token option mandatory'); + $loaded = 0; + } + + return ($loaded, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'PULLWSSREADY') { + $pullwss->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$pullwss->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-pullwss: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-pullwss', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($pullwss->{running}) && $pullwss->{running} == 1) { + $options{logger}->writeLogDebug("[pullwss] Send TERM signal $pullwss->{pid}"); + CORE::kill('TERM', 
$pullwss->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($pullwss->{running} == 1) { + $options{logger}->writeLogDebug("[pullwss] Send KILL signal for $pullwss->{pid}"); + CORE::kill('KILL', $pullwss->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pullwss->{pid}) || $pullwss->{pid} != $pid); + + $pullwss = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + + last; + } + + $count++ if (defined($pullwss->{running}) && $pullwss->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[pullwss] Create module 'pullwss' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-pullwss'; + my $module = gorgone::modules::core::pullwss::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[pullwss] PID $child_pid (gorgone-pullwss)"); + $pullwss = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/register/class.pm b/gorgone/gorgone/modules/core/register/class.pm new file mode 100644 index 00000000000..8adab31c01a --- /dev/null +++ b/gorgone/gorgone/modules/core/register/class.pm @@ -0,0 +1,170 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::register::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{register_nodes} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[register] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub action_registerresync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { + message => 'action registerresync proceed' + } + ); + + my $config = gorgone::standard::library::read_config( + config_file => $self->{config}->{config_file}, + logger => $self->{logger} + ); + + 
my $register_temp = {}; + my $register_nodes = []; + if (defined($config->{nodes})) { + foreach (@{$config->{nodes}}) { + $self->{register_nodes}->{$_->{id}} = 1; + $register_temp->{$_->{id}} = 1; + push @{$register_nodes}, { %$_ }; + } + } + + my $unregister_nodes = []; + foreach (keys %{$self->{register_nodes}}) { + if (!defined($register_temp->{$_})) { + push @{$unregister_nodes}, { id => $_ }; + delete $self->{register_nodes}->{$_}; + } + } + + $self->send_internal_action({ + action => 'REGISTERNODES', + data => { + nodes => $register_nodes + } + }) if (scalar(@$register_nodes) > 0); + + $self->send_internal_action({ + action => 'UNREGISTERNODES', + data => { + nodes => $unregister_nodes + } + }) if (scalar(@$unregister_nodes) > 0); + + $self->{logger}->writeLogDebug("[register] Finish resync"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + message => 'action registerresync finished' + } + ); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[register] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-register', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'REGISTERREADY', + data => {} + }); + + $self->action_registerresync(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/register/hooks.pm b/gorgone/gorgone/modules/core/register/hooks.pm new file mode 100644 index 00000000000..82d49e26571 --- 
/dev/null +++ b/gorgone/gorgone/modules/core/register/hooks.pm @@ -0,0 +1,158 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::register::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::register::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'register'; +use constant EVENTS => [ + { event => 'REGISTERREADY' }, +]; + +my $config_core; +my $config; +my ($config_db_centreon); +my $register = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + if (!defined($config->{config_file}) || $config->{config_file} =~ /^\s*$/) { + $options{logger}->writeLogError("[register] Option 'config_file' mandatory"); + $loaded = 0; + } + return ($loaded, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'REGISTERREADY') { + $register->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$register->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => 
$options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgoneregister: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-register', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($register->{running}) && $register->{running} == 1) { + $options{logger}->writeLogDebug("[register] Send TERM signal $register->{pid}"); + CORE::kill('TERM', $register->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($register->{running} == 1) { + $options{logger}->writeLogDebug("[register] Send KILL signal for pool"); + CORE::kill('KILL', $register->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($register->{pid}) || $register->{pid} != $pid); + + $register = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($register->{running}) && $register->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[register] Create module 'register' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-register'; + my $module = gorgone::modules::core::register::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[register] PID $child_pid (gorgone-register)"); + $register = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git 
a/gorgone/gorgone/modules/plugins/newtest/class.pm b/gorgone/gorgone/modules/plugins/newtest/class.pm new file mode 100644 index 00000000000..2b45bbf5fe4 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/class.pm @@ -0,0 +1,662 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::plugins::newtest::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::misc; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use MIME::Base64; +use JSON::XS; +use Data::Dumper; +use gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService; +use gorgone::modules::plugins::newtest::libs::stubs::errors; +use Date::Parse; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{config_newtest} = $options{config_newtest}; + + $connector->{resync_time} = $options{config_newtest}->{resync_time}; + $connector->{last_resync_time} = time() - $connector->{resync_time}; + + $connector->{endpoint} = $options{config_newtest}->{nmc_endpoint}; + $connector->{nmc_username} = $options{config_newtest}->{nmc_username}; + $connector->{nmc_password} = $options{config_newtest}->{nmc_password}; + $connector->{nmc_timeout} = $options{config_newtest}->{nmc_timeout}; + $connector->{poller_name} = $options{config_newtest}->{poller_name}; + $connector->{list_scenario_status} = $options{config_newtest}->{list_scenario_status}; + $connector->{host_template} = $options{config_newtest}->{host_template}; + $connector->{host_prefix} = $options{config_newtest}->{host_prefix}; + $connector->{service_template} = $options{config_newtest}->{service_template}; + $connector->{service_prefix} = $options{config_newtest}->{service_prefix}; + + $connector->{clapi_generate_config_timeout} = defined($options{config}->{clapi_generate_config_timeout}) ? $options{config}->{clapi_generate_config_timeout} : 180; + $connector->{clapi_timeout} = defined($options{config}->{clapi_timeout}) ? 
$options{config}->{clapi_timeout} : 10; + $connector->{clapi_command} = defined($options{config}->{clapi_command}) && $options{config}->{clapi_command} ne '' ? $options{config}->{clapi_command} : '/usr/bin/centreon'; + $connector->{clapi_username} = $options{config}->{clapi_username}; + $connector->{clapi_password} = $options{config}->{clapi_password}; + $connector->{clapi_action_applycfg} = $options{config}->{clapi_action_applycfg}; + $connector->{cmdFile} = defined($options{config}->{centcore_cmd}) && $options{config}->{centcore_cmd} ne '' ? $options{config}->{centcore_cmd} : '/var/lib/centreon/centcore.cmd'; + $connector->{illegal_characters} = defined($options{config}->{illegal_characters}) && $options{config}->{illegal_characters} ne '' ? $options{config}->{illegal_characters} : '~!$%^&*"|\'<>?,()='; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[newtest] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +my %map_scenario_status = ( + Available => 0, Warning => 1, Failed => 2, Suspended => 2, + Canceled => 2, Unknown => 3, + OutOfRange => 0, # Not Scheduled scenario +); + +my %map_newtest_units = ( + Second => 's', Millisecond => 'ms', BytePerSecond => 'Bps', UnitLess => '', Unknown => '', +); + +my %map_service_status = ( + 0 => 'OK', 1 => 'WARNING', 2 => 'CRITICAL', 3 => 'UNKNOWN', 4 => 'PENDING', +); + +sub newtestresync_init { + my ($self, %options) = @_; + + # 
list from robot/scenario from db + # Format = { robot_name1 => { scenario1 => { last_execution_time => xxxx }, scenario2 => { } }, ... } + $self->{db_newtest} = {}; + $self->{api_newtest} = {}; + $self->{poller_id} = undef; + $self->{must_push_config} = 0; + $self->{external_commands} = []; + $self->{perfdatas} = []; + $self->{cache_robot_list_results} = undef; +} + +sub perfdata_add { + my ($self, %options) = @_; + + my $perfdata = {label => '', value => '', unit => '', warning => '', critical => '', min => '', max => ''}; + foreach (keys %options) { + next if (!defined($options{$_})); + $perfdata->{$_} = $options{$_}; + } + $perfdata->{label} =~ s/'/''/g; + push @{$self->{perfdatas}}, $perfdata; +} + +sub add_output { + my ($self, %options) = @_; + + my $str = $map_service_status{$self->{current_status}} . ': ' . $self->{current_text} . '|'; + foreach my $perf (@{$self->{perfdatas}}) { + $str .= " '" . $perf->{label} . "'=" . $perf->{value} . $perf->{unit} . ";" . $perf->{warning} . ";" . $perf->{critical} . ";" . $perf->{min} . ";" . $perf->{max}; + } + $self->{perfdatas} = []; + + $self->push_external_cmd( + cmd => 'PROCESS_SERVICE_CHECK_RESULT;' . $options{host_name} . ';' . + $options{service_name} . ';' . $self->{current_status} . ';' . $str, + time => $options{time} + ); +} + +sub convert_measure { + my ($self, %options) = @_; + + if (defined($map_newtest_units{$options{unit}}) && + $map_newtest_units{$options{unit}} eq 'ms') { + $options{value} /= 1000; + $options{unit} = 's'; + } + return ($options{value}, $options{unit}); +} + +sub get_poller_id { + my ($self, %options) = @_; + + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'SELECT id FROM nagios_server WHERE name = ?', + bind_values => [$self->{poller_name}], + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError("[newtest] cannot get poller id for poller '" . $self->{poller_name} . 
"'."); + return 1; + } + + if (!defined($datas->[0])) { + $self->{logger}->writeLogError("[newtest] cannot find poller id for poller '" . $self->{poller_name} . "'."); + return 1; + } + + $self->{poller_id} = $datas->[0]->[0]; + return 0; +} + +sub get_centreondb_cache { + my ($self, %options) = @_; + + my $request = " + SELECT host.host_name, service.service_description + FROM host + LEFT JOIN (host_service_relation, service) ON + (host_service_relation.host_host_id = host.host_id AND + service.service_id = host_service_relation.service_service_id AND + service.service_description LIKE ?) + WHERE host_name LIKE ? AND host_register = '1'"; + $request =~ s/%s/%/g; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => $request, + bind_values => [$self->{service_prefix}, $self->{host_prefix}], + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError("[newtest] cannot get robot/scenarios list from centreon db."); + return 1; + } + + foreach (@$datas) { + $self->{db_newtest}->{$_->[0]} = {} if (!defined($self->{db_newtest}->{$_->[0]})); + if (defined($_->[1])) { + $self->{db_newtest}->{$_->[0]}->{$_->[1]} = {}; + } + } + + return 0; +} + +sub get_centstoragedb_cache { + my ($self, %options) = @_; + + my $request = 'SELECT hosts.name, services.description, services.last_check + FROM hosts LEFT JOIN services ON (services.host_id = hosts.host_id AND services.description LIKE ?) + WHERE name like ?'; + $request =~ s/%s/%/g; + my ($status, $datas) = $self->{class_object_centstorage}->custom_execute( + request => $request, + bind_values => [$self->{service_prefix}, $self->{host_prefix}], + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError("[newtest] cannot get robot/scenarios list from centstorage db."); + return 1; + } + + foreach (@$datas) { + if (!defined($self->{db_newtest}->{$_->[0]})) { + $self->{logger}->writeLogError("[newtest] host '" . $_->[0] . 
"' is in centstorage DB but not in centreon config..."); + next; + } + if (defined($_->[1]) && !defined($self->{db_newtest}->{$_->[0]}->{$_->[1]})) { + $self->{logger}->writeLogError("[newtest] host scenario '" . $_->[0] . "/" . $_->[1] . "' is in centstorage DB but not in centreon config..."); + next; + } + + if (defined($_->[1])) { + $self->{db_newtest}->{$_->[0]}->{$_->[1]}->{last_execution_time} = $_->[2]; + } + } + + return 0; +} + +sub clapi_execute { + my ($self, %options) = @_; + + my $cmd = $self->{clapi_command} . " -u '" . $self->{clapi_username} . "' -p '" . $self->{clapi_password} . "' " . $options{cmd}; + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => $cmd, + logger => $self->{logger}, + timeout => $options{timeout}, + wait_exit => 1, + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[newtest] clapi execution problem for command $cmd : " . $stdout); + return -1; + } + + return 0; +} + +sub push_external_cmd { + my ($self, %options) = @_; + my $time = defined($options{time}) ? $options{time} : time(); + + push @{$self->{external_commands}}, + 'EXTERNALCMD:' . $self->{poller_id} . ':[' . $time . '] ' . $options{cmd}; +} + +sub submit_external_cmd { + my ($self, %options) = @_; + + foreach my $cmd (@{$self->{external_commands}}) { + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick(command => '/bin/echo "' . $cmd . '" >> ' . $self->{cmdFile}, + logger => $self->{logger}, + timeout => 5, + wait_exit => 1 + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[newtest] clapi execution problem for command $cmd : " . $stdout); + return -1; + } + } +} + +sub push_config { + my ($self, %options) = @_; + + if ($self->{must_push_config} == 1) { + $self->{logger}->writeLogInfo("[newtest] generation config for '$self->{poller_name}':"); + if ($self->clapi_execute(cmd => '-a POLLERGENERATE -v ' . 
$self->{poller_id}, + timeout => $self->{clapi_generate_config_timeout}) != 0) { + $self->{logger}->writeLogError("[newtest] generation config for '$self->{poller_name}': failed"); + return ; + } + $self->{logger}->writeLogInfo("[newtest] generation config for '$self->{poller_name}': succeeded."); + + $self->{logger}->writeLogInfo("[newtest] move config for '$self->{poller_name}':"); + if ($self->clapi_execute(cmd => '-a CFGMOVE -v ' . $self->{poller_id}, + timeout => $self->{clapi_timeout}) != 0) { + $self->{logger}->writeLogError("[newtest] move config for '$self->{poller_name}': failed"); + return ; + } + $self->{logger}->writeLogInfo("[newtest] move config for '$self->{poller_name}': succeeded."); + + $self->{logger}->writeLogInfo("[newtest] restart/reload config for '$self->{poller_name}':"); + if ($self->clapi_execute(cmd => '-a ' . $self->{clapi_action_applycfg} . ' -v ' . $self->{poller_id}, + timeout => $self->{clapi_timeout}) != 0) { + $self->{logger}->writeLogError("[newtest] restart/reload config for '$self->{poller_name}': failed"); + return ; + } + $self->{logger}->writeLogInfo("[newtest] restart/reload config for '$self->{poller_name}': succeeded."); + } +} + +sub get_newtest_diagnostic { + my ($self, %options) = @_; + + my $result = $self->{instance}->ListMessages('Instance', 30, 'Diagnostics', [$options{scenario}, $options{robot}]); + if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) { + $self->{logger}->writeLogError("[newtest] newtest API error 'ListMessages' method: " . $com_error); + return -1; + } + + if (!(ref($result) && defined($result->{MessageItem}))) { + $self->{logger}->writeLogError("[newtest] no diagnostic found for scenario: " . $options{scenario} . '/' . 
$options{robot}); + return 1; + } + if (ref($result->{MessageItem}) eq 'HASH') { + $result->{MessageItem} = [$result->{MessageItem}]; + } + + my $macro_value = ''; + my $macro_append = ''; + foreach my $item (@{$result->{MessageItem}}) { + if (defined($item->{SubCategory})) { + $macro_value .= $macro_append . $item->{SubCategory} . ':' . $item->{Id}; + $macro_append = '|'; + } + } + + if ($macro_value ne '') { + $self->push_external_cmd(cmd => + 'CHANGE_CUSTOM_SVC_VAR;' . $options{host_name} . ';' . + $options{service_name} . ';NEWTEST_MESSAGEID;' . $macro_value + ); + } + return 0; +} + +sub get_scenario_results { + my ($self, %options) = @_; + + # Already test the robot but no response + if (defined($self->{cache_robot_list_results}->{$options{robot}}) && + !defined($self->{cache_robot_list_results}->{$options{robot}}->{ResultItem})) { + $self->{current_text} = sprintf("[newtest] no result available for scenario '%s'", $options{scenario}); + $self->{current_status} = 3; + return 1; + } + if (!defined($self->{cache_robot_list_results}->{$options{robot}})) { + my $result = $self->{instance}->ListResults('Robot', 30, [$options{robot}]); + if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) { + $self->{logger}->writeLogError("[newtest] newtest API error 'ListResults' method: " . $com_error); + return -1; + } + + if (!(ref($result) && defined($result->{ResultItem}))) { + $self->{cache_robot_list_results}->{$options{robot}} = {}; + $self->{logger}->writeLogError("[newtest] no results found for robot: " . 
$options{robot}); + return 1; + } + + if (ref($result->{ResultItem}) eq 'HASH') { + $result->{ResultItem} = [$result->{ResultItem}]; + } + $self->{cache_robot_list_results}->{$options{robot}} = $result; + } + + # stop at first + foreach my $result (@{$self->{cache_robot_list_results}->{$options{robot}}->{ResultItem}}) { + if ($result->{MeasureName} eq $options{scenario}) { + my ($value, $unit) = $self->convert_measure( + value => $result->{ExecutionValue}, + unit => $result->{MeasureUnit} + ); + $self->{current_text} = sprintf( + "Execution status '%s'. Scenario '%s' total duration is %d%s.", + $result->{ExecutionStatus}, $options{scenario}, + $value, $unit + ); + $self->perfdata_add( + label => $result->{MeasureName}, unit => $unit, + value => sprintf("%d", $value), + min => 0 + ); + + $self->get_newtest_extra_metrics( + scenario => $options{scenario}, + robot => $options{robot}, + id => $result->{Id} + ); + + $self->{logger}->writeLogInfo("[newtest] result found for scenario: " . $options{scenario} . '/' . $options{robot}); + return 0; + } + } + + $self->{logger}->writeLogError("[newtest] no result found for scenario: " . $options{scenario} . '/' . $options{robot}); + return 1; +} + +sub get_newtest_extra_metrics { + my ($self, %options) = @_; + + my $result = $self->{instance}->ListResultChildren($options{id}); + if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) { + $self->{logger}->writeLogError("[newtest] newtest API error 'ListResultChildren' method: " . $com_error); + return -1; + } + + if (!(ref($result) && defined($result->{ResultItem}))) { + $self->{logger}->writeLogError("[newtest] no extra metrics found for scenario: " . $options{scenario} . '/' . 
$options{robot}); + return 1; + } + + if (ref($result->{ResultItem}) eq 'HASH') { + $result->{ResultItem} = [$result->{ResultItem}]; + } + foreach my $item (@{$result->{ResultItem}}) { + $self->perfdata_add( + label => $item->{MeasureName}, unit => $map_newtest_units{$item->{MeasureUnit}}, + value => $item->{ExecutionValue} + ); + } + return 0; +} + +sub get_newtest_scenarios { + my ($self, %options) = @_; + + eval { + $self->{instance}->proxy($self->{endpoint}, timeout => $self->{nmc_timeout}); + }; + if ($@) { + $self->{logger}->writeLogError('[newtest] newtest proxy error: ' . $@); + return -1; + } + + if (defined($self->{nmc_username}) && $self->{nmc_username} ne '' && + defined($self->{nmc_password}) && $self->{nmc_password} ne '') { + $self->{instance}->transport->http_request->header( + 'Authorization' => 'Basic ' . MIME::Base64::encode($self->{nmc_username} . ':' . $self->{nmc_password}, '') + ); + } + my $result = $self->{instance}->ListScenarioStatus( + $self->{list_scenario_status}->{search}, + 0, + $self->{list_scenario_status}->{instances} + ); + if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) { + $self->{logger}->writeLogError("[newtest] newtest API error 'ListScenarioStatus' method: " . 
$com_error); + return -1; + } + + if (defined($result->{InstanceScenarioItem})) { + if (ref($result->{InstanceScenarioItem}) eq 'HASH') { + $result->{InstanceScenarioItem} = [$result->{InstanceScenarioItem}]; + } + + foreach my $scenario (@{$result->{InstanceScenarioItem}}) { + my $scenario_name = $scenario->{MeasureName}; + my $robot_name = $scenario->{RobotName}; + my $last_check = sprintf("%d", Date::Parse::str2time($scenario->{LastMessageUtc}, 'UTC')); + my $host_name = sprintf($self->{host_prefix}, $robot_name); + my $service_name = sprintf($self->{service_prefix}, $scenario_name); + $self->{current_status} = $map_scenario_status{$scenario->{Status}}; + $self->{current_text} = ''; + + $host_name =~ s/[\Q$self->{illegal_characters}\E]//g; + $service_name =~ s/[\Q$self->{illegal_characters}\E]//g; + + # Add host config + if (!defined($self->{db_newtest}->{$host_name})) { + $self->{logger}->writeLogInfo("[newtest] create host '$host_name'"); + if ($self->clapi_execute(cmd => '-o HOST -a ADD -v "' . $host_name . ';' . $host_name . ';127.0.0.1;' . $self->{host_template} . ';' . $self->{poller_name} . ';"', + timeout => $self->{clapi_timeout}) == 0) { + $self->{db_newtest}->{$host_name} = {}; + $self->{must_push_config} = 1; + $self->{logger}->writeLogInfo("[newtest] create host '$host_name' succeeded."); + } + } + + # Add service config + if (defined($self->{db_newtest}->{$host_name}) && !defined($self->{db_newtest}->{$host_name}->{$service_name})) { + $self->{logger}->writeLogInfo("[newtest] create service '$service_name' for host '$host_name':"); + if ($self->clapi_execute(cmd => '-o SERVICE -a ADD -v "' . $host_name . ';' . $service_name . ';' . $self->{service_template} . 
'"', + timeout => $self->{clapi_timeout}) == 0) { + $self->{db_newtest}->{$host_name}->{$service_name} = {}; + $self->{must_push_config} = 1; + $self->{logger}->writeLogInfo("[newtest] create service '$service_name' for host '$host_name' succeeded."); + $self->clapi_execute(cmd => '-o SERVICE -a setmacro -v "' . $host_name . ';' . $service_name . ';NEWTEST_MESSAGEID;"', + timeout => $self->{clapi_timeout}); + } + } + + # Check if new message + if (defined($self->{db_newtest}->{$host_name}->{$service_name}->{last_execution_time}) && + $last_check <= $self->{db_newtest}->{$host_name}->{$service_name}->{last_execution_time}) { + $self->{logger}->writeLogInfo("[newtest] skip: service '$service_name' for host '$host_name' already submitted."); + next; + } + + if ($scenario->{Status} eq 'OutOfRange') { + $self->{current_text} = sprintf("scenario '%s' not scheduled", $scenario_name); + } else { + if ($self->{current_status} == 2) { + $self->get_newtest_diagnostic( + scenario => $scenario_name, robot => $robot_name, + host_name => $host_name, service_name => $service_name + ); + } + + if ($self->get_scenario_results(scenario => $scenario_name, robot => $robot_name, + host_name => $host_name, service_name => $service_name) == 1) { + $self->{current_text} = sprintf("No result available for scenario '%s'", $scenario_name); + $self->{current_status} = 3; + } + } + $self->add_output(time => $last_check, host_name => $host_name, service_name => $service_name); + } + } + + return 0; +} + +sub action_newtestresync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("gorgone-newtest: container $self->{container_id}: begin resync"); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action newtestresync proceed' }); + $self->newtestresync_init(); + + if ($self->get_poller_id()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => 
$options{token}, data => { message => 'cannot get poller id' }); + return -1; + } + if ($self->get_centreondb_cache()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get centreon config cache' }); + return -1; + } + if ($self->get_centstoragedb_cache()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get centreon storage cache' }); + return -1; + } + + if ($self->get_newtest_scenarios(%options)) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get newtest scenarios' }); + return -1; + } + + $self->push_config(); + $self->submit_external_cmd(); + + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action newtestresync finished' }); + return 0; +} + +sub event { + while (1) { + my ($message) = $connector->read_message(); + last if (!defined($message)); + + $connector->{logger}->writeLogDebug("gorgone-newtest: class: $message"); + if ($message =~ /^\[(.*?)\]/) { + if ((my $method = $connector->can('action_' . lc($1)))) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + my ($action, $token) = ($1, $2); + my $data = JSON::XS->new->decode($3); + $method->($connector, token => $token, data => $data); + } + } + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[newtest] $$ has quit"); + exit(0); + } + + if (time() - $connector->{resync_time} > $connector->{last_resync_time}) { + $connector->{last_resync_time} = time(); + $connector->action_newtestresync(); + } +} + +sub run { + my ($self, %options) = @_; + + # Database creation. 
We stay in the loop still there is an error + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + ##### Load objects ##### + $self->{class_object_centstorage} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + $self->{class_object_centreon} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + $SOAP::Constants::PREFIX_ENV = 'SOAP-ENV'; + $self->{instance} = gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService->new(); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-newtest-' . 
$self->{container_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'NEWTESTREADY', + data => { container_id => $self->{container_id} } + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/plugins/newtest/hooks.pm b/gorgone/gorgone/modules/plugins/newtest/hooks.pm new file mode 100644 index 00000000000..3265f864488 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/hooks.pm @@ -0,0 +1,289 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::plugins::newtest::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::core; +use gorgone::modules::plugins::newtest::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'plugins'; +use constant NAME => 'newtest'; +use constant EVENTS => [ + { event => 'NEWTESTREADY' }, + { event => 'NEWTESTRESYNC', uri => '/resync', method => 'GET' }, +]; + +my ($config_core, $config); +my ($config_db_centreon, $config_db_centstorage); +my $last_containers = {}; # Last values from config ini +my $containers = {}; +my $containers_pid = {}; +my $stop = 0; +my $timer_check = time(); +my $config_check_containers_time; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_db_centreon = $options{config_db_centreon}; + $config_check_containers_time = defined($config->{check_containers_time}) ? $config->{check_containers_time} : 3600; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + create_child(container_id => $container_id, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[newtest] Cannot decode json data: $@"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-newtest: cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'NEWTESTREADY') { + $containers->{ $data->{container_id} }->{ready} = 1; + return undef; + } + + if (!defined($data->{container_id}) || !defined($last_containers->{ $data->{container_id} })) { + 
gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-newtest: need a valid container id' }, + json_encode => 1 + }); + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$containers->{ $data->{container_id} }->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-newtest: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-newtest-' . $data->{container_id}, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $container_id (keys %$containers) { + if (defined($containers->{$container_id}->{running}) && $containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogDebug("[newtest] Send TERM signal for container '" . $container_id . "'"); + CORE::kill('TERM', $containers->{$container_id}->{pid}); + } + } +} + +sub kill_internal { + my (%options) = @_; + + foreach (keys %$containers) { + if ($containers->{$_}->{running} == 1) { + $options{logger}->writeLogDebug("[newtest] Send KILL signal for container '" . $_ . 
"'"); + CORE::kill('KILL', $containers->{$_}->{pid}); + } + } +} + +sub kill { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + if ($timer_check - time() > $config_check_containers_time) { + sync_container_childs(logger => $options{logger}); + $timer_check = time(); + } + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($containers_pid->{$pid})); + + # If someone dead, we recreate + delete $containers->{$containers_pid->{$pid}}; + delete $containers_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + # Need to check if we need to recreate (can be a container destruction)!!! + sync_container_childs(logger => $options{logger}); + } + } + + return $count; +} + +sub broadcast { + my (%options) = @_; + + foreach my $container_id (keys %$containers) { + next if ($containers->{$container_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-newtest-' . $container_id, + action => $options{action}, + frame => $options{frame}, + token => $options{token} + ); + } +} + +# Specific functions +sub get_containers { + my (%options) = @_; + + return $containers if (!defined($config->{containers})); + foreach (@{$config->{containers}}) { + next if (!defined($_->{name}) || $_->{name} eq ''); + + if (!defined($_->{nmc_endpoint}) || $_->{nmc_endpoint} eq '') { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - please set nmc_endpoint option"); + next; + } + if (!defined($_->{poller_name}) || $_->{poller_name} eq '') { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - please set poller_name option"); + next; + } + if (!defined($_->{list_scenario_status}) || $_->{list_scenario_status} eq '') { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . 
"' - please set list_scenario_status option"); + next; + } + + my $list_scenario; + eval { + $list_scenario = JSON::XS->new->decode($_->{list_scenario_status}); + }; + if ($@) { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - cannot decode list scenario option"); + next; + } + + $containers->{$_->{name}} = { + nmc_endpoint => $_->{nmc_endpoint}, + nmc_timeout => (defined($_->{nmc_timeout}) && $_->{nmc_timeout} =~ /(\d+)/) ? + $1 : 10, + nmc_username => $_->{nmc_username}, + nmc_password => $_->{nmc_password}, + poller_name => $_->{poller_name}, + list_scenario_status => $list_scenario, + resync_time => + (defined($_->{resync_time}) && $_->{resync_time} =~ /(\d+)/) ? $1 : 300, + host_template => + defined($_->{host_template}) && $_->{host_template} ne '' ? $_->{host_template} : 'generic-active-host-custom', + host_prefix => + defined($_->{host_prefix}) && $_->{host_prefix} ne '' ? $_->{host_prefix} : 'Robot-%s', + service_template => + defined($_->{service_template}) && $_->{service_template} ne '' ? $_->{service_template} : 'generic-passive-service-custom', + service_prefix => + defined($_->{service_prefix}) && $_->{service_prefix} ne '' ? $_->{service_prefix} : 'Scenario-%s', + }; + } + + return $containers; +} + +sub sync_container_childs { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + if (!defined($containers->{$container_id})) { + create_child(container_id => $container_id, logger => $options{logger}); + } + } + + # Check if need to delete on containers + foreach my $container_id (keys %$containers) { + next if (defined($last_containers->{$container_id})); + + if ($containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogDebug("[newtest] Send KILL signal for container '" . $container_id . 
"'"); + CORE::kill('KILL', $containers->{$container_id}->{pid}); + } + + delete $containers_pid->{ $containers->{$container_id}->{pid} }; + delete $containers->{$container_id}; + } +} + +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[newtest] Create 'gorgone-newtest' process for container '" . $options{container_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-newtest ' . $options{container_id}; + my $module = gorgone::modules::plugins::newtest::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage, + config_newtest => $last_containers->{$options{container_id}}, + container_id => $options{container_id} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[newtest] PID $child_pid (gorgone-newtest) for container '" . $options{container_id} . "'"); + $containers->{$options{container_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $containers_pid->{$child_pid} = $options{container_id}; +} + +1; diff --git a/gorgone/gorgone/modules/plugins/newtest/libs/stubs/ManagementConsoleService.pm b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/ManagementConsoleService.pm new file mode 100644 index 00000000000..10688740d5e --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/ManagementConsoleService.pm @@ -0,0 +1,392 @@ +package gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService; + +sub SOAP::Serializer::as_SearchMode { + my $self = shift; + my($value, $name, $type, $attr) = @_; + return [$name, {'xsi:type' => 'tns:SearchMode', %$attr}, $value]; +} + +sub SOAP::Serializer::as_MessageCategory { + my $self = shift; + my($value, $name, $type, $attr) = @_; + return [$name, {'xsi:type' => 'tns:MessageCategory', %$attr}, $value]; +} + +sub SOAP::Serializer::as_ArrayOfString { + my $self = shift; + 
my($value, $name, $type, $attr) = @_; + + my $args = []; + foreach (@$value) { + push @$args, SOAP::Data->new(name => 'string', type => 's:string', attr => {}, prefix => 'tns', value => $_); + } + return [$name, {'xsi:type' => 'tns:ArrayOfString', %$attr}, $args]; +} + +# Generated by SOAP::Lite (v0.712) for Perl -- soaplite.com +# Copyright (C) 2000-2006 Paul Kulchenko, Byrne Reese +# -- generated at [Tue Oct 7 11:04:21 2014] +# -- generated from http://192.168.6.84/nws/managementconsoleservice.asmx?wsdl +my %methods = ( +ListInformationRangesFromDWH => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListInformationRangesFromDWH', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListInformationRangesFromDWH +ListComponentStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListComponentStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListComponentStatus +IsOptionAllowed => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/IsOptionAllowed', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'optionId', type => 's:int', attr => {}), + ], # end parameters + }, # end IsOptionAllowed +SendCommand => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/SendCommand', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'commandType', type => 'tns:CommandType', attr => {}), + SOAP::Data->new(name => 'agentName', type => 's:string', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => 
{}), + ], # end parameters + }, # end SendCommand +ListInformationRanges => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListInformationRanges', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListInformationRanges +ListResources => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListResources', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListResources +GetLocationProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetLocationProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetLocationProperties +ListLocationChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListLocationChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListLocationChildren +ListBusinessChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListBusinessChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListBusinessChildren +GetMeasureProperties => { + endpoint => '', + soapaction => 
'http://www.auditec-newtest.com/GetMeasureProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'measurePath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetMeasureProperties +GetBusinessProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetBusinessProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetBusinessProperties +ListResults => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListResults', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListResults +ListRobotStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListRobotStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListRobotStatus +ListAllResults => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListAllResults', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'start', type => 's:dateTime', attr => {}), + SOAP::Data->new(name => 'end', type => 's:dateTime', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:MeasureType', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListAllResults 
+ListScenariosStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListScenariosStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + ], # end parameters + }, # end ListScenariosStatus +ListAlarms => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListAlarms', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:AlarmType', attr => {}), + SOAP::Data->new(name => 'levels', type => 'tns:AlarmLevel', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListAlarms +ListScenarios => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListScenarios', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + ], # end parameters + }, # end ListScenarios +ListResultChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListResultChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'resultId', type => 's:long', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListResultChildren +GetUserItem => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetUserItem', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'login', type => 's:string', attr => {}), + ], # end parameters + }, # end GetUserItem +ListCollectorStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListCollectorStatus', + namespace => 
'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListCollectorStatus +GetDiagnostic => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetDiagnostic', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'messageId', type => 's:long', attr => {}), + ], # end parameters + }, # end GetDiagnostic +LogIn => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/LogIn', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'login', type => 's:string', attr => {}), + SOAP::Data->new(name => 'password', type => 's:string', attr => {}), + ], # end parameters + }, # end LogIn +ListCustomGroupChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListCustomGroupChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'customGroupPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListCustomGroupChildren +GetCustomGroupProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetCustomGroupProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'customGroupPath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetCustomGroupProperties +ListMessages => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListMessages', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'categories', type => 
'tns:MessageCategory', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListMessages +ListScenarioStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListScenarioStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListScenarioStatus +ListMeasureResults => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListMeasureResults', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'measureId', type => 's:string', attr => {}), + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:MeasureType', attr => {}), + ], # end parameters + }, # end ListMeasureResults +GetUserProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetUserProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'login', type => 's:string', attr => {}), + ], # end parameters + }, # end GetUserProperties +ListMeasureChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListMeasureChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'measureId', type => 's:string', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:MeasureType', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + SOAP::Data->new(name 
=> 'measurePath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListMeasureChildren +GetLicenceOptionValue => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetLicenceOptionValue', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'optionId', type => 's:int', attr => {}), + ], # end parameters + }, # end GetLicenceOptionValue +); # end my %methods + +use SOAP::Lite; +use gorgone::modules::plugins::newtest::libs::stubs::errors; +use Exporter; +use Carp (); + +use vars qw(@ISA $AUTOLOAD @EXPORT_OK %EXPORT_TAGS); +@ISA = qw(Exporter SOAP::Lite); +@EXPORT_OK = (keys %methods); +%EXPORT_TAGS = ('all' => [@EXPORT_OK]); + +sub _call { + my ($self, $method) = (shift, shift); + my $name = UNIVERSAL::isa($method => 'SOAP::Data') ? $method->name : $method; + my %method = %{$methods{$name}}; + $self->on_fault(\&gorgone::modules::plugins::newtest::libs::stubs::errors::soapGetBad); + $self->proxy($method{endpoint} || Carp::croak "No server address (proxy) specified") + unless $self->proxy; + my @templates = @{$method{parameters}}; + my @parameters = (); + foreach my $param (@_) { + if (@templates) { + my $template = shift @templates; + my ($prefix,$typename) = SOAP::Utils::splitqname($template->type); + my $method = 'as_'.$typename; + # TODO - if can('as_'.$typename) {...} + my $result = $self->serializer->$method($param, $template->name, $template->type, $template->attr); + #print Data::Dumper::Dumper($result); + push(@parameters, $template->value($result->[2])); + } + else { + push(@parameters, $param); + } + } + $self->endpoint($method{endpoint}) + ->ns($method{namespace}) + ->on_action(sub{qq!"$method{soapaction}"!}); + $self->serializer->register_ns("http://microsoft.com/wsdl/mime/textMatching/","tm"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/soap12/","soap12"); + 
$self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/mime/","mime"); + $self->serializer->register_ns("http://www.w3.org/2001/XMLSchema","s"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/soap/","soap"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/","wsdl"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/soap/encoding/","soapenc"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/http/","http"); + $self->serializer->register_ns("http://www.auditec-newtest.com","tns"); + my $som = $self->SUPER::call($method => @parameters); + if ($self->want_som) { + return $som; + } + UNIVERSAL::isa($som => 'SOAP::SOM') ? wantarray ? $som->paramsall : $som->result : $som; +} + +sub BEGIN { + no strict 'refs'; + for my $method (qw(want_som)) { + my $field = '_' . $method; + *$method = sub { + my $self = shift->new; + @_ ? ($self->{$field} = shift, return $self) : return $self->{$field}; + } + } +} +no strict 'refs'; +for my $method (@EXPORT_OK) { + my %method = %{$methods{$method}}; + *$method = sub { + my $self = UNIVERSAL::isa($_[0] => __PACKAGE__) + ? ref $_[0] + ? shift # OBJECT + # CLASS, either get self or create new and assign to self + : (shift->self || __PACKAGE__->self(__PACKAGE__->new)) + # function call, either get self or create new and assign to self + : (__PACKAGE__->self || __PACKAGE__->self(__PACKAGE__->new)); + $self->_call($method, @_); + } +} + +sub AUTOLOAD { + my $method = substr($AUTOLOAD, rindex($AUTOLOAD, '::') + 2); + return if $method eq 'DESTROY' || $method eq 'want_som'; + die "Unrecognized method '$method'. 
List of available method(s): @EXPORT_OK\n"; +} + +1; diff --git a/gorgone/gorgone/modules/plugins/newtest/libs/stubs/errors.pm b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/errors.pm new file mode 100644 index 00000000000..ba6b951f6d7 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/errors.pm @@ -0,0 +1,31 @@ + +package gorgone::modules::plugins::newtest::libs::stubs::errors; + +use strict; +use warnings; + +our $SOAP_ERRORS; + +sub soapGetBad { + my $soap = shift; + my $res = shift; + + if(ref($res)) { + chomp( my $err = $res->faultstring ); + $SOAP_ERRORS = "SOAP FAULT: $err"; + } else { + chomp( my $err = $soap->transport->status ); + $SOAP_ERRORS = "TRANSPORT ERROR: $err"; + } + return new SOAP::SOM; +} + +sub get_error { + my $error = $SOAP_ERRORS; + + $SOAP_ERRORS = undef; + return $error; +} + +1; + diff --git a/gorgone/gorgone/modules/plugins/newtest/libs/wsdl/newtest.wsdl b/gorgone/gorgone/modules/plugins/newtest/libs/wsdl/newtest.wsdl new file mode 100644 index 00000000000..f5cb180daec --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/libs/wsdl/newtest.wsdl @@ -0,0 +1,2097 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Return user item (Obsolete). + + + + + Return a list of scenarios instances statuses (Obsolete). + + + + + Return a list of results for a specified measure/agent item. 
Ordering of results is as follows: results, subresults, measure rank within subresults (Obsolete). + + + + + Return a list of children for a specific Measure node. Result set is ordered first into a hierarchical structure (measure, sub-measure) with a post-ordering on the measures' rank and then display name (Obsolete). + + + + + Check Licence Options + + + + + Get Option Value + + + + + Method used to log user in + + + + + Return user item + + + + + Gets a list of children of a specified Business node. + + + + + Gets specified Business node properties. + + + + + Return a list of children of a specified Location node. + + + + + Return specified Location node properties. + + + + + Return a list of children of a specified CustomGroup node. + + + + + Return CustomGroup node properties. + + + + + Return a list of robots statuses. + + + + + Return a list of Collectors statuses. + + + + + Return a list of Components statuses. + + + + + Gets a list of children of a specified Business node. + + + + + Return a list of children for a specified Measure node. Result set is ordered first into a hierarchical structure (measure, sub-measure) with a post-ordering on the measures' rank and then display name. + + + + + Return measure properties. + + + + + Return a list of scenarios instances statuses. + + + + + Return a list of results for a specific measure item. Ordering of results is as follows: results, subresults, measure rank within subresults. + + + + + Return a list of results for a specific measure item. Ordering of results is as follows: results, subresults, measure rank within subresults. 
+ + + + + Return a list of sub results for a specific result item + + + + + Returns a list of alarms for specified parameter + + + + + Returns a list of messages for specified item + + + + + Returns a list of messages for specified item + + + + + Returns a list of information ranges from Newtest DWH for specified item + + + + + Returns a list of resources for specified item + + + + + Return diagnostic content for given message Id + + + + + Sends a command to a couple measure/agent + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gorgone/gorgone/modules/plugins/scom/class.pm b/gorgone/gorgone/modules/plugins/scom/class.pm new file mode 100644 index 00000000000..96fd1af1398 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/scom/class.pm @@ -0,0 +1,518 @@ +# +# 
Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::plugins::scom::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use gorgone::class::http::http; +use MIME::Base64; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{config_scom} = $options{config_scom}; + + $connector->{api_version} = $options{config_scom}->{api_version}; + $connector->{dsmhost} = $options{config_scom}->{dsmhost}; + $connector->{dsmslot} = $options{config_scom}->{dsmslot}; + $connector->{dsmmacro} = $options{config_scom}->{dsmmacro}; + $connector->{dsmalertmessage} = $options{config_scom}->{dsmalertmessage}; + $connector->{dsmrecoverymessage} = $options{config_scom}->{dsmrecoverymessage}; + $connector->{resync_time} = $options{config_scom}->{resync_time}; + $connector->{last_resync_time} = time() - $connector->{resync_time}; + $connector->{centcore_cmd} = + defined($connector->{config}->{centcore_cmd}) && $connector->{config}->{centcore_cmd} ne '' ? 
$connector->{config}->{centcore_cmd} : '/var/lib/centreon/centcore.cmd'; + + $connector->{scom_session_id} = undef; + + $connector->{dsmclient_bin} = + defined($connector->{config}->{dsmclient_bin}) ? $connector->{config}->{dsmclient_bin} : '/usr/share/centreon/bin/dsmclient.pl'; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[scom] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub http_check_error { + my ($self, %options) = @_; + + if ($options{status} == 1) { + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: scom $options{method} issue"); + return 1; + } + + my $code = $self->{http}->get_code(); + if ($code !~ /^2/) { + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: scom $options{method} issue - " . $self->{http}->get_message()); + return 1; + } + + return 0; +} + +sub get_httpauth { + my ($self, %options) = @_; + + my $httpauth = {}; + if ($self->{config_scom}->{httpauth} eq 'basic') { + $httpauth->{basic} = 1; + } elsif ($self->{config_scom}->{httpauth} eq 'ntlmv2') { + $httpauth->{ntlmv2} = 1; + } + return $httpauth; +} + +sub get_method { + my ($self, %options) = @_; + + my $api = 2016; + $api = 1801 if ($self->{api_version} == 1801); + return $self->can($options{method} . '_' . 
$api); +} + +sub submit_external_cmd { + my ($self, %options) = @_; + + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => '/bin/echo "' . $options{cmd} . '" >> ' . $self->{centcore_cmd}, + logger => $self->{logger}, + timeout => 5, + wait_exit => 1 + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[scom] Command execution problem for command $options{cmd} : " . $stdout); + return -1; + } + + return 0; +} + +sub scom_authenticate_1801 { + my ($self, %options) = @_; + + my ($status) = $self->{http}->request( + method => 'POST', hostname => '', + full_url => $self->{config_scom}->{url} . '/OperationsManager/authenticate', + credentials => 1, username => $self->{config_scom}->{username}, password => $self->{config_scom}->{password}, ntlmv2 => 1, + query_form_post => '"' . MIME::Base64::encode_base64('Windows') . '"', + header => [ + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0'], + ); + + return 1 if ($self->http_check_error(status => $status, method => 'authenticate') == 1); + + my $header = $self->{http}->get_header(name => 'Set-Cookie'); + if (defined($header) && $header =~ /SCOMSessionId=([^;]+);/i) { + $connector->{scom_session_id} = $1; + } else { + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: scom authenticate issue - error retrieving cookie"); + return 1; + } + + return 0; +} + +sub acknowledge_alert_2016 { + my ($self, %options) = @_; + + my $arguments = { + 'resolutionState' => $options{resolutionstate}, + }; + my ($status, $encoded_argument) = $self->json_encode(argument => $arguments); + return 1 if ($status == 1); + + my $curl_opts = []; + if (defined($self->{config_scom}->{curlopts})) { + foreach (keys %{$self->{config_scom}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . 
$self->{config_scom}->{curlopts}->{$_}; + } + } + my $httpauth = $self->get_httpauth(); + + ($status, my $response) = $self->{http}->request( + method => 'PUT', hostname => '', + full_url => $self->{config_scom}->{url} . 'alerts/' . $options{alert_id}, + query_form_post => $encoded_argument, + credentials => 1, + %$httpauth, + username => $self->{config_scom}->{username}, + password => $self->{config_scom}->{password}, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => $curl_opts, + ); + + return 1 if ($self->http_check_error(status => $status, method => 'data/alert') == 1); + + return 0; +} + +sub acknowledge_alert_1801 { + my ($self, %options) = @_; + +} + +sub get_realtime_scom_alerts_1801 { + my ($self, %options) = @_; + + $self->{scom_realtime_alerts} = {}; + if (!defined($connector->{scom_session_id})) { + return 1 if ($self->scom_authenticate_1801() == 1); + } + + my $arguments = { + 'classId' => '', + 'criteria' => "((ResolutionState <> '255') OR (ResolutionState <> '254'))", + 'displayColumns' => [ + 'id', 'severity', 'resolutionState', 'monitoringobjectdisplayname', 'name', 'age', 'repeatcount', 'lastModified', + ] + }; + my ($status, $encoded_argument) = $self->json_encode(argument => $arguments); + return 1 if ($status == 1); + + my $curl_opts = []; + if (defined($self->{config_scom}->{curlopts})) { + foreach (keys %{$self->{config_scom}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . $self->{config_scom}->{curlopts}->{$_}; + } + } + ($status, my $response) = $self->{http}->request( + method => 'POST', hostname => '', + full_url => $self->{config_scom}->{url} . '/OperationsManager/data/alert', + query_form_post => $encoded_argument, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + 'Cookie: SCOMSessionId=' . $self->{scom_session_id} . 
';', + ], + curl_opt => $curl_opts, + ); + + return 1 if ($self->http_check_error(status => $status, method => 'data/alert') == 1); + + return 0; +} + +sub get_realtime_scom_alerts_2016 { + my ($self, %options) = @_; + + my $curl_opts = []; + if (defined($self->{config_scom}->{curlopts})) { + foreach (keys %{$self->{config_scom}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . $self->{config_scom}->{curlopts}->{$_}; + } + } + my $httpauth = $self->get_httpauth(); + + + $self->{scom_realtime_alerts} = {}; + my ($status, $response) = $self->{http}->request( + method => 'GET', hostname => '', + full_url => $self->{config_scom}->{url} . 'alerts', + credentials => 1, + %$httpauth, + username => $self->{config_scom}->{username}, + password => $self->{config_scom}->{password}, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => $curl_opts, + ); + + return 1 if ($self->http_check_error(status => $status, method => 'alerts') == 1); + + ($status, my $entries) = $self->json_decode(argument => $response); + return 1 if ($status == 1); + + # Resolution State: + # 0 => New + # 255 => Closed + # 254 => Resolved + # 250 => Scheduled + # 247 => Awaiting Evidence + # 248 => Assigned to Engineering + # 249 => Acknowledge + # Severity: + # 0 => Information + # 1 => Warning + # 2 => Critical + foreach (@$entries) { + next if (!defined($_->{alertGenerated}->{resolutionState})); + next if ($_->{alertGenerated}->{resolutionState} == 255); + next if ($_->{alertGenerated}->{severity} == 0); + + $self->{scom_realtime_alerts}->{$_->{alertGenerated}->{id}} = { + monitoringobjectdisplayname => $_->{alertGenerated}->{monitoringObjectDisplayName}, + resolutionstate => $_->{alertGenerated}->{resolutionState}, + name => $_->{alertGenerated}->{name}, + severity => $_->{alertGenerated}->{severity}, + timeraised => $_->{alertGenerated}->{timeRaised}, + description => $_->{alertGenerated}->{description}, + }; + } + + 
return 0; +} + +sub get_realtime_slots { + my ($self, %options) = @_; + + $self->{realtime_slots} = {}; + my $request = " + SELECT hosts.instance_id, hosts.host_id, hosts.name, services.description, services.state, cv.name, cv.value, services.acknowledged, hosts.instance_id + FROM hosts, services + LEFT JOIN customvariables cv ON services.host_id = cv.host_id AND services.service_id = cv.service_id AND cv.name = '$self->{dsmmacro}' + WHERE hosts.name = '$self->{dsmhost}' AND hosts.host_id = services.host_id AND services.enabled = '1' AND services.description LIKE '$self->{dsmslot}'; + "; + my ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2); + return 1 if ($status == -1); + foreach (@$datas) { + my ($name, $id) = split('##', $$_[6]); + next if (!defined($id)); + $self->{realtime_slots}->{$id} = { + host_name => $$_[2], + host_id => $$_[1], + description => $$_[3], + state => $$_[4], + instance_id => $$_[0], + acknowledged => $$_[7], + instance_id => $$_[8], + }; + } + + return 0; +} + +sub sync_alerts { + my ($self, %options) = @_; + + my $func = $self->get_method(method => 'acknowledge_alert'); + # First we look closed alerts in centreon + foreach my $alert_id (keys %{$self->{realtime_slots}}) { + next if ($self->{realtime_slots}->{$alert_id}->{state} != 0); + next if (!defined($self->{scom_realtime_alerts}->{$alert_id}) || + $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} == 254 || + $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} == 255 + ); + $func->( + $self, + alert_id => $alert_id, + resolutionstate => 254, + ); + } + + # Look if scom alers is in centreon-dsm services + my $pool_prefix = $self->{dsmslot}; + $pool_prefix =~ s/%//g; + foreach my $alert_id (keys %{$self->{scom_realtime_alerts}}) { + if (!defined($self->{realtime_slots}->{$alert_id}) || + $self->{realtime_slots}->{$alert_id}->{state} == 0) { + my $output = $self->change_macros( + template => $self->{dsmalertmessage}, + macros 
=> $self->{scom_realtime_alerts}->{$alert_id}, + escape => '[" . time() . "]"', + ); + $self->execute_shell_cmd( + cmd => $self->{config}->{dsmclient_bin} . + ' --Host "' . $connector->{dsmhost} . '"' . + ' --pool-prefix "' . $pool_prefix . '"' . + ' --status ' . $self->{scom_realtime_alerts}->{$alert_id}->{severity} . + ' --id "' . $alert_id . '"' . + ' --output "' . $output . '"' + ); + } + } + + # Close centreon alerts not present in scom + foreach my $alert_id (keys %{$self->{realtime_slots}}) { + next if ($self->{realtime_slots}->{$alert_id}->{state} == 0); + next if (defined($self->{scom_realtime_alerts}->{$alert_id}) && $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} != 255); + my $output = $self->change_macros( + template => $self->{dsmrecoverymessage}, + macros => {}, + escape => '"', + ); + $self->execute_shell_cmd( + cmd => $self->{config}->{dsmclient_bin} . + ' --Host "' . $connector->{dsmhost} . '"' . + ' --pool-prefix "' . $pool_prefix . '"' . + ' --status 0 ' . + ' --id "' . $alert_id . '"' . + ' --output "' . $output . 
'"' + ); + } +} + +sub sync_acks { + my ($self, %options) = @_; + + my $func = $self->get_method(method => 'acknowledge_alert'); + foreach my $alert_id (keys %{$self->{realtime_slots}}) { + next if ($self->{realtime_slots}->{$alert_id}->{state} == 0); + next if ($self->{realtime_slots}->{$alert_id}->{acknowledged} == 0); + next if (!defined($self->{scom_realtime_alerts}->{$alert_id}) || + $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} == 249); + $func->( + $self, + alert_id => $alert_id, + resolutionstate => 249, + ); + } + + foreach my $alert_id (keys %{$self->{scom_realtime_alerts}}) { + next if (!defined($self->{realtime_slots}->{$alert_id}) || + $self->{realtime_slots}->{$alert_id}->{state} == 0); + $self->submit_external_cmd( + cmd => sprintf( + 'EXTERNALCMD:%s:[%s] ACKNOWLEDGE_SVC_PROBLEM;%s;%s;%s;%s;%s;%s;%s', + $self->{realtime_slots}->{$alert_id}->{instance_id}, + time(), + $self->{realtime_slots}->{$alert_id}->{host_name}, + $self->{realtime_slots}->{$alert_id}->{description}, + 2, 0, 1, 'scom connector', 'ack from scom' + ) + ); + } +} + +sub action_scomresync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action scomresync proceed' }); + $self->{logger}->writeLogDebug("[scom] Container $self->{container_id}: begin resync"); + + if ($self->get_realtime_slots()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find realtime slots' }); + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: cannot find realtime slots"); + return 1; + } + + my $func = $self->get_method(method => 'get_realtime_scom_alerts'); + if ($func->($self)) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get scom realtime alerts' }); + $self->{logger}->writeLogError("[scom] 
Container $self->{container_id}: cannot get scom realtime alerts"); + return 1; + } + + $self->sync_alerts(); + $self->sync_acks(); + + $self->{logger}->writeLogDebug("[scom] Container $self->{container_id}: finish resync"); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action scomresync finished' }); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[scom] $$ has quit"); + exit(0); + } + + if (time() - $self->{resync_time} > $connector->{last_resync_time}) { + $connector->{last_resync_time} = time(); + $connector->action_scomresync(); + } +} + +sub run { + my ($self, %options) = @_; + + # Database creation. We stay in the loop still there is an error + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + ##### Load objects ##### + $self->{class_object} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-scom-' . 
$self->{container_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'SCOMREADY', + data => { container_id => $self->{container_id} } + }); + + my $watcher_timer = $self->{loop}->timer(5, 2, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/plugins/scom/hooks.pm b/gorgone/gorgone/modules/plugins/scom/hooks.pm new file mode 100644 index 00000000000..3c2d7414fc0 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/scom/hooks.pm @@ -0,0 +1,275 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::plugins::scom::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::core; +use gorgone::modules::plugins::scom::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'plugins'; +use constant NAME => 'scom'; +use constant EVENTS => [ + { event => 'SCOMREADY' }, + { event => 'SCOMRESYNC', uri => '/resync', method => 'GET' }, +]; + +my ($config_core, $config); +my $config_db_centstorage; +my $last_containers = {}; # Last values from config ini +my $containers = {}; +my $containers_pid = {}; +my $stop = 0; +my $timer_check = time(); +my $config_check_containers_time; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_check_containers_time = defined($config->{check_containers_time}) ? $config->{check_containers_time} : 3600; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + create_child(container_id => $container_id, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[scom] Cannot decode json data: $@"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-scom: cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'SCOMREADY') { + $containers->{ $data->{container_id} }->{ready} = 1; + return undef; + } + + if (!defined($data->{container_id}) || !defined($last_containers->{ $data->{container_id} })) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data 
=> { message => 'gorgone-scom: need a valid container id' }, + json_encode => 1 + }); + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$containers->{ $data->{container_id} }->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-scom: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-scom-' . $data->{container_id}, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $container_id (keys %$containers) { + if (defined($containers->{$container_id}->{running}) && $containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogInfo("[scom] Send TERM signal for container '" . $container_id . "'"); + CORE::kill('TERM', $containers->{$container_id}->{pid}); + } + } +} + +sub kill_internal { + my (%options) = @_; + + foreach (keys %$containers) { + if ($containers->{$_}->{running} == 1) { + $options{logger}->writeLogInfo("[scom] Send KILL signal for container '" . $_ . "'"); + CORE::kill('KILL', $containers->{$_}->{pid}); + } + } +} + +sub kill { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + if ($timer_check - time() > $config_check_containers_time) { + sync_container_childs(logger => $options{logger}); + $timer_check = time(); + } + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($containers_pid->{$pid})); + + # If someone dead, we recreate + delete $containers->{$containers_pid->{$pid}}; + delete $containers_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + # Need to check if we need to recreate (can be a container destruction)!!! 
+ sync_container_childs(logger => $options{logger}); + } + } + + return $count; +} + +sub broadcast { + my (%options) = @_; + + foreach my $container_id (keys %$containers) { + next if ($containers->{$container_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-scom-' . $container_id, + action => $options{action}, + frame => $options{frame}, + token => $options{token} + ); + } +} + +# Specific functions +sub get_containers { + my (%options) = @_; + + my $containers = {}; + return $containers if (!defined($config->{containers})); + foreach (@{$config->{containers}}) { + next if (!defined($_->{name}) || $_->{name} eq ''); + + if (!defined($_->{url}) || $_->{url} eq '') { + $options{logger}->writeLogError("[scom] Cannot load container '" . $_->{name} . "' - please set url option"); + next; + } + if (!defined($_->{dsmhost}) || $_->{dsmhost} eq '') { + $options{logger}->writeLogError("[scom] Cannot load container '" . $_->{name} . "' - please set dsmhost option"); + next; + } + if (!defined($_->{dsmslot}) || $_->{dsmslot} eq '') { + $options{logger}->writeLogError("[scom] Cannot load container '" . $_->{name} . "' - please set dsmslot option"); + next; + } + + $containers->{$_->{name}} = { + url => $_->{url}, + username => $_->{username}, + password => $_->{password}, + httpauth => defined($_->{httpauth}) && $_->{httpauth} =~ /(basic|ntlmv2)/ ? $_->{httpauth} : 'basic', + resync_time => + (defined($_->{resync_time}) && $_->{resync_time} =~ /(\d+)/) ? $1 : 300, + api_version => (defined($_->{api_version}) && $_->{api_version} =~ /(2012|2016|1801)/) ? $1 : '2016', + dsmhost => $_->{dsmhost}, + dsmslot => $_->{dsmslot}, + dsmmacro => defined($_->{dsmmacro}) ? $_->{dsmmacro} : 'ALARM_ID', + dsmalertmessage => defined($_->{dsmalertmessage}) ? $_->{dsmalertmessage} : '%{monitoringobjectdisplayname} %{name}', + dsmrecoverymessage => defined($_->{dsmrecoverymessage}) ? 
$_->{dsmrecoverymessage} : 'slot ok', + curlopts => $_->{curlopts}, + }; + } + + return $containers; +} + +sub sync_container_childs { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + if (!defined($containers->{$container_id})) { + create_child(container_id => $container_id, logger => $options{logger}); + } + } + + # Check if need to delete on containers + foreach my $container_id (keys %$containers) { + next if (defined($last_containers->{$container_id})); + + if ($containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogDebug("[scom] Send KILL signal for container '" . $container_id . "'"); + CORE::kill('KILL', $containers->{$container_id}->{pid}); + } + + delete $containers_pid->{ $containers->{$container_id}->{pid} }; + delete $containers->{$container_id}; + } +} + +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[scom] Create 'gorgone-scom' process for container '" . $options{container_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-scom ' . $options{container_id}; + my $module = gorgone::modules::plugins::scom::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centstorage => $config_db_centstorage, + config_scom => $last_containers->{$options{container_id}}, + container_id => $options{container_id}, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[scom] PID $child_pid (gorgone-scom) for container '" . $options{container_id} . 
"'"); + $containers->{$options{container_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $containers_pid->{$child_pid} = $options{container_id}; +} + +1; diff --git a/gorgone/gorgone/standard/api.pm b/gorgone/gorgone/standard/api.pm new file mode 100644 index 00000000000..89e7f3a5eb0 --- /dev/null +++ b/gorgone/gorgone/standard/api.pm @@ -0,0 +1,254 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::standard::api; + +use strict; +use warnings; +use gorgone::standard::library; +use Time::HiRes; +use JSON::XS; + +my $module; +my $socket; +my $action_token; + +sub set_module { + $module = $_[0]; +} + +sub root { + my (%options) = @_; + + $options{logger}->writeLogInfo("[api] Requesting '" . $options{uri} . "' [" . $options{method} . "]"); + + $options{module}->{tokens} = {}; + $socket = $options{socket}; + $module = $options{module}; + + my $response; + if ($options{method} eq 'GET' && $options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?log\/(.*)$/) { + $response = get_log( + target => $2, + token => $3, + sync_wait => (defined($options{parameters}->{sync_wait})) ? 
$options{parameters}->{sync_wait} : undef, + parameters => $options{parameters}, + module => $options{module} + ); + } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?internal\/(\w+)\/?([\w\/]*?)$/ + && defined($options{api_endpoints}->{$options{method} . '_/internal/' . $3})) { + my @variables = split(/\//, $4); + $response = call_internal( + action => $options{api_endpoints}->{$options{method} . '_/internal/' . $3}, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + }, + log_wait => (defined($options{parameters}->{log_wait})) ? $options{parameters}->{log_wait} : undef, + sync_wait => (defined($options{parameters}->{sync_wait})) ? $options{parameters}->{sync_wait} : undef, + module => $options{module} + ); + } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?(\w+)\/(\w+)\/(\w+)\/?([\w\/]*?)$/ + && defined($options{api_endpoints}->{$options{method} . '_/' . $3 . '/' . $4 . '/' . $5})) { + my @variables = split(/\//, $6); + $response = call_action( + action => $options{api_endpoints}->{$options{method} . '_/' . $3 . '/' . $4 . '/' . $5}, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + }, + log_wait => (defined($options{parameters}->{log_wait})) ? $options{parameters}->{log_wait} : undef, + sync_wait => (defined($options{parameters}->{sync_wait})) ? 
$options{parameters}->{sync_wait} : undef, + module => $options{module} + ); + } else { + $response = '{"error":"method_unknown","message":"Method not implemented","http_response_code":"404"}'; + } + + return $response; +} + +sub stop_ev { + $module->{loop}->break(); +} + +sub call_action { + my (%options) = @_; + + $action_token = gorgone::standard::library::generate_token() if (!defined($options{token})); + + $options{module}->send_internal_action({ + socket => $socket, + action => $options{action}, + target => $options{target}, + token => $action_token, + data => $options{data}, + json_encode => 1 + }); + + my $response = '{"token":"' . $action_token . '"}'; + if (defined($options{log_wait}) && $options{log_wait} ne '') { + Time::HiRes::usleep($options{log_wait}); + $response = get_log( + target => $options{target}, + token => $action_token, + sync_wait => $options{sync_wait}, + parameters => $options{data}->{parameters}, + module => $options{module} + ); + } + + return $response; +} + +sub call_internal { + my (%options) = @_; + + $action_token = gorgone::standard::library::generate_token(); + if (defined($options{target}) && $options{target} ne '') { + return call_action( + target => $options{target}, + action => $options{action}, + token => $action_token, + data => $options{data}, + json_encode => 1, + log_wait => $options{log_wait}, + sync_wait => $options{sync_wait}, + module => $options{module} + ); + } + + $options{module}->send_internal_action({ + socket => $socket, + action => $options{action}, + token => $action_token, + data => $options{data}, + json_encode => 1 + }); + + $options{module}->{break_token} = $action_token; + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $options{module}->{loop}->timer(1, 0, \&stop_ev); + $options{module}->{loop}->run(); + last if (time() > ($ctime + $timeout) || defined($options{module}->{tokens}->{$action_token})); + } + + $options{module}->{break_token} = undef; + + my $response = 
'{"error":"no_result", "message":"No result found for action \'' . $options{action} . '\'"}'; + if (defined($options{module}->{tokens}->{$action_token}->{data})) { + my $content; + eval { + $content = JSON::XS->new->decode($options{module}->{tokens}->{$action_token}->{data}); + }; + if ($@) { + $response = '{"error":"decode_error","message":"Cannot decode response","http_response_code":"400"}'; + } else { + if (defined($content->{data})) { + eval { + $response = JSON::XS->new->encode($content->{data}); + }; + if ($@) { + $response = '{"error":"encode_error","message":"Cannot encode response","http_response_code":"400"}'; + } + } else { + $response = ''; + } + } + } + + return $response; +} + +sub get_log { + my (%options) = @_; + + if (defined($options{target}) && $options{target} ne '') { + $options{module}->send_internal_action({ + socket => $socket, + target => $options{target}, + action => 'GETLOG', + json_encode => 1 + }); + + my $sync_wait = (defined($options{sync_wait}) && $options{sync_wait} ne '') ? $options{sync_wait} : 10000; + Time::HiRes::usleep($sync_wait); + } + + my $token_log = $options{token} . '-log'; + $options{module}->send_internal_action({ + socket => $socket, + action => 'GETLOG', + token => $token_log, + data => { + token => $options{token}, + %{$options{parameters}} + }, + json_encode => 1 + }); + + $options{module}->{break_token} = $token_log; + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $options{module}->{loop}->timer(1, 0, \&stop_ev); + $options{module}->{loop}->run(); + last if (time() > ($ctime + $timeout) || defined($options{module}->{tokens}->{$token_log})); + } + + $options{module}->{break_token} = undef; + + # Return http code 200 even if no log found to avoid error in web application, an evol may be done to return 404 and process it in web application + my $response = '{"error":"no_log","message":"No log found for token","data":[],"token":"' . $options{token} . 
'","http_response_code":"200"}'; + if (defined($options{module}->{tokens}->{$token_log}) && defined($options{module}->{tokens}->{ $token_log }->{data})) { + my $content; + eval { + $content = JSON::XS->new->decode($options{module}->{tokens}->{ $token_log }->{data}); + }; + if ($@) { + $response = '{"error":"decode_error","message":"Cannot decode response"}'; + } elsif (defined($content->{data}->{result}) && scalar(@{$content->{data}->{result}}) > 0) { + eval { + $response = JSON::XS->new->encode( + { + message => "Logs found", + token => $options{token}, + data => $content->{data}->{result} + } + ); + }; + if ($@) { + $response = '{"error":"encode_error","message":"Cannot encode response"}'; + } + } + } + + return $response; +} + +1; diff --git a/gorgone/gorgone/standard/constants.pm b/gorgone/gorgone/standard/constants.pm new file mode 100644 index 00000000000..82a5fd60279 --- /dev/null +++ b/gorgone/gorgone/standard/constants.pm @@ -0,0 +1,59 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::standard::constants; + +use strict; +use warnings; +use base qw(Exporter); + +my %constants; +BEGIN { + %constants = ( + GORGONE_ACTION_BEGIN => 0, + GORGONE_ACTION_FINISH_KO => 1, + GORGONE_ACTION_FINISH_OK => 2, + GORGONE_STARTED => 3, + GORGONE_ACTION_CONTINUE => 4, + + GORGONE_MODULE_ACTION_COMMAND_RESULT => 100, + GORGONE_MODULE_ACTION_PROCESSCOPY_INPROGRESS => 101, + + GORGONE_MODULE_PIPELINE_RUN_ACTION => 200, + GORGONE_MODULE_PIPELINE_FINISH_ACTION => 201, + + GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING => 300, + GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING => 301, + + GORGONE_MODULE_CENTREON_AUTODISCO_SVC_PROGRESS => 400, + + GORGONE_MODULE_CENTREON_AUDIT_PROGRESS => 500, + + GORGONE_MODULE_CENTREON_MBIETL_PROGRESS => 600 + ); +} + +use constant \%constants; +our @EXPORT; +our @EXPORT_OK = keys %constants; + +our %EXPORT_TAGS = (all => [ @EXPORT_OK ]); + +1; diff --git a/gorgone/gorgone/standard/library.pm b/gorgone/gorgone/standard/library.pm new file mode 100644 index 00000000000..8a6426e6b9c --- /dev/null +++ b/gorgone/gorgone/standard/library.pm @@ -0,0 +1,1011 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::standard::library; + +use strict; +use warnings; +use gorgone::standard::constants qw(:all); +use ZMQ::FFI qw(ZMQ_DEALER ZMQ_ROUTER ZMQ_ROUTER_HANDOVER ZMQ_IPV6 ZMQ_TCP_KEEPALIVE + ZMQ_CONNECT_TIMEOUT ZMQ_DONTWAIT ZMQ_SNDMORE ZMQ_IDENTITY ZMQ_FD ZMQ_EVENTS + ZMQ_LINGER ZMQ_SNDHWM ZMQ_RCVHWM ZMQ_RECONNECT_IVL); +use JSON::XS; +use File::Basename; +use Crypt::PK::RSA; +use Crypt::PRNG; +use Crypt::Mode::CBC; +use File::Path; +use File::Basename; +use MIME::Base64; +use Errno; +use Time::HiRes; +use Try::Tiny; +use YAML::XS; +use gorgone::class::frame; +$YAML::XS::Boolean = 'JSON::PP'; +$YAML::XS::LoadBlessed = 1; + +our $listener; +my %zmq_type = ('ZMQ_ROUTER' => ZMQ_ROUTER, 'ZMQ_DEALER' => ZMQ_DEALER); + +sub read_config { + my (%options) = @_; + + my $config; + try { + $config = YAML::XS::LoadFile($options{config_file}); + } catch { + $options{logger}->writeLogError("[core] Parsing config file error:"); + $options{logger}->writeLogError($@); + exit(1); + }; + + return $config; +} + +####################### +# Handshake functions +####################### + +sub generate_keys { + my (%options) = @_; + + my ($privkey, $pubkey); + try { + my $pkrsa = Crypt::PK::RSA->new(); + $pkrsa->generate_key(256, 65537); + $pubkey = $pkrsa->export_key_pem('public_x509'); + $privkey = $pkrsa->export_key_pem('private'); + } catch { + $options{logger}->writeLogError("[core] Cannot generate server keys: $_"); + return 0; + }; + + return (1, $privkey, $pubkey); +} + +sub loadpubkey { + my (%options) = @_; + my $quit = defined($options{noquit}) ? 0 : 1; + my $string_key = ''; + + if (defined($options{pubkey})) { + if (!open FILE, "<" . 
$options{pubkey}) { + $options{logger}->writeLogError("[core] Cannot read file '$options{pubkey}': $!") if (defined($options{logger})); + exit(1) if ($quit); + return 0; + } + while () { + $string_key .= $_; + } + close FILE; + } else { + $string_key = $options{pubkey_str}; + } + + my $pubkey; + try { + $pubkey = Crypt::PK::RSA->new(\$string_key); + } catch { + $options{logger}->writeLogError("[core] Cannot load pubkey '$options{pubkey}': $_") if (defined($options{logger})); + exit(1) if ($quit); + return 0; + }; + if ($pubkey->is_private()) { + $options{logger}->writeLogError("[core] '$options{pubkey}' is not a public key") if (defined($options{logger})); + exit(1) if ($quit); + return 0; + } + + return (1, $pubkey); +} + +sub loadprivkey { + my (%options) = @_; + my $string_key = ''; + my $quit = defined($options{noquit}) ? 0 : 1; + + if (!open FILE, "<" . $options{privkey}) { + $options{logger}->writeLogError("[core] Cannot read file '$options{privkey}': $!"); + exit(1) if ($quit); + return 0; + } + while () { + $string_key .= $_; + } + close FILE; + + my $privkey; + try { + $privkey = Crypt::PK::RSA->new(\$string_key); + } catch { + $options{logger}->writeLogError("[core] Cannot load privkey '$options{privkey}': $_"); + exit(1) if ($quit); + return 0; + }; + if (!$privkey->is_private()) { + $options{logger}->writeLogError("[core] '$options{privkey}' is not a private key"); + exit(1) if ($quit); + return 0; + } + + return (1, $privkey); +} + +sub zmq_core_pubkey_response { + my (%options) = @_; + + if (defined($options{identity})) { + $options{socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT | ZMQ_SNDMORE); + } + my $client_pubkey = $options{pubkey}->export_key_pem('public'); + my $msg = '[PUBKEY] [' . MIME::Base64::encode_base64($client_pubkey, '') . 
']'; + + $options{socket}->send($msg, ZMQ_DONTWAIT); + return 0; +} + +sub zmq_get_routing_id { + my (%options) = @_; + + return $options{socket}->get_identity(); +} + +sub zmq_getfd { + my (%options) = @_; + + return $options{socket}->get_fd(); +} + +sub zmq_events { + my (%options) = @_; + + return $options{socket}->get(ZMQ_EVENTS, 'int'); +} + +sub generate_token { + my (%options) = @_; + + my $length = (defined($options{length})) ? $options{length} : 64; + my $token = Crypt::PRNG::random_bytes_hex($length); + return $token; +} + +sub generate_symkey { + my (%options) = @_; + + my $random_key = Crypt::PRNG::random_bytes($options{keysize}); + return (0, $random_key); +} + +sub client_helo_encrypt { + my (%options) = @_; + my $ciphertext; + + my $client_pubkey = $options{client_pubkey}->export_key_pem('public'); + try { + $ciphertext = $options{server_pubkey}->encrypt('HELO', 'v1.5'); + } catch { + return (-1, "Encoding issue: $_"); + }; + + return (0, '[' . $options{identity} . '] [' . MIME::Base64::encode_base64($client_pubkey, '') . '] [' . MIME::Base64::encode_base64($ciphertext, '') . ']'); +} + +sub is_client_can_connect { + my (%options) = @_; + my $plaintext; + + if ($options{message} !~ /\[(.+)\]\s+\[(.+)\]\s+\[(.+)\]$/ms) { + $options{logger}->writeLogError("[core] Decoding issue. 
Protocol not good: $options{message}"); + return -1; + } + + my ($client, $client_pubkey_str, $cipher_text) = ($1, $2, $3); + try { + $plaintext = $options{privkey}->decrypt(MIME::Base64::decode_base64($cipher_text), 'v1.5'); + } catch { + $options{logger}->writeLogError("[core] Decoding issue: $_"); + return -1; + }; + if ($plaintext ne 'HELO') { + $options{logger}->writeLogError("[core] Encrypted issue for HELO"); + return -1; + } + + my ($client_pubkey); + $client_pubkey_str = MIME::Base64::decode_base64($client_pubkey_str); + try { + $client_pubkey = Crypt::PK::RSA->new(\$client_pubkey_str); + } catch { + $options{logger}->writeLogError("[core] Cannot load client pubkey '$client_pubkey': $_"); + return -1; + }; + + my $is_authorized = 0; + my $thumbprint = $client_pubkey->export_key_jwk_thumbprint('SHA256'); + if (defined($options{authorized_clients})) { + foreach (@{$options{authorized_clients}}) { + if ($_->{key} eq $thumbprint) { + $is_authorized = 1; + last; + } + } + } + + if ($is_authorized == 0) { + $options{logger}->writeLogError("[core] Client pubkey is not authorized. 
Thumbprint is '$thumbprint'"); + return -1; + } + + $options{logger}->writeLogInfo("[core] Connection from $client"); + return (0, $client_pubkey); +} + +####################### +# internal functions +####################### + +sub addlistener { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + foreach (@$data) { + $options{gorgone}->{listener}->add_listener( + identity => $options{identity}, + event => $_->{event}, + target => $_->{target}, + token => $_->{token}, + log_pace => $_->{log_pace}, + timeout => $_->{timeout} + ); + } + + return (GORGONE_ACTION_FINISH_OK, { action => 'addlistener', message => 'ok', data => $data }); +} + +sub getthumbprint { + my (%options) = @_; + + if ($options{gorgone}->{keys_loaded} == 0) { + return (GORGONE_ACTION_FINISH_KO, { action => 'getthumbprint', message => 'no public key loaded' }, 'GETTHUMBPRINT'); + } + my $thumbprint = $options{gorgone}->{server_pubkey}->export_key_jwk_thumbprint('SHA256'); + return (GORGONE_ACTION_FINISH_OK, { action => 'getthumbprint', message => 'ok', data => { thumbprint => $thumbprint } }, 'GETTHUMBPRINT'); +} + +sub information { + my (%options) = @_; + + my $data = { + counters => $options{gorgone}->{counters}, + modules => $options{gorgone}->{modules_id}, + api_endpoints => $options{gorgone}->{api_endpoints} + }; + return (GORGONE_ACTION_FINISH_OK, { action => 'information', message => 'ok', data => $data }, 'INFORMATION'); +} + +sub unloadmodule { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (defined($data->{content}->{package}) && defined($options{gorgone}->{modules_register}->{ $data->{content}->{package} })) { + $options{gorgone}->{modules_register}->{ $data->{content}->{package} }->{gently}->(logger => 
$options{gorgone}->{logger}); + return (GORGONE_ACTION_BEGIN, { action => 'unloadmodule', message => "module '$data->{content}->{package}' unload in progress" }, 'UNLOADMODULE'); + } + if (defined($data->{content}->{name}) && + defined($options{gorgone}->{modules_id}->{$data->{content}->{name}}) && + defined($options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{$data->{content}->{name}} })) { + $options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{$data->{content}->{name}} }->{gently}->(logger => $options{gorgone}->{logger}); + return (GORGONE_ACTION_BEGIN, { action => 'unloadmodule', message => "module '$data->{content}->{name}' unload in progress" }, 'UNLOADMODULE'); + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'unloadmodule', message => 'cannot find unload module' }, 'UNLOADMODULE'); +} + +sub loadmodule { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if ($options{gorgone}->load_module(config_module => $data->{content})) { + $options{gorgone}->{modules_register}->{ $data->{content}->{package} }->{init}->( + id => $options{gorgone}->{id}, + logger => $options{gorgone}->{logger}, + poll => $options{gorgone}->{poll}, + external_socket => $options{gorgone}->{external_socket}, + internal_socket => $options{gorgone}->{internal_socket}, + dbh => $options{gorgone}->{db_gorgone}, + api_endpoints => $options{gorgone}->{api_endpoints} + ); + return (GORGONE_ACTION_BEGIN, { action => 'loadmodule', message => "module '$data->{content}->{name}' is loaded" }, 'LOADMODULE'); + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'loadmodule', message => "cannot load module '$data->{content}->{name}'" }, 'LOADMODULE'); +} + +sub synclogs { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not 
well formatted' }); + } + + if (!defined($data->{data}->{id})) { + return (GORGONE_ACTION_FINISH_KO, { action => 'synclog', message => 'please set id for synclog' }); + } + + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('synclog'))) { + $method->( + gorgone => $options{gorgone}, + dbh => $options{gorgone}->{db_gorgone}, + logger => $options{gorgone}->{logger} + ); + return (GORGONE_ACTION_BEGIN, { action => 'synclog', message => 'synclog launched' }); + } + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'synclog', message => 'no proxy module' }); +} + +sub constatus { + my (%options) = @_; + + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('get_constatus_result'))) { + return (GORGONE_ACTION_FINISH_OK, { action => 'constatus', message => 'ok', data => $method->() }, 'CONSTATUS'); + } + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'constatus', message => 'cannot get value' }, 'CONSTATUS'); +} + +sub setmodulekey { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (!defined($data->{key})) { + return (GORGONE_ACTION_FINISH_KO, { action => 'setmodulekey', message => 'please set key' }); + } + + my $id = pack('H*', $options{identity}); + $options{gorgone}->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys}->{$id} = { + key => 
pack('H*', $data->{key}), + ctime => time() + }; + + $options{logger}->writeLogInfo('[core] module key ' . $id . ' changed'); + return (GORGONE_ACTION_FINISH_OK, { action => 'setmodulekey', message => 'setmodulekey changed' }); +} + +sub setcoreid { + my (%options) = @_; + + if (defined($options{gorgone}->{config}->{configuration}->{gorgone}->{gorgonecore}->{id}) && + $options{gorgone}->{config}->{configuration}->{gorgone}->{gorgonecore}->{id} =~ /\d+/) { + return (GORGONE_ACTION_FINISH_OK, { action => 'setcoreid', message => 'setcoreid unchanged, use config value' }) + } + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (!defined($data->{id})) { + return (GORGONE_ACTION_FINISH_KO, { action => 'setcoreid', message => 'please set id for setcoreid' }); + } + + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('setcoreid'))) { + $method->(dbh => $options{dbh}, core_id => $data->{id}, logger => $options{logger}); + } + } + + $options{logger}->writeLogInfo('[core] Setcoreid changed ' . 
$data->{id}); + $options{gorgone}->{id} = $data->{id}; + return (GORGONE_ACTION_FINISH_OK, { action => 'setcoreid', message => 'setcoreid changed' }); +} + +sub ping { + my (%options) = @_; + + my $constatus = {}; + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('get_constatus_result'))) { + $constatus = $method->(); + } + if (defined($name) && ($method = $name->can('add_parent_ping'))) { + $method->(router_type => $options{router_type}, identity => $options{identity}, logger => $options{logger}); + } + } + + return (GORGONE_ACTION_BEGIN, { action => 'ping', message => 'ping ok', id => $options{id}, hostname => $options{gorgone}->{hostname}, data => $constatus }, 'PONG'); +} + +sub putlog { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + my $status = add_history({ + dbh => $options{gorgone}->{db_gorgone}, + etime => $data->{etime}, + token => $data->{token}, + instant => $data->{instant}, + data => json_encode(data => $data->{data}, logger => $options{logger}), + code => $data->{code} + }); + if ($status == -1) { + return (GORGONE_ACTION_FINISH_KO, { message => 'database issue' }); + } + return (GORGONE_ACTION_BEGIN, { message => 'message inserted' }); +} + +sub getlog { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + my %filters = (); + my ($filter, $filter_append) = ('', ''); + my @bind_values = (); + foreach ((['id', '>'], ['token', '='], ['ctime', '>'], ['etime', '>'], ['code', '='])) { + if 
(defined($data->{$_->[0]}) && $data->{$_->[0]} ne '') { + $filter .= $filter_append . $_->[0] . ' ' . $_->[1] . ' ?'; + $filter_append = ' AND '; + push @bind_values, $data->{ $_->[0] }; + } + } + + if ($filter eq '') { + return (GORGONE_ACTION_FINISH_KO, { message => 'need at least one filter' }); + } + + my $query = "SELECT * FROM gorgone_history WHERE " . $filter; + $query .= " ORDER BY id DESC LIMIT " . $data->{limit} if (defined($data->{limit}) && $data->{limit} ne ''); + + my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => $query, bind_values => \@bind_values }); + if ($status == -1) { + return (GORGONE_ACTION_FINISH_KO, { message => 'database issue' }); + } + + my @result; + my $results = $sth->fetchall_hashref('id'); + foreach (sort keys %{$results}) { + push @result, $results->{$_}; + } + + return (GORGONE_ACTION_BEGIN, { action => 'getlog', result => \@result, id => $options{gorgone}->{id} }); +} + +sub kill { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (defined($data->{content}->{package}) && defined($options{gorgone}->{modules_register}->{ $data->{content}->{package} })) { + $options{gorgone}->{modules_register}->{ $data->{content}->{package} }->{kill}->(logger => $options{gorgone}->{logger}); + return (GORGONE_ACTION_FINISH_OK, { action => 'kill', message => "module '$data->{content}->{package}' kill in progress" }); + } + if (defined($data->{content}->{name}) && + defined($options{gorgone}->{modules_id}->{ $data->{content}->{name} }) && + defined($options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{ $data->{content}->{name} } })) { + $options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{ $data->{content}->{name} } }->{kill}->(logger => $options{gorgone}->{logger}); + return (GORGONE_ACTION_FINISH_OK, { action => 'kill', message => "module 
'$data->{content}->{name}' kill in progress" }); + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'kill', message => 'cannot find module' }); +} + +####################### +# Database functions +####################### + +sub update_identity_attrs { + my (%options) = @_; + + my @fields = (); + my @bind_values = (); + foreach ('key', 'oldkey', 'iv', 'oldiv', 'ctime') { + next if (!defined($options{$_})); + + if ($options{$_} eq 'NULL') { + push @fields, "`$_` = NULL"; + } else { + push @fields, "`$_` = ?"; + push @bind_values, $options{$_}; + } + } + push @bind_values, $options{identity}, $options{identity}; + + my ($status, $sth) = $options{dbh}->query({ + query => "UPDATE gorgone_identity SET " . join(', ', @fields) . + " WHERE `identity` = ? AND " . + " `id` = (SELECT `id` FROM gorgone_identity WHERE `identity` = ? ORDER BY `id` DESC LIMIT 1)", + bind_values => \@bind_values + }); + + return $status; +} + +sub update_identity_mtime { + my (%options) = @_; + + my ($status, $sth) = $options{dbh}->query({ + query => "UPDATE gorgone_identity SET `mtime` = ?" . + " WHERE `identity` = ? AND " . + " `id` = (SELECT `id` FROM gorgone_identity WHERE `identity` = ? 
ORDER BY `id` DESC LIMIT 1)", + bind_values => [time(), $options{identity}, $options{identity}] + }); + return $status; +} + +sub add_identity { + my (%options) = @_; + + my $time = time(); + my ($status, $sth) = $options{dbh}->query({ + query => "INSERT INTO gorgone_identity (`ctime`, `mtime`, `identity`, `key`, `iv`) VALUES (?, ?, ?, ?, ?)", + bind_values => [$time, $time, $options{identity}, unpack('H*', $options{key}), unpack('H*', $options{iv})] + }); + return $status; +} + +sub add_history { + my ($options) = (shift); + + if (defined($options->{data}) && defined($options->{json_encode})) { + return -1 if (!($options->{data} = json_encode(data => $options->{data}, logger => $options->{logger}))); + } + if (!defined($options->{ctime})) { + $options->{ctime} = Time::HiRes::time(); + } + if (!defined($options->{etime})) { + $options->{etime} = time(); + } + + my $fields = ''; + my $placeholder = ''; + my $append = ''; + my @bind_values = (); + foreach (('data', 'token', 'ctime', 'etime', 'code', 'instant')) { + if (defined($options->{$_})) { + $fields .= $append . $_; + $placeholder .= $append . 
'?'; + $append = ', '; + push @bind_values, $options->{$_}; + } + } + my ($status, $sth) = $options->{dbh}->query({ + query => "INSERT INTO gorgone_history ($fields) VALUES ($placeholder)", + bind_values => \@bind_values + }); + + if (defined($options->{token}) && $options->{token} ne '') { + $listener->event_log( + { + token => $options->{token}, + code => $options->{code}, + data => \$options->{data} + } + ); + } + + return $status; +} + +####################### +# Misc functions +####################### + +sub json_encode { + my (%options) = @_; + + try { + $options{data} = JSON::XS->new->encode($options{data}); + } catch { + if (defined($options{logger})) { + $options{logger}->writeLogError("[core] Cannot encode json data: $_"); + } + return undef; + }; + + return $options{data}; +} + +sub json_decode { + my (%options) = @_; + + try { + $options{data} = JSON::XS->new->decode($options{data}); + } catch { + if (defined($options{logger})) { + $options{logger}->writeLogError("[$options{module}] Cannot decode json data: $_"); + } + return undef; + }; + + return $options{data}; +} + +####################### +# Global ZMQ functions +####################### + +sub connect_com { + my (%options) = @_; + + my $socket = $options{context}->socket($zmq_type{$options{zmq_type}}); + if (!defined($socket)) { + $options{logger}->writeLogError("Can't setup server: $!"); + exit(1); + } + $socket->die_on_error(0); + + $socket->set_identity($options{name}); + $socket->set(ZMQ_LINGER, 'int', defined($options{zmq_linger}) ? $options{zmq_linger} : 0); # 0 we discard + $socket->set(ZMQ_SNDHWM, 'int', defined($options{zmq_sndhwm}) ? $options{zmq_sndhwm} : 0); + $socket->set(ZMQ_RCVHWM, 'int', defined($options{zmq_rcvhwm}) ? $options{zmq_rcvhwm} : 0); + $socket->set(ZMQ_RECONNECT_IVL, 'int', 1000); + $socket->set(ZMQ_CONNECT_TIMEOUT, 'int', defined($options{zmq_connect_timeout}) ? 
$options{zmq_connect_timeout} : 30000); + if ($options{zmq_type} eq 'ZMQ_ROUTER') { + $socket->set(ZMQ_ROUTER_HANDOVER, 'int', defined($options{zmq_router_handover}) ? $options{zmq_router_handover} : 1); + } + if ($options{type} eq 'tcp') { + $socket->set(ZMQ_TCP_KEEPALIVE, 'int', defined($options{zmq_tcp_keepalive}) ? $options{zmq_tcp_keepalive} : -1); + } + $options{logger}->writeLogInfo("connection to zmq socket : " . $options{type} . '://' . $options{path}); + $socket->connect($options{type} . '://' . $options{path}); + return $socket; +} + +sub create_com { + my (%options) = @_; + + my $socket = $options{context}->socket($zmq_type{$options{zmq_type}}); + if (!defined($socket)) { + $options{logger}->writeLogError("Can't setup server: $!"); + exit(1); + } + $socket->die_on_error(0); + + $socket->set_identity($options{name}); + $socket->set_linger(0); + $socket->set(ZMQ_ROUTER_HANDOVER, 'int', defined($options{zmq_router_handover}) ? $options{zmq_router_handover} : 1); + + if ($options{type} eq 'tcp') { + $socket->set(ZMQ_IPV6, 'int', defined($options{zmq_ipv6}) && $options{zmq_ipv6} =~ /true|1/i ? 1 : 0); + $socket->set(ZMQ_TCP_KEEPALIVE, 'int', defined($options{zmq_tcp_keepalive}) ? $options{zmq_tcp_keepalive} : -1); + + $socket->bind('tcp://' . $options{path}); + } elsif ($options{type} eq 'ipc') { + $socket->bind('ipc://' . $options{path}); + if ($socket->has_error) { + $options{logger}->writeLogDebug("[core] Cannot bind IPC '$options{path}': $!"); + # try create dir + $options{logger}->writeLogDebug("[core] Maybe directory not exist. We try to create it"); + if (!mkdir(dirname($options{path}))) { + $options{logger}->writeLogError("[core] Cannot create IPC file directory '$options{path}'"); + exit(1); + } + $socket->bind('ipc://' . $options{path}); + if ($socket->has_error) { + $options{logger}->writeLogError("[core] Cannot bind IPC '$options{path}': " . 
$socket->last_strerror); + exit(1); + } + } + } else { + $options{logger}->writeLogError("[core] ZMQ type '$options{type}' not managed"); + exit(1); + } + + return $socket; +} + +sub build_protocol { + my (%options) = @_; + my $data = $options{data}; + my $token = defined($options{token}) ? $options{token} : ''; + my $action = defined($options{action}) ? $options{action} : ''; + my $target = defined($options{target}) ? $options{target} : ''; + + if (defined($options{raw_data_ref})) { + return '[' . $action . '] [' . $token . '] [' . $target . '] ' . ${$options{raw_data_ref}}; + } elsif (defined($data)) { + if (defined($options{json_encode})) { + $data = json_encode(data => $data, logger => $options{logger}); + } + } else { + $data = json_encode(data => {}, logger => $options{logger}); + } + + return '[' . $action . '] [' . $token . '] [' . $target . '] ' . $data; +} + +sub zmq_dealer_read_message { + my (%options) = @_; + + my $data = $options{socket}->recv(ZMQ_DONTWAIT); + if ($options{socket}->has_error) { + return 1; + } + + if (defined($options{frame})) { + $options{frame}->setFrame(\$data); + return 0; + } + + return (0, $data); +} + +sub zmq_read_message { + my (%options) = @_; + + # Process all parts of the message + my $identity = $options{socket}->recv(ZMQ_DONTWAIT); + if ($options{socket}->has_error()) { + return undef if ($options{socket}->last_errno == Errno::EAGAIN); + + $options{logger}->writeLogError("[core] zmq_recvmsg error: $!"); + return undef; + } + + $identity = defined($identity) ? 
$identity : 'undef'; + if ($identity !~ /^gorgone-/) { + $options{logger}->writeLogError("[core] unknown identity: $identity"); + return undef; + } + + my $data = $options{socket}->recv(ZMQ_DONTWAIT); + if ($options{socket}->has_error()) { + return undef if ($options{socket}->last_errno == Errno::EAGAIN); + + $options{logger}->writeLogError("[core] zmq_recvmsg error: $!"); + return undef; + } + + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$data); + + return (unpack('H*', $identity), $frame); +} + +sub create_schema { + my (%options) = @_; + + $options{logger}->writeLogInfo("[core] create schema $options{version}"); + my $schema = [ + q{ + PRAGMA encoding = "UTF-8" + }, + q{ + CREATE TABLE `gorgone_information` ( + `key` varchar(1024) DEFAULT NULL, + `value` varchar(1024) DEFAULT NULL + ); + }, + qq{ + INSERT INTO gorgone_information (`key`, `value`) VALUES ('version', '$options{version}'); + }, + q{ + CREATE TABLE `gorgone_identity` ( + `id` INTEGER PRIMARY KEY, + `ctime` int(11) DEFAULT NULL, + `mtime` int(11) DEFAULT NULL, + `identity` varchar(2048) DEFAULT NULL, + `key` varchar(1024) DEFAULT NULL, + `oldkey` varchar(1024) DEFAULT NULL, + `iv` varchar(1024) DEFAULT NULL, + `oldiv` varchar(1024) DEFAULT NULL, + `parent` int(11) DEFAULT '0' + ); + }, + q{ + CREATE INDEX idx_gorgone_identity ON gorgone_identity (identity); + }, + q{ + CREATE INDEX idx_gorgone_parent ON gorgone_identity (parent); + }, + q{ + CREATE TABLE `gorgone_history` ( + `id` INTEGER PRIMARY KEY, + `token` varchar(2048) DEFAULT NULL, + `code` int(11) DEFAULT NULL, + `etime` int(11) DEFAULT NULL, + `ctime` FLOAT DEFAULT NULL, + `instant` int(11) DEFAULT '0', + `data` TEXT DEFAULT NULL + ); + }, + q{ + CREATE INDEX idx_gorgone_history_id ON gorgone_history (id); + }, + q{ + CREATE INDEX idx_gorgone_history_token ON gorgone_history (token); + }, + q{ + CREATE INDEX idx_gorgone_history_etime ON gorgone_history (etime); + }, + q{ + CREATE INDEX idx_gorgone_history_code ON 
gorgone_history (code); + }, + q{ + CREATE INDEX idx_gorgone_history_ctime ON gorgone_history (ctime); + }, + q{ + CREATE INDEX idx_gorgone_history_instant ON gorgone_history (instant); + }, + q{ + CREATE TABLE `gorgone_synchistory` ( + `id` int(11) NOT NULL, + `ctime` FLOAT DEFAULT NULL, + `last_id` int(11) DEFAULT NULL + ); + }, + q{ + CREATE UNIQUE INDEX idx_gorgone_synchistory_id ON gorgone_synchistory (id); + }, + q{ + CREATE TABLE `gorgone_target_fingerprint` ( + `id` INTEGER PRIMARY KEY, + `target` varchar(2048) DEFAULT NULL, + `fingerprint` varchar(4096) DEFAULT NULL + ); + }, + q{ + CREATE INDEX idx_gorgone_target_fingerprint_target ON gorgone_target_fingerprint (target); + }, + q{ + CREATE TABLE `gorgone_centreon_judge_spare` ( + `cluster_name` varchar(2048) NOT NULL, + `status` int(11) NOT NULL, + `data` TEXT DEFAULT NULL + ); + }, + q{ + CREATE INDEX idx_gorgone_centreon_judge_spare_cluster_name ON gorgone_centreon_judge_spare (cluster_name); + } + ]; + foreach (@$schema) { + my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => $_ }); + if ($status == -1) { + $options{logger}->writeLogError("[core] create schema issue"); + exit(1); + } + } +} + +sub init_database { + my (%options) = @_; + + if ($options{type} =~ /sqlite/i && $options{db} =~ /dbname=(.*)/i) { + my $sdb_path = File::Basename::dirname($1); + File::Path::make_path($sdb_path); + } + $options{gorgone}->{db_gorgone} = gorgone::class::db->new( + type => $options{type}, + db => $options{db}, + host => $options{host}, + port => $options{port}, + user => $options{user}, + password => $options{password}, + force => 2, + logger => $options{logger} + ); + $options{gorgone}->{db_gorgone}->set_inactive_destroy(); + if ($options{gorgone}->{db_gorgone}->connect() == -1) { + $options{logger}->writeLogError("[core] Cannot connect. 
We quit!!"); + exit(1); + } + + return if (!defined($options{autocreate_schema}) || $options{autocreate_schema} != 1); + + my $db_version = '1.0'; + my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => q{SELECT `value` FROM gorgone_information WHERE `key` = 'version'} }); + if ($status == -1) { + ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => q{SELECT 1 FROM gorgone_identity LIMIT 1} }); + if ($status == -1) { + create_schema(gorgone => $options{gorgone}, logger => $options{logger}, version => $options{version}); + return ; + } + } else { + my $row = $sth->fetchrow_arrayref(); + $db_version = $row->[0] if (defined($row)); + } + + $options{logger}->writeLogInfo("[core] update schema $db_version -> $options{version}"); + + if ($db_version eq '1.0') { + my $schema = [ + q{ + PRAGMA encoding = "UTF-8" + }, + q{ + CREATE TABLE `gorgone_information` ( + `key` varchar(1024) DEFAULT NULL, + `value` varchar(1024) DEFAULT NULL + ); + }, + qq{ + INSERT INTO gorgone_information (`key`, `value`) VALUES ('version', '$options{version}'); + }, + q{ + ALTER TABLE `gorgone_identity` ADD COLUMN `mtime` int(11) DEFAULT NULL DEFAULT NULL; + }, + q{ + ALTER TABLE `gorgone_identity` ADD COLUMN `oldkey` varchar(1024) DEFAULT NULL; + }, + q{ + ALTER TABLE `gorgone_identity` ADD COLUMN `oldiv` varchar(1024) DEFAULT NULL; + }, + q{ + ALTER TABLE `gorgone_identity` ADD COLUMN `iv` varchar(1024) DEFAULT NULL; + } + ]; + foreach (@$schema) { + my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => $_ }); + if ($status == -1) { + $options{logger}->writeLogError("[core] update schema issue"); + exit(1); + } + } + $db_version = '22.04.0'; + } + + if ($db_version ne $options{version}) { + $options{gorgone}->{db_gorgone}->query({ query => "UPDATE gorgone_information SET `value` = '$options{version}' WHERE `key` = 'version'" }); + } +} + +1; diff --git a/gorgone/gorgone/standard/misc.pm b/gorgone/gorgone/standard/misc.pm new file mode 100644 
index 00000000000..cbc5342b9d7 --- /dev/null +++ b/gorgone/gorgone/standard/misc.pm @@ -0,0 +1,325 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::standard::misc; + +use strict; +use warnings; +use vars qw($centreon_config); +use POSIX ":sys_wait_h"; +use File::Path; +use File::Basename; +use Try::Tiny; + +sub reload_db_config { + my ($logger, $config_file, $cdb, $csdb) = @_; + my ($cdb_mod, $csdb_mod) = (0, 0); + + unless (my $return = do $config_file) { + $logger->writeLogError("[core] Couldn't parse $config_file: $@") if $@; + $logger->writeLogError("[core] Couldn't do $config_file: $!") unless defined $return; + $logger->writeLogError("[core] Couldn't run $config_file") unless $return; + return -1; + } + + if (defined($cdb)) { + if ($centreon_config->{centreon_db} ne $cdb->db() || + $centreon_config->{db_host} ne $cdb->host() || + $centreon_config->{db_user} ne $cdb->user() || + $centreon_config->{db_passwd} ne $cdb->password() || + $centreon_config->{db_port} ne $cdb->port()) { + $logger->writeLogInfo("[core] Database centreon config has been modified"); + $cdb->db($centreon_config->{centreon_db}); + $cdb->host($centreon_config->{db_host}); + $cdb->user($centreon_config->{db_user}); + 
$cdb->password($centreon_config->{db_passwd}); + $cdb->port($centreon_config->{db_port}); + $cdb_mod = 1; + } + } + + if (defined($csdb)) { + if ($centreon_config->{centstorage_db} ne $csdb->db() || + $centreon_config->{db_host} ne $csdb->host() || + $centreon_config->{db_user} ne $csdb->user() || + $centreon_config->{db_passwd} ne $csdb->password() || + $centreon_config->{db_port} ne $csdb->port()) { + $logger->writeLogInfo("[core] Database centstorage config has been modified"); + $csdb->db($centreon_config->{centstorage_db}); + $csdb->host($centreon_config->{db_host}); + $csdb->user($centreon_config->{db_user}); + $csdb->password($centreon_config->{db_passwd}); + $csdb->port($centreon_config->{db_port}); + $csdb_mod = 1; + } + } + + return (0, $cdb_mod, $csdb_mod); +} + +sub get_all_options_config { + my ($extra_config, $centreon_db_centreon, $prefix) = @_; + + my $save_force = $centreon_db_centreon->force(); + $centreon_db_centreon->force(0); + + my ($status, $stmt) = $centreon_db_centreon->query({ + query => 'SELECT `key`, `value` FROM options WHERE `key` LIKE ? LIMIT 1', + bind_values => [$prefix . '_%'] + }); + if ($status == -1) { + $centreon_db_centreon->force($save_force); + return ; + } + while ((my $data = $stmt->fetchrow_hashref())) { + if (defined($data->{value}) && length($data->{value}) > 0) { + $data->{key} =~ s/^${prefix}_//; + $extra_config->{$data->{key}} = $data->{value}; + } + } + + $centreon_db_centreon->force($save_force); +} + +sub get_option_config { + my ($extra_config, $centreon_db_centreon, $prefix, $key) = @_; + my $data; + + my $save_force = $centreon_db_centreon->force(); + $centreon_db_centreon->force(0); + + my ($status, $stmt) = $centreon_db_centreon->query({ + query => 'SELECT value FROM options WHERE `key` = ? LIMIT 1', + bind_values => [$prefix . '_' . 
$key] + }); + if ($status == -1) { + $centreon_db_centreon->force($save_force); + return ; + } + if (($data = $stmt->fetchrow_hashref()) && defined($data->{value})) { + $extra_config->{$key} = $data->{value}; + } + + $centreon_db_centreon->force($save_force); +} + +sub check_debug { + my ($logger, $key, $cdb, $name) = @_; + + my ($status, $sth) = $cdb->query({ + query => 'SELECT `value` FROM options WHERE `key` = ?', + bind_values => [$key] + }); + return -1 if ($status == -1); + my $data = $sth->fetchrow_hashref(); + if (defined($data->{'value'}) && $data->{'value'} == 1) { + if (!$logger->is_debug()) { + $logger->severity("debug"); + $logger->writeLogInfo("[core] Enable Debug in $name"); + } + } else { + if ($logger->is_debug()) { + $logger->set_default_severity(); + $logger->writeLogInfo("[core] Disable Debug in $name"); + } + } + return 0; +} + +sub backtick { + my %arg = ( + command => undef, + arguments => [], + timeout => 30, + wait_exit => 0, + redirect_stderr => 0, + @_, + ); + my @output; + my $pid; + my $return_code; + + my $sig_do; + if ($arg{wait_exit} == 0) { + $sig_do = 'IGNORE'; + $return_code = undef; + } else { + $sig_do = 'DEFAULT'; + } + local $SIG{CHLD} = $sig_do; + $SIG{TTOU} = 'IGNORE'; + $| = 1; + + if (!defined($pid = open( KID, "-|" ))) { + $arg{logger}->writeLogError("[core] Cant fork: $!"); + return (-1000, "cant fork: $!"); + } + + if ($pid) { + try { + local $SIG{ALRM} = sub { die "Timeout by signal ALARM\n"; }; + alarm( $arg{timeout} ); + while () { + chomp; + push @output, $_; + } + + alarm(0); + } catch { + if ($pid != -1) { + kill -9, $pid; + } + + alarm(0); + return (-1000, "Command too long to execute (timeout)...", -1); + }; + if ($arg{wait_exit} == 1) { + # We're waiting the exit code + waitpid($pid, 0); + $return_code = ($? 
>> 8); + close KID; + } else { + # child + # set the child process to be a group leader, so that + # kill -9 will kill it and all its descendents + # We have ignore SIGTTOU to let write background processes + setpgrp(0, 0); + + if ($arg{redirect_stderr} == 1) { + open STDERR, ">&STDOUT"; + } + if (scalar(@{$arg{arguments}}) <= 0) { + exec($arg{command}); + } else { + exec($arg{command}, @{$arg{arguments}}); + } + # Exec is in error. No such command maybe. + exit(127); + } + + return (0, join("\n", @output), $return_code); +} + +sub mymodule_load { + my (%options) = @_; + my $file; + ($file = ($options{module} =~ /\.pm$/ ? $options{module} : $options{module} . '.pm')) =~ s{::}{/}g; + + eval { + local $SIG{__DIE__} = 'IGNORE'; + require $file; + $file =~ s{/}{::}g; + $file =~ s/\.pm$//; + }; + if ($@) { + $options{logger}->writeLogError('[core] ' . $options{error_msg} . ' - ' . $@); + return 1; + } + return wantarray ? (0, $file) : 0; +} + +sub write_file { + my (%options) = @_; + + File::Path::make_path(File::Basename::dirname($options{file})); + my $fh; + if (!open($fh, '>', $options{file})) { + $options{logger}->writeLogError("[core] Cannot open file '$options{file}': $!"); + return 0; + } + print $fh $options{content}; + close $fh; + return 1; +} + +sub trim { + my ($value) = $_[0]; + + # Sometimes there is a null character + $value =~ s/\x00$//; + $value =~ s/^[ \t\n]+//; + $value =~ s/[ \t\n]+$//; + return $value; +} + +sub slurp { + my (%options) = @_; + + my ($fh, $size); + if (!open($fh, '<', $options{file})) { + return (0, "Could not open $options{file}: $!"); + } + my $buffer = do { local $/; <$fh> }; + close $fh; + return (1, 'ok', $buffer); +} + +sub scale { + my (%options) = @_; + + my ($src_quantity, $src_unit) = (undef, 'B'); + if (defined($options{src_unit}) && $options{src_unit} =~ /([kmgtpe])?(b)/i) { + $src_quantity = $1; + $src_unit = $2; + } + my ($dst_quantity, $dst_unit) = ('auto', $src_unit); + if (defined($options{dst_unit}) 
&& $options{dst_unit} =~ /([kmgtpe])?(b)/i) { + $dst_quantity = $1; + $dst_unit = $2; + } + + my $base = 1024; + $options{value} *= 8 if ($dst_unit eq 'b' && $src_unit eq 'B'); + $options{value} /= 8 if ($dst_unit eq 'B' && $src_unit eq 'b'); + $base = 1000 if ($dst_unit eq 'b'); + + my %expo = (k => 1, m => 2, g => 3, t => 4, p => 5, e => 6); + my $src_expo = 0; + $src_expo = $expo{ lc($src_quantity) } if (defined($src_quantity)); + + if (defined($dst_quantity) && $dst_quantity eq 'auto') { + my @auto = ('', 'k', 'm', 'g', 't', 'p', 'e'); + for (; $src_expo < scalar(@auto); $src_expo++) { + last if ($options{value} < $base); + $options{value} = $options{value} / $base; + } + + if (defined($options{format}) && $options{format} ne '') { + $options{value} = sprintf($options{format}, $options{value}); + } + return ($options{value}, uc($auto[$src_expo]) . $dst_unit); + } + + my $dst_expo = 0; + $dst_expo = $expo{ lc($dst_quantity) } if (defined($dst_quantity)); + if ($dst_expo - $src_expo > 0) { + $options{value} = $options{value} / ($base ** ($dst_expo - $src_expo)); + } elsif ($dst_expo - $src_expo < 0) { + $options{value} = $options{value} * ($base ** (($dst_expo - $src_expo) * -1)); + } + + if (defined($options{format}) && $options{format} ne '') { + $options{value} = sprintf($options{format}, $options{value}); + } + return ($options{value}, $options{dst_unit}); +} + +1; diff --git a/gorgone/gorgoned b/gorgone/gorgoned new file mode 100644 index 00000000000..fdb423af470 --- /dev/null +++ b/gorgone/gorgoned @@ -0,0 +1,63 @@ +#!/usr/bin/perl +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +use FindBin; +use lib "$FindBin::Bin"; +use gorgone::class::core; + +gorgone::class::core->new()->run(); + +__END__ + +=head1 NAME + +gorgoned - a daemon to handle so many things. + +=head1 SYNOPSIS + +gorgoned [options] + +=head1 OPTIONS + +=over 8 + +=item B<--config> + +Specify the path to the yaml configuration file (default: ''). + +=item B<--help> + +Print a brief help message and exits. + +=item B<--version> + +Print version message and exits. + +=back + +=head1 DESCRIPTION + +B<gorgoned> will survive + +=cut diff --git a/gorgone/inputvars.env b/gorgone/inputvars.env new file mode 100644 index 00000000000..b679f114365 --- /dev/null +++ b/gorgone/inputvars.env @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Centreon installation variables user specific values. +# Uncomment variables to define values. Values are defaults. 
+ +# INSTALLATION_TYPE="central" +# GORGONE_USER="centreon-gorgone" +# GORGONE_GROUP="centreon-gorgone" +# GORGONE_ETC_DIR="/etc/centreon-gorgone" +# GORGONE_LOG_DIR="/var/log/centreon-gorgone" +# GORGONE_VARLIB_DIR="/var/lib/centreon-gorgone" +# GORGONE_CACHE_DIR="/var/cache/centreon-gorgone" +# CENTREON_USER="centreon" +# CENTREON_HOME="/var/spool/centreon" +# CENTREON_ETC_DIR="/etc/centreon" +# CENTREON_SERVICE="centreon" +# ENGINE_USER="centreon-engine" +# ENGINE_GROUP="centreon-engine" +# BROKER_USER="centreon-broker" +# BROKER_GROUP="centreon-broker" +# BINARY_DIR="/usr/bin" +# PERL_BINARY="/usr/bin/perl" +# SYSTEMD_ETC_DIR="/etc/systemd/system" +# SYSCONFIG_ETC_DIR="/etc/sysconfig" +# LOGROTATED_ETC_DIR="/etc/logrotate.d" +# TMP_DIR="/tmp/centreon-setup" +# LOG_FILE="$BASE_DIR/log/install.log" \ No newline at end of file diff --git a/gorgone/install.sh b/gorgone/install.sh new file mode 100755 index 00000000000..149f46d5773 --- /dev/null +++ b/gorgone/install.sh @@ -0,0 +1,495 @@ +#!/bin/bash +#---- +## @Synopsis Install Script for Gorgone project +## @Copyright Copyright 2008, Guillaume Watteeux +## @Copyright Copyright 2008-2021, Centreon +## @License GPL : http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt +## Centreon Install Script +#---- +## Centreon is developed with GPL Licence 2.0 +## +## GPL License: http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt +## +## Developed by : Julien Mathis - Romain Le Merlus +## Contributors : Guillaume Watteeux - Maximilien Bersoult +## +## This program is free software; you can redistribute it and/or +## modify it under the terms of the GNU General Public License +## as published by the Free Software Foundation; either version 2 +## of the License, or (at your option) any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +## GNU General Public License for more details. +## +## For information : infos@centreon.com +# + +#---- +## Usage information for install.sh +## @Sdtout Usage information +#---- +usage() { + local program=$0 + echo -e "Usage: $program" + echo -e " -i\tinstall Gorgone with interactive interface" + echo -e " -u\tupgrade Gorgone specifying the directory of instGorgone.conf file" + echo -e " -s\tinstall/upgrade Gorgone silently" + echo -e " -e\textra variables, 'VAR=value' format (overrides input files)" + exit 1 +} + +## Use TRAPs to call clean_and_exit when user press +## CRTL+C or exec kill -TERM. +trap clean_and_exit SIGINT SIGTERM + +## Valid if you are root +if [ "${FORCE_NO_ROOT:-0}" -ne 0 ]; then + USERID=$(id -u) + if [ "$USERID" != "0" ]; then + echo -e "You must launch this script using a root user" + exit 1 + fi +fi + +## Define where are Gorgone sources +BASE_DIR=$(dirname $0) +BASE_DIR=$( cd $BASE_DIR; pwd ) +if [ -z "${BASE_DIR#/}" ] ; then + echo -e "You cannot select the filesystem root folder" + exit 1 +fi +INSTALL_DIR="$BASE_DIR/install" + +_tmp_install_opts="0" +silent_install="0" +upgrade="0" + +## Get options +while getopts "isu:e:h" Options +do + case ${Options} in + i ) silent_install="0" + _tmp_install_opts="1" + ;; + s ) silent_install="1" + _tmp_install_opts="1" + ;; + u ) silent_install="0" + UPGRADE_FILE="${OPTARG%/}" + upgrade="1" + _tmp_install_opts="1" + ;; + e ) env_opts+=("$OPTARG") + ;; + \?|h) usage ; exit 0 ;; + * ) usage ; exit 1 ;; + esac +done +shift $((OPTIND -1)) + +if [ "$_tmp_install_opts" -eq 0 ] ; then + usage + exit 1 +fi + +INSTALLATION_MODE="install" +if [ ! -z "$upgrade" ] && [ "$upgrade" -eq 1 ]; then + INSTALLATION_MODE="upgrade" +fi + +## Load default input variables +source $INSTALL_DIR/inputvars.default.env +## Load all functions used in this script +source $INSTALL_DIR/functions + +## Define a default log file +if [ ! 
-z $LOG_FILE ] ; then + LOG_FILE="$BASE_DIR/log/install.log" +fi +LOG_DIR=$(dirname $LOG_FILE) +[ ! -d "$LOG_DIR" ] && mkdir -p "$LOG_DIR" + +## Init LOG_FILE +if [ -e "$LOG_FILE" ] ; then + mv "$LOG_FILE" "$LOG_FILE.`date +%Y%m%d-%H%M%S`" +fi +${CAT} << __EOL__ > "$LOG_FILE" +__EOL__ + +# Checking installation script requirements +BINARIES="rm cp mv chmod chown echo more mkdir find grep cat sed tr" +binary_fail="0" +# For the moment, I check if all binary exists in PATH. +# After, I must look a solution to use complet path by binary +for binary in $BINARIES; do + if [ ! -e ${binary} ] ; then + pathfind_ret "$binary" "PATH_BIN" + if [ "$?" -ne 0 ] ; then + echo_error "${binary}" "FAILED" + binary_fail=1 + fi + fi +done + +## Script stop if one binary is not found +if [ "$binary_fail" -eq 1 ] ; then + echo_info "Please check failed binary and retry" + exit 1 +else + echo_success "Script requirements" "OK" +fi + +## Search distribution and version +if [ -z "$DISTRIB" ] || [ -z "$DISTRIB_VERSION" ] ; then + find_os +fi +echo_info "Found distribution" "$DISTRIB $DISTRIB_VERSION" + +## Load specific variables based on distribution +if [ -f $INSTALL_DIR/inputvars.$DISTRIB.env ]; then + echo_info "Loading distribution specific input variables" "install/inputvars.$DISTRIB.env" + source $INSTALL_DIR/inputvars.$DISTRIB.env +fi + +## Load specific variables based on version +if [ -f $INSTALL_DIR/inputvars.$DISTRIB.$DISTRIB_VERSION.env ]; then + echo_info "Loading version specific input variables" "install/inputvars.$DISTRIB.$DISTRIB_VERSION.env" + source $INSTALL_DIR/inputvars.$DISTRIB.$DISTRIB_VERSION.env +fi + +## Load specific variables defined by user +if [ -f $INSTALL_DIR/../inputvars.env ]; then + echo_info "Loading user specific input variables" "inputvars.env" + source $INSTALL_DIR/../inputvars.env +fi + +## Load previous installation input variables if upgrade +if [ "$upgrade" -eq 1 ] ; then + test_file "$UPGRADE_FILE" "Gorgone upgrade file" + if [ "$?" 
-eq 0 ] ; then + echo_info "Loading previous installation input variables" "$UPGRADE_FILE" + source $UPGRADE_FILE + else + echo_error "Missing previous installation input variables" "FAILED" + echo_info "Either specify it in command line or using UPGRADE_FILE input variable" + exit 1 + fi +fi + +## Load variables provided in command line +for env_opt in "${env_opts[@]}"; do + if [[ "${env_opt}" =~ .+=.+ ]] ; then + variable=$(echo $env_opt | cut -f1 -d "=") + value=$(echo $env_opt | cut -f2 -d "=") + if [ ! -z "$variable" ] && [ ! -z "$value" ] ; then + echo_info "Loading command line input variables" "${variable}=${value}" + eval ${variable}=${value} + fi + fi +done + +## Check installation mode +if [ -z "$INSTALLATION_TYPE" ] ; then + echo_error "Installation mode" "NOT DEFINED" + exit 1 +fi +if [[ ! "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + echo_error "Installation mode" "$INSTALLATION_TYPE" + exit 1 +fi +echo_info "Installation type" "$INSTALLATION_TYPE" +echo_info "Installation mode" "$INSTALLATION_MODE" + +## Check space of tmp dir +check_tmp_disk_space +if [ "$?" -eq 1 ] ; then + if [ "$silent_install" -eq 1 ] ; then + purge_centreon_tmp_dir "silent" + else + purge_centreon_tmp_dir + fi +fi + +## Installation is interactive +if [ "$silent_install" -ne 1 ] ; then + echo -e "\n" + echo_info "Welcome to Centreon installation script!" + yes_no_default "Should we start?" "$yes" + if [ "$?" 
-ne 0 ] ; then + echo_info "Exiting" + exit 1 + fi +fi + +# Start installation + +ERROR_MESSAGE="" + +# Centreon installation requirements +echo_title "Centreon installation requirements" + +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + # System + test_dir_from_var "LOGROTATED_ETC_DIR" "Logrotate directory" + test_dir_from_var "SYSTEMD_ETC_DIR" "SystemD directory" + test_dir_from_var "SYSCONFIG_ETC_DIR" "Sysconfig directory" + test_dir_from_var "BINARY_DIR" "System binary directory" + + ## Perl information + find_perl_info + test_file_from_var "PERL_BINARY" "Perl binary" + test_dir_from_var "PERL_LIB_DIR" "Perl libraries directory" +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "Installation requirements" "FAILED" + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + exit 1 +fi + +echo_success "Installation requirements" "OK" + +## Gorgone information +echo_title "Gorgone information" + +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + test_var_and_show "GORGONE_USER" "Gorgone user" + test_var_and_show "GORGONE_GROUP" "Gorgone group" + test_var_and_show "GORGONE_ETC_DIR" "Gorgone configuration directory" + test_var_and_show "GORGONE_LOG_DIR" "Gorgone log directory" + test_var_and_show "GORGONE_VARLIB_DIR" "Gorgone variable library directory" + test_var_and_show "GORGONE_CACHE_DIR" "Gorgone cache directory" + test_var_and_show "CENTREON_USER" "Centreon user" + test_var_and_show "CENTREON_HOME" "Centreon home directory" + test_var_and_show "CENTREON_ETC_DIR" "Centreon configuration directory" + test_var_and_show "CENTREON_SERVICE" "Centreon service" + test_var_and_show "ENGINE_USER" "Engine user" + test_var_and_show "ENGINE_GROUP" "Engine group" + test_var_and_show "BROKER_USER" "Broker user" + test_var_and_show "BROKER_GROUP" "Broker group" +fi + +if [ ! 
-z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + exit 1 +fi + +if [ "$silent_install" -ne 1 ] ; then + yes_no_default "Everything looks good, proceed to installation?" + if [ "$?" -ne 0 ] ; then + purge_centreon_tmp_dir "silent" + exit 1 + fi +fi + +# Start installation + +## Build files +echo_title "Build files" +echo_line "Copying files to '$TMP_DIR'" + +if [ -d $TMP_DIR ] ; then + mv $TMP_DIR $TMP_DIR.`date +%Y%m%d-%k%m%S` +fi + +create_dir "$TMP_DIR/source" + +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + { + copy_dir "$BASE_DIR/config" "$TMP_DIR/source/" && + copy_dir "$BASE_DIR/gorgone" "$TMP_DIR/source/" && + copy_dir "$BASE_DIR/install" "$TMP_DIR/source/" && + copy_file "$BASE_DIR/gorgoned" "$TMP_DIR/source/" + } || { + echo_error_on_line "FAILED" + if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + fi + purge_centreon_tmp_dir "silent" + exit 1 + } +fi +echo_success_on_line "OK" + +echo_line "Replacing macros" +eval "echo \"$(cat "$TMP_DIR/source/install/src/instGorgone.conf")\"" > $TMP_DIR/source/install/src/instGorgone.conf +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + { + replace_macro "install/src" + } || { + echo_error_on_line "FAILED" + if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + fi + purge_centreon_tmp_dir "silent" + exit 1 + } +fi +echo_success_on_line "OK" + +test_user "$GORGONE_USER" +if [ $? -ne 0 ]; then + { + ### Create user and group + create_dir "$GORGONE_VARLIB_DIR" && + create_group "$GORGONE_GROUP" && + create_user "$GORGONE_USER" "$GORGONE_GROUP" "$GORGONE_VARLIB_DIR" && + set_ownership "$GORGONE_VARLIB_DIR" "$GORGONE_USER" "$GORGONE_GROUP" && + set_permissions "$GORGONE_VARLIB_DIR" "755" + } || { + if [ ! 
-z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + fi + purge_centreon_tmp_dir "silent" + exit 1 + } +fi + +echo_line "Building installation tree" +BUILD_DIR="$TMP_DIR/build" +create_dir "$BUILD_DIR" + +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + { + ### Configuration diretories and base file + create_dir "$BUILD_DIR/$GORGONE_ETC_DIR" "$GORGONE_USER" "$GORGONE_GROUP" "755" && + create_dir "$BUILD_DIR/$GORGONE_ETC_DIR/config.d" "$GORGONE_USER" "$GORGONE_GROUP" "775" && + create_dir "$BUILD_DIR/$GORGONE_ETC_DIR/config.d/cron.d" "$GORGONE_USER" "$GORGONE_GROUP" "775" && + copy_file "$TMP_DIR/source/install/src/config.yaml" "$BUILD_DIR/$GORGONE_ETC_DIR/config.yaml" \ + "$GORGONE_USER" "$GORGONE_GROUP" && + + ### Install save file + copy_file "$TMP_DIR/source/install/src/instGorgone.conf" \ + "$BUILD_DIR/$GORGONE_ETC_DIR/instGorgone.conf" \ + "$GORGONE_USER" "$GORGONE_GROUP" "644" && + + ### Log directory + create_dir "$BUILD_DIR/$GORGONE_LOG_DIR" "$GORGONE_USER" "$GORGONE_GROUP" "755" && + + ### Cache directories + create_dir "$BUILD_DIR/$GORGONE_CACHE_DIR" "$GORGONE_USER" "$GORGONE_GROUP" "755" && + create_dir "$BUILD_DIR/$GORGONE_CACHE_DIR/autodiscovery" "$GORGONE_USER" "$GORGONE_GROUP" "755" + } || { + echo_error_on_line "FAILED" + if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + fi + purge_centreon_tmp_dir "silent" + exit 1 + } +fi +echo_success_on_line "OK" + +## Install files +echo_title "Install builded files" +echo_line "Copying files from '$TMP_DIR' to final directory" +copy_dir "$BUILD_DIR/*" "/" +if [ "$?" -ne 0 ] ; then + echo_error_on_line "FAILED" + if [ ! 
-z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + fi + purge_centreon_tmp_dir "silent" + exit 1 +fi +echo_success_on_line "OK" + +## Install remaining files +echo_title "Install remaining files" + +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + ### Configurations + copy_file_no_replace "$TMP_DIR/source/install/src/centreon.yaml" \ + "$GORGONE_ETC_DIR/config.d/30-centreon.yaml" \ + "Centreon configuration" \ + "$GORGONE_USER" "$GORGONE_GROUP" "644" + copy_file_no_replace "$TMP_DIR/source/install/src/centreon-api.yaml" \ + "$GORGONE_ETC_DIR/config.d/31-centreon-api.yaml" \ + "Centreon API configuration" \ + "$GORGONE_USER" "$GORGONE_GROUP" "644" + + ### Perl libraries + copy_dir "$TMP_DIR/source/gorgone" "$PERL_LIB_DIR/gorgone" + + ### Gorgoned binary + copy_file "$TMP_DIR/source/gorgoned" "$BINARY_DIR" + + ### Systemd files + restart_gorgoned="0" + copy_file "$TMP_DIR/source/install/src/gorgoned.systemd" \ + "$SYSTEMD_ETC_DIR/gorgoned.service" && restart_gorgoned="1" + copy_file_no_replace "$TMP_DIR/source/install/src/gorgoned.sysconfig" "$SYSCONFIG_ETC_DIR/gorgoned" \ + "Sysconfig Gorgoned configuration" && restart_gorgoned="1" + + ### Logrotate configuration + copy_file_no_replace "$TMP_DIR/source/install/src/gorgoned.logrotate" "$LOGROTATED_ETC_DIR/gorgoned" \ + "Logrotate Gorgoned configuration" +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + ERROR_MESSAGE="" +fi + +## Update groups memberships +echo_title "Update groups memberships" +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + add_user_to_group "$GORGONE_USER" "$BROKER_GROUP" + add_user_to_group "$GORGONE_USER" "$ENGINE_GROUP" + add_user_to_group "$ENGINE_USER" "$GORGONE_GROUP" + add_user_to_group "$BROKER_USER" "$GORGONE_GROUP" +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + ERROR_MESSAGE="" +fi + +## Retrieve Centreon SSH key +if [ ! 
-d "$GORGONE_VARLIB_DIR/.ssh" ] && [ -d "$CENTREON_HOME/.ssh" ] ; then + echo_title "Retrieve Centreon SSH key" + copy_file "$CENTREON_HOME/.ssh/*" "$GORGONE_VARLIB_DIR/.ssh" "$GORGONE_USER" "$GORGONE_GROUP" && + set_permissions "$GORGONE_VARLIB_DIR/.ssh/id_rsa" "600" +fi + +## Configure and restart services +echo_title "Configure and restart services" +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + ### Gorgoned + enable_service "gorgoned" + + if [ "$restart_gorgoned" -eq 1 ] ; then + reload_daemon + restart_service "gorgoned" + fi +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + ERROR_MESSAGE="" +fi + +## Purge working directories +purge_centreon_tmp_dir "silent" + +# End +echo_title "You're done!" +echo_info "" +echo_info "Take a look at the documentation" +echo_info "https://docs.centreon.com/current." +echo_info "Thanks for using Gorgone!" +echo_info "Follow us on https://github.com/centreon/centreon-gorgone!" + +exit 0 diff --git a/gorgone/install/functions b/gorgone/install/functions new file mode 100755 index 00000000000..97e5495f60b --- /dev/null +++ b/gorgone/install/functions @@ -0,0 +1,1122 @@ +#!/bin/bash +#---- +## @Synopsis This file contains functions to be used by Gorgone install script +## @Copyright Copyright 2008, Guillaume Watteeux +## @Copyright Copyright 2008-2021, Centreon +## @Licence GPLv2 +## This file contains functions to be used by Centreon install script +#---- +## Centreon is developed with GPL Licence 2.0 +## +## GPL License: http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt +## +## Developed by : Julien Mathis - Romain Le Merlus +## Contributors : Guillaume Watteeux - Maximilien Bersoult +## +## This program is free software; you can redistribute it and/or +## modify it under the terms of the GNU General Public License +## as published by the Free Software Foundation; either version 2 +## of the License, or (at your option) any later version. 
+## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## For information : infos@centreon.com + +## VARS +yes="y" +no="n" + +## COLOR FUNCTIONS +RES_COL="70" +MOVE_TO_COL="\\033[${RES_COL}G" +SETCOLOR_INFO="\\033[1;38m" +SETCOLOR_SUCCESS="\\033[1;32m" +SETCOLOR_ERROR="\\033[1;31m" +SETCOLOR_WARNING="\\033[1;33m" +SETCOLOR_NORMAL="\\033[0;39m" + +#---- +## echo_title +## Print string in a title way. Also log in log file. +## @param string to display +## @stdout titled string +#---- +echo_title() { + [ "$silent_install" -eq 0 ] && echo -e "\n" + [ "$silent_install" -eq 0 ] && echo -e "$1" + [ "$silent_install" -eq 0 ] && printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + log "INFO" "$1" +} + +#---- +## echo_line +## Print message to screen and keep position, and in log file. +## @param message +## @stdout message +#---- +echo_line() { + [ "$silent_install" -eq 0 ] && echo -en "${1}" + log "INFO" "$1" +} + +#---- +## echo_success_on_line +## Print message to screen on right-end side, and in log file. +## @param message +## @stdout message +#---- +echo_success_on_line() { + [ "$silent_install" -eq 0 ] && echo -e "${MOVE_TO_COL}${SETCOLOR_SUCCESS}${1}${SETCOLOR_NORMAL}" + log "SUCCESS" "$1" +} + +#---- +## echo_error_on_line +## Print message to screen on right-end side, and in log file. +## @param message +## @stdout message +#---- +echo_error_on_line() { + [ "$silent_install" -eq 0 ] && echo -e "${MOVE_TO_COL}${SETCOLOR_ERROR}${1}${SETCOLOR_NORMAL}" + log "ERROR" "$1" +} + +#---- +## echo_info +## Print info message to screen and in log file. +## @param message +## @param type info (ex: INFO, username...) 
+## @stdout info message +#---- +echo_info() { + [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_INFO}${2}${SETCOLOR_NORMAL}" + log "INFO" "$1 : $2" +} + +#---- +## echo_success +## Print success message to screen and in log file. +## @param message +## @param word to specify success (ex: OK) +## @stdout success message +#---- +echo_success() { + [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_SUCCESS}${2}${SETCOLOR_NORMAL}" + log "SUCCESSS" "$1 : $2" +} + +#---- +## echo_warning +## Print warning message to screen and in log file. +## @param message +## @param word to specify warning (ex: warn) +## @stdout warning message +#---- +echo_warning() { + [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_WARNING}${2}${SETCOLOR_NORMAL}" + log "WARNING" "$1 : $2" +} + +#---- +## echo_error +## Print failure message to screen and in log file. +## @param message +## @param word to specify failure (ex: fail) +## @stdout failure message +#---- +echo_error() { + [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_ERROR}${2}${SETCOLOR_NORMAL}" + log "ERROR" "$1 : $2" +} + +#---- +## log +## Add message in log file +## @param type of message level (debug, info, ...) +## @param message +## @globals LOG_FILE +#---- +log() { + local type="$1" + shift + local message="$@" + echo -e "["`date +"%m-%d-%y %T"`"] [$type] $message" >> $LOG_FILE +} + +#---- +## trim +## Trim whitespaces and tabulations +## @param string to trim +## @return string +#---- +trim() { + echo "$1" | sed 's/^[ \t]*\(.*\)[ \t]*$/\1/' +} + +#---- +## yes_no_default +## Create a question with yes/no possiblity. Uses "no" response by default. +## @param message to print +## @param default response (default to no) +## @return 0 yes +## @return 1 no +#---- +yes_no_default() { + local message=$1 + local default=${2:-$no} + local res="not_define" + + while [ "$res" != "$yes" ] && [ "$res" != "$no" ] && [ ! 
-z "$res" ] ; do + echo -en "\n$message" + [ "$default" = "$yes" ] && echo " [Y/n]" + [ "$default" = "$no" ] && echo " [y/N]" + echo -en "> " + read res + [ -z "$res" ] && res="$default" + done + if [ "$res" = "$yes" ] ; then + return 0 + else + return 1 + fi +} + +#---- +## add_error_message +## Add an error message in global variable ERROR_MESSAGE. +## See this as an exceptions management. Used by test_* functions. +## @param message +## @globals ERROR_MESSAGE +#---- +add_error_message() { + local append="" + local message="$1" + + if [ ! -z "$ERROR_MESSAGE" ] ; then + append="\n" + fi + ERROR_MESSAGE="${ERROR_MESSAGE}$append $message" +} + +#---- +## test_var +## Test a global variable valueexists. +## @param global variable (as string) +## @param message to display as part of the returned error +## @return 0 show the message and value +## @return 1 add an error using add_error_message +#---- +test_var() { + local var="$1" + local message="$2" + local value=$(eval echo \$$var) + + if [ -z "$value" ] ; then + add_error_message "Missing value for variable '$var' ($message)" + return 1 + fi + + return 0 +} + +#---- +## test_var_and_show +## Test a global variable value exists and show this value in a echo_info format. +## @param global variable (as string) +## @param message to display as part of the echo_info or returned error +## @return 0 show the message and value +## @return 1 add an error using add_error_message +#---- +test_var_and_show() { + local var="$1" + local message="$2" + local value=$(eval echo \$$var) + + if [ -z "$value" ] ; then + add_error_message "Missing value for variable '$var' ($message)" + return 1 + fi + + echo_info "$message ($var)" "$value" + + return 0 +} + +#---- +## test_file +## Test a file existence. 
+## @param file absolute path +## @param message to display as part of the returned error +## @return 0 file found +## @return 1 add an error using add_error_message +#---- +test_file() { + local file="$1" + local message="$2" + + if [ -z "$file" ] ; then + add_error_message "Missing value for test_file function" + return 1 + fi + if [ ! -f $file ] ; then + add_error_message "Cannot find file '$file' ($message)" + return 1 + fi + + return 0 +} + +#---- +## test_file_from_var +## Test a file existence from a global variable. +## @param global variable (as string) +## @param message to display as part of the returned error +## @return 0 file found +## @return 1 add an error using add_error_message +#---- +test_file_from_var() { + local var="$1" + local message="$2" + local file=$(eval echo \$$var) + + if [ -z "$file" ] ; then + add_error_message "Missing value for variable '$var' ($message)" + return 1 + fi + if [ ! -f $file ] ; then + add_error_message "Cannot find file '$file' from variable '$var' ($message)" + return 1 + fi + + return 0 +} + +#---- +## test_dir +## Test a directory existence. +## @param directory absolute path +## @param message to display as part of the returned error +## @return 0 directory found +## @return 1 add an error using add_error_message +#---- +test_dir() { + local dir="$1" + local message="$2" + + if [ -z "$dir" ] ; then + add_error_message "Missing value for test_dir function" + return 1 + fi + if [ ! -d "$dir" ] ; then + add_error_message "Cannot find directory '$dir' ($message)" + return 1 + fi + + return 0 +} + +#---- +## test_dir_from_var +## Test a directory existence from a global variable. 
+## @param global variable (as string) +## @param message to display as part of the returned error +## @return 0 directory found +## @return 1 add an error using add_error_message +#---- +test_dir_from_var() { + local var="$1" + local message="$2" + local dir=$(eval echo \$$var) + + if [ -z "$dir" ] ; then + add_error_message "Missing value for variable '$var' ($message)" + return 1 + fi + if [ ! -d "$dir" ] ; then + add_error_message "Cannot find directory '$dir' from variable '$var' ($message)" + return 1 + fi + + return 0 +} + +#---- +## test_user_from_var +## Test a user existence from a global variable. +## @param global variable (as string) +## @param message to display as part of the returned error +## @return 0 user found +## @return 1 add an error using add_error_message +#---- +test_user_from_var() { + local var="$1" + local message="$2" + local user=$(eval echo \$$var) + + if [ -z "$user" ] ; then + add_error_message "Missing value for variable '$var' ($message)" + return 1 + fi + grep "^$user:" /etc/passwd &>/dev/null + if [ $? -ne 0 ] ; then + add_error_message "Cannot find user '$user' from variable '$var' ($message)" + return 1 + fi + + return 0 +} + +#---- +## test_group_from_var +## Test a group existence from a global variable. +## @param global variable (as string) +## @param message to display as part of the returned error +## @return 0 group found +## @return 1 add an error using add_error_message +#---- +test_group_from_var() { + local var="$1" + local message="$2" + local group=$(eval echo \$$var) + + if [ -z "$group" ] ; then + add_error_message "Missing value for variable '$var' ($message)" + return 1 + fi + grep "^$group:" /etc/group &>/dev/null + if [ $? -ne 0 ] ; then + add_error_message "Cannot find group '$group' from variable '$var' ($message)" + return 1 + fi + + return 0 +} + +#---- +## create_dir +## Create a directory if it does not exist. 
+## @param directory absolute path +## @param user to set ownership (optional) +## @param group to set ownership (optional) +## @param mode to set permisions (optional) +## @return 0 directory created +## @return 1 error message using echo_error +#---- +create_dir() { + local dirname="$1" + local user="$2" + local group="$3" + local mode="$4" + + if [ ! -d "$dirname" ] ; then + result="$(mkdir -p "$dirname" > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Could not create directory '$dirname': $result" + return 1 + fi + fi + if [ ! -z "$user" ] && [ ! -z "$group" ] ; then + set_ownership "$dirname" "$user" "$group" + [ $? -ne 0 ] && return 1 + fi + if [ ! -z "$mode" ] ; then + set_permissions "$dirname" "$mode" + [ $? -ne 0 ] && return 1 + fi + + return 0 +} + +#---- +## delete_file +## Delete a file or multiple files if wildcard specified. +## @param file absolute path +## @return 0 file deleted +## @return 1 error message using echo_error +#---- +delete_file() { + local file="$1" + + if [ ! -f "$file" ] && [[ ! "$file" =~ \*$ ]] ; then + echo_error "Not a file '$file'" "FAILED" + return 1 + else + result="$(rm -f $file 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + echo_error "Could not delete file '$file'" "FAILED" + echo_error "$result" + return 1 + fi + fi + + return 0 +} + +#---- +## copy_file +## Copy a file or multiple files (using wildcard) to a defined location. +## Simplistic but handles the needed cases. +## @param source, unique file absolute path or directory absolute path plus wildcard +## @param destination, can be unique file absolute path or directory absolute path +## @param user to set ownership (optional) +## @param group to set ownership (optional) +## @param mode to set permisions (optional) +## @return 0 copy done successfully +## @return 1 error message using echo_error +#---- +copy_file() { + local file="$1" + local dest="$2" + local user="$3" + local group="$4" + local mode="$5" + + if [ ! -f "$file" ] && [[ ! 
"$file" =~ \*$ ]] ; then + add_error_message "File '$file' does not exist" + return 1 + else + result="$(cp -f $file $dest 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Copy of '$file' to '$dest' failed: $result" + return 1 + fi + if [ ! -z "$user" ] && [ ! -z "$group" ] ; then + set_ownership "$dest" "$user" "$group" + [ $? -ne 0 ] && return 1 + fi + if [ ! -z "$mode" ] ; then + set_permissions "$dest" "$mode" + [ $? -ne 0 ] && return 1 + fi + fi + + return 0 +} + +#---- +## copy_file_no_replace +## Copy a file to a defined location. +## Simplistic but handles the needed cases. +## @param source, unique file absolute path +## @param destination, unique file absolute path +## @return 0 copy done successfully, returning echo_success message +## @return 1 error message using echo_error +## @return 2 message copied as .new, returning echo_info message +#---- +copy_file_no_replace() { + local file="$1" + local dest="$2" + local message="$3" + local exist=0 + + if [ ! -f "$file" ] ; then + add_error_message "File '$file' does not exist" + return 1 + elif [ -f "$dest" ] ; then + dest=${dest}".new" + exist=1 + fi + result="$(cp -f $file $dest 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Copy of '$file' to '$dest' failed: $result" + return 1 + elif [ $exist == "1" ] ; then + echo_info "$message" "$dest" + return 2 + else + echo_success "$message" "OK" + return 0 + fi +} + +#---- +## copy_dir +## Copy a directory or a directory content (using wildcard) to a defined location. +## Simplistic but handles the needed cases. 
+## @param source, unique directory absolute path or directory absolute path plus wildcard +## @param destination, directory absolute path +## @param user to set ownership (optional) +## @param group to set ownership (optional) +## @param mode to set permisions (optional) +## @return 0 copy done successfully +## @return 1 error message using echo_error +#---- +copy_dir() { + local dir="$1" + local dest="$2" + local user="$3" + local group="$4" + local mode="$5" + + if [ ! -d "$dir" ] && [[ ! "$dir" =~ \*$ ]] ; then + add_error_message "Directory '$dir' does not exist" + return 1 + else + result="$(cp -rpf $dir $dest 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Copy of '$dir' to '$dest' failed: $result" + return 1 + fi + if [ ! -z "$user" ] && [ ! -z "$group" ] ; then + set_ownership "$dest" "$user" "$group" + [ $? -ne 0 ] && return 1 + fi + if [ ! -z "$mode" ] ; then + set_permissions "$dest" "$mode" + [ $? -ne 0 ] && return 1 + fi + fi + + return 0 +} + +#---- +## create_symlink +## Create a symbolic link for a file. +## @param file absolute path +## @param link absolute path +## @param user to set ownership (optional) +## @param group to set ownership (optional) +## @param mode to set permisions (optional) +## @return 0 directory created +## @return 1 error message using echo_error +#---- +create_symlink() { + local file="$1" + local link="$2" + local user="$3" + local group="$4" + local mode="$5" + + if [ -f "$file" ] && [ ! -L "$link" ]; then + result="$(ln -s "$file" "$link" 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Could not create symbolic link '$file' to '$link': $result" + return 1 + fi + if [ ! -z "$user" ] && [ ! -z "$group" ] ; then + set_ownership "$link" "$user" "$group" + [ $? -ne 0 ] && return 1 + fi + if [ ! -z "$mode" ] ; then + set_permissions "$link" "$mode" + [ $? -ne 0 ] && return 1 + fi + fi + + return 0 +} + +#---- +## set_ownership +## Set the ownership on a unique file or on a directory. 
+## Simplistic but handles the needed cases. +## @param file or directory +## @param user +## @param group +## @return 0 ownership set successfully +## @return 1 error message using echo_error +#---- +set_ownership() { + local dir_file="$1" + local user="$2" + local group="$3" + + if [ -z "$dir_file" ] ; then + echo_info "File or directory not defined" + return 1 + fi + if [ -f "$dir_file" ] || [[ "$dir_file" =~ \*$ ]] ; then + result="$(chown -h $user:$group $dir_file 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Set ownership '$user:$group' on file '$dir_file' failed: $result" + return 1 + fi + elif [ -d "$dir_file" ] ; then + result="$(chown -R $user:$group $dir_file 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Set ownership '$user:$group' on directory '$dir_file' failed: $result" + return 1 + fi + fi + + return 0 +} + +#---- +## set_permissions +## Set the permissions on a unique file, on a directory and its content (recursively) +## or on files in directories (recursively) if using wildcard. +## Simplistic but handles the needed cases. +## @param file or directory +## @param mode +## @return 0 permissions set successfully +## @return 1 error message using echo_error +#---- +set_permissions() { + local dir_file="$1" + local mode="$2" + + if [ -z "$dir_file" ] ; then + add_error_message "File or directory not defined" + return 1 + fi + if [ -f "$dir_file" ] ; then + result="$(chmod $mode $dir_file 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Set permissions '$mode' on file '$dir_file' failed: $result" + return 1 + fi + elif [ -d "$dir_file" ] ; then + result="$(find $dir_file -type d -print | xargs -I '{}' chmod $mode '{}' 2>&1 > /dev/null)" + if [ $? 
-ne 0 ] ; then + add_error_message "Set permissions '$mode' on directories in '$dir_file' failed: $result" + return 1 + fi + elif [[ "$dir_file" =~ \*$ ]] ; then + result="$(find $dir_file -type f -print | xargs -I '{}' chmod $mode '{}' 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + add_error_message "Set permissions '$mode' on files in '$dir_file' failed: $result" + return 1 + fi + else + add_error_message "Not a file or a directory '$dir_file'" + return 1 + fi + + return 0 +} + +#---- +## create_user +## Create a user if does not exist (checked using test_user). +## @param username +## @param groupname +## @param user's home +## @return 0 user created successfully +## @return 1 creation failed +#---- +create_user() { + local username="$1" + local groupname="$2" + local home="$3" + + test_user $username + if [ $? -ne 0 ]; then + echo_line "Create user '$username'" + result="$(useradd -r -s "/bin/sh" -d "$home" -g "$groupname" "$username" 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Create user '$username' failed: $result" + return 1 + fi + echo_success_on_line "OK" + fi + + return 0 +} + +#---- +## create_group +## Create a group if does not exist (checked using test_group). +## @param groupname +## @return 0 group created successfully +## @return 1 creation failed +#---- +create_group() { + local groupname="$1" + + test_group $groupname + if [ $? -ne 0 ]; then + echo_line "Create group '$groupname'" + result="$(groupadd -r "$groupname" 2>&1 > /dev/null)" + if [ $? -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Create group '$groupname' failed: $result" + return 1 + fi + echo_success_on_line "OK" + fi + + return 0 +} + +#---- +## test_user +## Test a user existence. +## @param user +## @return 0 user exists +## @return 1 user does not exist +#---- +test_user() { + result="$(grep "^$1:" /etc/passwd 2>&1 > /dev/null)" + return $? +} + +#---- +## test_group +## Test a group existence. 
+## @param user +## @return 0 group exists +## @return 1 group does not exist +#---- +test_group() { + result="$(grep "^$1:" /etc/group 2>&1 > /dev/null)" + return $? +} + +#---- +## add_user_to_group +## Add a user in a group +## @param user +## @param group +## @return 0 add successfull +## @return 1 add failed +#---- +add_user_to_group() { + local user=$1 + local group=$2 + echo_line "Add user '$user' to group '$group'" + if [ -z "$user" -o -z "$group" ]; then + echo_error_on_line "FAILED" + add_error_message "User or group not defined" + return 1 + fi + test_user $user + if [ $? -ne 0 ]; then + echo_error_on_line "FAILED" + add_error_message "Add user '$user' to group '$group' failed: user '$user' does not exist" + return 1 + fi + test_group $group + if [ $? -ne 0 ]; then + echo_error_on_line "FAILED" + add_error_message "Add user '$user' to group '$group' failed: group '$group' does not exist" + return 1 + fi + + result="$(usermod -a -G $group $user 2>&1 > /dev/null)" + local ret=$? + if [ "$ret" -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Add user '$user' to group '$group' failed: $result" + else + echo_success_on_line "OK" + fi + return $ret +} + +#---- +## find_perl_info +## Find Perl information. +## @return 0 search done +## @globals PERL_LIB_DIR +#---- +find_perl_info() { + if [ -z $PERL_LIB_DIR ] ; then + PERL_LIB_DIR=$(perl -V:installvendorlib | cut -d "'" -f 2) + # for freebsd + if [ "$PERL_LIB_DIR" = "" -o "$PERL_LIB_DIR" = "UNKNOWN" ]; then + PERL_LIB_DIR=$(perl -V:installsitelib | cut -d "'" -f 2) + fi + fi + + PERL_LIB_DIR=${PERL_LIB_DIR%/} + + return 0 +} + +#---- +## enable_service +## Enable a systemd service. +## @return 0 enabling ok +## @return 0 enabling failed +#---- +enable_service() { + local service="$1" + + if [ -x /bin/systemctl ] ; then + echo_line "Enabling service '$service'" + result="$(/bin/systemctl enable $service 2>&1 > /dev/null)" + local ret=$? 
+ if [ "$ret" -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Enabling service '$service' failed: $result" + else + echo_success_on_line "OK" + fi + return $ret + fi + + return 1 +} + +#---- +## reload_service +## Reload a systemd service. +## @return 0 reloading ok +## @return 0 reloading failed +#---- +reload_service() { + local service="$1" + + if [ -x /bin/systemctl ] ; then + echo_line "Reloading service '$service'" + result="$(/bin/systemctl reload $service 2>&1 > /dev/null)" + local ret=$? + if [ "$ret" -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Reloading service '$service' failed: $result" + else + echo_success_on_line "OK" + fi + return $ret + fi + + return 1 +} + +#---- +## restart_service +## Restart a systemd service. +## @return 0 restarting ok +## @return 0 restarting failed +#---- +restart_service() { + local service="$1" + + if [ -x /bin/systemctl ] ; then + echo_line "Restarting service '$service'" + result="$(/bin/systemctl restart $service 2>&1 > /dev/null)" + local ret=$? + if [ "$ret" -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Restarting service '$service' failed: $result" + else + echo_success_on_line "OK" + fi + return $ret + fi + + return 1 +} + +#---- +## reload_daemon +## Reload systemd daemon. +## @return 0 reload ok +## @return 0 reload failed +#---- +reload_daemon() { + if [ -x /bin/systemctl ] ; then + echo_line "Reloading systemctl daemon" + result="$(/bin/systemctl daemon-reload 2>&1 > /dev/null)" + local ret=$? + if [ "$ret" -ne 0 ] ; then + echo_error_on_line "FAILED" + add_error_message "Reloading systemctl daemon failed: $result" + else + echo_success_on_line "OK" + fi + return $ret + fi + + return 1 +} + +#---- +## replace_macro +## Replace @@ macros in all needed files in temporary directory. 
+## @return 0 replacement done successfully +## @return 1 replacement failed +## @globals TMP_DIR +#---- +replace_macro() { + local srclistcp="$1" + + { + for folder in $srclistcp ; do + result="$(find $TMP_DIR/source/$folder -type f | xargs --delimiter='\n' sed -i \ + -e 's|@GORGONE_USER@|'"$GORGONE_USER"'|gi' \ + -e 's|@GORGONE_LOG_DIR@|'"$GORGONE_LOG_DIR"'|gi' \ + -e 's|@GORGONE_ETC_DIR@|'"$GORGONE_ETC_DIR"'|gi' \ + -e 's|@CENTREON_ETC_DIR@|'"$CENTREON_ETC_DIR"'|gi' \ + -e 's|@CENTREON_SERVICE@|'"$CENTREON_SERVICE"'|gi' \ + -e 's|@SYSCONFIG_ETC_DIR@|'"$SYSCONFIG_ETC_DIR"'|gi' \ + -e 's|@PERL_BINARY@|'"$PERL_BINARY"'|gi' \ + -e 's|@BINARY_DIR@|'"$BINARY_DIR"'|gi' 2>&1 > /dev/null)" + done + } || { + add_error_message "Replacing macros failed: $result" + return 1 + } + + return 0 +} + +#---- +## find_os +## Search OS distribution and version. +## @return 0 search done +## @globals DISTRIB DISTRIB_VERSION +#---- +find_os() { + # From https://unix.stackexchange.com/questions/6345/how-can-i-get-distribution-name-and-version-number-in-a-simple-shell-script + if [ -f /etc/os-release ]; then + # freedesktop.org and systemd + . /etc/os-release + DISTRIB=${ID} + DISTRIB_VERSION=${VERSION_ID} + elif type lsb_release >/dev/null 2>&1; then + # linuxbase.org + DISTRIB=$(lsb_release -si | sed -e 's/\(.*\)/\L\1/') + DISTRIB_VERSION=$(lsb_release -sr) + elif [ -f /etc/lsb-release ]; then + # For some versions of Debian/Ubuntu without lsb_release command + . /etc/lsb-release + DISTRIB=${DISTRIB_ID} + DISTRIB_VERSION=${DISTRIB_RELEASE} + elif [ -f /etc/debian_version ]; then + # Older Debian/Ubuntu/etc. + DISTRIB=debian + DISTRIB_VERSION=$(cat /etc/debian_version | cut -d "." -f 1) + elif [ -f /etc/centos-release ]; then + # CentOS + DISTRIB=centos + DISTRIB_VERSION=$(cat /etc/centos-release | cut -d " " -f 4 | cut -d "." -f 1) + elif [ -f /etc/redhat-release ]; then + # Older Red Hat, CentOS, etc. 
+ DISTRIB=centos + DISTRIB_VERSION=$(cat /etc/redhat-release | cut -d " " -f 4 | cut -d "." -f 1) + else + # Fall back to uname, e.g. "Linux ", also works for BSD, etc. + DISTRIB=$(uname -s) + DISTRIB_VERSION=$(uname -r) + fi + + return 0 +} + +#---- +## clean_and_exit +## Function to clean and exit Centreon install using purge_centreon_tmp_dir functionn, and exit. +#---- +clean_and_exit() { + local trap_sig=${1:-0} + + if [ $trap_sig -eq 0 ] ; then + echo -e "\nTrap interrupt, Centreon'll exit now and clean installation" + yes_no_default "Do you really want to quit Centreon installation?" "$no" + if [ $? -eq 1 ] ; then + echo "Continue..." + return 1 + fi + fi + + purge_centreon_tmp_dir "silent" + + exit 1 +} + +#---- +## check_tmp_disk_space +## Check space left for working directory. +## @return 0 space ok +## @return 1 no Space left +## @globals TMP_DIR +#---- +check_tmp_disk_space() { + local min_space="35584" + local free_space="" + local tmp_dir="" + + tmp_dir=$(dirname $TMP_DIR) + + free_space=$(df -P $tmp_dir | tail -1 | awk '{print $4}') + + if [ "$free_space" -lt "$min_space" ] ; then + echo_error "No space left on temporary directory '$tmp_dir' (<$min_space Ko)" "FAILED" + return 1 + else + return 0 + fi +} + +#---- +## purge_centreon_tmp_dir +## Ask to remove all temporaries working directory. +## @param silent option (silent) +## @return 0 remove done +## @return 1 don't remove (abort by user) +## @globals TMP_DIR +#---- +purge_centreon_tmp_dir() { + local silent="$1" + local not_clean="1" + local rc="0" + while [ $not_clean -ne 0 ] ; do + if [ "$silent" != "silent" ] ; then + yes_no_default "Do you want to remove the Centreon temporary working space to continue installation?" "$yes" + rc=$? 
+ else + rc=0 + fi + if [ $rc -eq 0 ] ; then + local tmp_base_dir=`dirname $TMP_DIR` + local tmp_dir=`basename $TMP_DIR` + find $tmp_base_dir -name "$tmp_dir*" -type d \ + -exec rm -rf {} \; 2>/dev/null + not_clean="0" + else + return 1 + fi + done + return 0 +} + +#---- +## pathfind_ret +## Find in $PATH if binary exist and return dirname. +## @param file to test +## @param global variable to set a result +## @return 0 found +## @return 1 not found +## @Globals PATH +#---- +pathfind_ret() { + local bin=$1 + local var_ref=$2 + local OLDIFS="$IFS" + IFS=: + for p in $PATH; do + if [ -x "$p/$bin" ]; then + IFS="$OLDIFS" + eval $var_ref=$p + return 0 + fi + done + IFS="$OLDIFS" + return 1 +} + +#---- +## check_result +## Check result and print a message using echo_success or echo_error +## @param return code to check +## @param message to print +#---- +check_result() { + local code=$1 + shift + local message=$@ + + if [ $code -eq 0 ] ; then + echo_success "$message" "OK" + else + echo_error "$message" "FAILED" + fi + return 0 +} diff --git a/gorgone/install/inputvars.centos.env b/gorgone/install/inputvars.centos.env new file mode 100644 index 00000000000..04bbf24bd68 --- /dev/null +++ b/gorgone/install/inputvars.centos.env @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# +# Centreon installation variables specific values for CentOS. +# DO NOT EDIT! Edit inputvars.env file instead! diff --git a/gorgone/install/inputvars.debian.env b/gorgone/install/inputvars.debian.env new file mode 100644 index 00000000000..f42ab29d103 --- /dev/null +++ b/gorgone/install/inputvars.debian.env @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# +# Centreon installation variables specific values for Debian. +# DO NOT EDIT! Edit inputvars.env file instead! 
+ +SYSTEMD_ETC_DIR="/lib/systemd/system" +SYSCONFIG_ETC_DIR="/etc/default" diff --git a/gorgone/install/inputvars.default.env b/gorgone/install/inputvars.default.env new file mode 100755 index 00000000000..b0c27111c5d --- /dev/null +++ b/gorgone/install/inputvars.default.env @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Gorgone installation variables default values. +# DO NOT EDIT! Edit inputvars.env file instead! + +INSTALLATION_TYPE="central" +GORGONE_USER="centreon-gorgone" +GORGONE_GROUP="centreon-gorgone" +GORGONE_ETC_DIR="/etc/centreon-gorgone" +GORGONE_LOG_DIR="/var/log/centreon-gorgone" +GORGONE_VARLIB_DIR="/var/lib/centreon-gorgone" +GORGONE_CACHE_DIR="/var/cache/centreon-gorgone" +CENTREON_USER="centreon" +CENTREON_HOME="/var/spool/centreon" +CENTREON_ETC_DIR="/etc/centreon" +CENTREON_SERVICE="centreon" +ENGINE_USER="centreon-engine" +ENGINE_GROUP="centreon-engine" +BROKER_USER="centreon-broker" +BROKER_GROUP="centreon-broker" +BINARY_DIR="/usr/bin" +PERL_BINARY="/usr/bin/perl" +SYSTEMD_ETC_DIR="/etc/systemd/system" +SYSCONFIG_ETC_DIR="/etc/sysconfig" +LOGROTATED_ETC_DIR="/etc/logrotate.d" +TMP_DIR="/tmp/centreon-setup" +LOG_FILE="$BASE_DIR/log/install.log" diff --git a/gorgone/install/inputvars.opensuse-leap.env b/gorgone/install/inputvars.opensuse-leap.env new file mode 100644 index 00000000000..e8b10d5b58f --- /dev/null +++ b/gorgone/install/inputvars.opensuse-leap.env @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# +# Centreon installation variables specific values for OpenSUSE Leap. +# DO NOT EDIT! Edit inputvars.env file instead! + diff --git a/gorgone/install/inputvars.ubuntu.env b/gorgone/install/inputvars.ubuntu.env new file mode 100644 index 00000000000..9cd0068550e --- /dev/null +++ b/gorgone/install/inputvars.ubuntu.env @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# +# Centreon installation variables specific values for Ubuntu. +# DO NOT EDIT! Edit inputvars.env file instead! 
+ +SYSTEMD_ETC_DIR="/lib/systemd/system" +SYSCONFIG_ETC_DIR="/etc/default" diff --git a/gorgone/install/src/centreon-api.yaml b/gorgone/install/src/centreon-api.yaml new file mode 100644 index 00000000000..39b7eb1ab0f --- /dev/null +++ b/gorgone/install/src/centreon-api.yaml @@ -0,0 +1,9 @@ +gorgone: + tpapi: + - name: centreonv2 + base_url: "http://127.0.0.1/centreon/api/latest/" + username: admin + password: Centreon!2021 + - name: clapi + username: admin + password: Centreon!2021 diff --git a/gorgone/install/src/centreon.yaml b/gorgone/install/src/centreon.yaml new file mode 100644 index 00000000000..4cb705e38d8 --- /dev/null +++ b/gorgone/install/src/centreon.yaml @@ -0,0 +1,3 @@ +name: centreon.yaml +description: Configure Centreon Gorgone to work with Centreon Web. +centreon: !include @CENTREON_ETC_DIR@/config.d/*.yaml diff --git a/gorgone/install/src/config.yaml b/gorgone/install/src/config.yaml new file mode 100644 index 00000000000..7675ec7b230 --- /dev/null +++ b/gorgone/install/src/config.yaml @@ -0,0 +1,3 @@ +name: config.yaml +description: Configuration brought by Centreon Gorgone install. SHOULD NOT BE EDITED! USE CONFIG.D DIRECTORY! +configuration: !include @GORGONE_ETC_DIR@/config.d/*.yaml diff --git a/gorgone/install/src/gorgoned.logrotate b/gorgone/install/src/gorgoned.logrotate new file mode 100644 index 00000000000..ee2210cc144 --- /dev/null +++ b/gorgone/install/src/gorgoned.logrotate @@ -0,0 +1,10 @@ +@GORGONE_LOG_DIR@/gorgoned.log { + copytruncate + weekly + rotate 52 + compress + delaycompress + notifempty + missingok + su root root +} diff --git a/gorgone/install/src/gorgoned.sysconfig b/gorgone/install/src/gorgoned.sysconfig new file mode 100644 index 00000000000..b1200066352 --- /dev/null +++ b/gorgone/install/src/gorgoned.sysconfig @@ -0,0 +1,4 @@ +# Configuration file for Centreon Gorgone. 
+ +# OPTIONS for the daemon launch +OPTIONS="--config=@GORGONE_ETC_DIR@/config.yaml --logfile=@GORGONE_LOG_DIR@/gorgoned.log --severity=info" diff --git a/gorgone/install/src/gorgoned.systemd b/gorgone/install/src/gorgoned.systemd new file mode 100644 index 00000000000..6c228be2bbd --- /dev/null +++ b/gorgone/install/src/gorgoned.systemd @@ -0,0 +1,33 @@ +## +## Copyright 2019-2021 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +[Unit] +Description=Centreon Gorgone +PartOf=@CENTREON_SERVICE@.service +After=@CENTREON_SERVICE@.service +ReloadPropagatedFrom=@CENTREON_SERVICE@.service + +[Service] +EnvironmentFile=@SYSCONFIG_ETC_DIR@/gorgoned +ExecStart=@PERL_BINARY@ @BINARY_DIR@/gorgoned $OPTIONS +Type=simple +User=@GORGONE_USER@ + +[Install] +WantedBy=multi-user.target +WantedBy=@CENTREON_SERVICE@.service diff --git a/gorgone/install/src/instGorgone.conf b/gorgone/install/src/instGorgone.conf new file mode 100644 index 00000000000..8dea74f7bcb --- /dev/null +++ b/gorgone/install/src/instGorgone.conf @@ -0,0 +1,22 @@ +# Centreon installation variables saved from previous installation. 
+ +INSTALLATION_TYPE=$INSTALLATION_TYPE +GORGONE_USER=$GORGONE_USER +GORGONE_GROUP=$GORGONE_GROUP +GORGONE_ETC_DIR=$GORGONE_ETC_DIR +GORGONE_LOG_DIR=$GORGONE_LOG_DIR +GORGONE_VARLIB_DIR=$GORGONE_VARLIB_DIR +GORGONE_CACHE_DIR=$GORGONE_CACHE_DIR +CENTREON_USER=$CENTREON_USER +CENTREON_HOME=$CENTREON_HOME +CENTREON_ETC_DIR=$CENTREON_ETC_DIR +CENTREON_SERVICE=$CENTREON_SERVICE +ENGINE_USER=$ENGINE_USER +ENGINE_GROUP=$ENGINE_GROUP +BROKER_USER=$BROKER_USER +BROKER_GROUP=$BROKER_GROUP +BINARY_DIR=$BINARY_DIR +PERL_BINARY=$PERL_BINARY +SYSTEMD_ETC_DIR=$SYSTEMD_ETC_DIR +SYSCONFIG_ETC_DIR=$SYSCONFIG_ETC_DIR +LOGROTATED_ETC_DIR=$LOGROTATED_ETC_DIR \ No newline at end of file diff --git a/gorgone/keys/central/privkey.pem b/gorgone/keys/central/privkey.pem new file mode 100644 index 00000000000..72d6ae80b9d --- /dev/null +++ b/gorgone/keys/central/privkey.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAuQ0EjIm2FXh6Z/JtjBkJ1PHFdZcw1QBss0KQ1/NIYfg0dAl1 +X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTzVpVbLAWIzsmc54RtaEYbB2QCi/p+ +uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR2LhTLA4ndlNH32tJDKQ6lnXM43EA +vd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbOFmIDWU4kL6xE3ThyHbRPGfEKFykE +5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQBLH61e7aXnytoLw5NG/xb4IXyOOvV +U8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjvub0PRlYvgHzIuBt3Zj3pBhfmlm7V +1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi4+fAF2MQ35OTjEBJp/rv5zY2weCS +KYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7HUUEx6dCKA/wfmGucaHI+I2z/+iJN +bi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dzCsKfMVsA07vO+uFWpF6uYmjoB5Zh +zBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2VtfqGwtb3fy6dEPge/Femp7/NGgj +bbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYrgGv+od1qZfDlw3UpnrodLe0CAwEA +AQKCAgB3y1+Yg0Xm3FmRlTUprsPYoLNNjpTEL0QvP/Z4djvzgDSkscLT9fGe4LaA +n/JrnhDUOiIXmLMrFl70S8fBYVLuSD3/X43TIF182xPgnxG8VRf4+i60ZC52NVRY +UKGNfeXzJyoCYcbwOGILwVbwVcx12c2uBXOye/NZRSDDGEX9RUFgM7VhNXg9NKZz +g4MYJSNgIknQ3ERz2wxq6AFOwc+EWesFEzsFyaXC+FmXtTRH/OylVZ6fhJb/XTBy +l4i8LY4sF2HkkGtvRN5TOkODCqQ/478k2W2KUxVc8QsmBNaNoOjPxIwTctFi7oAU 
+wArMghPG1VQlZWMiNUxBZpu/wOO+5WFzAg2hrR6SoYa/X8Hpk3+H44fmZ4sHGjLA +Tm+mCassH4F2PPxUsC2OaWa2jdYuJNZqb5FydOPtKV314ukSc7YBfLQTafuKv37Z +A7IMteYLsGGzhmLSvSLliTvXEkz/c5mPcJE1RW6fhMkLI1/PLvgQT44XeJQR3bJY +qaDbVQkm6YEjQ28aA4Lhu1zpC1f9bFzlY3nP6cw/d5Nx6bPtbn3qs9WaI2LlgIGx +9xQ4TQTJF/qf3qVTXFeVtvVh0xfyIoObP99CMnb0wAklpbenYStd97T0ZkHKnapk +ND7p5s8W+8OiyBFHjgvNR5pw3Ufk32t9OFT0CGVzJK3IJrCz2QKCAQEA634PL8y1 +Z9fZ44ErpLy2HLboeivhznK8T8fhDIzVTm+iKoW0CZuFFgm3pbLIcnu4W/EnLF2H +Ry1lF1msvL/B9GNQF2vP4zcFJDo6famtyfXTQz85Sh2QHSdhL0h3pqGUKdDtRsf0 +zXXhlTKYqxq6rJrIIoRXQniBUPUX+bk6TceEX7b4FJU+c0HgEOP/CgN4uvdFlR73 +NTjSdt66BijWiqGu6DDGWxmaKJEx7nW9NAqL3GjVxWesW1CnrNFEo0FnlMqTvYar +PEVr33CrhKdUrLP7dt6Qe/mCJ6/6mevR8gOm+Mo31Tra1pbFqT8yZojOr/eABj/U +bEHrjVYkSwhCvwKCAQEAySpw/sZO6Kx3vGaNlmCq8RpMcHWd1w4W5dEg1VQfKaRx +7PpWh5tAAlzvW25/xEmlS3VAuEnJDGM+hyh9FxHVmmcgVYQkeriBCS8ApjGr29Jx +SZ7iSHeN7PEEBls8OapR6UK5vZYlAnI4L8xS4gUv93E7RQ3GWWPfbMF2kI1vLR86 +fqkgbssyTBL0iwe4vzGbuwJ7NjqQwK5oNXKoJT7SE+jDbI0pjbJEvQ43/lPyMreH +nBqbEhkBZymy41TpecrEdDe24SghLq4SO+BpQvbwEKons+jLz+/19jRXIP1fmXlH +VkR0OGvcGD7g12bb3xM3TtufeF7bcGF+83dYeLT2UwKCAQEAs4YJO85aKMzjvU0W +oWJ/jopd1e0oGkNbjZJ53SBr6Hyv6qy84Gof3foQd5BAwQ3SML05uNegLkHMBC4H +wmiJCq6/OuuksrmaANEnD+9Pnlv57xT+rqK035TKwMoE9RHOqsYsbL44wHzyONQ2 +kJIy5yykD7RF9VV6d+Ywnd54NR05q+IHY2GXFzSMBTRalB6rZhTlhdXybS9hOt92 +fwWY8Fxrw3STcpWk8PInV3uIfmjf0GpXNUNgoMhu2w85vR86QLLiSCSm266sms0A +5ILPyUz4Edl/2hMPBwRgDgE5rr7cBmPahoJ0nAyaqPiVipcWwgzzG1CDtvfWA4w8 +5LpqbwKCAQEAha4FftkLkQUjYHiJ+HduwV/nkggnBsVfJAOQHROUzdhwuLk3DVB2 +/dsCWLEaiLcj9/wIMS8fQnMlFy4pyk3Ys416aDmzADZh0VeBx+9UNHUpQXIrD1sb +Xmxfb1XrtKphWnAz/C+tkm2StvjBz18BHB8L8vyPZdG/pIb/olnKmqKY/Zioa9fu +Ka2jAkz0UWHHCkRA2q2aieCccYArCu0vL3nLe/Rmu7nOgg/T19ezKE7b+DmZ+THS +w9pq/TTtHjlHya9IgWFog5u7lDyx1oVAzOI2FhFKd3kP6zem+s5FXDjC1ioRTXkn +vpjyU1IQJLKhW28JDzWB/7FaarJRgY1H7wKCAQEAtJp1vAw2IomD02EfDiTDi90M +I5EIaVf4z5Kw9YkYKX3D/gXBr3KKba4QQhVg5oO5S9RrpRGBnbKuE6sJNqoxCvxP +ro22Y0KpesYdaFuVv8x8AB3LnYSGgNrkl68hNgC/8z69ZJRRdhpcY3GofxMbfVhV 
+MMtUF6l/oEAOKNT+LCHWlBwGrGtswsBXo7Y1GRUBOfMYUzQoqGyV9QvrdPAHjzvE +VR2/A/pQTbDW9DumWbiU/QVAhXlgY5/VZ/DadWHzLcY7Kpfzcp2O0AmdH4qwSL2Y +ZDLtSMNuRAUmkX1HL4c06qCCOHxKT1ZZNrBbvsWI+X7z1BvU37yO2x5UY4vlVg== +-----END RSA PRIVATE KEY----- diff --git a/gorgone/keys/central/pubkey.crt b/gorgone/keys/central/pubkey.crt new file mode 100644 index 00000000000..7fb3f963e9c --- /dev/null +++ b/gorgone/keys/central/pubkey.crt @@ -0,0 +1,14 @@ +-----BEGIN PUBLIC KEY----- +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuQ0EjIm2FXh6Z/JtjBkJ +1PHFdZcw1QBss0KQ1/NIYfg0dAl1X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTz +VpVbLAWIzsmc54RtaEYbB2QCi/p+uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR +2LhTLA4ndlNH32tJDKQ6lnXM43EAvd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbO +FmIDWU4kL6xE3ThyHbRPGfEKFykE5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQB +LH61e7aXnytoLw5NG/xb4IXyOOvVU8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjv +ub0PRlYvgHzIuBt3Zj3pBhfmlm7V1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi +4+fAF2MQ35OTjEBJp/rv5zY2weCSKYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7H +UUEx6dCKA/wfmGucaHI+I2z/+iJNbi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dz +CsKfMVsA07vO+uFWpF6uYmjoB5ZhzBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2 +VtfqGwtb3fy6dEPge/Femp7/NGgjbbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYr +gGv+od1qZfDlw3UpnrodLe0CAwEAAQ== +-----END PUBLIC KEY----- diff --git a/gorgone/keys/poller/privkey.pem b/gorgone/keys/poller/privkey.pem new file mode 100644 index 00000000000..72d6ae80b9d --- /dev/null +++ b/gorgone/keys/poller/privkey.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAuQ0EjIm2FXh6Z/JtjBkJ1PHFdZcw1QBss0KQ1/NIYfg0dAl1 +X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTzVpVbLAWIzsmc54RtaEYbB2QCi/p+ +uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR2LhTLA4ndlNH32tJDKQ6lnXM43EA +vd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbOFmIDWU4kL6xE3ThyHbRPGfEKFykE +5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQBLH61e7aXnytoLw5NG/xb4IXyOOvV +U8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjvub0PRlYvgHzIuBt3Zj3pBhfmlm7V +1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi4+fAF2MQ35OTjEBJp/rv5zY2weCS 
+KYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7HUUEx6dCKA/wfmGucaHI+I2z/+iJN +bi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dzCsKfMVsA07vO+uFWpF6uYmjoB5Zh +zBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2VtfqGwtb3fy6dEPge/Femp7/NGgj +bbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYrgGv+od1qZfDlw3UpnrodLe0CAwEA +AQKCAgB3y1+Yg0Xm3FmRlTUprsPYoLNNjpTEL0QvP/Z4djvzgDSkscLT9fGe4LaA +n/JrnhDUOiIXmLMrFl70S8fBYVLuSD3/X43TIF182xPgnxG8VRf4+i60ZC52NVRY +UKGNfeXzJyoCYcbwOGILwVbwVcx12c2uBXOye/NZRSDDGEX9RUFgM7VhNXg9NKZz +g4MYJSNgIknQ3ERz2wxq6AFOwc+EWesFEzsFyaXC+FmXtTRH/OylVZ6fhJb/XTBy +l4i8LY4sF2HkkGtvRN5TOkODCqQ/478k2W2KUxVc8QsmBNaNoOjPxIwTctFi7oAU +wArMghPG1VQlZWMiNUxBZpu/wOO+5WFzAg2hrR6SoYa/X8Hpk3+H44fmZ4sHGjLA +Tm+mCassH4F2PPxUsC2OaWa2jdYuJNZqb5FydOPtKV314ukSc7YBfLQTafuKv37Z +A7IMteYLsGGzhmLSvSLliTvXEkz/c5mPcJE1RW6fhMkLI1/PLvgQT44XeJQR3bJY +qaDbVQkm6YEjQ28aA4Lhu1zpC1f9bFzlY3nP6cw/d5Nx6bPtbn3qs9WaI2LlgIGx +9xQ4TQTJF/qf3qVTXFeVtvVh0xfyIoObP99CMnb0wAklpbenYStd97T0ZkHKnapk +ND7p5s8W+8OiyBFHjgvNR5pw3Ufk32t9OFT0CGVzJK3IJrCz2QKCAQEA634PL8y1 +Z9fZ44ErpLy2HLboeivhznK8T8fhDIzVTm+iKoW0CZuFFgm3pbLIcnu4W/EnLF2H +Ry1lF1msvL/B9GNQF2vP4zcFJDo6famtyfXTQz85Sh2QHSdhL0h3pqGUKdDtRsf0 +zXXhlTKYqxq6rJrIIoRXQniBUPUX+bk6TceEX7b4FJU+c0HgEOP/CgN4uvdFlR73 +NTjSdt66BijWiqGu6DDGWxmaKJEx7nW9NAqL3GjVxWesW1CnrNFEo0FnlMqTvYar +PEVr33CrhKdUrLP7dt6Qe/mCJ6/6mevR8gOm+Mo31Tra1pbFqT8yZojOr/eABj/U +bEHrjVYkSwhCvwKCAQEAySpw/sZO6Kx3vGaNlmCq8RpMcHWd1w4W5dEg1VQfKaRx +7PpWh5tAAlzvW25/xEmlS3VAuEnJDGM+hyh9FxHVmmcgVYQkeriBCS8ApjGr29Jx +SZ7iSHeN7PEEBls8OapR6UK5vZYlAnI4L8xS4gUv93E7RQ3GWWPfbMF2kI1vLR86 +fqkgbssyTBL0iwe4vzGbuwJ7NjqQwK5oNXKoJT7SE+jDbI0pjbJEvQ43/lPyMreH +nBqbEhkBZymy41TpecrEdDe24SghLq4SO+BpQvbwEKons+jLz+/19jRXIP1fmXlH +VkR0OGvcGD7g12bb3xM3TtufeF7bcGF+83dYeLT2UwKCAQEAs4YJO85aKMzjvU0W +oWJ/jopd1e0oGkNbjZJ53SBr6Hyv6qy84Gof3foQd5BAwQ3SML05uNegLkHMBC4H +wmiJCq6/OuuksrmaANEnD+9Pnlv57xT+rqK035TKwMoE9RHOqsYsbL44wHzyONQ2 +kJIy5yykD7RF9VV6d+Ywnd54NR05q+IHY2GXFzSMBTRalB6rZhTlhdXybS9hOt92 +fwWY8Fxrw3STcpWk8PInV3uIfmjf0GpXNUNgoMhu2w85vR86QLLiSCSm266sms0A 
+5ILPyUz4Edl/2hMPBwRgDgE5rr7cBmPahoJ0nAyaqPiVipcWwgzzG1CDtvfWA4w8 +5LpqbwKCAQEAha4FftkLkQUjYHiJ+HduwV/nkggnBsVfJAOQHROUzdhwuLk3DVB2 +/dsCWLEaiLcj9/wIMS8fQnMlFy4pyk3Ys416aDmzADZh0VeBx+9UNHUpQXIrD1sb +Xmxfb1XrtKphWnAz/C+tkm2StvjBz18BHB8L8vyPZdG/pIb/olnKmqKY/Zioa9fu +Ka2jAkz0UWHHCkRA2q2aieCccYArCu0vL3nLe/Rmu7nOgg/T19ezKE7b+DmZ+THS +w9pq/TTtHjlHya9IgWFog5u7lDyx1oVAzOI2FhFKd3kP6zem+s5FXDjC1ioRTXkn +vpjyU1IQJLKhW28JDzWB/7FaarJRgY1H7wKCAQEAtJp1vAw2IomD02EfDiTDi90M +I5EIaVf4z5Kw9YkYKX3D/gXBr3KKba4QQhVg5oO5S9RrpRGBnbKuE6sJNqoxCvxP +ro22Y0KpesYdaFuVv8x8AB3LnYSGgNrkl68hNgC/8z69ZJRRdhpcY3GofxMbfVhV +MMtUF6l/oEAOKNT+LCHWlBwGrGtswsBXo7Y1GRUBOfMYUzQoqGyV9QvrdPAHjzvE +VR2/A/pQTbDW9DumWbiU/QVAhXlgY5/VZ/DadWHzLcY7Kpfzcp2O0AmdH4qwSL2Y +ZDLtSMNuRAUmkX1HL4c06qCCOHxKT1ZZNrBbvsWI+X7z1BvU37yO2x5UY4vlVg== +-----END RSA PRIVATE KEY----- diff --git a/gorgone/keys/poller/pubkey.crt b/gorgone/keys/poller/pubkey.crt new file mode 100644 index 00000000000..7fb3f963e9c --- /dev/null +++ b/gorgone/keys/poller/pubkey.crt @@ -0,0 +1,14 @@ +-----BEGIN PUBLIC KEY----- +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuQ0EjIm2FXh6Z/JtjBkJ +1PHFdZcw1QBss0KQ1/NIYfg0dAl1X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTz +VpVbLAWIzsmc54RtaEYbB2QCi/p+uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR +2LhTLA4ndlNH32tJDKQ6lnXM43EAvd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbO +FmIDWU4kL6xE3ThyHbRPGfEKFykE5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQB +LH61e7aXnytoLw5NG/xb4IXyOOvVU8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjv +ub0PRlYvgHzIuBt3Zj3pBhfmlm7V1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi +4+fAF2MQ35OTjEBJp/rv5zY2weCSKYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7H +UUEx6dCKA/wfmGucaHI+I2z/+iJNbi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dz +CsKfMVsA07vO+uFWpF6uYmjoB5ZhzBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2 +VtfqGwtb3fy6dEPge/Femp7/NGgjbbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYr +gGv+od1qZfDlw3UpnrodLe0CAwEAAQ== +-----END PUBLIC KEY----- diff --git a/gorgone/packaging/centreon-gorgone-centreon-config.yaml b/gorgone/packaging/centreon-gorgone-centreon-config.yaml new file mode 100644 index 
00000000000..1a8d3a48ce6 --- /dev/null +++ b/gorgone/packaging/centreon-gorgone-centreon-config.yaml @@ -0,0 +1,70 @@ +name: "centreon-gorgone-centreon-config" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Configure Centreon Gorgone for use with Centreon Web + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "./configuration/centreon.yaml" + dst: "/etc/centreon-gorgone/config.d/30-centreon.yaml" + type: config|noreplace + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - src: "./configuration/centreon-api.yaml" + dst: "/etc/centreon-gorgone/config.d/31-centreon-api.yaml" + type: config|noreplace + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0660 + + - src: "./configuration/centreon-audit.yaml" + dst: "/etc/centreon-gorgone/config.d/50-centreon-audit.yaml" + type: config|noreplace + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - dst: "/var/cache/centreon-gorgone/autodiscovery" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + +scripts: + postinstall: ./scripts/centreon-gorgone-centreon-config-postinstall.sh + +overrides: + rpm: + depends: + - centreon-gorgone = ${VERSION}-${RELEASE}${DIST} + deb: + depends: + - centreon-gorgone (= ${VERSION}-${RELEASE}${DIST}) + replaces: + - centreon-gorgone (<< 24.09.0) + +deb: + breaks: + - centreon-gorgone (<< 24.09.0) + +rpm: + summary: Configure Centreon Gorgone for use with Centreon Web + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/gorgone/packaging/centreon-gorgone-selinux.yaml b/gorgone/packaging/centreon-gorgone-selinux.yaml new file mode 100644 index 00000000000..42932221d9f --- /dev/null +++ 
b/gorgone/packaging/centreon-gorgone-selinux.yaml @@ -0,0 +1,43 @@ +name: "centreon-gorgone-selinux" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Selinux for centreon-gorgone + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +depends: + - policycoreutils + - centreon-common-selinux +replaces: + - centreon-gorgone-selinux-debuginfo + - centreon-gorgoned-selinux +conflicts: + - centreon-gorgone-selinux-debuginfo +provides: + - centreon-gorgone-selinux-debuginfo + - centreon-gorgoned-selinux + +contents: + - src: "../selinux/centreon-gorgoned.pp" + dst: "/usr/share/selinux/packages/centreon/centreon-gorgoned.pp" + file_info: + mode: 0655 + +scripts: + postinstall: ./scripts/centreon-gorgone-selinux-postinstall.sh + preremove: ./scripts/centreon-gorgone-selinux-preremove.sh + +rpm: + summary: Selinux for centreon-gorgone + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/gorgone/packaging/centreon-gorgone.yaml b/gorgone/packaging/centreon-gorgone.yaml new file mode 100644 index 00000000000..df883f824f2 --- /dev/null +++ b/gorgone/packaging/centreon-gorgone.yaml @@ -0,0 +1,234 @@ +name: "centreon-gorgone" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Centreon gorgone daemon + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - dst: "/etc/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0755 + + - dst: "/etc/centreon-gorgone/config.d" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + + - dst: 
"/etc/centreon-gorgone/config.d/cron.d" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + + - dst: "/etc/centreon-gorgone/config.d/whitelist.conf.d" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + + - src: "./configuration/config.yaml" + dst: "/etc/centreon-gorgone/config.yaml" + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - src: "./configuration/action.yaml" + dst: "/etc/centreon-gorgone/config.d/39-action.yaml" + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - src: "./configuration/whitelist.conf.d/centreon.yaml" + dst: "/etc/centreon-gorgone/config.d/whitelist.conf.d/centreon.yaml" + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - dst: "/var/lib/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0775 + + - dst: "/var/log/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0775 + + - dst: "/var/cache/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0775 + + - src: "./sudoers.d/centreon-gorgone" + dst: "/etc/sudoers.d/centreon-gorgone" + file_info: + mode: 0600 + + - src: "../config/systemd/gorgoned.rpm.service" + dst: "/etc/systemd/system/gorgoned.service" + file_info: + mode: 0644 + packager: rpm + - src: "../config/systemd/gorgoned.deb.service" + dst: "/lib/systemd/system/gorgoned.service" + file_info: + mode: 0644 + packager: deb + + - src: "../config/systemd/gorgoned-sysconfig" + dst: "/etc/sysconfig/gorgoned" + type: config|noreplace + packager: rpm + - src: "../config/systemd/gorgoned-sysconfig" + dst: "/etc/default/gorgoned" + type: config|noreplace + packager: deb + + - src: "../config/logrotate/gorgoned" + dst: "/etc/logrotate.d/gorgoned" + type: config|noreplace + + - src: "../gorgoned" + dst: "/usr/bin/gorgoned" 
+ file_info: + mode: 0755 + + - src: "../gorgone" + dst: "${PERL_VENDORLIB}/gorgone" + expand: true + + - src: "../contrib/gorgone_config_init.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0755 + + - src: "../contrib/gorgone_audit.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0755 + + - src: "../contrib/gorgone_install_plugins.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0750 + + - src: "../contrib/gorgone_key_thumbprint.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0750 + + - src: "../contrib/gorgone_key_generation.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0750 + +scripts: + preinstall: ./scripts/centreon-gorgone-preinstall.sh + postinstall: ./scripts/centreon-gorgone-postinstall.sh + preremove: ./scripts/centreon-gorgone-preremove.sh + +overrides: + rpm: + depends: + - centreon-common + - bzip2 + - perl-Libssh-Session >= 0.8 + - perl-CryptX + - perl-Mojolicious + - perl(Mojo::IOLoop::Signal) + - perl(Archive::Tar) + - perl(Schedule::Cron) + - perl(ZMQ::FFI) + - perl(EV) + - perl(JSON::XS) + - perl(JSON::PP) + - perl(XML::Simple) + - perl(XML::LibXML::Simple) + - perl(Net::SMTP) + - perl(YAML::XS) + - perl(DBD::SQLite) + - perl(DBD::mysql) + - perl(DBI) + - perl(UUID) + - perl(HTTP::Daemon) + - perl(HTTP::Status) + - perl(MIME::Base64) + - perl(Digest::MD5::File) + - perl(Net::Curl::Easy) + - perl(HTTP::Daemon::SSL) + - perl(NetAddr::IP) + - perl(Hash::Merge) + - perl(Clone) + - perl(Sys::Syslog) + - perl(DateTime) + - perl(Try::Tiny) + - tar + - perl(JSON) # gorgone_key_thumbprint.pl needs the json module, even when json::xs is already installed + - perl(RRDs) + - perl(lib) + deb: + depends: # those dependencies are taken from centreon-gorgone/packaging/debian/control + - centreon-common + - libdatetime-perl + - libtime-parsedate-perl + - libtry-tiny-perl + - libxml-simple-perl + - libxml-libxml-simple-perl + - libdigest-md5-file-perl + - libjson-pp-perl + - libjson-xs-perl + - libyaml-libyaml-perl + - libdbi-perl + - libdbd-sqlite3-perl 
+ - libdbd-mysql-perl + - libhttp-daemon-perl + - libhttp-daemon-ssl-perl + - libnetaddr-ip-perl + - libschedule-cron-perl + - libhash-merge-perl + - libcryptx-perl + - libmojolicious-perl + - libmojo-ioloop-signal-perl + - libauthen-simple-perl + - libauthen-simple-net-perl + - libnet-curl-perl + - libssh-session-perl + - libssh-4 + - libev-perl + - libzmq-ffi-perl + - libclone-choose-perl + - libjson-perl # gorgone_key_thumbprint.pl needs the json module, even when json::xs is already installed + - librrds-perl + - perl-base + +rpm: + summary: Centreon gorgone daemon + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/gorgone/packaging/configuration/action.yaml b/gorgone/packaging/configuration/action.yaml new file mode 100644 index 00000000000..8dcebf2cd2f --- /dev/null +++ b/gorgone/packaging/configuration/action.yaml @@ -0,0 +1,8 @@ +gorgone: + modules: + - name: action + package: "gorgone::modules::core::action::hooks" + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: !include /etc/centreon-gorgone/config.d/whitelist.conf.d/*.yaml diff --git a/gorgone/packaging/configuration/centreon-api.yaml b/gorgone/packaging/configuration/centreon-api.yaml new file mode 100644 index 00000000000..e0c47e50e1d --- /dev/null +++ b/gorgone/packaging/configuration/centreon-api.yaml @@ -0,0 +1,9 @@ +gorgone: + tpapi: + - name: centreonv2 + base_url: "http://127.0.0.1/centreon/api/latest/" + username: "@GORGONE_USER@" + password: "@GORGONE_PASSWORD@" + - name: clapi + username: "@GORGONE_USER@" + password: "@GORGONE_PASSWORD@" diff --git a/gorgone/packaging/configuration/centreon-audit.yaml b/gorgone/packaging/configuration/centreon-audit.yaml new file mode 100644 index 00000000000..ae0f8c96c62 --- /dev/null +++ b/gorgone/packaging/configuration/centreon-audit.yaml @@ -0,0 +1,5 @@ +gorgone: + modules: + - name: audit + package: "gorgone::modules::centreon::audit::hooks" + enable: true diff --git 
a/gorgone/packaging/configuration/centreon.yaml b/gorgone/packaging/configuration/centreon.yaml new file mode 100644 index 00000000000..a66311890a3 --- /dev/null +++ b/gorgone/packaging/configuration/centreon.yaml @@ -0,0 +1,3 @@ +name: centreon.yaml +description: Configure Centreon Gorgone to work with Centreon Web. +centreon: !include /etc/centreon/config.d/*.yaml diff --git a/gorgone/packaging/configuration/config.yaml b/gorgone/packaging/configuration/config.yaml new file mode 100644 index 00000000000..d5fb3439db9 --- /dev/null +++ b/gorgone/packaging/configuration/config.yaml @@ -0,0 +1,3 @@ +name: config.yaml +description: Configuration brought by Centreon Gorgone package. SHOULD NOT BE EDITED! USE CONFIG.D DIRECTORY! +configuration: !include /etc/centreon-gorgone/config.d/*.yaml diff --git a/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml b/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml new file mode 100644 index 00000000000..d1313d9ed2a --- /dev/null +++ b/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml @@ -0,0 +1,20 @@ +# Configuration brought by Centreon Gorgone package. +# SHOULD NOT BE EDITED! CREATE YOUR OWN FILE IN WHITELIST.CONF.D DIRECTORY! 
+- ^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ +- ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ +- ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/+centengine\.cfg\s*$ +- ^cat\s+/var/lib/centreon-engine/+[a-zA-Z0-9\-]+-stats\.json\s*$ +- ^(sudo\s+)?/usr/lib/centreon/plugins/.*$ +- ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ +- ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ +- ^centreon +- ^mkdir +- ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host +- ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ +- ^/usr/bin/php (-q )?/usr/share/centreon/cron/[\w,\s.-]+ >> /var/log/centreon-gorgone/[\w,\s.-]+\s+2>&1$ +- ^/usr/bin/php -q /usr/share/centreon/www/modules/centreon-bi-server/tools/purgeArchivesFiles\.php >> /var/log/centreon-gorgone/centreon-bi-archive-retention\.log 2>&1$ +- ^/usr/share/centreon/cron/eventReportBuilder --config=/etc/centreon/conf\.pm >> /var/log/centreon-gorgone/eventReportBuilder\.log 2>&1$ +- ^/usr/share/centreon/cron/dashboardBuilder --config=/etc/centreon/conf\.pm >> /var/log/centreon-gorgone/dashboardBuilder\.log 2>&1$ +- ^/usr/share/centreon/www/modules/centreon-dsm/+cron/centreon_dsm_purge\.pl --config=\"/etc/centreon/conf.pm\" --severity=\S+ >> /var/log/centreon-gorgone/centreon_dsm_purge\.log 2>&1\s*$ +- ^/usr/share/centreon-bi-backup/centreon-bi-backup-web\.sh >> /var/log/centreon-gorgone/centreon-bi-backup-web\.log 2>&1$ +- ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/+cron/centreon_autodisco.pl --config='/etc/centreon/conf.pm' --config-extra='/etc/centreon/centreon_autodisco.pm' --severity=\S+ >> /var/log/centreon-gorgone/centreon_service_discovery.log 2>&1$ diff --git 
a/gorgone/packaging/packages/perl-Clone-Choose.spec b/gorgone/packaging/packages/perl-Clone-Choose.spec new file mode 100644 index 00000000000..5390763404e --- /dev/null +++ b/gorgone/packaging/packages/perl-Clone-Choose.spec @@ -0,0 +1,51 @@ +%define cpan_name Clone-Choose + +Name: perl-Clone-Choose +Version: 0.010 +Release: 1%{?dist} +Summary: Choose appropriate clone utility +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Clone::Choose +Source0: https://cpan.metacpan.org/authors/id/H/HE/HERMES/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Clone::Choose) +AutoReqProv: no + +%description +Clone::Choose checks several different modules which provides a clone() function and selects an appropriate one. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Clone.spec b/gorgone/packaging/packages/perl-Clone.spec new file mode 100644 index 00000000000..22fabb47db6 --- /dev/null +++ b/gorgone/packaging/packages/perl-Clone.spec @@ -0,0 +1,51 @@ +%define cpan_name Clone + +Name: perl-Clone +Version: 0.45 +Release: 1%{?dist} +Summary: recursively copy Perl datatypes +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Clone +Source0: 
https://cpan.metacpan.org/authors/id/A/AT/ATOOMIC/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: gcc +BuildRequires: make + +Provides: perl(Clone) +AutoReqProv: no + +%description +This module provides a clone() method which makes recursive copies of nested hash, array, scalar and reference types, including tied variables and objects. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-CryptX.spec b/gorgone/packaging/packages/perl-CryptX.spec new file mode 100644 index 00000000000..6fd4f8c1a7f --- /dev/null +++ b/gorgone/packaging/packages/perl-CryptX.spec @@ -0,0 +1,46 @@ +%define cpan_name CryptX + +Name: perl-CryptX +Version: 0.068 +Release: 1%{?dist} +Summary: Cryptographic toolkit (self-contained, no external libraries needed) +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/CryptX +Source0: https://cpan.metacpan.org/authors/id/M/MI/MIK/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: make +BuildRequires: gcc + +%description +Cryptography in CryptX is based on https://github.com/libtom/libtomcrypt + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" 
+make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Digest-MD5-File.spec b/gorgone/packaging/packages/perl-Digest-MD5-File.spec new file mode 100644 index 00000000000..9fc2af4166f --- /dev/null +++ b/gorgone/packaging/packages/perl-Digest-MD5-File.spec @@ -0,0 +1,53 @@ +%define cpan_name Digest-MD5-File + +Name: Digest-MD5-File +Version: 0.08 +Release: 1%{?dist} +Summary: Perl extension for getting MD5 sums for files and urls. +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Digest::MD5::File +Source0: https://cpan.metacpan.org/authors/id/D/DM/DMUEY/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Digest::MD5::File) +Requires: perl(Digest::MD5) +Requires: perl(LWP::UserAgent) +AutoReqProv: no + +%description +Get MD5 sums for files of a given path or content of a given url. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-FFI-CheckLib.spec b/gorgone/packaging/packages/perl-FFI-CheckLib.spec new file mode 100644 index 00000000000..025166f200f --- /dev/null +++ b/gorgone/packaging/packages/perl-FFI-CheckLib.spec @@ -0,0 +1,54 @@ +%define cpan_name FFI-CheckLib + +Name: perl-FFI-CheckLib +Version: 0.31 +Release: 1%{?dist} +Summary: Check that a library is available for FFI +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/FFI::CheckLib +Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +Provides: perl(FFI::CheckLib) + +BuildRequires: make +BuildRequires: perl(ExtUtils::MakeMaker) + +Requires: perl(File::Which) +Requires: perl(List::Util) + +%description +This module checks whether a particular dynamic library is available for FFI to use. It is modeled heavily on Devel::CheckLib, but will find dynamic libraries even when development packages are not installed. It also provides a find_lib function that will return the full path to the found dynamic library, which can be feed directly into FFI::Platypus or another FFI system. 
+ +%global debug_package %{nil} + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-FFI-Platypus.spec b/gorgone/packaging/packages/perl-FFI-Platypus.spec new file mode 100644 index 00000000000..7cc88d10e74 --- /dev/null +++ b/gorgone/packaging/packages/perl-FFI-Platypus.spec @@ -0,0 +1,58 @@ +%define cpan_name FFI-Platypus + +Name: perl-FFI-Platypus +Version: 2.05 +Release: 1%{?dist} +Summary: Write Perl bindings to non-Perl libraries with FFI. No XS required. +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/FFI::Platypus +Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: make +BuildRequires: gcc +BuildRequires: libffi-devel +BuildRequires: perl(ExtUtils::MakeMaker) + +Provides: perl(FFI::Platypus) + +Requires: libffi +Requires: perl(JSON::PP) +Requires: perl(FFI::CheckLib) +Requires: perl(Capture::Tiny) + +%description +Platypus is a library for creating interfaces to machine code libraries written in languages like C, C++, Go, Fortran, Rust, Pascal. Essentially anything that gets compiled into machine code. This implementation uses libffi to accomplish this task. 
libffi is battle tested by a number of other scripting and virtual machine languages, such as Python and Ruby to serve a similar role. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +export ODBCHOME=/usr/ +export PERL_MM_USE_DEFAULT="1" +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorarch}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-HTTP-Daemon.spec b/gorgone/packaging/packages/perl-HTTP-Daemon.spec new file mode 100644 index 00000000000..d4ae42080cf --- /dev/null +++ b/gorgone/packaging/packages/perl-HTTP-Daemon.spec @@ -0,0 +1,57 @@ +%define cpan_name HTTP-Daemon + +Name: perl-HTTP-Daemon +Version: 6.06 +Release: 1%{?dist} +Summary: A simple http server class +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/HTTP::Daemon +Source0: https://cpan.metacpan.org/authors/id/O/OA/OALDERS/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: perl(Module::Build::Tiny) +BuildRequires: make + +Provides: perl(HTTP::Daemon) +Requires: perl(HTTP::Date) +Requires: perl(HTTP::Message) +Requires: perl(HTTP::Response) +Requires: perl(HTTP::Status) +Requires: perl(LWP::MediaTypes) +AutoReqProv: no + +%description +Instances of the HTTP::Daemon class are HTTP/1.1 servers that listen on a socket for incoming requests. 
The HTTP::Daemon is a subclass of IO::Socket::IP, so you can perform socket operations directly on it too. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Hash-Merge.spec b/gorgone/packaging/packages/perl-Hash-Merge.spec new file mode 100644 index 00000000000..c2088f31b99 --- /dev/null +++ b/gorgone/packaging/packages/perl-Hash-Merge.spec @@ -0,0 +1,53 @@ +%define cpan_name Hash-Merge + +Name: perl-Hash-Merge +Version: 0.300 +Release: 1%{?dist} +Summary: Merges arbitrarily deep hashes into a single hash +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Hash::Merge +Source0: https://cpan.metacpan.org/authors/id/R/RE/REHSACK/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Hash::Merge) +Requires: perl(Scalar::Util) +Requires: perl(Clone::Choose) +AutoReqProv: no + +%description +Hash::Merge merges two arbitrarily deep hashes into a single hash. That is, at any level, it will add non-conflicting key-value pairs from one hash to the other, and follows a set of specific rules when there are key value conflicts (as outlined below). 
The hash is followed recursively, so that deeply nested hashes that are at the same level will be merged when the parent hashes are merged. Please note that self-referencing hashes, or recursive references, are not handled well by this method. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-JSON-XS.spec b/gorgone/packaging/packages/perl-JSON-XS.spec new file mode 100644 index 00000000000..7b781dd4c6b --- /dev/null +++ b/gorgone/packaging/packages/perl-JSON-XS.spec @@ -0,0 +1,55 @@ +%define cpan_name JSON-XS + +Name: perl-JSON-XS +Version: 4.02 +Release: 1%{?dist} +Summary: JSON serialising/deserialising, done correctly and fast +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/JSON::XS +Source0: https://cpan.metacpan.org/authors/id/M/ML/MLEHMANN/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(Canary::Stability) +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(JSON::XS) +Requires: perl(common::sense) +Requires: perl(Types::Serialiser) +AutoReqProv: no + +%description +This module converts Perl data structures to JSON and vice versa. Its primary goal is to be correct and its secondary goal is to be fast. To reach the latter goal it was written in C. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +export PERL_CANARY_STABILITY_NOPROMPT=1 +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{_usr}/bin/* +%{perl_vendorarch} +%{_mandir} + +%changelog + diff --git a/gorgone/packaging/packages/perl-Net-Curl.spec b/gorgone/packaging/packages/perl-Net-Curl.spec new file mode 100644 index 00000000000..f6b0d5aa46a --- /dev/null +++ b/gorgone/packaging/packages/perl-Net-Curl.spec @@ -0,0 +1,60 @@ +%define cpan_name Net-Curl + +Name: perl-Net-Curl +Version: 0.44 +Release: 1%{?dist} +Summary: Perl interface for libcurl +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Net::Curl +Source0: https://cpan.metacpan.org/authors/id/S/SY/SYP/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +Provides: perl(Net::Curl) +Provides: perl(Net::Curl::Compat) +Provides: perl(Net::Curl::Easy) +Provides: perl(Net::Curl::Form) +Provides: perl(Net::Curl::Share) +Provides: perl(Net::Curl::Multi) + +BuildRequires: make +BuildRequires: gcc +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: libcurl-devel + +Requires: perl +Requires: libcurl +AutoReqProv: no + +%description +Net::Curl provides a Perl interface to libcurl created with object-oriented implementations in mind. This documentation contains Perl-specific details and quirks. For more information consult libcurl man pages and documentation at http://curl.haxx.se. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +#%doc Changes +%{perl_vendorarch}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Types-Serialiser.spec b/gorgone/packaging/packages/perl-Types-Serialiser.spec new file mode 100644 index 00000000000..ce879f79ca9 --- /dev/null +++ b/gorgone/packaging/packages/perl-Types-Serialiser.spec @@ -0,0 +1,52 @@ +%define cpan_name Types-Serialiser + +Name: perl-Types-Serialiser +Version: 1.0 +Release: 1%{?dist} +Summary: simple data types for common serialisation formats +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Types::Serialiser +Source0: https://cpan.metacpan.org/authors/id/M/ML/MLEHMANN/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Types::Serialiser) +Requires: perl(common::sense) +AutoReqProv: no + +%description +This module provides some extra datatypes that are used by common serialisation formats such as JSON or CBOR. The idea is to have a repository of simple/small constants and containers that can be shared by different implementations so they become interoperable between each other. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-UUID.spec b/gorgone/packaging/packages/perl-UUID.spec new file mode 100644 index 00000000000..a7e71bc0a0e --- /dev/null +++ b/gorgone/packaging/packages/perl-UUID.spec @@ -0,0 +1,53 @@ +%define cpan_name UUID + +Name: perl-UUID +Version: 0.28 +Release: 1%{?dist} +Summary: DCE compatible Universally Unique Identifier library for Perl +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/UUID +Source0: https://cpan.metacpan.org/authors/id/J/JR/JRM/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(Devel::CheckLib) +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: libuuid-devel +BuildRequires: make + +Provides: perl(UUID) +Requires: libuuid +AutoReqProv: no + +%description +The UUID library is used to generate unique identifiers for objects that may be accessible beyond the local system. For instance, they could be used to generate unique HTTP cookies across multiple web servers without communication between the servers, and without fear of a name clash. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-YAML-LibYAML.spec b/gorgone/packaging/packages/perl-YAML-LibYAML.spec new file mode 100644 index 00000000000..42e01e77934 --- /dev/null +++ b/gorgone/packaging/packages/perl-YAML-LibYAML.spec @@ -0,0 +1,47 @@ +%define cpan_name YAML-LibYAML + +Name: perl-YAML-LibYAML +Version: 0.80 +Release: 1%{?dist} +Summary: Perl YAML Serialization using XS and libyaml +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/release/YAML-LibYAML +Source0: https://cpan.metacpan.org/authors/id/T/TI/TINITA/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: make +BuildRequires: gcc + +%description +Kirill Simonov's libyaml is arguably the best YAML implementation. The C library is written precisely to the YAML 1.1 specification. It was originally bound to Python and was later bound to Ruby. +This module is a Perl XS binding to libyaml which offers Perl the best YAML support to date. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-ZMQ-FFI.spec b/gorgone/packaging/packages/perl-ZMQ-FFI.spec new file mode 100644 index 00000000000..ca4ef00bc76 --- /dev/null +++ b/gorgone/packaging/packages/perl-ZMQ-FFI.spec @@ -0,0 +1,62 @@ +%define cpan_name ZMQ-FFI + +Name: perl-ZMQ-FFI +Version: 1.18 +Release: 1%{?dist} +Summary: version agnostic Perl bindings for zeromq using ffi +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/ZMQ::FFI +Source0: https://cpan.metacpan.org/authors/id/G/GH/GHENRY/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +Provides: perl(ZMQ::FFI) + +BuildRequires: make +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: zeromq-devel + +Requires: zeromq +Requires: perl(FFI::CheckLib) +Requires: perl(FFI::Platypus) +Requires: perl(Moo) +Requires: perl(Moo::Role) +Requires: perl(Scalar::Util) +Requires: perl(Try::Tiny) +Requires: perl(namespace::clean) +Requires: perl(Import::Into) + +%description +ZMQ::FFI exposes a high level, transparent, OO interface to zeromq independent of the underlying libzmq version. Where semantics differ, it will dispatch to the appropriate backend for you. As it uses ffi, there is no dependency on XS or compilation. 
+ +%global debug_package %{nil} + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-common-sense.spec b/gorgone/packaging/packages/perl-common-sense.spec new file mode 100644 index 00000000000..017c6c755be --- /dev/null +++ b/gorgone/packaging/packages/perl-common-sense.spec @@ -0,0 +1,50 @@ +%define cpan_name common-sense + +Name: perl-common-sense +Version: 3.75 +Release: 1%{?dist} +Summary: save a tree AND a kitten, use common::sense! +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/common::sense +Source0: https://cpan.metacpan.org/authors/id/M/ML/MLEHMANN/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(common::sense) +AutoReqProv: no + +%description +This module implements some sane defaults for Perl programs, as defined by two typical (or not so typical - use your common sense) specimens of Perl coders. In fact, after working out details on which warnings and strict modes to enable and make fatal, we found that we (and our code written so far, and others) fully agree on every option, even though we never used warnings before, so it seems this module indeed reflects a "common" sense among some long-time Perl coders. 
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/scripts/centreon-gorgone-centreon-config-postinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-centreon-config-postinstall.sh new file mode 100644 index 00000000000..d01e7a0d637 --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-centreon-config-postinstall.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +fixConfigurationFileRights() { + # force update of configuration file rights since they are not updated automatically by nfpm + chmod 0640 /etc/centreon-gorgone/config.d/30-centreon.yaml + chmod 0640 /etc/centreon-gorgone/config.d/31-centreon-api.yaml + chmod 0640 /etc/centreon-gorgone/config.d/50-centreon-audit.yaml + chmod 0770 /etc/centreon-gorgone/config.d + chmod 0770 /etc/centreon-gorgone/config.d/cron.d +} + +manageUserGroups() { + if getent passwd centreon > /dev/null 2>&1; then + usermod -a -G centreon-gorgone centreon 2> /dev/null + fi + + if getent passwd centreon-engine > /dev/null 2>&1; then + usermod -a -G centreon-gorgone centreon-engine 2> /dev/null + fi + + if getent passwd centreon-broker > /dev/null 2>&1; then + usermod -a -G centreon-gorgone centreon-broker 2> /dev/null + fi + + if getent passwd centreon-gorgone > /dev/null 2>&1; then + usermod -a -G centreon centreon-gorgone 2> /dev/null + fi +} + +addGorgoneSshKeys() { + if [ ! 
-d /var/lib/centreon-gorgone/.ssh ] && [ -d /var/spool/centreon/.ssh ]; then + cp -r /var/spool/centreon/.ssh /var/lib/centreon-gorgone/.ssh + chown -R centreon-gorgone:centreon-gorgone /var/lib/centreon-gorgone/.ssh + chmod 600 /var/lib/centreon-gorgone/.ssh/id_rsa + fi +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + # Alpine linux does not pass args, and deb passes $1=configure + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + # deb passes $1=configure $2= + action="upgrade" +fi + +case "$action" in + "1" | "install") + manageUserGroups + addGorgoneSshKeys + ;; + "2" | "upgrade") + manageUserGroups + fixConfigurationFileRights + addGorgoneSshKeys + ;; + *) + # $1 == version being installed + manageUserGroups + addGorgoneSshKeys + ;; +esac diff --git a/gorgone/packaging/scripts/centreon-gorgone-postinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-postinstall.sh new file mode 100644 index 00000000000..0ff1468729e --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-postinstall.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +startGorgoned() { + systemctl daemon-reload ||: + systemctl unmask gorgoned.service ||: + systemctl preset gorgoned.service ||: + systemctl enable gorgoned.service ||: + systemctl restart gorgoned.service ||: +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + # Alpine linux does not pass args, and deb passes $1=configure + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + # deb passes $1=configure $2= + action="upgrade" +fi + +case "$action" in + "1" | "install") + startGorgoned + ;; + "2" | "upgrade") + startGorgoned + ;; + *) + # $1 == version being installed + startGorgoned + ;; +esac diff --git a/gorgone/packaging/scripts/centreon-gorgone-preinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-preinstall.sh new file mode 100644 index 00000000000..f4d22b0a160 --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-preinstall.sh @@ -0,0 +1,10 @@ 
+#!/bin/bash + +if ! getent group centreon-gorgone > /dev/null 2>&1; then + groupadd -r centreon-gorgone +fi + +# Check if the centreon-gorgone user exists, and create it if not +if ! getent passwd centreon-gorgone > /dev/null 2>&1; then + useradd -g centreon-gorgone -m -d /var/lib/centreon-gorgone -r centreon-gorgone 2> /dev/null +fi diff --git a/gorgone/packaging/scripts/centreon-gorgone-preremove.sh b/gorgone/packaging/scripts/centreon-gorgone-preremove.sh new file mode 100644 index 00000000000..3498c040c1f --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-preremove.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +systemctl stop gorgoned.service ||: diff --git a/gorgone/packaging/scripts/centreon-gorgone-selinux-postinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-selinux-postinstall.sh new file mode 100644 index 00000000000..c7a5de1a198 --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-selinux-postinstall.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +install() { + semodule -i /usr/share/selinux/packages/centreon/centreon-gorgoned.pp > /dev/null 2>&1 || : +} + +upgrade() { + semodule -i /usr/share/selinux/packages/centreon/centreon-gorgoned.pp > /dev/null 2>&1 || : +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + action="upgrade" +fi + +case "$action" in + "1" | "install") + install + ;; + "2" | "upgrade") + upgrade + ;; +esac diff --git a/gorgone/packaging/scripts/centreon-gorgone-selinux-preremove.sh b/gorgone/packaging/scripts/centreon-gorgone-selinux-preremove.sh new file mode 100644 index 00000000000..d3d21a909ce --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-selinux-preremove.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [ "$1" -lt "1" ]; then + semodule -r centreon-gorgoned > /dev/null 2>&1 || : +fi diff --git a/gorgone/packaging/sudoers.d/centreon-gorgone b/gorgone/packaging/sudoers.d/centreon-gorgone new file mode 100644 index 
00000000000..ead5adc64dd --- /dev/null +++ b/gorgone/packaging/sudoers.d/centreon-gorgone @@ -0,0 +1,6 @@ +## BEGIN: GORGONE SUDO + +User_Alias GORGONE=centreon-gorgone +Defaults:GORGONE !requiretty + +GORGONE ALL = NOPASSWD: /usr/local/bin/gorgone_install_plugins.pl diff --git a/gorgone/schema/gorgone_database.sql b/gorgone/schema/gorgone_database.sql new file mode 100644 index 00000000000..7487a8b831d --- /dev/null +++ b/gorgone/schema/gorgone_database.sql @@ -0,0 +1,62 @@ +PRAGMA encoding = "UTF-8"; + +CREATE TABLE `gorgone_information` ( + `key` varchar(1024) DEFAULT NULL, + `value` varchar(1024) DEFAULT NULL +); + +CREATE TABLE IF NOT EXISTS `gorgone_identity` ( + `id` INTEGER PRIMARY KEY, + `ctime` int(11) DEFAULT NULL, + `mtime` int(11) DEFAULT NULL, + `identity` varchar(2048) DEFAULT NULL, + `key` varchar(1024) DEFAULT NULL, + `oldkey` varchar(1024) DEFAULT NULL, + `iv` varchar(1024) DEFAULT NULL, + `oldiv` varchar(1024) DEFAULT NULL, + `parent` int(11) DEFAULT '0' +); + +CREATE INDEX IF NOT EXISTS idx_gorgone_identity ON gorgone_identity (identity); +CREATE INDEX IF NOT EXISTS idx_gorgone_parent ON gorgone_identity (parent); + +CREATE TABLE IF NOT EXISTS `gorgone_history` ( + `id` INTEGER PRIMARY KEY, + `token` varchar(2048) DEFAULT NULL, + `code` int(11) DEFAULT NULL, + `etime` int(11) DEFAULT NULL, + `ctime` FLOAT DEFAULT NULL, + `instant` int(11) DEFAULT '0', + `data` TEXT DEFAULT NULL +); + +CREATE INDEX IF NOT EXISTS idx_gorgone_history_id ON gorgone_history (id); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_token ON gorgone_history (token); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_etime ON gorgone_history (etime); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_code ON gorgone_history (code); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_ctime ON gorgone_history (ctime); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_instant ON gorgone_history (instant); + +CREATE TABLE IF NOT EXISTS `gorgone_synchistory` ( + `id` int(11) NOT NULL, + 
`ctime` FLOAT DEFAULT NULL, + `last_id` int(11) DEFAULT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_gorgone_synchistory_id ON gorgone_synchistory (id); + +CREATE TABLE IF NOT EXISTS `gorgone_target_fingerprint` ( + `id` INTEGER PRIMARY KEY, + `target` varchar(2048) DEFAULT NULL, + `fingerprint` varchar(4096) DEFAULT NULL +); + +CREATE INDEX IF NOT EXISTS idx_gorgone_target_fingerprint_target ON gorgone_target_fingerprint (target); + +CREATE TABLE IF NOT EXISTS `gorgone_centreon_judge_spare` ( + `cluster_name` varchar(2048) NOT NULL, + `status` int(11) NOT NULL, + `data` TEXT DEFAULT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_gorgone_centreon_judge_spare_cluster_name ON gorgone_centreon_judge_spare (cluster_name); diff --git a/gorgone/selinux/centreon-gorgoned.fc b/gorgone/selinux/centreon-gorgoned.fc new file mode 100644 index 00000000000..5e782b3c860 --- /dev/null +++ b/gorgone/selinux/centreon-gorgoned.fc @@ -0,0 +1,3 @@ +/usr/bin/gorgoned -- gen_context(system_u:object_r:centreon_gorgoned_exec_t,s0) +/etc/centreon-gorgone(/.*)? gen_context(system_u:object_r:centreon_etc_t,s0) +/var/lib/centreon-gorgone(/.*)? gen_context(system_u:object_r:centreon_gorgoned_t,s0) diff --git a/gorgone/selinux/centreon-gorgoned.if b/gorgone/selinux/centreon-gorgoned.if new file mode 100644 index 00000000000..ba267cf4710 --- /dev/null +++ b/gorgone/selinux/centreon-gorgoned.if @@ -0,0 +1 @@ +## Centreon Gorgoned Network monitoring server. 
diff --git a/gorgone/selinux/centreon-gorgoned.te b/gorgone/selinux/centreon-gorgoned.te new file mode 100644 index 00000000000..38cc2726970 --- /dev/null +++ b/gorgone/selinux/centreon-gorgoned.te @@ -0,0 +1,119 @@ +policy_module(centreon-gorgoned, @VERSION@) + +######################################## +# +# Declarations +# +require { + type unconfined_t; + type unconfined_service_t; + type useradd_t; + type fs_t; + type kernel_t; + type setroubleshootd_t; + type rpm_script_t; + type setfiles_t; + type unconfined_domain_type; +} + +type centreon_gorgoned_t; +type centreon_gorgoned_exec_t; +init_daemon_domain(centreon_gorgoned_t, centreon_gorgoned_exec_t) + +######################################## +# +# Centreon local policy +# + +allow centreon_gorgoned_t self:process { setpgid signal_perms }; +allow centreon_gorgoned_t self:tcp_socket { accept listen }; +allow centreon_gorgoned_t self:file { read open write getattr read_file_perms relabelto }; +allow centreon_gorgoned_t fs_t:filesystem associate; +allow rpm_script_t centreon_gorgoned_t:dir { getattr search }; + +#============= setroubleshootd_t ============== +allow setroubleshootd_t centreon_gorgoned_t:dir { getattr search }; +allow setroubleshootd_t centreon_gorgoned_t:file getattr; + +#============= unconfined_t ============== +allow unconfined_t centreon_gorgoned_t:dir { getattr setattr relabelfrom relabelto }; +allow unconfined_t centreon_gorgoned_t:file { getattr setattr relabelto rename }; + +#============= unconfined_service_t ============== +allow unconfined_service_t centreon_gorgoned_t:file { create read open write rename getattr setattr ioctl lock unlink }; +allow unconfined_service_t centreon_gorgoned_t:dir { getattr setattr search create write add_name remove_name }; + +#============= useradd_t ============== +allow useradd_t centreon_gorgoned_t:dir { getattr search setattr create write add_name remove_name }; +allow useradd_t centreon_gorgoned_t:file { open write read unlink create setattr getattr 
ioctl lock }; + +#============= setfiles_t ============== +allow setfiles_t centreon_gorgoned_t:dir relabelto; +allow setfiles_t centreon_gorgoned_t:file relabelto; + +#============= kernel_t ============== +allow kernel_t centreon_gorgoned_t:dir { getattr search setattr create write add_name remove_name }; +allow kernel_t centreon_gorgoned_t:file { open write read unlink create setattr getattr ioctl lock }; + +#============= cluster =============== +allow daemon initrc_transition_domain:fifo_file { ioctl read write getattr lock append }; +allow domain unconfined_domain_type:association recvfrom; +allow domain domain:key { search link }; +allow domain unconfined_domain_type:tcp_socket recvfrom; +allow centreon_gorgoned_t domain:lnk_file { read getattr }; +allow daemon initrc_domain:fd use; +allow centreon_gorgoned_t domain:file { ioctl read getattr lock open }; +allow daemon initrc_domain:process sigchld; +allow domain unconfined_domain_type:peer recv; +allow centreon_gorgoned_t domain:dir { ioctl read getattr lock search open }; +allow daemon initrc_transition_domain:fd use; +allow daemon initrc_domain:fifo_file { ioctl read write getattr lock append }; + +mysql_stream_connect(centreon_gorgoned_t) +mysql_tcp_connect(centreon_gorgoned_t) + +kernel_read_kernel_sysctls(centreon_gorgoned_t) +kernel_read_net_sysctls(centreon_gorgoned_t) +kernel_read_network_state(centreon_gorgoned_t) +kernel_read_system_state(centreon_gorgoned_t) +kernel_request_load_module(centreon_gorgoned_t) + +corecmd_exec_bin(centreon_gorgoned_t) +corecmd_exec_shell(centreon_gorgoned_t) + +corenet_port(centreon_gorgoned_t) +corenet_all_recvfrom_unlabeled(centreon_gorgoned_t) +corenet_all_recvfrom_netlabel(centreon_gorgoned_t) +corenet_tcp_sendrecv_generic_if(centreon_gorgoned_t) +corenet_udp_sendrecv_generic_if(centreon_gorgoned_t) +corenet_tcp_sendrecv_generic_node(centreon_gorgoned_t) +corenet_udp_sendrecv_generic_node(centreon_gorgoned_t) +corenet_tcp_bind_generic_node(centreon_gorgoned_t) 
+corenet_udp_bind_generic_node(centreon_gorgoned_t) +corenet_sendrecv_all_client_packets(centreon_gorgoned_t) +corenet_tcp_connect_all_ports(centreon_gorgoned_t) +corenet_tcp_sendrecv_all_ports(centreon_gorgoned_t) + +corenet_sendrecv_inetd_child_server_packets(centreon_gorgoned_t) +corenet_tcp_bind_inetd_child_port(centreon_gorgoned_t) +corenet_tcp_sendrecv_inetd_child_port(centreon_gorgoned_t) + +dev_read_sysfs(centreon_gorgoned_t) +dev_read_urand(centreon_gorgoned_t) + +domain_use_interactive_fds(centreon_gorgoned_t) +domain_read_all_domains_state(centreon_gorgoned_t) + +files_read_etc_runtime_files(centreon_gorgoned_t) +files_read_usr_files(centreon_gorgoned_t) + +fs_getattr_all_fs(centreon_gorgoned_t) +fs_search_auto_mountpoints(centreon_gorgoned_t) + +auth_use_nsswitch(centreon_gorgoned_t) + +logging_send_syslog_msg(centreon_gorgoned_t) + +miscfiles_read_localization(centreon_gorgoned_t) + +userdom_dontaudit_use_unpriv_user_fds(centreon_gorgoned_t) \ No newline at end of file diff --git a/gorgone/tests/robot/config/actions.yaml b/gorgone/tests/robot/config/actions.yaml new file mode 100644 index 00000000000..3a5074faee0 --- /dev/null +++ b/gorgone/tests/robot/config/actions.yaml @@ -0,0 +1,6 @@ +gorgone: + modules: + - name: action + package: "gorgone::modules::core::action::hooks" + enable: true + command_timeout: 30 diff --git a/gorgone/tests/robot/config/db_add_1_poller.sql b/gorgone/tests/robot/config/db_add_1_poller.sql new file mode 100644 index 00000000000..9cb398f7e11 --- /dev/null +++ b/gorgone/tests/robot/config/db_add_1_poller.sql @@ -0,0 +1,68 @@ +INSERT IGNORE INTO `nagios_server` + VALUES + ( + 1, 'Central', '1', 1, 1711560733, '127.0.0.1', + '1', '0', 'service centengine start', + 'service centengine stop', 'service centengine restart', + 'service centengine reload', '/usr/sbin/centengine', + '/usr/sbin/centenginestats', '/var/log/centreon-engine/service-perfdata', + 'service cbd reload', '/etc/centreon-broker', + 
'/usr/share/centreon/lib/centreon-broker', + '/usr/lib64/centreon-connector', + 22, '1', 5556, 'centreontrapd', '/etc/snmp/centreon_traps/', + NULL, NULL, NULL, NULL, '1', '0' + ), + ( + 2, 'pushpoller', '0', 0, NULL, '127.0.0.1', + '1', '0', 'service centengine start', + 'service centengine stop', 'service centengine restart', + 'service centengine reload', '/usr/sbin/centengine', + '/usr/sbin/centenginestats', '/var/log/centreon-engine/service-perfdata', + 'service cbd reload', '/etc/centreon-broker', + '/usr/share/centreon/lib/centreon-broker', + '/usr/lib64/centreon-connector', + 22, '1', 5556, 'centreontrapd', '/etc/snmp/centreon_traps/', + NULL, NULL, '/var/log/centreon-broker/', + NULL, '1', '0' + ); +INSERT IGNORE INTO `cfg_nagios` + VALUES + ( + 1, 'Centreon Engine Central', NULL, + '/var/log/centreon-engine/centengine.log', + '/etc/centreon-engine', '/var/log/centreon-engine/status.dat', + 60, '1', '1', '1', '1', '1', '1', '1', + 4096, '1s', '/var/lib/centreon-engine/rw/centengine.cmd', + '1', '/var/log/centreon-engine/retention.dat', + 60, '1', '1', '0', '1', '1', '1', '1', + NULL, '1', '1', NULL, NULL, NULL, 's', + 's', 's', 0, 15, 15, 5, '0', NULL, NULL, + '0', '25.0', '50.0', '25.0', '50.0', + '0', 60, 12, 30, 30, '1', '1', '0', NULL, + NULL, '0', NULL, 'euro', 30, '~!$%^&*\"|\'<>?,()=', + '`~$^&\"|\'<>', '0', '0', 'admin@localhost', + 'admin@localhost', 'Centreon Engine configuration file for a central instance', + '1', '-1', 1, '1', '1', 15, 15, NULL, + '0', 15, '/var/log/centreon-engine/centengine.debug', + 0, '0', '1', 1000000000, 'centengine.cfg', + '1', '0', '', 'log_v2_enabled' + ), + ( + 15, 'pushpoller', NULL, '/var/log/centreon-engine/centengine.log', + '/etc/centreon-engine/', '/var/log/centreon-engine/status.dat', + 60, '1', '1', '1', '1', '1', '1', '1', + 4096, '1s', '/var/lib/centreon-engine/rw/centengine.cmd', + '1', '/var/log/centreon-engine/retention.dat', + 60, '1', '0', '0', '1', '1', '1', '1', + '1', '1', '1', NULL, NULL, '0.5', 
's', + 's', 's', 0, 15, 15, 5, '0', 30, 180, '0', + '25.0', '50.0', '25.0', '50.0', '0', + 60, 30, 30, 30, '1', '1', '0', NULL, NULL, + '0', NULL, 'euro', 30, '~!$%^&*\"|\'<>?,()=', + '`~$^&\"|\'<>', '0', '0', 'admin@localhost', + 'admin@localhost', 'Centreon Engine config file for a polling instance', + '1', '-1', 2, '1', '1', 15, 15, NULL, + '0', 15, '/var/log/centreon-engine/centengine.debug', + 0, '0', '1', 1000000000, 'centengine.cfg', + '1', '0', '', 'log_v2_enabled' + ); diff --git a/gorgone/tests/robot/config/db_delete_poller.sql b/gorgone/tests/robot/config/db_delete_poller.sql new file mode 100644 index 00000000000..69f3b03943a --- /dev/null +++ b/gorgone/tests/robot/config/db_delete_poller.sql @@ -0,0 +1,2 @@ +delete from cfg_nagios; +delete from nagios_server; \ No newline at end of file diff --git a/gorgone/tests/robot/config/gorgone_core_central.yaml b/gorgone/tests/robot/config/gorgone_core_central.yaml new file mode 100644 index 00000000000..a74882d3224 --- /dev/null +++ b/gorgone/tests/robot/config/gorgone_core_central.yaml @@ -0,0 +1,21 @@ +gorgone: + gorgonecore: + internal_com_type: ipc + internal_com_path: /etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/routing.ipc + + gorgone_db_type: SQLite + gorgone_db_name: dbname=/etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/history.sdb + + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + +centreon: + database: + db_configuration: + dsn: "mysql:host=@DBHOST@:port=3306;dbname=@DBNAME@" + username: "@DBUSER@" + password: "@DBPASSWORD@" + db_realtime: + dsn: "mysql:host=@DBHOST@:port=3306;dbname=@DBNAME_STORAGE@" + username: "@DBUSER@" + password: "@DBPASSWORD@" \ No newline at end of file diff --git a/gorgone/tests/robot/config/includer.yaml b/gorgone/tests/robot/config/includer.yaml new file mode 100644 index 00000000000..f1a5cad34bd --- /dev/null +++ b/gorgone/tests/robot/config/includer.yaml @@ -0,0 +1,3 
@@ +name: config.yaml +description: Configuration of centreon-gorgone. Use config.d directory to change configuration +configuration: !include /etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/config.d/*.yaml diff --git a/gorgone/tests/robot/config/pull_central_config.yaml b/gorgone/tests/robot/config/pull_central_config.yaml new file mode 100644 index 00000000000..f7f5fcfc2df --- /dev/null +++ b/gorgone/tests/robot/config/pull_central_config.yaml @@ -0,0 +1,34 @@ +gorgone: + gorgonecore: + external_com_type: tcp + external_com_path: "*:5556" + authorized_clients: + - key: @KEYTHUMBPRINT@ + id: 1 + + modules: + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + config_file: /etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/config.d/pull_node_register_one_node.yaml + + - name: proxy + package: "gorgone::modules::core::proxy::hooks" + enable: true + + - name: nodes + package: "gorgone::modules::centreon::nodes::hooks" + enable: true + + - name: httpserver + package: "gorgone::modules::core::httpserver::hooks" + enable: true + address: "0.0.0.0" + port: "8085" + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 diff --git a/gorgone/tests/robot/config/pull_node_register_one_node.yaml b/gorgone/tests/robot/config/pull_node_register_one_node.yaml new file mode 100644 index 00000000000..a8ae0f462d9 --- /dev/null +++ b/gorgone/tests/robot/config/pull_node_register_one_node.yaml @@ -0,0 +1,4 @@ +nodes: + - id: 2 + type: pull + prevail: 1 diff --git a/gorgone/tests/robot/config/pull_poller_config.yaml b/gorgone/tests/robot/config/pull_poller_config.yaml new file mode 100644 index 00000000000..f57d766203c --- /dev/null +++ b/gorgone/tests/robot/config/pull_poller_config.yaml @@ -0,0 +1,24 @@ +name: distant-server +description: Configuration for distant server +gorgone: + gorgonecore: + id: 2 + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: 
"/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: pull + package: "gorgone::modules::core::pull::hooks" + enable: true + target_type: tcp + target_path: 127.0.0.1:5556 + ping: 1 diff --git a/gorgone/tests/robot/config/pullwss_central_config.yaml b/gorgone/tests/robot/config/pullwss_central_config.yaml new file mode 100644 index 00000000000..7e984f386d1 --- /dev/null +++ b/gorgone/tests/robot/config/pullwss_central_config.yaml @@ -0,0 +1,33 @@ +gorgone: + gorgonecore: + id: 1 + modules: + - name: proxy + package: "gorgone::modules::core::proxy::hooks" + enable: true + httpserver: + enable: true + ssl: false + #ssl_cert_file: /etc/centreon-gorgone/keys/certificate.crt + #ssl_key_file: /etc/centreon-gorgone/keys/private.key + token: "secret_token" + address: "0.0.0.0" + port: 8086 + + - name: register + package: "gorgone::modules::core::register::hooks" + enable: true + config_file: /etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/config.d/pullwss_node_register_one_node.yaml + + - name: httpserver + package: "gorgone::modules::core::httpserver::hooks" + enable: true + address: "0.0.0.0" + port: "8085" + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 diff --git a/gorgone/tests/robot/config/pullwss_node_register_one_node.yaml b/gorgone/tests/robot/config/pullwss_node_register_one_node.yaml new file mode 100644 index 00000000000..77811a9f32a --- /dev/null +++ b/gorgone/tests/robot/config/pullwss_node_register_one_node.yaml @@ -0,0 +1,4 @@ +nodes: + - id: 2 + type: wss + prevail: 1 diff --git a/gorgone/tests/robot/config/pullwss_poller_config.yaml b/gorgone/tests/robot/config/pullwss_poller_config.yaml new file mode 100644 index 00000000000..91785c34bc1 --- 
/dev/null +++ b/gorgone/tests/robot/config/pullwss_poller_config.yaml @@ -0,0 +1,12 @@ +gorgone: + gorgonecore: + id: 2 + modules: + - name: pullwss + package: "gorgone::modules::core::pullwss::hooks" + enable: true + ssl: false + port: 8086 + token: "secret_token" + address: 127.0.0.1 + ping: 1 diff --git a/gorgone/tests/robot/config/push_central_config.yaml b/gorgone/tests/robot/config/push_central_config.yaml new file mode 100644 index 00000000000..af8be08ac32 --- /dev/null +++ b/gorgone/tests/robot/config/push_central_config.yaml @@ -0,0 +1,24 @@ +gorgone: + gorgonecore: + id: 1 + modules: + - name: proxy + package: "gorgone::modules::core::proxy::hooks" + enable: true + + - name: nodes + package: "gorgone::modules::centreon::nodes::hooks" + enable: true + + - name: httpserver + package: "gorgone::modules::core::httpserver::hooks" + enable: true + address: "0.0.0.0" + port: "8085" + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 diff --git a/gorgone/tests/robot/config/push_poller_config.yaml b/gorgone/tests/robot/config/push_poller_config.yaml new file mode 100644 index 00000000000..a3487f22dbb --- /dev/null +++ b/gorgone/tests/robot/config/push_poller_config.yaml @@ -0,0 +1,21 @@ +name: gorgoned-pushpoller +description: Configuration for poller pushpoller +gorgone: + gorgonecore: + internal_com_type: ipc + internal_com_path: /etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/routing.ipc + gorgone_db_type: SQLite + gorgone_db_name: dbname=/etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/history.sdb + id: 2 + external_com_type: tcp + external_com_path: "*:5556" + authorized_clients: + - key: @KEYTHUMBPRINT@ + + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true diff --git a/gorgone/tests/robot/config/statistics.yaml
b/gorgone/tests/robot/config/statistics.yaml new file mode 100644 index 00000000000..4606434aa95 --- /dev/null +++ b/gorgone/tests/robot/config/statistics.yaml @@ -0,0 +1,17 @@ +gorgone: + modules: + - name: statistics + package: "gorgone::modules::centreon::statistics::hooks" + enable: true + broker_cache_dir: "/var/cache/centreon/broker-stats/" + cron: + - id: broker_stats + timespec: "*/5 * * * *" + action: BROKERSTATS + parameters: + timeout: 10 + - id: engine_stats + timespec: "*/5 * * * *" + action: ENGINESTATS + parameters: + timeout: 10 diff --git a/gorgone/tests/robot/resources/LogResearch.py b/gorgone/tests/robot/resources/LogResearch.py new file mode 100644 index 00000000000..0fe4afbd9d4 --- /dev/null +++ b/gorgone/tests/robot/resources/LogResearch.py @@ -0,0 +1,171 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# For more information : contact@centreon.com +# + +from robot.api import logger +import re +import time +from dateutil import parser +from datetime import datetime, timedelta +import requests + + +def ctn_get_api_log_with_timeout(token: str, node_path='', host='http://127.0.0.1:8085', timeout=15): + """! 
Query gorgone log API until the response contains a log with code 2 (success) or 1 (failure) + @param token: token to search in the API + @param node_path: part of the API URL defining if we use the local gorgone or another one, ex node/2/ + @param timeout: timeout in seconds + @param host: gorgone API URL with the port + @return True(when output of the command is found)/False(on failure or timeout), + and a json object containing the incriminated log for failure or success. + """ + limit_date = time.time() + timeout + api_json = [] + while time.time() < limit_date: + time.sleep(1) + uri = host + "/api/" + node_path + "log/" + token + response = requests.get(uri) + (status, output) = parse_json_response(response) + if status == '': + continue + return status, output + + return False, api_json["data"] + + +def parse_json_response(response): + api_json = response.json() + # http code should either be 200 for success or 404 for no log found if we are too early. + # as the time of writing, status code is always 200 because webapp autodiscovery module always expect a 200. + if response.status_code != 200 and response.status_code != 404: + return False, api_json + + if 'error' in api_json and api_json['error'] == "no_log": + return '', '' + for log_detail in api_json["data"]: + if log_detail["code"] == 2: + return False, log_detail + if log_detail["code"] == 100: + return True, log_detail + + +# these function search log in the gorgone log file +def ctn_find_in_log_with_timeout(log: str, content, timeout=20, date=-1, regex=False): + """! 
search a pattern in log from date param + @param log: path of the log file + @param date: date from witch it begins search, you might want to use robot Get Current Date function + @param content: array of pattern to search + @param timeout: time out in second + @param regex: search use regex, default to false + @return True/False, array of lines found for each pattern + """ + if date == -1: + date = datetime.now().timestamp() - 1 + limit = time.time() + timeout + c = "" + while time.time() < limit: + ok, c = ctn_find_in_log(log, date, content, regex) + if ok: + return True, c + time.sleep(5) + logger.console(f"Unable to find '{c}' from {date} during {timeout}s") + return False + + +def ctn_find_in_log(log: str, date, content, regex=False): + """Find content in log file from the given date + + Args: + log (str): The log file + date (_type_): A date as a string + content (_type_): An array of strings we want to find in the log. + + Returns: + boolean,str: The boolean is True on success, and the string contains the first string not found in logs otherwise. 
+ """ + logger.info(f"regex={regex}") + res = [] + + try: + f = open(log, "r", encoding="latin1") + lines = f.readlines() + f.close() + idx = ctn_find_line_from(lines, date) + + for c in content: + found = False + for i in range(idx, len(lines)): + line = lines[i] + if regex: + match = re.search(c, line) + else: + match = c in line + if match: + logger.console(f"\"{c}\" found at line {i} from {idx}") + found = True + res.append(line) + break + if not found: + return False, c + + return True, res + except IOError: + logger.console("The file '{}' does not exist".format(log)) + return False, content[0] + + +def ctn_extract_date_from_log(line: str): + p = re.compile(r"(^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})") + m = p.match(line) + if m is None: + return None + try: + return parser.parse((m.group(1))) + except parser.ParserError: + logger.console(f"Unable to parse the date from the line {line}") + return None + + +def ctn_find_line_from(lines, date): + try: + my_date = parser.parse(date) + except: + my_date = datetime.fromtimestamp(date) + + # Let's find my_date + start = 0 + end = len(lines) - 1 + idx = start + while end > start: + idx = (start + end) // 2 + idx_d = ctn_extract_date_from_log(lines[idx]) + while idx_d is None: + logger.console("Unable to parse the date ({} <= {} <= {}): <<{}>>".format( + start, idx, end, lines[idx])) + idx -= 1 + if idx >= 0: + idx_d = ctn_extract_date_from_log(lines[idx]) + else: + logger.console("We are at the first line and no date found") + return 0 + if my_date <= idx_d and end != idx: + end = idx + elif my_date > idx_d and start != idx: + start = idx + else: + break + return idx diff --git a/gorgone/tests/robot/resources/import.resource b/gorgone/tests/robot/resources/import.resource new file mode 100644 index 00000000000..0c2d057cdb8 --- /dev/null +++ b/gorgone/tests/robot/resources/import.resource @@ -0,0 +1,11 @@ +*** Settings *** +Documentation This is the documentation for the import resource file. 
+Library Examples +Library OperatingSystem +Library String +Library Collections +Resource resources.resource +Library LogResearch.py +Library DatabaseLibrary +Library JSONLibrary +Library DateTime \ No newline at end of file diff --git a/gorgone/tests/robot/resources/resources.resource b/gorgone/tests/robot/resources/resources.resource new file mode 100644 index 00000000000..89d58688292 --- /dev/null +++ b/gorgone/tests/robot/resources/resources.resource @@ -0,0 +1,196 @@ +*** Settings *** +Documentation Centreon Gorgone library for Robot Framework + +Library Process +Library RequestsLibrary +Library OperatingSystem +Library DatabaseLibrary + +*** Variables *** +${gorgone_binary} /usr/bin/gorgoned +${ROOT_CONFIG} ${CURDIR}${/}..${/}config${/} +${pull_central_config} ${ROOT_CONFIG}pull_central_config.yaml +${pull_poller_config} ${ROOT_CONFIG}pull_poller_config.yaml +${pullwss_central_config} ${ROOT_CONFIG}pullwss_central_config.yaml +${pullwss_poller_config} ${ROOT_CONFIG}pullwss_poller_config.yaml +${push_central_config} ${ROOT_CONFIG}push_central_config.yaml +${push_poller_config} ${ROOT_CONFIG}push_poller_config.yaml +${gorgone_core_config} ${ROOT_CONFIG}gorgone_core_central.yaml + +${DBHOST} 127.0.0.1 +${DBPORT} 3306 +${DBNAME} centreon_gorgone_test +${DBNAME_STORAGE} centreon-storage +${DBUSER} centreon +${DBPASSWORD} password + +*** Keywords *** +Start Gorgone + [Arguments] ${SEVERITY} ${ALIAS} + ${process} Start Process + ... /usr/bin/perl + ... ${gorgone_binary} + ... --config + ... /etc/centreon-gorgone/${ALIAS}/includer.yaml + ... --logfile + ... /var/log/centreon-gorgone/${ALIAS}/gorgoned.log + ... --severity + ... ${SEVERITY} + ... alias=${ALIAS} + +Stop Gorgone And Remove Gorgone Config + [Documentation] This keyword stops the gorgone process and removes the configuration in the database. Configuration files are not modified as we want them in case something failed to analyse the problem. 
+ [Arguments] @{process_alias} ${sql_file}= + Gorgone Execute Sql ${sql_file} + # remove configuration in db if needed. + + FOR ${process} IN @{process_alias} + ${result} Terminate Process ${process} + BuiltIn.Run Keyword And Continue On Failure Should Be True ${result.rc} == -15 or ${result.rc} == 0 Gorgone ${process} badly stopped, code returned is ${result.rc}. + END + +Gorgone Execute Sql + [Arguments] ${sql_file} + ${length} Get Length ${sql_file} + IF ${length} > 0 + Connect To Database pymysql ${DBNAME} ${DBUSER} ${DBPASSWORD} ${DBHOST} ${DBPORT} + Log To Console Executing sql file ${sql_file} + Execute SQL Script ${sql_file} + END + +Setup Gorgone Config + [Arguments] ${file_list} ${gorgone_name}=gorgone_process_name ${sql_file}= + Gorgone Execute Sql ${sql_file} + Create Directory /var/log/centreon-gorgone/${gorgone_name}/ + Copy File ${CURDIR}${/}..${/}config${/}includer.yaml /etc/centreon-gorgone/${gorgone_name}/includer.yaml + + FOR ${file} IN @{file_list} + Copy File ${file} /etc/centreon-gorgone/${gorgone_name}/config.d/ + END + ${key_thumbprint} Run perl /usr/local/bin/gorgone_key_thumbprint.pl --key-path=/var/lib/centreon-gorgone/.keys/rsakey.priv.pem | cut -d: -f4 + + ${result} Run sed -i -e 's/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/${gorgone_name}/g' /etc/centreon-gorgone/${gorgone_name}/includer.yaml + + ${CMD} Catenate + ... sed -i -e 's/@KEYTHUMBPRINT@/${key_thumbprint}/g' + ... -e 's/@DBNAME@/${DBNAME}/g' + ... -e 's/@DBNAME_STORAGE@/${DBNAME_STORAGE}/g' + ... -e 's/@DBHOST@/${DBHOST}/g' + ... -e 's/@DBPASSWORD@/${DBPASSWORD}/g' + ... -e 's/@DBUSER@/${DBUSER}/g' + ... -e 's/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/${gorgone_name}/g' + ... /etc/centreon-gorgone/${gorgone_name}/config.d/*.yaml + + ${result2} Run ${CMD} + +Check Poller Is Connected + [Arguments] ${port}= ${expected_nb}= + Log To Console checking TCP connection is established... 
+ FOR ${i} IN RANGE 40 + Sleep 4 + ${nb_socket_connexion} Run ss -tnp | grep ':${port}' | grep ESTAB | wc -l + IF ${expected_nb} == ${nb_socket_connexion} + BREAK + END + END + Should Be True ${i} < 39 Gorgone did not establish tcp connection in 160 seconds. + Log To Console TCP connection established after ${i} attempt (4 seconds each) + +Check Poller Communicate + [Documentation] Ask the central Gorgone rest api if it have communicated with the poller using a given ID. + [Arguments] ${poller_id} + ${response} Set Variable ${EMPTY} + Log To Console checking Gorgone see poller in rest api response... + FOR ${i} IN RANGE 20 + Sleep 5 + ${response}= GET http://127.0.0.1:8085/api/internal/constatus + Log ${response.json()} + IF not ${response.json()}[data] + CONTINUE + END + IF ${response.json()}[data][${poller_id}][ping_failed] > 0 or ${response.json()}[data][${poller_id}][ping_ok] > 0 + BREAK + END + END + Log To Console json response : ${response.json()} + Should Be True ${i} < 19 timeout after ${i} time waiting for poller status in gorgone rest api (/api/internal/constatus) : ${response.json()} + Should Be True 0 == ${response.json()}[data][${poller_id}][ping_failed] there was failed ping between the central and the poller ${poller_id} + Should Be True 0 < ${response.json()}[data][${poller_id}][ping_ok] there was no successful ping between the central and the poller ${poller_id} + +Setup Two Gorgone Instances + [Arguments] ${central_config}=@{EMPTY} ${poller_config}=@{EMPTY} ${communication_mode}=push_zmq ${central_name}=gorgone_central ${poller_name}=gorgone_poller_2 + ${result} Run perl /usr/local/bin/gorgone_key_generation.pl + # generate key if there is none. + # gorgone can generate it's own key, but as we need the thumbprint in the configuration we need to generate them before launching gorgone. + # this script only create key if the files don't exists, and silently finish if the files already exists. 
+ IF '${communication_mode}' == 'push_zmq' + @{central_push_config}= Copy List ${central_config} + Append To List ${central_push_config} ${push_central_config} ${gorgone_core_config} + + @{poller_push_config}= Copy List ${poller_config} + Append To List ${poller_push_config} ${push_poller_config} + + Setup Gorgone Config ${central_push_config} gorgone_name=${central_name} sql_file=${ROOT_CONFIG}db_add_1_poller.sql + Setup Gorgone Config ${poller_push_config} gorgone_name=${poller_name} + + Start Gorgone debug ${poller_name} + Wait Until Port Is Bind 5556 + Start Gorgone debug ${central_name} + + Check Poller Is Connected port=5556 expected_nb=2 + Check Poller Communicate 2 + + ELSE IF '${communication_mode}' == 'pullwss' + + @{central_pullwss_config}= Copy List ${central_config} + Append To List ${central_pullwss_config} ${pullwss_central_config} ${ROOT_CONFIG}pullwss_node_register_one_node.yaml ${gorgone_core_config} + + @{poller_pullwss_config}= Copy List ${poller_config} + Append To List ${poller_pullwss_config} ${gorgone_core_config} ${pullwss_poller_config} + + Setup Gorgone Config ${central_pullwss_config} gorgone_name=${central_name} sql_file=${ROOT_CONFIG}db_add_1_poller.sql + Setup Gorgone Config ${poller_pullwss_config} gorgone_name=${poller_name} + + Start Gorgone debug ${central_name} + Wait Until Port Is Bind 8086 + Start Gorgone debug ${poller_name} + Check Poller Is Connected port=8086 expected_nb=2 + Check Poller Communicate 2 + ELSE IF '${communication_mode}' == 'pull' + @{central_pull_config}= Copy List ${central_config} + Append To List ${central_pull_config} ${pull_central_config} ${ROOT_CONFIG}pull_node_register_one_node.yaml ${gorgone_core_config} + + @{poller_pull_config}= Copy List ${poller_config} + Append To List ${poller_pull_config} ${pull_poller_config} ${gorgone_core_config} + + Setup Gorgone Config ${central_pull_config} gorgone_name=${central_name} sql_file=${ROOT_CONFIG}db_add_1_poller.sql + Setup Gorgone Config 
${poller_pull_config} gorgone_name=${poller_name} + + Start Gorgone debug ${poller_name} + Start Gorgone debug ${central_name} + Wait Until Port Is Bind 5556 + + Check Poller Is Connected port=5556 expected_nb=2 + Check Poller Communicate 2 + END + +Wait Until Port Is Bind + [Arguments] ${port} + FOR ${i} IN RANGE 15 + Sleep 0.5 + ${nb_port_listening} Run ss -tlnp | grep ':${port}' | grep LIST | wc -l + IF ${nb_port_listening} == 1 + BREAK + END + END + + Should Be True ${i} < 14 Gorgone did not listen on port ${port} on time. + Log To Console tcp port ${port} bind after ${i} attempt (0.5 seconds each) + +Ctn Check No Error In Logs + [Arguments] ${gorgone_id} + ${cmd}= Set Variable grep -vP '^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} ' /var/log/centreon-gorgone/${gorgone_id}/gorgoned.log + Log To Console \n\n${cmd}\n\n + + ${log_line_wrong} RUN ${cmd} + Should Be Empty ${log_line_wrong} There is Log in ${gorgone_id} not mathcing the standard gorgone format : ${log_line_wrong} diff --git a/gorgone/tests/robot/tests/centreon/centenginestats b/gorgone/tests/robot/tests/centreon/centenginestats new file mode 100644 index 00000000000..137a89b04bd --- /dev/null +++ b/gorgone/tests/robot/tests/centreon/centenginestats @@ -0,0 +1,91 @@ +#!/bin/bash +# +# Copyright 2024 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +# this script is a simple place holder to mock the centenginestats binary provided by centreon-engine package. +# it output static data for testing purpose. + +cat << EOF +Centreon Engine Statistics Utility 23.10.6 + +Copyright 2003-2008 Ethan Galstad +Copyright 2011-2013,2016 Centreon +License: GPLv2 + +CURRENT STATUS DATA +------------------------------------------------------ +Status File: /var/log/centreon-engine/status.dat +Status File Age: 0d 0h 0m 26s +Status File Version: (null) + +Program Running Time: 1d 6h 37m 49s +Centreon Engine PID: 597 +Used/High/Total Command Buffers: 0 / 0 / 4096 + +Total Services: 27 +Services Checked: 27 +Services Scheduled: 27 +Services Actively Checked: 27 +Services Passively Checked: 0 +Total Service State Change: 0.000 / 0.000 / 0.000 % +Active Service Latency: 0.102 / 0.955 / 0.550 sec +Active Service Execution Time: 0.001 / 0.332 / 0.132 sec +Active Service State Change: 0.000 / 0.000 / 0.000 % +Active Services Last 1/5/15/60 min: 1 / 16 / 24 / 27 +Passive Service Latency: 0.000 / 0.000 / 0.000 sec +Passive Service State Change: 0.000 / 0.000 / 0.000 % +Passive Services Last 1/5/15/60 min: 0 / 0 / 0 / 0 +Services Ok/Warn/Unk/Crit: 21 / 0 / 6 / 0 +Services Flapping: 0 +Services In Downtime: 0 + +Total Hosts: 6 +Hosts Checked: 5 +Hosts Scheduled: 5 +Hosts Actively Checked: 6 +Host Passively Checked: 0 +Total Host State Change: 0.000 / 0.000 / 0.000 % +Active Host Latency: 0.020 / 0.868 / 0.475 sec +Active Host Execution Time: 0.030 / 0.152 / 0.083 sec +Active Host State Change: 0.000 / 0.000 / 0.000 % +Active Hosts Last 1/5/15/60 min: 0 / 3 / 5 / 5 +Passive Host Latency: 0.000 / 0.000 / 0.000 sec +Passive Host State Change: 0.000 / 0.000 / 0.000 % +Passive Hosts Last 1/5/15/60 min: 0 / 0 / 0 / 0 +Hosts Up/Down/Unreach: 5 / 1 / 0 +Hosts Flapping: 0 +Hosts In Downtime: 0 + +Active Host Checks Last 1/5/15 min: 0 / 5 / 
21 + Scheduled: 0 / 3 / 13 + On-demand: 0 / 2 / 8 + Parallel: 0 / 3 / 13 + Serial: 0 / 0 / 0 + Cached: 0 / 2 / 8 +Passive Host Checks Last 1/5/15 min: 0 / 0 / 0 +Active Service Checks Last 1/5/15 min: 1 / 18 / 57 + Scheduled: 1 / 18 / 57 + On-demand: 0 / 0 / 0 + Cached: 0 / 0 / 0 +Passive Service Checks Last 1/5/15 min: 0 / 0 / 0 + +External Commands Last 1/5/15 min: 0 / 0 / 0 + +EOF \ No newline at end of file diff --git a/gorgone/tests/robot/tests/centreon/statistics.robot b/gorgone/tests/robot/tests/centreon/statistics.robot new file mode 100644 index 00000000000..4e002a4d0e6 --- /dev/null +++ b/gorgone/tests/robot/tests/centreon/statistics.robot @@ -0,0 +1,106 @@ +*** Settings *** +Documentation test gorgone statistics module +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 220s +Suite Setup Suite Setup Statistics Module +Suite Teardown Suite Teardown Statistic Module + +*** Test Cases *** +check statistic module add all centengine data in db ${communication_mode} + [Documentation] Check engine statistics are correctly added in sql Database + @{process_list} Create List ${communication_mode}_gorgone_central ${communication_mode}_gorgone_poller_2 + [Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql + + ${date} Get Current Date increment=-1s + @{central_config} Create List ${ROOT_CONFIG}statistics.yaml ${ROOT_CONFIG}actions.yaml + @{poller_config} Create List ${ROOT_CONFIG}actions.yaml + Setup Two Gorgone Instances central_config=${central_config} communication_mode=${communication_mode} central_name=${communication_mode}_gorgone_central poller_name=${communication_mode}_gorgone_poller_2 poller_config=${poller_config} + + # we first test the module when there is no data in the table, we will test it again when + # there is data in the table to be sure the data are correctly updated. 
+ Execute SQL String DELETE FROM nagios_stats alias=storage + Check If Not Exists In Database SELECT * FROM nagios_stats alias=storage + + Ctn Gorgone Force Engine Statistics Retrieve + # statistics module send the GORGONE_ACTION_FINISH_OK once messages for the action module are sent. + # It don't wait for the action module to send back data or for the processing of the response to be finished. + # So I added a log each time a poller stat have finished to be processed. In this test I know + # I have 2 log because there is the central and one poller. + Ctn Wait For Log ${communication_mode} ${date} + + Ctn Gorgone Check Poller Engine Stats Are Present poller_id=1 + Ctn Gorgone Check Poller Engine Stats Are Present poller_id=2 + + # As the value we set in db are fake and hardcoded, we need to change the data before + # running again the module to be sure data are correctly updated, instead of letting the last value persist. + Query UPDATE nagios_stats SET stat_value=999; alias=storage + ${date2} Get Current Date increment=-1s + + Ctn Gorgone Force Engine Statistics Retrieve + + Ctn Wait For Log ${communication_mode} ${date2} + Ctn Gorgone Check Poller Engine Stats Are Present poller_id=1 + Ctn Gorgone Check Poller Engine Stats Are Present poller_id=2 + + Examples: communication_mode -- + ... push_zmq + ... pullwss + +*** Keywords *** + +Ctn Wait For Log + [Documentation] We can't make a single call because we don't know which will finish first + ... (even if it will often be the central node). So we check first for the central log, then for the poller node + ... from the starting point of the log. In the search, the lib search for the first log, and once it's found + ... start searching the second log from the first log position. + [Arguments] ${communication_mode} ${date} + + ${log_central} Create List poller 1 engine data was integrated in rrd and sql database. 
+ ${result_central} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_central/gorgoned.log content=${log_central} date=${date} regex=1 timeout=60 + Should Be True ${result_central} Didn't found the logs : ${result_central} + + ${log_poller2} Create List poller 2 engine data was integrated in rrd and sql database. + ${result_poller2} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_central/gorgoned.log content=${log_poller2} date=${date} regex=1 timeout=60 + Should Be True ${result_poller2} Didn't found the Central logs : ${result_poller2} + +Ctn Gorgone Check Poller Engine Stats Are Present + [Arguments] ${poller_id}= + + &{Service Check Latency}= Create Dictionary Min=0.102 Max=0.955 Average=0.550 + &{Host Check Latency}= Create Dictionary Min=0.020 Max=0.868 Average=0.475 + &{Service Check Execution Time}= Create Dictionary Min=0.001 Max=0.332 Average=0.132 + &{Host Check Execution Time}= Create Dictionary Min=0.030 Max=0.152 Average=0.083 + + &{data_check} Create Dictionary Service Check Latency=&{Service Check Latency} Host Check Execution Time=&{Host Check Execution Time} Host Check Latency=&{Host Check Latency} Service Check Execution Time=&{Service Check Execution Time} + + FOR ${stat_label} ${stat_data} IN &{data_check} + + FOR ${stat_key} ${stat_value} IN &{stat_data} + Check If Exists In Database SELECT instance_id FROM nagios_stats WHERE stat_key = '${stat_key}' AND stat_value = '${stat_value}' AND stat_label = '${stat_label}' AND instance_id='${poller_id}'; alias=storage + + END + END + +Ctn Gorgone Force Engine Statistics Retrieve + ${response}= GET http://127.0.0.1:8085/api/centreon/statistics/engine + Log To Console ${response.json()} + Dictionary Should Not Contain Key ${response.json()} error api/centreon/statistics/engine api call resulted in an error : ${response.json()} + + Log To Console engine statistic are being retrived. 
Gorgone sent a log token : ${response.json()} + +Suite Setup Statistics Module + Set Centenginestat Binary + Connect To Database pymysql ${DBNAME_STORAGE} ${DBUSER} ${DBPASSWORD} ${DBHOST} ${DBPORT} + ... alias=storage + +Set Centenginestat Binary + [Documentation] this keyword add a centenginestats file from the local directory to the /usr/sbin + ... directory and make it executable. This allow to test the gorgone statistics module + ... without installing centreon-engine and starting the service + + Copy File /usr/sbin/centenginestats /usr/sbin/centenginestats-back + Copy File ${CURDIR}${/}centenginestats /usr/sbin/centenginestats + Run chmod 755 /usr/sbin/centenginestats + +Suite Teardown Statistic Module + Copy File /usr/sbin/centenginestats-back /usr/sbin/centenginestats diff --git a/gorgone/tests/robot/tests/core/action.robot b/gorgone/tests/robot/tests/core/action.robot new file mode 100644 index 00000000000..bb1f7df5a14 --- /dev/null +++ b/gorgone/tests/robot/tests/core/action.robot @@ -0,0 +1,120 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# For more information : contact@centreon.com +# + +*** Settings *** +Documentation test gorgone action module on local and distant target +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 220s + +# @TODO : I know it's possible to have a remote server managing some poller. For now we don't test this case, but it should be tested and documented. 
+*** Test Cases *** +action module with ${communication_mode} communcation mode + [Documentation] test action on distant node, no whitelist configured + @{process_list} Create List ${communication_mode}_gorgone_central ${communication_mode}_gorgone_poller_2 + [Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql + + @{central_config} Create List ${ROOT_CONFIG}actions.yaml + @{poller_config} Create List ${ROOT_CONFIG}actions.yaml + Setup Two Gorgone Instances + ... central_config=${central_config} + ... communication_mode=${communication_mode} + ... central_name=${communication_mode}_gorgone_central + ... poller_name=${communication_mode}_gorgone_poller_2 + ... poller_config=${poller_config} + + # first we test the api without waiting for the output of the command. + # check by default the api launch the query in local + Test Async Action Module + # check the central can execute a command and send back the output + Test Async Action Module node_path=nodes/1/ + # check a distant poller can execute a command and send back the output + ${start_date} Get Current Date increment=-10s + Test Async Action Module node_path=nodes/2/ + # we need to check it is the poller and not the central that have done the action. + ${log_poller2_query} Create List Robot test write with param: for node nodes/2/ + ${logs_poller} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_poller_2/gorgoned.log content=${log_poller2_query} date=${start_date} timeout=10 + Should Be True ${logs_poller} Didn't found the logs in the poller file : ${logs_poller} + + # Now we test the action api by waiting for the command output in one call. + # This make gorgone wait for 3 seconds before querying for logs, and wait again 0.5 seconds for log to be received by central. + # On my machine the sync_wait was at least 0.22 seconds to work sometime, it always worked with 0.5s. 
+ # In the real world, where the poller is not on the same server, the delay will be greater and more random, + # so the async method should be preferred. + ${get_params}= Set Variable ?log_wait=3000000&sync_wait=500000 + Test Sync Action Module get_params=${get_params} + Test Sync Action Module get_params=${get_params} node_path=nodes/1/ + Test Sync Action Module get_params=${get_params} node_path=nodes/2/ + # we need to check it is the poller and not the central that has done the action. + ${start_date} Get Current Date increment=-10s + ${log_poller2_query_sync} Create List Robot test write with param:${get_params} for node nodes/2/ + ${logs_poller} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_poller_2/gorgoned.log content=${log_poller2_query_sync} date=${start_date} timeout=10 + Should Be True ${logs_poller} Didn't found the logs in the poller file: ${logs_poller} + + Run rm /tmp/actionLogs + + Examples: communication_mode -- + ... push_zmq + ... pullwss + +*** Keywords *** +Test Sync Action Module + [Arguments] ${get_params}= ${node_path}= + + ${action_api_result}= Post Action Endpoint node_path=${node_path} get_params=${get_params} + ${status} ${logs} Parse Json Response ${action_api_result} + Check Action Api Do Something ${status} ${logs} ${node_path} ${get_params} + + +Test Async Action Module + [Documentation] This makes an API call to write to a dummy file and output a string. As gorgone central, the poller and robot are executed on the same host, we can access the file to check the result. + [Arguments] ${node_path}=${EMPTY} + ${action_api_result}= Post Action Endpoint node_path=${node_path} + # need to get the data from the token with getlog. + # this calls the API multiple times until the response is available. 
+ ${status} ${logs} Ctn Get Api Log With Timeout token=${action_api_result.json()}[token] node_path=${node_path} + Check Action Api Do Something ${status} ${logs} ${node_path} ${EMPTY} + + +Post Action Endpoint + [Arguments] ${node_path}=${EMPTY} ${get_params}=${EMPTY} + + # Ideally, Gorgone should not allow any bash interpretation on command it execute. + # As there is a whitelist in gorgone, if there was no bash interpretation we could allow only our required binary and be safe. + # As gorgone always had bash interpretation available, most of the internal use of this module use redirection, pipe or other sh feature. + ${bodycmd}= Create Dictionary command=echo 'Robot test write with param:${get_params} for node ${node_path}' | tee -a /tmp/actionLogs + ${body}= Create List ${bodycmd} + ${result} POST http://127.0.0.1:8085/api/${node_path}core/action/command${get_params} json=${body} + RETURN ${result} + + +Check Action Api Do Something + [Arguments] ${status} ${logs} ${node_path} ${get_params} + + Should Be True ${status} No log found in the gorgone api or the command failed. + # the log api send back a json containing a list of log, with for each logs the token, id, creation time (ctime), status code(code), and data (among other thing) + # data is a stringified json that need to be evaluated separately. + ${internal_json}= Evaluate json.loads("""${logs}[data]""") json + + Should Be Equal As Numbers 0 ${internal_json}[result][exit_code] + Should Be Equal As Strings + ... Robot test write with param:${get_params} for node ${node_path} + ... ${internal_json}[result][stdout] + ... output of the gorgone action api should be the bash command output. 
+ + ${file_nb_line}= Run grep 'Robot test write with param:${get_params} for node ${node_path}\$' /tmp/actionLogs | wc -l + Should Be Equal 1 ${file_nb_line} command launched with gorgone api should set only one line in the file per tests diff --git a/gorgone/tests/robot/tests/core/httpserver.robot b/gorgone/tests/robot/tests/core/httpserver.robot new file mode 100644 index 00000000000..882784de421 --- /dev/null +++ b/gorgone/tests/robot/tests/core/httpserver.robot @@ -0,0 +1,54 @@ +*** Settings *** +Documentation check gorgone api response +Suite Setup Setup Gorgone +Suite Teardown Stop Gorgone And Remove Gorgone Config @{gorgone_process_name} +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 220s + +*** Variables *** +@{gorgone_process_name}= httpserver_api_statuscode + +*** Test Cases *** +check http api get status code ${tc} + ${expected_code}= Convert To Integer ${http_status_code} + ${api_response}= GET http://127.0.0.1:8085${endpoint} expected_status=anything + + Log To Console \nendpoint code is : ${api_response.status_code} output is : ${api_response.text} + + Should Be Equal ${api_response.status_code} ${expected_code} + ${expected_json}= evaluate json.loads('''${expected_response}''') json + Dictionaries Should Be Equal ${api_response.json()} ${expected_json} + + Examples: tc http_status_code endpoint expected_response -- + ... forbidden 403 /bad/endpoint {"error":"http_error_403","message":"forbidden"} + ... constatus Ok 200 /api/internal/constatus {"data":{},"action":"constatus","message":"ok"} + ... method not found 404 /api/internal/wrongendpoint {"error":"method_unknown","message":"Method not implemented"} + ... 
get log 200 /api/nodes/1/log/wrongtoken {"error":"no_log","message":"No log found for token","data":[],"token":"wrongtoken"} + +check http api post api ${tc} + ${expected_code}= Convert To Integer ${http_status_code} + ${api_response}= POST http://127.0.0.1:8085${endpoint} expected_status=anything data=${body} + + Log To Console \nendpoint code is : ${api_response.status_code} output is : ${api_response.text} + + Should Be Equal ${api_response.status_code} ${expected_code} + IF len("""${expected_response}""") > 0 + ${expected}= evaluate json.loads('''${expected_response}''') json + Dictionaries Should Be Equal ${api_response.json()} ${expected} + END + + Examples: tc http_status_code endpoint body expected_response -- + ... body is not json 400 /api/centreon/nodes/sync { {"error":"decode_error","message":"POST content must be JSON-formated"} + ... body is valid json 200 /api/centreon/nodes/sync {} ${EMPTY} # api send back a random token. + + +*** Keywords *** + +Setup Gorgone + @{gorgone_conf} Create List ${push_central_config} ${gorgone_core_config} + Setup Gorgone Config ${gorgone_conf} gorgone_name=httpserver_api_statuscode + Start Gorgone debug httpserver_api_statuscode + + Log To Console \nGorgone Started. We have to wait for it to be ready to respond. + Sleep 10 + Log To Console Gorgone should be ready. 
\n \ No newline at end of file diff --git a/gorgone/tests/robot/tests/core/pullwss.robot b/gorgone/tests/robot/tests/core/pullwss.robot new file mode 100644 index 00000000000..bf2552031bd --- /dev/null +++ b/gorgone/tests/robot/tests/core/pullwss.robot @@ -0,0 +1,26 @@ +*** Settings *** +Documentation Start and stop gorgone in pullwss mode + +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 220s + +*** Variables *** +@{process_list} pullwss_gorgone_poller_2 pullwss_gorgone_central + +*** Test Cases *** +check one poller can connect to a central and gorgone central stop first + [Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql + @{process_list} Set Variable pullwss_gorgone_central pullwss_gorgone_poller_2 + Log To Console \nStarting the gorgone setup + Setup Two Gorgone Instances communication_mode=pullwss central_name=pullwss_gorgone_central poller_name=pullwss_gorgone_poller_2 + Ctn Check No Error In Logs pullwss_gorgone_poller_2 + Log To Console End of tests. + +check one poller can connect to a central and gorgone poller stop first + [Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql + @{process_list} Set Variable pullwss_gorgone_poller_2 pullwss_gorgone_central + Log To Console \nStarting the gorgone setup + + Setup Two Gorgone Instances communication_mode=pullwss central_name=pullwss_gorgone_central poller_name=pullwss_gorgone_poller_2 + Ctn Check No Error In Logs pullwss_gorgone_poller_2 + Log To Console End of tests. 
diff --git a/gorgone/tests/robot/tests/core/push.robot b/gorgone/tests/robot/tests/core/push.robot new file mode 100644 index 00000000000..5dd5410628b --- /dev/null +++ b/gorgone/tests/robot/tests/core/push.robot @@ -0,0 +1,65 @@ +*** Settings *** +Documentation Start and stop gorgone + +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 220s + +*** Variables *** +@{process_list} push_zmq_gorgone_central push_zmq_gorgone_poller_2 + +*** Test Cases *** +connect 1 poller to a central + [Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql + + Log To Console \nStarting the gorgone setup + Setup Two Gorgone Instances communication_mode=push_zmq central_name=push_zmq_gorgone_central poller_name=push_zmq_gorgone_poller_2 + # Test + Log To Console End of tests. + +check central don't eat cpu when poller is not connected + [Tags] long_tests MON-130747 + ${central_name}= Set Variable push_zmq_gorgone_central + [Teardown] Stop Gorgone And Remove Gorgone Config push_zmq_gorgone_central sql_file=${ROOT_CONFIG}db_delete_poller.sql + + @{central_push_config}= Create List ${push_central_config} ${gorgone_core_config} + Setup Gorgone Config ${central_push_config} gorgone_name=${central_name} sql_file=${ROOT_CONFIG}db_add_1_poller.sql + Start Gorgone debug ${central_name} + Wait Until Port Is Bind 8085 + Ctn Wait Until Poller Fail To Connect 1 + Ctn Check Cpu Until Timeout + + +*** Keywords *** +Ctn Check Cpu Until Timeout + [Arguments] ${timeout}=60s ${process_whitelist}=gorgone-proxy ${max_cpu_usage}=40 + ${max_date} Get Current Date increment=${timeout} + ${current_date} Get Current Date + + WHILE '${max_date}' > '${current_date}' + ${cpu_conso} Run echo $(( $(ps -eo cp,args:100 | grep -v grep | grep -i ${process_whitelist} | awk '{print $1}' | paste -sd+) )) + Should Be True ${cpu_conso} < ${max_cpu_usage} Gorgone consume too much cpu : ${cpu_conso} + ${current_date} Get Current Date + Sleep 2 + 
END + + +Ctn Wait Until Poller Fail To Connect + [Arguments] ${nb_fail}=1 ${poller_id}=2 + + ${response} Set Variable ${EMPTY} + FOR ${i} IN RANGE 35 + Sleep 5 + ${response}= GET http://127.0.0.1:8085/api/internal/constatus + Log ${response.json()} + IF not ${response.json()}[data] + CONTINUE + END + IF ${response.json()}[data][${poller_id}][ping_failed] >= ${nb_fail} or ${response.json()}[data][${poller_id}][ping_ok] > 0 + BREAK + END + END + Log To Console json response : ${response.json()} + Should Be True ${i} < 34 timeout after ${i} time waiting for poller status in gorgone rest api (/api/internal/constatus) + Should Be True ${nb_fail} == ${response.json()}[data][${poller_id}][ping_failed] there was failed ping between the central and the poller ${poller_id} + Should Be True 0 == ${response.json()}[data][${poller_id}][ping_ok] there was successful ping between the central and the poller ${poller_id} + Log To Console ${nb_fail} failed ping between the central and the poller ${poller_id} diff --git a/gorgone/tests/robot/tests/long_tests/fd-leak.robot b/gorgone/tests/robot/tests/long_tests/fd-leak.robot new file mode 100644 index 00000000000..54f2178787f --- /dev/null +++ b/gorgone/tests/robot/tests/long_tests/fd-leak.robot @@ -0,0 +1,33 @@ +*** Settings *** +Documentation check Gorgone don't leak file descriptor when a poller become unavailable + +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 1200s + +*** Test Cases *** +check gorgone proxy do not leak file descriptor with a poller + [Tags] long_tests + [Teardown] Stop Gorgone And Remove Gorgone Config push_zmq_gorgone_central sql_file=${ROOT_CONFIG}db_delete_poller.sql + ${cmd_count_file_descriptor}= Set Variable count=0; for pid in \$(ps aux | grep gorgone-proxy | grep -v grep | awk '{ print \$2 }') ; do num=\$(lsof | grep \$pid | wc -l); count=\$((count + \$num)) ; done ; echo \$count + + Log To Console \nStarting the gorgone setup + Setup Two Gorgone Instances 
communication_mode=push_zmq central_name=push_zmq_gorgone_central poller_name=push_zmq_gorgone_poller_2 + # We wait for gorgone to be ready, and grab all file descriptor it need. + Sleep 10 + ${before_kill_fd_nb} Run ${cmd_count_file_descriptor} + Stop Gorgone And Remove Gorgone Config push_zmq_gorgone_poller_2 + Sleep 10 + # check what is the normal number of file descriptor for gorgone to take + ${initial_fd_nb} Run ${cmd_count_file_descriptor} + Log To Console \n number of file descriptor on before killing poller : ${before_kill_fd_nb} and after : ${initial_fd_nb} \n + ${max}= Evaluate ${initial_fd_nb} + 15 + Log To Console max is ${max} + Sleep 20 + FOR ${i} IN RANGE 60 + ${current_fd_nb} Run ${cmd_count_file_descriptor} + IF ${i} % 10 == 0 + Log To Console exec ${i} \t got ${current_fd_nb} + END + Should Be True ${max} > ${current_fd_nb} gorgone is using more and more file descriptor after a poller disconnect, starting at ${initial_fd_nb} and after ${i} iteration (5 sec each) to ${current_fd_nb} + Sleep 5 + END diff --git a/gorgone/tests/robot/tests/start_stop/config.yaml b/gorgone/tests/robot/tests/start_stop/config.yaml new file mode 100644 index 00000000000..c35b99ed705 --- /dev/null +++ b/gorgone/tests/robot/tests/start_stop/config.yaml @@ -0,0 +1,34 @@ +gorgone: + gorgonecore: + internal_com_type: ipc + internal_com_path: /etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/routing.ipc + external_com_type: tcp + + gorgone_db_type: SQLite + gorgone_db_name: dbname=/etc/centreon-gorgone/@UNIQ_ID_FROM_ROBOT_TESTING_CONFIG_FILE@/history.sdb + id: 1 + privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem" + pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem" + modules: + - name: httpserver + package: "gorgone::modules::core::httpserver::hooks" + enable: true + address: "0.0.0.0" + port: "8085" + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 +centreon: + database: + db_configuration: + dsn: 
"mysql:host=@DBHOST@:port=3306;dbname=@DBNAME@" + username: "@DBUSER@" + password: "@DBPASSWORD@" + db_realtime: + dsn: "mysql:host=@DBHOST@:port=3306;dbname=centreon_storage" + username: "@DBUSER@" + password: "@DBPASSWORD@" diff --git a/gorgone/tests/robot/tests/start_stop/start_stop.robot b/gorgone/tests/robot/tests/start_stop/start_stop.robot new file mode 100644 index 00000000000..f374e9e9732 --- /dev/null +++ b/gorgone/tests/robot/tests/start_stop/start_stop.robot @@ -0,0 +1,20 @@ +*** Settings *** +Documentation Start and stop gorgone + +Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource +Test Timeout 120s + +*** Test Cases *** +Start and stop gorgone + @{configfile} Create List ${CURDIR}${/}config.yaml + FOR ${i} IN RANGE 5 + ${gorgone_name}= Set Variable gorgone_start_stop${i} + + Setup Gorgone Config ${configfile} gorgone_name=${gorgone_name} + Log To Console Starting Gorgone... + Start Gorgone debug ${gorgone_name} + Sleep 5s + Log To Console Stopping Gorgone... + Stop Gorgone And Remove Gorgone Config gorgone_start_stop${i} + sleep 2s + END \ No newline at end of file diff --git a/gorgone/veracode.json b/gorgone/veracode.json new file mode 100644 index 00000000000..329f76f89be --- /dev/null +++ b/gorgone/veracode.json @@ -0,0 +1,3 @@ +{ + "ignorethirdparty": "false" +} \ No newline at end of file diff --git a/lua-curl/packaging/lua-curl.yaml b/lua-curl/packaging/lua-curl.yaml new file mode 100644 index 00000000000..6f3916f8479 --- /dev/null +++ b/lua-curl/packaging/lua-curl.yaml @@ -0,0 +1,53 @@ +name: "lua-curl" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua curl library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../../lua-curl-src/lcurl.so" + dst: "/usr/lib64/lua/@luaver@/lcurl.so" + file_info: + mode: 
0644 + packager: rpm + - src: "../../lua-curl-src/lcurl.so" + dst: "/usr/lib/x86_64-linux-gnu/lua/@luaver@/lcurl.so" + file_info: + mode: 0644 + packager: deb + + - src: "../../lua-curl-src/src/lua/cURL.lua" + dst: "/usr/share/lua/@luaver@/cURL.lua" + + - src: "../../lua-curl-src/src/lua/cURL" + dst: "/usr/share/lua/@luaver@/cURL" + +overrides: + rpm: + depends: + - lua + deb: + depends: + - lua@luaver@ + provides: + - lua@luaver@-curl + conflicts: + - lua@luaver@-curl + replaces: + - lua@luaver@-curl + +rpm: + summary: lua curl + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/centreon-broker-core.yaml b/packaging/centreon-broker-core.yaml index 61bf0228188..1ebb52de51a 100644 --- a/packaging/centreon-broker-core.yaml +++ b/packaging/centreon-broker-core.yaml @@ -47,9 +47,11 @@ overrides: conflicts: - centreon-broker-storage - centreon-broker-core-devel + - centreon-broker-caching_sha2_password replaces: - centreon-broker-storage - centreon-broker-core-devel + - centreon-broker-caching_sha2_password provides: - centreon-broker-storage - centreon-broker-core-devel @@ -65,9 +67,11 @@ overrides: conflicts: - centreon-broker-storage - centreon-broker-core-dev + - centreon-broker-caching-sha2-password replaces: - centreon-broker-storage - centreon-broker-core-dev + - centreon-broker-caching-sha2-password provides: - centreon-broker-storage - centreon-broker-core-dev diff --git a/packaging/centreon-monitoring-agent.yaml b/packaging/centreon-monitoring-agent.yaml index 83bba81f424..c452432cf47 100644 --- a/packaging/centreon-monitoring-agent.yaml +++ b/packaging/centreon-monitoring-agent.yaml @@ -51,6 +51,16 @@ contents: owner: centreon-monitoring-agent group: centreon-monitoring-agent +overrides: + rpm: + depends: + - openssl-libs >= 3 + - zlib + deb: + depends: + - libssl1.1 | libssl3 + - zlib1g + scripts: preinstall: ./scripts/centreon-monitoring-agent-preinstall.sh postinstall: 
./scripts/centreon-monitoring-agent-postinstall.sh diff --git a/tests/README.md b/tests/README.md index 0a6775c6ec9..f66f6e8a837 100644 --- a/tests/README.md +++ b/tests/README.md @@ -145,54 +145,55 @@ Here is the list of the currently implemented tests: 34. [x] **BGRPCSSU3**: Start-Stop with unified_sql one instance of broker configured with grpc and no coredump 35. [x] **BGRPCSSU4**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 1sec interval and no coredump 36. [x] **BGRPCSSU5**: Start-Stop with unified_sql with reversed connection on grpc acceptor with only one instance and no deadlock -37. [x] **BLDIS1**: Start broker with core logs 'disabled' -38. [x] **BLEC1**: Change live the core level log from trace to debug -39. [x] **BLEC2**: Change live the core level log from trace to foo raises an error -40. [x] **BLEC3**: Change live the foo level log to trace raises an error -41. [x] **BSCSS1**: Start-Stop two instances of broker and no coredump -42. [x] **BSCSS2**: Start/Stop 10 times broker with 300ms interval and no coredump -43. [x] **BSCSS3**: Start-Stop one instance of broker and no coredump -44. [x] **BSCSS4**: Start/Stop 10 times broker with 1sec interval and no coredump -45. [x] **BSCSSC1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. -46. [x] **BSCSSC2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is disabled on client side. -47. [x] **BSCSSCG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on client side. -48. [x] **BSCSSCGRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. 
Reversed connection with retention and grpc transport protocol. -49. [x] **BSCSSCGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. Reversed connection with retention and grpc transport protocol. -50. [x] **BSCSSCRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. Connection reversed with retention. -51. [x] **BSCSSCRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is disabled on client side. Connection reversed with retention. -52. [x] **BSCSSG1**: Start-Stop two instances of broker and no coredump -53. [x] **BSCSSG2**: Start/Stop 10 times broker with 300ms interval and no coredump -54. [x] **BSCSSG3**: Start-Stop one instance of broker and no coredump -55. [x] **BSCSSG4**: Start/Stop 10 times broker with 1sec interval and no coredump -56. [x] **BSCSSGA1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server. Error messages are raised. -57. [x] **BSCSSGA2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server and also on the client. All looks ok. -58. [x] **BSCSSGRR1**: Start-Stop two instances of broker and no coredump, reversed and retention, with transport protocol grpc, start-stop 5 times. -59. [x] **BSCSSK1**: Start-Stop two instances of broker, server configured with grpc and client with tcp. No connectrion established and error raised on client side. -60. [x] **BSCSSK2**: Start-Stop two instances of broker, server configured with tcp and client with grpc. No connection established and error raised on client side. -61. 
[x] **BSCSSP1**: Start-Stop two instances of broker and no coredump. The server contains a listen address -62. [x] **BSCSSPRR1**: Start-Stop two instances of broker and no coredump. The server contains a listen address, reversed and retention. centreon-broker-master-rrd is then a failover. -63. [x] **BSCSSR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client and reversed. -64. [x] **BSCSSRR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client, reversed and retention. centreon-broker-master-rrd is then a failover. -65. [x] **BSCSSRR2**: Start/Stop 10 times broker with 300ms interval and no coredump, reversed and retention. centreon-broker-master-rrd is then a failover. -66. [x] **BSCSST1**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. -67. [x] **BSCSST2**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. -68. [x] **BSCSSTG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. This is not sufficient, then an error is raised. -69. [x] **BSCSSTG2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. -70. [x] **BSCSSTG3**: Start-Stop two instances of broker. The connection cannot be established if the server private key is missing and an error message explains this issue. -71. [x] **BSCSSTGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. Reversed grpc connection with retention. -72. [x] **BSCSSTRR1**: Start-Stop two instances of broker and no coredump. Encryption is enabled. transport protocol is tcp, reversed and retention. -73. [x] **BSCSSTRR2**: Start-Stop two instances of broker and no coredump. Encryption is enabled. -74. 
[x] **BSS1**: Start-Stop two instances of broker and no coredump -75. [x] **BSS2**: Start/Stop 10 times broker with 300ms interval and no coredump -76. [x] **BSS3**: Start-Stop one instance of broker and no coredump -77. [x] **BSS4**: Start/Stop 10 times broker with 1sec interval and no coredump -78. [x] **BSS5**: Start-Stop with reversed connection on TCP acceptor with only one instance and no deadlock -79. [x] **BSSU1**: Start-Stop with unified_sql two instances of broker and no coredump -80. [x] **BSSU2**: Start/Stop with unified_sql 10 times broker with 300ms interval and no coredump -81. [x] **BSSU3**: Start-Stop with unified_sql one instance of broker and no coredump -82. [x] **BSSU4**: Start/Stop with unified_sql 10 times broker with 1sec interval and no coredump -83. [x] **BSSU5**: Start-Stop with unified_sql with reversed connection on TCP acceptor with only one instance and no deadlock -84. [x] **START_STOP_CBD**: restart cbd with unified_sql services state must not be null after restart +37. [x] **BLBD**: Start Broker with loggers levels by default +38. [x] **BLDIS1**: Start broker with core logs 'disabled' +39. [x] **BLEC1**: Change live the core level log from trace to debug +40. [x] **BLEC2**: Change live the core level log from trace to foo raises an error +41. [x] **BLEC3**: Change live the foo level log to trace raises an error +42. [x] **BSCSS1**: Start-Stop two instances of broker and no coredump +43. [x] **BSCSS2**: Start/Stop 10 times broker with 300ms interval and no coredump +44. [x] **BSCSS3**: Start-Stop one instance of broker and no coredump +45. [x] **BSCSS4**: Start/Stop 10 times broker with 1sec interval and no coredump +46. [x] **BSCSSC1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. +47. [x] **BSCSSC2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. 
Compression is disabled on client side. +48. [x] **BSCSSCG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on client side. +49. [x] **BSCSSCGRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. Reversed connection with retention and grpc transport protocol. +50. [x] **BSCSSCGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. Reversed connection with retention and grpc transport protocol. +51. [x] **BSCSSCRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. Connection reversed with retention. +52. [x] **BSCSSCRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is disabled on client side. Connection reversed with retention. +53. [x] **BSCSSG1**: Start-Stop two instances of broker and no coredump +54. [x] **BSCSSG2**: Start/Stop 10 times broker with 300ms interval and no coredump +55. [x] **BSCSSG3**: Start-Stop one instance of broker and no coredump +56. [x] **BSCSSG4**: Start/Stop 10 times broker with 1sec interval and no coredump +57. [x] **BSCSSGA1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server. Error messages are raised. +58. [x] **BSCSSGA2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server and also on the client. All looks ok. +59. 
[x] **BSCSSGRR1**: Start-Stop two instances of broker and no coredump, reversed and retention, with transport protocol grpc, start-stop 5 times. +60. [x] **BSCSSK1**: Start-Stop two instances of broker, server configured with grpc and client with tcp. No connectrion established and error raised on client side. +61. [x] **BSCSSK2**: Start-Stop two instances of broker, server configured with tcp and client with grpc. No connection established and error raised on client side. +62. [x] **BSCSSP1**: Start-Stop two instances of broker and no coredump. The server contains a listen address +63. [x] **BSCSSPRR1**: Start-Stop two instances of broker and no coredump. The server contains a listen address, reversed and retention. centreon-broker-master-rrd is then a failover. +64. [x] **BSCSSR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client and reversed. +65. [x] **BSCSSRR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client, reversed and retention. centreon-broker-master-rrd is then a failover. +66. [x] **BSCSSRR2**: Start/Stop 10 times broker with 300ms interval and no coredump, reversed and retention. centreon-broker-master-rrd is then a failover. +67. [x] **BSCSST1**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. +68. [x] **BSCSST2**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. +69. [x] **BSCSSTG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. This is not sufficient, then an error is raised. +70. [x] **BSCSSTG2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. +71. [x] **BSCSSTG3**: Start-Stop two instances of broker. 
The connection cannot be established if the server private key is missing and an error message explains this issue. +72. [x] **BSCSSTGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. Reversed grpc connection with retention. +73. [x] **BSCSSTRR1**: Start-Stop two instances of broker and no coredump. Encryption is enabled. transport protocol is tcp, reversed and retention. +74. [x] **BSCSSTRR2**: Start-Stop two instances of broker and no coredump. Encryption is enabled. +75. [x] **BSS1**: Start-Stop two instances of broker and no coredump +76. [x] **BSS2**: Start/Stop 10 times broker with 300ms interval and no coredump +77. [x] **BSS3**: Start-Stop one instance of broker and no coredump +78. [x] **BSS4**: Start/Stop 10 times broker with 1sec interval and no coredump +79. [x] **BSS5**: Start-Stop with reversed connection on TCP acceptor with only one instance and no deadlock +80. [x] **BSSU1**: Start-Stop with unified_sql two instances of broker and no coredump +81. [x] **BSSU2**: Start/Stop with unified_sql 10 times broker with 300ms interval and no coredump +82. [x] **BSSU3**: Start-Stop with unified_sql one instance of broker and no coredump +83. [x] **BSSU4**: Start/Stop with unified_sql 10 times broker with 1sec interval and no coredump +84. [x] **BSSU5**: Start-Stop with unified_sql with reversed connection on TCP acceptor with only one instance and no deadlock +85. [x] **START_STOP_CBD**: restart cbd with unified_sql services state must not be null after restart ### Broker/database 1. [x] **DEDICATED_DB_CONNECTION_${nb_conn}_${store_in_data_bin}**: count database connection @@ -313,191 +314,204 @@ Here is the list of the currently implemented tests: 103. [x] **BENCH_${nb_checks}_SERVICE_STATUS_TRACES_WITHOUT_SQL**: Broker is configured without SQL output. External command CHECK_SERVICE_RESULT is sent ${nb_checks} times. Logs are in trace level. 104. 
[x] **BENCH_${nb_checks}_SERVICE_STATUS_WITHOUT_SQL**: Broker is configured without SQL output. External command CHECK_SERVICE_RESULT is sent ${nb_checks} times. 105. [x] **BENCH_1000STATUS_100${suffixe}**: external command CHECK_SERVICE_RESULT 100 times with 100 pollers with 20 services -106. [x] **BEPBBEE1**: central-module configured with bbdo_version 3.0 but not others. Unable to establish connection. -107. [x] **BEPBBEE2**: bbdo_version 3 not compatible with sql/storage -108. [x] **BEPBBEE3**: bbdo_version 3 generates new bbdo protobuf service status messages. -109. [x] **BEPBBEE4**: bbdo_version 3 generates new bbdo protobuf host status messages. -110. [x] **BEPBBEE5**: bbdo_version 3 generates new bbdo protobuf service messages. -111. [x] **BEPBCVS**: bbdo_version 3 communication of custom variables. -112. [x] **BEPBHostParent**: bbdo_version 3 communication of host parent relations -113. [x] **BEPBINST_CONF**: bbdo_version 3 communication of instance configuration. -114. [x] **BEPBRI1**: bbdo_version 3 use pb_resource new bbdo protobuf ResponsiveInstance message. -115. [x] **BEPB_HOST_DEPENDENCY**: BBDO 3 communication of host dependencies. -116. [x] **BEPB_SERVICE_DEPENDENCY**: bbdo_version 3 communication of host dependencies. -117. [x] **BERD1**: Starting/stopping Broker does not create duplicated events. -118. [x] **BERD2**: Starting/stopping Engine does not create duplicated events. -119. [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases -120. [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases -121. [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -122. [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -123. 
[x] **BERDUCA300**: Starting/stopping Engine is stopped ; it should emit a stop event and receive an ack event with events to clean from broker. -124. [x] **BERDUCA301**: Starting/stopping Engine is stopped ; it should emit a stop event and receive an ack event with events to clean from broker with bbdo 3.0.1. -125. [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql -126. [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql -127. [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -128. [x] **BESERVCHECK**: external command CHECK_SERVICE_RESULT -129. [x] **BESS1**: Start-Stop Broker/Engine - Broker started first - Broker stopped first -130. [x] **BESS2**: Start-Stop Broker/Engine - Broker started first - Engine stopped first -131. [x] **BESS2U**: Start-Stop Broker/Engine - Broker started first - Engine stopped first. Unified_sql is used. -132. [x] **BESS3**: Start-Stop Broker/Engine - Engine started first - Engine stopped first -133. [x] **BESS4**: Start-Stop Broker/Engine - Engine started first - Broker stopped first -134. [x] **BESS5**: Start-Stop Broker/engine - Engine debug level is set to all, it should not hang -135. [x] **BESSBQ1**: A very bad queue file is written for broker. Broker and Engine are then started, Broker must read the file raising an error because of that file and then get data sent by Engine. -136. [x] **BESS_CRYPTED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured -137. [x] **BESS_CRYPTED_GRPC2**: Start-Stop grpc version Broker/Engine only server crypted -138. [x] **BESS_CRYPTED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted -139. [x] **BESS_CRYPTED_REVERSED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured -140. 
[x] **BESS_CRYPTED_REVERSED_GRPC2**: Start-Stop grpc version Broker/Engine only engine server crypted -141. [x] **BESS_CRYPTED_REVERSED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted -142. [x] **BESS_ENGINE_DELETE_HOST**: once engine and cbd started, stop and restart cbd, delete an host and reload engine, cbd mustn't core -143. [x] **BESS_GRPC1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first -144. [x] **BESS_GRPC2**: Start-Stop grpc version Broker/Engine - Broker started first - Engine stopped first -145. [x] **BESS_GRPC3**: Start-Stop grpc version Broker/Engine - Engine started first - Engine stopped first -146. [x] **BESS_GRPC4**: Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first -147. [x] **BESS_GRPC5**: Start-Stop grpc version Broker/engine - Engine debug level is set to all, it should not hang -148. [x] **BESS_GRPC_COMPRESS1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped last compression activated -149. [x] **BETAG1**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. -150. [x] **BETAG2**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. -151. [x] **BEUTAG1**: Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. -152. [x] **BEUTAG10**: some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. -153. [x] **BEUTAG11**: some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. -154. [x] **BEUTAG12**: Engine is configured with some tags. 
Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. -155. [x] **BEUTAG2**: Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. -156. [x] **BEUTAG3**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. -157. [x] **BEUTAG4**: Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. -158. [x] **BEUTAG5**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. -159. [x] **BEUTAG6**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. -160. [x] **BEUTAG7**: Some services are configured with tags on two pollers. Then tags configuration is modified. -161. [x] **BEUTAG8**: Services have tags provided by templates. -162. [x] **BEUTAG9**: hosts have tags provided by templates. -163. [x] **BEUTAG_REMOVE_HOST_FROM_HOSTGROUP**: remove a host from hostgroup, reload, insert 2 host in the hostgroup must not make sql error -164. [x] **BE_DEFAULT_NOTIFCATION_INTERVAL_IS_ZERO_SERVICE_RESOURCE**: default notification_interval must be set to NULL in services, hosts and resources tables. -165. [x] **BE_NOTIF_OVERFLOW**: bbdo 2.0 notification number =40000. make an overflow => notification_number null in db -166. [x] **BE_TIME_NULL_SERVICE_RESOURCE**: With BBDO 3, notification_interval time must be set to NULL on 0 in services, hosts and resources tables. -167. 
[x] **BRCS1**: Broker reverse connection stopped -168. [x] **BRCTS1**: Broker reverse connection too slow -169. [x] **BRCTSMN**: Broker connected to map with neb filter -170. [x] **BRCTSMNS**: Broker connected to map with neb and storage filters -171. [x] **BRGC1**: Broker good reverse connection -172. [x] **BRRDCDDID1**: RRD metrics deletion from index ids with rrdcached. -173. [x] **BRRDCDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage with rrdcached. -174. [x] **BRRDCDDIDU1**: RRD metrics deletion from index ids with unified sql output with rrdcached. -175. [x] **BRRDCDDM1**: RRD metrics deletion from metric ids with rrdcached. -176. [x] **BRRDCDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage and rrdcached. -177. [x] **BRRDCDDMID1**: RRD deletion of non existing metrics and indexes with rrdcached -178. [x] **BRRDCDDMIDU1**: RRD deletion of non existing metrics and indexes with rrdcached -179. [x] **BRRDCDDMU1**: RRD metric deletion on table metric with unified sql output with rrdcached -180. [x] **BRRDCDRB1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output and rrdcached. -181. [x] **BRRDCDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached -182. [x] **BRRDCDRBU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output and rrdcached. -183. [x] **BRRDCDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached -184. [x] **BRRDDID1**: RRD metrics deletion from index ids. -185. [x] **BRRDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage. -186. [x] **BRRDDIDU1**: RRD metrics deletion from index ids with unified sql output. -187. [x] **BRRDDM1**: RRD metrics deletion from metric ids. -188. 
[x] **BRRDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage. -189. [x] **BRRDDMID1**: RRD deletion of non existing metrics and indexes -190. [x] **BRRDDMIDU1**: RRD deletion of non existing metrics and indexes -191. [x] **BRRDDMU1**: RRD metric deletion on table metric with unified sql output -192. [x] **BRRDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql -193. [x] **BRRDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql -194. [x] **BRRDRM1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output. -195. [x] **BRRDRMU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output. -196. [x] **BRRDUPLICATE**: RRD metric rebuild with a query in centreon_storage and unified sql with duplicate rows in database -197. [x] **BRRDWM1**: We are working with BBDO3. This test checks protobuf metrics and status are sent to cbd RRD. -198. [x] **CBD_RELOAD_AND_FILTERS**: We start engine/broker with a classical configuration. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. -199. [x] **CBD_RELOAD_AND_FILTERS_WITH_OPR**: We start engine/broker with an almost classical configuration, just the connection between cbd central and cbd rrd is reversed with one peer retention. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. -200. [x] **DTIM**: New services with several pollers are created. Then downtimes are set on all configured hosts. This action results on 5250 downtimes if we also count impacted services. 
Then all these downtimes are removed. This test is done with BBDO 3.0.1 -201. [x] **EBBPS1**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table resources. -202. [x] **EBBPS2**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services. -203. [x] **EBDP1**: Four new pollers are started and then we remove Poller3. -204. [x] **EBDP2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. -205. [x] **EBDP3**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. -206. [x] **EBDP4**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by Broker. -207. [x] **EBDP5**: Four new pollers are started and then we remove Poller3. -208. [x] **EBDP6**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. -209. [x] **EBDP7**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. -210. [x] **EBDP8**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by broker. -211. [x] **EBDP_GRPC2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. -212. [x] **EBMSSM**: 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. 
GetSqlManagerStats is called to measure writes into data_bin. -213. [x] **EBNHG1**: New host group with several pollers and connections to DB -214. [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup -215. [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql -216. [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql -217. [x] **EBNHGU3**: New host group with several pollers and connections to DB with broker configured with unified_sql -218. [x] **EBNHGU4_${test_label}**: New host group with several pollers and connections to DB with broker and rename this hostgroup -219. [x] **EBNSG1**: New service group with several pollers and connections to DB -220. [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql -221. [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql -222. [x] **EBNSGU3_${test_label}**: New service group with several pollers and connections to DB with broker and rename this servicegroup -223. [x] **EBNSVC1**: New services with several pollers -224. [x] **EBPS2**: 1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker. -225. [x] **EBSAU2**: New services with action_url with more than 2000 characters -226. [x] **EBSN3**: New services with notes with more than 500 characters -227. [x] **EBSNU1**: New services with notes_url with more than 2000 characters -228. [x] **ENRSCHE1**: Verify that next check of a rescheduled host is made at last_check + interval_check -229. [x] **FILTER_ON_LUA_EVENT**: stream connector with a bad configured filter generate a log error message -230. 
[x] **GRPC_CLOUD_FAILURE**: simulate a broker failure in cloud environment, we provide a muted grpc server and there must remain only one grpc connection. Then we start broker and connection must be ok -231. [x] **GRPC_RECONNECT**: We restart broker and engine must reconnect to it and send data -232. [x] **LCDNU**: the lua cache updates correctly service cache. -233. [x] **LCDNUH**: the lua cache updates correctly host cache -234. [x] **LOGV2DB1**: log-v2 disabled old log enabled check broker sink -235. [x] **LOGV2DB2**: log-v2 disabled old log disabled check broker sink -236. [x] **LOGV2DF1**: log-v2 disabled old log enabled check logfile sink -237. [x] **LOGV2DF2**: log-v2 disabled old log disabled check logfile sink -238. [x] **LOGV2EB1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled. -239. [x] **LOGV2EB2**: log-v2 enabled old log enabled check broker sink -240. [x] **LOGV2EBU1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled with bbdo3. -241. [x] **LOGV2EBU2**: Check Broker sink with log-v2 enabled and legacy log enabled with BBDO3. -242. [x] **LOGV2EF1**: log-v2 enabled old log disabled check logfile sink -243. [x] **LOGV2EF2**: log-v2 enabled old log enabled check logfile sink -244. [x] **LOGV2FE2**: log-v2 enabled old log enabled check logfile sink -245. [x] **RLCode**: Test if reloading LUA code in a stream connector applies the changes -246. [x] **RRD1**: RRD metric rebuild asked with gRPC API. Three non existing indexes IDs are selected then an error message is sent. This is done with unified_sql output. -247. [x] **SDER**: The check attempts and the max check attempts of (host_1,service_1) are changed to 280 thanks to the retention.dat file. Then Engine and Broker are started and Broker should write these values in the services and resources tables. We only test the services table because we need a resources table that allows bigger numbers for these two attributes. 
But we see that Broker doesn't crash anymore. -248. [x] **SEVERAL_FILTERS_ON_LUA_EVENT**: Two stream connectors with different filters are configured. -249. [x] **STORAGE_ON_LUA**: The category 'storage' is applied on the stream connector. Only events of this category should be sent to this stream. -250. [x] **STUPID_FILTER**: Unified SQL is configured with only the bbdo category as filter. An error is raised by broker and broker should run correctly. -251. [x] **Service_increased_huge_check_interval**: New services with high check interval at creation time. -252. [x] **Services_and_bulks_${id}**: One service is configured with one metric with a name of 150 to 1021 characters. -253. [x] **Start_Stop_Broker_Engine_${id}**: Start-Stop Broker/Engine - Broker started first - Engine stopped first -254. [x] **Start_Stop_Engine_Broker_${id}**: Start-Stop Broker/Engine - Broker started first - Broker stopped first -255. [x] **UNIFIED_SQL_FILTER**: With bbdo version 3.0.1, we watch events written or rejected in unified_sql -256. [x] **VICT_ONE_CHECK_METRIC**: victoria metrics metric output -257. [x] **VICT_ONE_CHECK_METRIC_AFTER_FAILURE**: victoria metrics metric output after victoria shutdown -258. [x] **VICT_ONE_CHECK_STATUS**: victoria metrics status output -259. [x] **Whitelist_Directory_Rights**: log if /etc/centreon-engine-whitelist has not mandatory rights or owner -260. [x] **Whitelist_Empty_Directory**: log if /etc/centreon-engine-whitelist is empty -261. [x] **Whitelist_Host**: test allowed and forbidden commands for hosts -262. [x] **Whitelist_No_Whitelist_Directory**: log if /etc/centreon-engine-whitelist doesn't exist -263. [x] **Whitelist_Perl_Connector**: test allowed and forbidden commands for services -264. [x] **Whitelist_Service**: test allowed and forbidden commands for services -265. [x] **Whitelist_Service_EH**: test allowed and forbidden event handler for services -266. [x] **metric_mapping**: Check if metric name exists using a stream connector -267. 
[x] **not1**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK HARD state. -268. [x] **not10**: This test case involves scheduling downtime on a down host that already had a critical notification. When The Host return to UP state we should receive a recovery notification. -269. [x] **not11**: This test case involves configuring one service and checking that three alerts are sent for it. -270. [x] **not12**: Escalations -271. [x] **not13**: notification for a dependensies host -272. [x] **not14**: notification for a Service dependency -273. [x] **not15**: several notification commands for the same user. -274. [x] **not16**: notification for a dependensies services group -275. [x] **not17**: notification for a dependensies host group -276. [x] **not18**: notification delay where first notification delay equal retry check -277. [x] **not19**: notification delay where first notification delay greater than retry check -278. [x] **not1_WL_KO**: This test case configures a single service. When it is in non-OK HARD state a notification should be sent but it is not allowed by the whitelist -279. [x] **not1_WL_OK**: This test case configures a single service. When it is in non-OK HARD state a notification is sent because it is allowed by the whitelist -280. [x] **not2**: This test case configures a single service and verifies that a recovery notification is sent -281. [x] **not20**: notification delay where first notification delay samller than retry check -282. [x] **not3**: This test case configures a single service and verifies the notification system's behavior during and after downtime -283. [x] **not4**: This test case configures a single service and verifies the notification system's behavior during and after acknowledgement -284. [x] **not5**: This test case configures two services with two different users being notified when the services transition to a critical state. -285. 
[x] **not6**: This test case validate the behavior when the notification time period is set to null. -286. [x] **not7**: This test case simulates a host alert scenario. -287. [x] **not8**: This test validates the critical host notification. -288. [x] **not9**: This test case configures a single host and verifies that a recovery notification is sent after the host recovers from a non-OK state. -289. [x] **not_in_timeperiod_with_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and OK is sent outside timeperiod when _send_recovery_notifications_anyways is set -290. [x] **not_in_timeperiod_without_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and OK is not sent outside timeperiod when _send_recovery_notifications_anyways is not set +106. [x] **BEOTEL_CENTREON_AGENT_CHECK_HOST**: agent check host and we expect to get it in check result +107. [x] **BEOTEL_CENTREON_AGENT_CHECK_HOST_CRYPTED**: agent check host with encrypted connection and we expect to get it in check result +108. [x] **BEOTEL_CENTREON_AGENT_CHECK_SERVICE**: agent check service and we expect to get it in check result +109. [x] **BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST**: agent check host with reversed connection and we expect to get it in check result +110. [x] **BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST_CRYPTED**: agent check host with encrypted reversed connection and we expect to get it in check result +111. [x] **BEOTEL_REVERSE_CENTREON_AGENT_CHECK_SERVICE**: agent check service with reversed connection and we expect to get it in check result +112. [x] **BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED**: we configure engine with a telegraf conf server and we check telegraf conf file +113. 
[x] **BEOTEL_SERVE_TELEGRAF_CONFIGURATION_NO_CRYPTED**: we configure engine with a telegraf conf server and we check telegraf conf file +114. [x] **BEOTEL_TELEGRAF_CHECK_HOST**: we send nagios telegraf formatted data and we expect to get it in check result +115. [x] **BEOTEL_TELEGRAF_CHECK_SERVICE**: we send nagios telegraf formatted data and we expect to get it in check result +116. [x] **BEPBBEE1**: central-module configured with bbdo_version 3.0 but not others. Unable to establish connection. +117. [x] **BEPBBEE2**: bbdo_version 3 not compatible with sql/storage +118. [x] **BEPBBEE3**: bbdo_version 3 generates new bbdo protobuf service status messages. +119. [x] **BEPBBEE4**: bbdo_version 3 generates new bbdo protobuf host status messages. +120. [x] **BEPBBEE5**: bbdo_version 3 generates new bbdo protobuf service messages. +121. [x] **BEPBCVS**: bbdo_version 3 communication of custom variables. +122. [x] **BEPBHostParent**: bbdo_version 3 communication of host parent relations +123. [x] **BEPBINST_CONF**: bbdo_version 3 communication of instance configuration. +124. [x] **BEPBRI1**: bbdo_version 3 use pb_resource new bbdo protobuf ResponsiveInstance message. +125. [x] **BERD1**: Starting/stopping Broker does not create duplicated events. +126. [x] **BERD2**: Starting/stopping Engine does not create duplicated events. +127. [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases +128. [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases +129. [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 +130. [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 +131. [x] **BERDUCA300**: Starting/stopping Engine is stopped ; it should emit a stop event and receive an ack event with events to clean from broker. +132.
[x] **BERDUCA301**: Starting/stopping Engine is stopped ; it should emit a stop event and receive an ack event with events to clean from broker with bbdo 3.0.1. +133. [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql +134. [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql +135. [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) +136. [x] **BESERVCHECK**: external command CHECK_SERVICE_RESULT +137. [x] **BESS1**: Start-Stop Broker/Engine - Broker started first - Broker stopped first +138. [x] **BESS2**: Start-Stop Broker/Engine - Broker started first - Engine stopped first +139. [x] **BESS2U**: Start-Stop Broker/Engine - Broker started first - Engine stopped first. Unified_sql is used. +140. [x] **BESS3**: Start-Stop Broker/Engine - Engine started first - Engine stopped first +141. [x] **BESS4**: Start-Stop Broker/Engine - Engine started first - Broker stopped first +142. [x] **BESS5**: Start-Stop Broker/engine - Engine debug level is set to all, it should not hang +143. [x] **BESSBQ1**: A very bad queue file is written for broker. Broker and Engine are then started, Broker must read the file raising an error because of that file and then get data sent by Engine. +144. [x] **BESS_CRYPTED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured +145. [x] **BESS_CRYPTED_GRPC2**: Start-Stop grpc version Broker/Engine only server crypted +146. [x] **BESS_CRYPTED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted +147. [x] **BESS_CRYPTED_REVERSED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured +148. [x] **BESS_CRYPTED_REVERSED_GRPC2**: Start-Stop grpc version Broker/Engine only engine server crypted +149. 
[x] **BESS_CRYPTED_REVERSED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted +150. [x] **BESS_ENGINE_DELETE_HOST**: once engine and cbd started, stop and restart cbd, delete an host and reload engine, cbd mustn't core +151. [x] **BESS_GRPC1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first +152. [x] **BESS_GRPC2**: Start-Stop grpc version Broker/Engine - Broker started first - Engine stopped first +153. [x] **BESS_GRPC3**: Start-Stop grpc version Broker/Engine - Engine started first - Engine stopped first +154. [x] **BESS_GRPC4**: Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first +155. [x] **BESS_GRPC5**: Start-Stop grpc version Broker/engine - Engine debug level is set to all, it should not hang +156. [x] **BESS_GRPC_COMPRESS1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped last compression activated +157. [x] **BETAG1**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. +158. [x] **BETAG2**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. +159. [x] **BEUTAG1**: Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. +160. [x] **BEUTAG10**: some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. +161. [x] **BEUTAG11**: some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. +162. [x] **BEUTAG12**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. 
The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. +163. [x] **BEUTAG2**: Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. +164. [x] **BEUTAG3**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. +165. [x] **BEUTAG4**: Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. +166. [x] **BEUTAG5**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. +167. [x] **BEUTAG6**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. +168. [x] **BEUTAG7**: Some services are configured with tags on two pollers. Then tags configuration is modified. +169. [x] **BEUTAG8**: Services have tags provided by templates. +170. [x] **BEUTAG9**: hosts have tags provided by templates. +171. [x] **BEUTAG_REMOVE_HOST_FROM_HOSTGROUP**: remove a host from hostgroup, reload, insert 2 host in the hostgroup must not make sql error +172. [x] **BE_BACKSLASH_CHECK_RESULT**: external command PROCESS_SERVICE_CHECK_RESULT with \: +173. [x] **BE_DEFAULT_NOTIFCATION_INTERVAL_IS_ZERO_SERVICE_RESOURCE**: default notification_interval must be set to NULL in services, hosts and resources tables. +174. [x] **BE_NOTIF_OVERFLOW**: bbdo 2.0 notification number =40000. make an overflow => notification_number null in db +175. [x] **BE_TIME_NULL_SERVICE_RESOURCE**: With BBDO 3, notification_interval time must be set to NULL on 0 in services, hosts and resources tables. +176. 
[x] **BRCS1**: Broker reverse connection stopped +177. [x] **BRCTS1**: Broker reverse connection too slow +178. [x] **BRCTSMN**: Broker connected to map with neb filter +179. [x] **BRCTSMNS**: Broker connected to map with neb and storage filters +180. [x] **BRGC1**: Broker good reverse connection +181. [x] **BRRDCDDID1**: RRD metrics deletion from index ids with rrdcached. +182. [x] **BRRDCDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage with rrdcached. +183. [x] **BRRDCDDIDU1**: RRD metrics deletion from index ids with unified sql output with rrdcached. +184. [x] **BRRDCDDM1**: RRD metrics deletion from metric ids with rrdcached. +185. [x] **BRRDCDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage and rrdcached. +186. [x] **BRRDCDDMID1**: RRD deletion of non existing metrics and indexes with rrdcached +187. [x] **BRRDCDDMIDU1**: RRD deletion of non existing metrics and indexes with rrdcached +188. [x] **BRRDCDDMU1**: RRD metric deletion on table metric with unified sql output with rrdcached +189. [x] **BRRDCDRB1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output and rrdcached. +190. [x] **BRRDCDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached +191. [x] **BRRDCDRBU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output and rrdcached. +192. [x] **BRRDCDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached +193. [x] **BRRDDID1**: RRD metrics deletion from index ids. +194. [x] **BRRDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage. +195. [x] **BRRDDIDU1**: RRD metrics deletion from index ids with unified sql output. +196. [x] **BRRDDM1**: RRD metrics deletion from metric ids. +197. 
[x] **BRRDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage. +198. [x] **BRRDDMID1**: RRD deletion of non existing metrics and indexes +199. [x] **BRRDDMIDU1**: RRD deletion of non existing metrics and indexes +200. [x] **BRRDDMU1**: RRD metric deletion on table metric with unified sql output +201. [x] **BRRDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql +202. [x] **BRRDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql +203. [x] **BRRDRM1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output. +204. [x] **BRRDRMU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output. +205. [x] **BRRDSTATUS**: We are working with BBDO3. This test checks status are correctly handled independently from their value. +206. [x] **BRRDSTATUSRETENTION**: We are working with BBDO3. This test checks status are not sent twice after Engine reload. +207. [x] **BRRDUPLICATE**: RRD metric rebuild with a query in centreon_storage and unified sql with duplicate rows in database +208. [x] **BRRDWM1**: We are working with BBDO3. This test checks protobuf metrics and status are sent to cbd RRD. +209. [x] **CBD_RELOAD_AND_FILTERS**: We start engine/broker with a classical configuration. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. +210. [x] **CBD_RELOAD_AND_FILTERS_WITH_OPR**: We start engine/broker with an almost classical configuration, just the connection between cbd central and cbd rrd is reversed with one peer retention. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. 
Then all is newly set as filter and all events are sent to rrd broker. +211. [x] **DTIM**: New services with several pollers are created. Then downtimes are set on all configured hosts. This action results on 5250 downtimes if we also count impacted services. Then all these downtimes are removed. This test is done with BBDO 3.0.1 +212. [x] **EBBM1**: A service status contains metrics that do not fit in a float number. +213. [x] **EBBPS1**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table resources. +214. [x] **EBBPS2**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services. +215. [x] **EBDP1**: Four new pollers are started and then we remove Poller3. +216. [x] **EBDP2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. +217. [x] **EBDP3**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. +218. [x] **EBDP4**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by Broker. +219. [x] **EBDP5**: Four new pollers are started and then we remove Poller3. +220. [x] **EBDP6**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. +221. [x] **EBDP7**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. +222. [x] **EBDP8**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by broker. +223. 
[x] **EBDP_GRPC2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. +224. [x] **EBMSSM**: 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. GetSqlManagerStats is called to measure writes into data_bin. +225. [x] **EBNHG1**: New host group with several pollers and connections to DB +226. [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup +227. [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql +228. [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql +229. [x] **EBNHGU3**: New host group with several pollers and connections to DB with broker configured with unified_sql +230. [x] **EBNHGU4_${test_label}**: New host group with several pollers and connections to DB with broker and rename this hostgroup +231. [x] **EBNSG1**: New service group with several pollers and connections to DB +232. [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql +233. [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql +234. [x] **EBNSGU3_${test_label}**: New service group with several pollers and connections to DB with broker and rename this servicegroup +235. [x] **EBNSVC1**: New services with several pollers +236. [x] **EBPS2**: 1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker. +237. [x] **EBSAU2**: New services with action_url with more than 2000 characters +238. 
[x] **EBSN3**: New services with notes with more than 500 characters +239. [x] **EBSNU1**: New services with notes_url with more than 2000 characters +240. [x] **ENRSCHE1**: Verify that next check of a rescheduled host is made at last_check + interval_check +241. [x] **FILTER_ON_LUA_EVENT**: stream connector with a bad configured filter generate a log error message +242. [x] **GRPC_CLOUD_FAILURE**: simulate a broker failure in cloud environment, we provide a muted grpc server and there must remain only one grpc connection. Then we start broker and connection must be ok +243. [x] **GRPC_RECONNECT**: We restart broker and engine must reconnect to it and send data +244. [x] **LCDNU**: the lua cache updates correctly service cache. +245. [x] **LCDNUH**: the lua cache updates correctly host cache +246. [x] **LOGV2DB1**: log-v2 disabled old log enabled check broker sink +247. [x] **LOGV2DB2**: log-v2 disabled old log disabled check broker sink +248. [x] **LOGV2DF1**: log-v2 disabled old log enabled check logfile sink +249. [x] **LOGV2DF2**: log-v2 disabled old log disabled check logfile sink +250. [x] **LOGV2EB1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled. +251. [x] **LOGV2EB2**: log-v2 enabled old log enabled check broker sink +252. [x] **LOGV2EBU1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled with bbdo3. +253. [x] **LOGV2EBU2**: Check Broker sink with log-v2 enabled and legacy log enabled with BBDO3. +254. [x] **LOGV2EF1**: log-v2 enabled old log disabled check logfile sink +255. [x] **LOGV2EF2**: log-v2 enabled old log enabled check logfile sink +256. [x] **LOGV2FE2**: log-v2 enabled old log enabled check logfile sink +257. [x] **NO_FILTER_NO_ERROR**: no filter configured => no filter error. +258. [x] **RLCode**: Test if reloading LUA code in a stream connector applies the changes +259. [x] **RRD1**: RRD metric rebuild asked with gRPC API. 
Three non existing indexes IDs are selected then an error message is sent. This is done with unified_sql output. +260. [x] **SDER**: The check attempts and the max check attempts of (host_1,service_1) are changed to 280 thanks to the retention.dat file. Then Engine and Broker are started and Broker should write these values in the services and resources tables. We only test the services table because we need a resources table that allows bigger numbers for these two attributes. But we see that Broker doesn't crash anymore. +261. [x] **SEVERAL_FILTERS_ON_LUA_EVENT**: Two stream connectors with different filters are configured. +262. [x] **STORAGE_ON_LUA**: The category 'storage' is applied on the stream connector. Only events of this category should be sent to this stream. +263. [x] **STUPID_FILTER**: Unified SQL is configured with only the bbdo category as filter. An error is raised by broker and broker should run correctly. +264. [x] **Service_increased_huge_check_interval**: New services with high check interval at creation time. +265. [x] **Services_and_bulks_${id}**: One service is configured with one metric with a name of 150 to 1021 characters. +266. [x] **Start_Stop_Broker_Engine_${id}**: Start-Stop Broker/Engine - Broker started first - Engine stopped first +267. [x] **Start_Stop_Engine_Broker_${id}**: Start-Stop Broker/Engine - Broker started first - Broker stopped first +268. [x] **UNIFIED_SQL_FILTER**: With bbdo version 3.0.1, we watch events written or rejected in unified_sql +269. [x] **VICT_ONE_CHECK_METRIC**: victoria metrics metric output +270. [x] **VICT_ONE_CHECK_METRIC_AFTER_FAILURE**: victoria metrics metric output after victoria shutdown +271. [x] **VICT_ONE_CHECK_STATUS**: victoria metrics status output +272. [x] **Whitelist_Directory_Rights**: log if /etc/centreon-engine-whitelist has not mandatory rights or owner +273. [x] **Whitelist_Empty_Directory**: log if /etc/centreon-engine-whitelist is empty +274. 
[x] **Whitelist_Host**: test allowed and forbidden commands for hosts +275. [x] **Whitelist_No_Whitelist_Directory**: log if /etc/centreon-engine-whitelist doesn't exist +276. [x] **Whitelist_Perl_Connector**: test allowed and forbidden commands for services +277. [x] **Whitelist_Service**: test allowed and forbidden commands for services +278. [x] **Whitelist_Service_EH**: test allowed and forbidden event handler for services +279. [x] **metric_mapping**: Check if metric name exists using a stream connector +280. [x] **not1**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK HARD state. +281. [x] **not10**: This test case involves scheduling downtime on a down host that already had a critical notification. When The Host return to UP state we should receive a recovery notification. +282. [x] **not11**: This test case involves configuring one service and checking that three alerts are sent for it. +283. [x] **not12**: Escalations +284. [x] **not13**: notification for a dependencies host +285. [x] **not14**: notification for a Service dependency +286. [x] **not15**: several notification commands for the same user. +287. [x] **not16**: notification for dependencies services group +288. [x] **not17**: notification for a dependencies host group +289. [x] **not18**: notification delay where first notification delay equal retry check +290. [x] **not19**: notification delay where first notification delay greater than retry check +291. [x] **not1_WL_KO**: This test case configures a single service. When it is in non-OK HARD state a notification should be sent but it is not allowed by the whitelist +292. [x] **not1_WL_OK**: This test case configures a single service. When it is in non-OK HARD state a notification is sent because it is allowed by the whitelist +293. [x] **not2**: This test case configures a single service and verifies that a recovery notification is sent +294. 
[x] **not20**: notification delay where first notification delay smaller than retry check +295. [x] **not3**: This test case configures a single service and verifies the notification system's behavior during and after downtime +296. [x] **not4**: This test case configures a single service and verifies the notification system's behavior during and after acknowledgement +297. [x] **not5**: This test case configures two services with two different users being notified when the services transition to a critical state. +298. [x] **not6**: This test case validates the behavior when the notification time period is set to null. +299. [x] **not7**: This test case simulates a host alert scenario. +300. [x] **not8**: This test validates the critical host notification. +301. [x] **not9**: This test case configures a single host and verifies that a recovery notification is sent after the host recovers from a non-OK state. +302. [x] **not_in_timeperiod_with_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and OK is sent outside timeperiod when _send_recovery_notifications_anyways is set +303. [x] **not_in_timeperiod_without_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and OK is not sent outside timeperiod when _send_recovery_notifications_anyways is not set ### Ccc 1. [x] **BECCC1**: ccc without port fails with an error message @@ -535,6 +549,8 @@ Here is the list of the currently implemented tests: 12. [x] **ESS4**: Start-Stop (300ms between start/stop) 5 times three instances of engine and no coredump 13. [x] **EXT_CONF1**: Engine configuration is overided by json conf 14. [x] **EXT_CONF2**: Engine configuration is overided by json conf after reload +15. 
[x] **E_HOST_DOWN_DISABLE_SERVICE_CHECKS**: host_down_disable_service_checks is set to 1, host down switch all services to UNKNOWN +16. [x] **E_HOST_UNREACHABLE_DISABLE_SERVICE_CHECKS**: host_down_disable_service_checks is set to 1, host unreachable switch all services to UNKNOWN ### Migration 1. [x] **MIGRATION**: Migration bbdo2 with sql/storage to bbdo2 with unified_sql and then to bbdo3 with unified_sql and then to bbdo2 with unified_sql and then to bbdo2 with sql/storage diff --git a/tests/bam/boolean_rules.robot b/tests/bam/boolean_rules.robot index fe6e834b791..c9f6ab27bcb 100644 --- a/tests/bam/boolean_rules.robot +++ b/tests/bam/boolean_rules.robot @@ -210,9 +210,7 @@ BABOOORREL ${start} Get Current Date Ctn Start engine # Let's wait for the external command check start - ${content} Create List check_for_external_commands() - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} A message telling check_for_external_commands() should be available. + Ctn Wait For Engine To Be Ready ${start} ${1} # 302 is set to critical => {host_16 service_302} {IS} {OK} is then False Ctn Process Service Result Hard host_16 service_302 2 output critical for service_302 ${result} Ctn Check Service Status With Timeout host_16 service_302 2 30 HARD @@ -236,13 +234,12 @@ BABOOORREL ... ${id_bool} ... {host_16 service_302} {IS} {OK} {OR} {host_16 service_304} {IS} {OK} + ${start} Get Current Date Ctn Reload Engine Ctn Reload Broker # Let's wait for the external command check start - ${content} Create List check_for_external_commands() - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ Ctn Wait For Engine To Be Ready ${start} ${1} Ctn Process Service Result Hard host_16 service_302 2 output ok for service_302 Ctn Process Service Result Hard host_16 service_304 0 output ok for service_304 diff --git a/tests/broker-engine/bbdo-protobuf.robot b/tests/broker-engine/bbdo-protobuf.robot index 835eef63903..f250fcb0cb9 100644 --- a/tests/broker-engine/bbdo-protobuf.robot +++ b/tests/broker-engine/bbdo-protobuf.robot @@ -175,78 +175,6 @@ BEPBCVS [Teardown] Ctn Stop Engine Broker And Save Logs True -BEPB_HOST_DEPENDENCY - [Documentation] BBDO 3 communication of host dependencies. - [Tags] broker engine protobuf bbdo - Ctn Config Engine ${1} - Ctn Config Engine Add Cfg File 0 dependencies.cfg - Ctn Add Host Dependency 0 host_1 host_2 - Ctn Config Broker central - Ctn Config Broker module - Ctn Config BBDO3 ${1} - Ctn Broker Config Log central sql trace - Ctn Config Broker Sql Output central unified_sql - Ctn Clear Retention - ${start} Get Current Date - Ctn Start Broker True - Ctn Start engine - - ${result} Common.Ctn Check Host Dependencies 2 1 24x7 1 ou dp 30 - Should Be True ${result} No notification dependency from 2 to 1 with timeperiod 24x7 on 'ou' - - Ctn Config Engine ${1} - Ctn Reload Engine - - ${result} Common.Ctn Check No Host Dependencies 30 - Should Be True ${result} No host dependency should be defined - - [Teardown] Ctn Stop Engine Broker And Save Logs True - -BEPB_SERVICE_DEPENDENCY - [Documentation] bbdo_version 3 communication of host dependencies. 
- [Tags] broker engine protobuf bbdo - Ctn Config Engine ${1} - Ctn Config Engine Add Cfg File 0 dependencies.cfg - Ctn Add Service Dependency 0 host_1 host_2 service_1 service_21 - Ctn Config Broker central - Ctn Config Broker module - Ctn Config BBDO3 ${1} - Ctn Broker Config Log central sql trace - Ctn Config Broker Sql Output central unified_sql - Ctn Clear Retention - ${start} Get Current Date - Ctn Start Broker True - Ctn Start engine - - Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} - - FOR ${index} IN RANGE 30 - ${output} Query - ... SELECT dependent_host_id, dependent_service_id, host_id, service_id, dependency_period, inherits_parent, notification_failure_options FROM services_services_dependencies; - - Log To Console ${output} - Sleep 1s - IF "${output}" == "((2, 21, 1, 1, '24x7', 1, 'c'),)" BREAK - END - Should Be Equal As Strings - ... ${output} - ... ((2, 21, 1, 1, '24x7', 1, 'c'),) - ... host dependency not found in database - - Ctn Config Engine ${1} - Ctn Reload Engine - - FOR ${index} IN RANGE 30 - ${output} Query - ... 
SELECT dependent_host_id, host_id, dependency_period, inherits_parent, notification_failure_options FROM hosts_hosts_dependencies - Log To Console ${output} - Sleep 1s - IF "${output}" == "()" BREAK - END - Should Be Equal As Strings ${output} () host dependency not deleted from database - - [Teardown] Ctn Stop Engine Broker And Save Logs True - BEPBHostParent [Documentation] bbdo_version 3 communication of host parent relations [Tags] broker engine protobuf bbdo diff --git a/tests/broker-engine/external-commands2.robot b/tests/broker-engine/external-commands2.robot index 65c6b7dc848..852d4a48c0a 100644 --- a/tests/broker-engine/external-commands2.robot +++ b/tests/broker-engine/external-commands2.robot @@ -1399,16 +1399,51 @@ BEHOSTCHECK Ctn Config Broker module ${1} Ctn Broker Config Log central sql trace Ctn Config BBDO3 1 - Ctn Config Broker Sql Output central unified_sql ${start} Get Current Date Ctn Start Broker - Ctn Start engine - ${content} Create List check_for_external_commands - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} No check for external commands executed for 1mn. + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} - Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} - Execute SQL String UPDATE hosts SET command_line='toto' WHERE name='host_1' + ${start} Ctn Get Round Current Date Ctn Schedule Forced Host Check host_1 - ${result} Ctn Check Host Check With Timeout host_1 30 ${VarRoot}/lib/centreon-engine/check.pl --id 0 - Should Be True ${result} hosts table not updated + ${result} Ctn Check Host Check With Timeout host_1 ${start} 30 + Should Be True ${result} last_check column in resources table not updated. 
+ + +BE_BACKSLASH_CHECK_RESULT + [Documentation] external command PROCESS_SERVICE_CHECK_RESULT with \: + [Tags] broker engine services extcmd MON-51121 + Ctn Config Engine ${1} ${50} ${20} + Ctn Set Services Passive ${0} service_.* + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module ${1} + Ctn ConfigBBDO3 1 + Ctn Config Broker Sql Output central unified_sql + Ctn Clear Retention + Ctn Broker Config Log central sql debug + FOR ${use_grpc} IN RANGE 0 2 + Log To Console external command PROCESS_SERVICE_CHECK_RESULT use_grpc=${use_grpc} + Ctn Clear Retention + ${start} Get Current Date + Ctn Start Broker + Ctn Start engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + ${start} Ctn Get Round Current Date + Ctn Process Service Check Result host_1 service_1 0 output ok D: \\: Total: 1.205TB - Used: 1.203TB (100%) - Free: 2.541GB (0%) ${use_grpc} config0 ${use_grpc} + + ${result} Ctn Check Service Output Resource Status With Timeout + ... host_1 + ... service_1 + ... 35 + ... ${start} + ... 0 + ... HARD + ... 
output ok D: \\: Total: 1.205TB - Used: 1.203TB (100%) - Free: 2.541GB (0%) ${use_grpc} + Should Be True ${result} resources table not updated + + + Ctn Stop engine + Ctn Kindly Stop Broker + END diff --git a/tests/broker-engine/hostgroups.robot b/tests/broker-engine/hostgroups.robot index 4ffda9ae44c..4719678b489 100644 --- a/tests/broker-engine/hostgroups.robot +++ b/tests/broker-engine/hostgroups.robot @@ -23,7 +23,7 @@ EBNHG1 Ctn Broker Config Output Set central central-broker-master-perfdata connections_count 5 ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Ctn Add Host Group ${0} ${1} ["host_1", "host_2", "host_3"] Sleep 3s @@ -51,7 +51,7 @@ EBNHGU1 Ctn Broker Config Output Set central central-broker-unified-sql connections_count 5 ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Ctn Add Host Group ${0} ${1} ["host_1", "host_2", "host_3"] Sleep 3s @@ -80,7 +80,7 @@ EBNHGU2 Ctn Config BBDO3 3 ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Ctn Add Host Group ${0} ${1} ["host_1", "host_2", "host_3"] Sleep 3s @@ -110,7 +110,7 @@ EBNHGU3 ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Ctn Add Host Group ${0} ${1} ["host_1", "host_2", "host_3"] Ctn Add Host Group ${1} ${1} ["host_21", "host_22", "host_23"] Ctn Add Host Group ${2} ${1} ["host_31", "host_32", "host_33"] @@ -121,7 +121,7 @@ EBNHGU3 Ctn Reload Engine ${result} Ctn Check Number Of Relations Between Hostgroup And Hosts 1 12 30 - Should Be True ${result} We should have 12 hosts members of host 1. + Should Be True ${result} We should have 12 hosts members in the hostgroup 1. Ctn Config Engine Remove Cfg File ${0} hostgroups.cfg @@ -129,7 +129,7 @@ EBNHGU3 Ctn Reload Broker Ctn Reload Engine ${result} Ctn Check Number Of Relations Between Hostgroup And Hosts 1 9 30 - Should Be True ${result} We should have 12 hosts members of host 1. 
+ Should Be True ${result} We should have 9 hosts members in the hostgroup 1. EBNHG4 [Documentation] New host group with several pollers and connections to DB with broker and rename this hostgroup @@ -144,7 +144,7 @@ EBNHG4 Ctn Broker Config Output Set central central-broker-master-perfdata connections_count 5 ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Sleep 3s Ctn Add Host Group ${0} ${1} ["host_1", "host_2", "host_3"] @@ -183,6 +183,7 @@ EBNHGU4_${test_label} [Documentation] New host group with several pollers and connections to DB with broker and rename this hostgroup [Tags] broker engine hostgroup Ctn Config Engine ${3} + Ctn Engine Config Set Value ${0} log_level_config debug Ctn Config Broker rrd Ctn Config Broker central Ctn Config Broker module ${3} @@ -202,10 +203,13 @@ EBNHGU4_${test_label} ${start} Get Current Date Ctn Start Broker - Ctn Start engine - Sleep 3s + Ctn Start Engine + + Ctn Wait For Engine To Be Ready ${start} ${1} + Ctn Add Host Group ${0} ${1} ["host_1", "host_2", "host_3"] + ${start} Ctn Get Round Current Date Ctn Reload Broker Ctn Reload Engine @@ -242,7 +246,7 @@ EBNHGU4_${test_label} Ctn Rename Host Group ${0} ${1} test ["host_1", "host_2", "host_3"] - Sleep 10s + Sleep 3s Ctn Reload Engine Ctn Reload Broker @@ -289,14 +293,17 @@ EBNHGU4_${test_label} END Should Be Equal As Strings ${output} () hostgroup_test not deleted - Sleep 2s - # clear lua file - # this part of test is disable because group erasure is desactivated in macrocache.cc - # it will be reactivated when global cache will be implemented - # Create File /tmp/lua-engine.log - # Sleep 2s - # ${grep_result} Grep File /tmp/lua-engine.log no host_group_name 1 - # Should Be True len("""${grep_result}""") < 10 hostgroup 1 still exist + # Waiting to observe no host group. 
+ FOR ${index} IN RANGE 60 + Create File /tmp/lua-engine.log + Sleep 1s + ${grep_result} Grep File /tmp/lua-engine.log no host_group_name + IF len("""${grep_result}""") > 0 BREAK + END + Sleep 10s + # Do we still have no host group? + ${grep_result} Grep File /tmp/lua-engine.log host_group_name: + Should Be True len("""${grep_result}""") == 0 The hostgroup 1 still exists Examples: Use_BBDO3 test_label -- ... True BBDO3 diff --git a/tests/broker-engine/muxer_filter.robot b/tests/broker-engine/muxer_filter.robot index cb00f4e333e..8ed6ad8174e 100644 --- a/tests/broker-engine/muxer_filter.robot +++ b/tests/broker-engine/muxer_filter.robot @@ -6,10 +6,32 @@ Resource ../resources/import.resource Suite Setup Ctn Clean Before Suite Suite Teardown Ctn Clean After Suite Test Setup Ctn Stop Processes -Test Teardown Ctn Save Logs If Failed +Test Teardown Ctn Stop Engine Broker And Save Logs True *** Test Cases *** +NO_FILTER_NO_ERROR + [Documentation] no filter configured => no filter error. + [Tags] broker engine filter + Ctn Config Engine ${1} ${50} ${20} + Ctn Config Broker central + Ctn Config Broker module ${1} + Ctn Config Broker rrd + Ctn Broker Config Log central sql debug + Ctn Config Broker Sql Output central unified_sql + Ctn Config BBDO3 1 + Ctn Clear Broker Logs + + ${start} Get Current Date + Ctn Start Broker True + Ctn Start engine + + ${content} Create List + ... are too restrictive contain forbidden filters + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 15 + Should Not Be True ${result} An message of filter error has been found + + STUPID_FILTER [Documentation] Unified SQL is configured with only the bbdo category as filter. An error is raised by broker and broker should run correctly. [Tags] broker engine filter @@ -28,13 +50,10 @@ STUPID_FILTER Ctn Start engine ${content} Create List - ... The configured write filters for the endpoint 'central-broker-unified-sql' contain forbidden filters. 
These ones are removed The configured read filters for the endpoint 'central-broker-unified-sql' contain forbidden filters. These ones are removed + ... The configured write filters for the endpoint 'central-broker-unified-sql' contain forbidden filters. These ones are removed ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 60 Should Be True ${result} A message telling bad filter should be available. - Ctn Stop Engine - Ctn Kindly Stop Broker True - STORAGE_ON_LUA [Documentation] The category 'storage' is applied on the stream connector. Only events of this category should be sent to this stream. [Tags] broker engine filter @@ -62,9 +81,6 @@ STORAGE_ON_LUA ${grep_res} Grep File /tmp/all_lua_event.log "category":[^3] regexp=True Should Be Empty ${grep_res} Events of category different than 'storage' found. - Ctn Stop Engine - Ctn Kindly Stop Broker True - FILTER_ON_LUA_EVENT [Documentation] stream connector with a bad configured filter generate a log error message [Tags] broker engine filter @@ -109,9 +125,6 @@ FILTER_ON_LUA_EVENT ... All the lines in all_lua_event.log should contain "_type":196620 END - Ctn Stop Engine - Ctn Kindly Stop Broker True - BAM_STREAM_FILTER [Documentation] With bbdo version 3.0.1, a BA of type 'worst' with one service is ... configured. The BA is in critical state, because of its service. we watch its events @@ -203,9 +216,6 @@ BAM_STREAM_FILTER ... centreon-bam-reporting event neb:.* rejected by write filter regexp=True Should Not Be Empty ${grep_res} We should reject events of Neb category. They are not rejected. 
- Ctn Stop Engine - Ctn Kindly Stop Broker True - UNIFIED_SQL_FILTER [Documentation] With bbdo version 3.0.1, we watch events written or rejected in unified_sql [Tags] broker engine bam filter @@ -240,9 +250,6 @@ UNIFIED_SQL_FILTER Should Not Be Empty ${grep_res} END - Ctn Stop Engine - Ctn Kindly Stop Broker True - CBD_RELOAD_AND_FILTERS [Documentation] We start engine/broker with a classical configuration. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. [Tags] broker engine filter @@ -271,8 +278,8 @@ CBD_RELOAD_AND_FILTERS # We check that output filters to rrd are set to "all" ${content} Create List ... endpoint applier: The configured write filters for the endpoint 'centreon-broker-master-rrd' contain forbidden filters. These ones are removed - ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 60 - Should Be True ${result} No message about the output filters to rrd broker. + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 15 + Should Not Be True ${result} No message about the output filters to rrd broker. # New configuration Ctn Broker Config Output Set Json central centreon-broker-master-rrd filters {"category": [ "storage"]} @@ -320,8 +327,8 @@ CBD_RELOAD_AND_FILTERS # We check that output filters to rrd are set to "all" ${content} Create List ... endpoint applier: The configured write filters for the endpoint 'centreon-broker-master-rrd' contain forbidden filters. These ones are removed - ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 60 - Should Be True ${result} No message about the output filters to rrd broker. + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 15 + Should Not Be True ${result} No message about the output filters to rrd broker. 
${start} Get Current Date # Let's wait for storage data written into rrd files @@ -337,9 +344,6 @@ CBD_RELOAD_AND_FILTERS ... False ... Some events are rejected by the rrd output whereas all categories are enabled. - Ctn Stop Engine - Ctn Kindly Stop Broker True - CBD_RELOAD_AND_FILTERS_WITH_OPR [Documentation] We start engine/broker with an almost classical configuration, just the connection between cbd central and cbd rrd is reversed with one peer retention. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. [Tags] broker engine filter @@ -435,9 +439,6 @@ CBD_RELOAD_AND_FILTERS_WITH_OPR ... False ... Some events are rejected by the rrd output whereas all categories are enabled. - Ctn Stop Engine - Ctn Kindly Stop Broker True - SEVERAL_FILTERS_ON_LUA_EVENT [Documentation] Two stream connectors with different filters are configured. [Tags] broker engine filter @@ -507,5 +508,3 @@ SEVERAL_FILTERS_ON_LUA_EVENT ... "_type":65565 ... 
All the lines in all_lua_event-bis.log should contain "_type":65565 END - Ctn Stop Engine - Ctn Kindly Stop Broker True diff --git a/tests/broker-engine/notifications.robot b/tests/broker-engine/notifications.robot index 6414796b0f7..03e560eb206 100644 --- a/tests/broker-engine/notifications.robot +++ b/tests/broker-engine/notifications.robot @@ -672,6 +672,7 @@ not12 Ctn Config Engine ${1} ${2} ${1} Ctn Engine Config Set Value 0 interval_length 1 True Ctn Config Engine Add Cfg File ${0} servicegroups.cfg + Ctn Engine Config Set Value ${0} log_level_config trace Ctn Add Service Group ${0} ${1} ["host_1","service_1", "host_2","service_2"] Ctn Config Notifications Ctn Config Escalations @@ -694,7 +695,7 @@ not12 Ctn Start Broker Ctn Start Engine - # Let's wait for the external command check start + # Let's wait for the external command check start Ctn Wait For Engine To Be Ready ${1} ${cmd_service_1} Ctn Get Service Command Id ${1} @@ -777,10 +778,11 @@ not12 Should Be True ${result} The second notification of U4 is not sent not13 - [Documentation] notification for a dependensies host - [Tags] broker engine host unified_sql + [Documentation] notification for a dependencies host + [Tags] broker engine host Ctn Clear Commands Status Ctn Config Engine ${1} ${2} ${1} + Ctn Engine Config Set Value ${0} log_level_config trace Ctn Config Notifications Ctn Config Engine Add Cfg File ${0} dependencies.cfg Ctn Engine Config Set Value In Hosts 0 host_1 notifications_enabled 1 @@ -969,7 +971,7 @@ not14 ${content} Create List This notifier won't send any notification since it depends on another notifier that has already sent one ${result} Ctn Find In Log With Timeout ${engineLog0} ${new_date} ${content} 60 - Should Be True ${result} the dependency not working and the service_é has recieved a notification + Should Be True ${result} The dependency not working and the service_é has recieved a notification ## Time to set the service1 to OK hard Ctn Set Command Status ${cmd_service_1} 
${0} diff --git a/tests/broker-engine/opentelemetry.robot b/tests/broker-engine/opentelemetry.robot index 728e624a924..f520a079595 100644 --- a/tests/broker-engine/opentelemetry.robot +++ b/tests/broker-engine/opentelemetry.robot @@ -2,6 +2,7 @@ Documentation Engine/Broker tests on opentelemetry engine server Resource ../resources/import.resource +Library ../resources/Agent.py Suite Setup Ctn Clean Before Suite Suite Teardown Ctn Clean After Suite @@ -77,8 +78,8 @@ Test Teardown Ctn Stop Engine Broker And Save Logs # Should Be True ${test_ret} protobuf object sent to engine mus be in lua.log BEOTEL_TELEGRAF_CHECK_HOST - [Documentation] we send nagios telegraf formated datas and we expect to get it in check result - [Tags] broker engine opentelemetry mon-34004 + [Documentation] we send nagios telegraf formatted datas and we expect to get it in check result + [Tags] broker engine opentelemetry MON-34004 Ctn Config Engine ${1} ${2} ${2} Ctn Add Otl ServerModule ... 0 @@ -88,6 +89,7 @@ BEOTEL_TELEGRAF_CHECK_HOST ... OTEL connector ... opentelemetry --processor=nagios_telegraf --extractor=attributes --host_path=resource_metrics.scope_metrics.data.data_points.attributes.host --service_path=resource_metrics.scope_metrics.data.data_points.attributes.service Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 Ctn Engine Config Add Command ... ${0} ... otel_check_icmp @@ -116,16 +118,12 @@ BEOTEL_TELEGRAF_CHECK_HOST ${resources_list} Ctn Create Otl Request ${0} host_1 - # check without feed + Log To Console export metrics + # feed and check ${start} Ctn Get Round Current Date - Ctn Schedule Forced Host Check host_1 - ${result} Ctn Check Host Output Resource Status With Timeout - ... host_1 - ... 35 - ... ${start} - ... 0 - ... HARD - ... 
(No output returned from host check) + Ctn Send Otl To Engine 4317 ${resources_list} + + ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 0 HARD OK Should Be True ${result} hosts table not updated @@ -134,34 +132,27 @@ BEOTEL_TELEGRAF_CHECK_HOST Sleep 5 - - # feed and check - ${start} Ctn Get Round Current Date - Ctn Schedule Forced Host Check host_1 - - ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 0 HARD OK - Should Be True ${result} hosts table not updated - # check then feed, three times to modify hard state ${start} Ctn Get Round Current Date - Ctn Schedule Forced Host Check host_1 Sleep 2 ${resources_list} Ctn Create Otl Request ${2} host_1 Ctn Send Otl To Engine 4317 ${resources_list} - Ctn Schedule Forced Host Check host_1 - Sleep 2 + + ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 1 SOFT CRITICAL + Should Be True ${result} hosts table not updated + ${resources_list} Ctn Create Otl Request ${2} host_1 Ctn Send Otl To Engine 4317 ${resources_list} - Ctn Schedule Forced Host Check host_1 + + Sleep 2 ${resources_list} Ctn Create Otl Request ${2} host_1 Ctn Send Otl To Engine 4317 ${resources_list} - ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 1 CRITICAL - + ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 1 HARD CRITICAL Should Be True ${result} hosts table not updated BEOTEL_TELEGRAF_CHECK_SERVICE - [Documentation] we send nagios telegraf formated datas and we expect to get it in check result + [Documentation] we send nagios telegraf formatted datas and we expect to get it in check result [Tags] broker engine opentelemetry mon-34004 Ctn Config Engine ${1} ${2} ${2} Ctn Add Otl ServerModule 0 {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0} @@ -170,6 +161,7 @@ BEOTEL_TELEGRAF_CHECK_SERVICE ... OTEL connector ... 
opentelemetry --processor=nagios_telegraf --extractor=attributes --host_path=resource_metrics.scope_metrics.data.data_points.attributes.host --service_path=resource_metrics.scope_metrics.data.data_points.attributes.service Ctn Engine Config Replace Value In Services ${0} service_1 check_command otel_check_icmp + Ctn Set Services Passive 0 service_1 Ctn Engine Config Add Command ... ${0} ... otel_check_icmp @@ -198,29 +190,11 @@ BEOTEL_TELEGRAF_CHECK_SERVICE ${resources_list} Ctn Create Otl Request ${0} host_1 service_1 - # check without feed - + # feed and check ${start} Ctn Get Round Current Date - Ctn Schedule Forced Svc Check host_1 service_1 - ${result} Ctn Check Service Output Resource Status With Timeout - ... host_1 - ... service_1 - ... 35 - ... ${start} - ... 0 - ... HARD - ... (No output returned from plugin) - Should Be True ${result} services table not updated - Log To Console export metrics Ctn Send Otl To Engine 4317 ${resources_list} - Sleep 5 - - # feed and check - ${start} Ctn Get Round Current Date - Ctn Schedule Forced Svc Check host_1 service_1 - ${result} Ctn Check Service Output Resource Status With Timeout host_1 service_1 30 ${start} 0 HARD OK Should Be True ${result} services table not updated @@ -228,18 +202,17 @@ BEOTEL_TELEGRAF_CHECK_SERVICE ${start} Ctn Get Round Current Date ${resources_list} Ctn Create Otl Request ${2} host_1 service_1 Ctn Send Otl To Engine 4317 ${resources_list} - Sleep 2 - Ctn Schedule Forced Svc Check host_1 service_1 + + ${result} Ctn Check Service Output Resource Status With Timeout host_1 service_1 30 ${start} 2 SOFT CRITICAL + Should Be True ${result} services table not updated + ${resources_list} Ctn Create Otl Request ${2} host_1 service_1 Ctn Send Otl To Engine 4317 ${resources_list} + Sleep 2 - Ctn Schedule Forced Svc Check host_1 service_1 ${resources_list} Ctn Create Otl Request ${2} host_1 service_1 Ctn Send Otl To Engine 4317 ${resources_list} - Sleep 2 - Ctn Schedule Forced Svc Check host_1 service_1 
${result} Ctn Check Service Output Resource Status With Timeout host_1 service_1 30 ${start} 2 HARD CRITICAL - Should Be True ${result} services table not updated BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED @@ -249,7 +222,7 @@ BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED Ctn Config Engine ${1} ${3} ${2} Ctn Add Otl ServerModule ... 0 - ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0, "telegraf_conf_server": {"http_server":{"port": 1443, "encryption": true, "certificate_path": "/tmp/otel/server.crt", "key_path": "/tmp/otel/server.key"}, "cehck_interval":60, "engine_otel_endpoint": "127.0.0.1:4317"}} + ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0, "telegraf_conf_server": {"http_server":{"port": 1443, "encryption": true, "certificate_path": "/tmp/otel/server.crt", "key_path": "/tmp/otel/server.key"}, "check_interval":60, "engine_otel_endpoint": "127.0.0.1:4317"}} Ctn Config Add Otl Connector ... 0 ... OTEL connector @@ -392,6 +365,348 @@ BEOTEL_SERVE_TELEGRAF_CONFIGURATION_NO_CRYPTED ... unexpected telegraf server response: ${telegraf_conf_response.text} +BEOTEL_CENTREON_AGENT_CHECK_HOST + [Documentation] agent check host and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0, "centreon_agent":{"check_interval":10, "export_period":10}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... 
/bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn Config BBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for the otel server start + ${content} Create List unencrypted server listening on 0.0.0.0:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "unencrypted server listening on 0.0.0.0:4317" should be available. + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp_2 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp_2 + ... /bin/echo "OK check2 - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + #update conf engine, it must be taken into account by agent + Log To Console modify engine conf and reload engine + Ctn Reload Engine + + #wait for new data from agent + ${start} Ctn Get Round Current Date + ${content} Create List description: \"OK check2 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 22 + Should Be True ${result} "description: "OK check2" should be available. 
+ + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 0 OK check2 - 127.0.0.1: rta 0,010ms, lost 0% + Should Be True ${result} hosts table not updated + + +BEOTEL_CENTREON_AGENT_CHECK_SERVICE + [Documentation] agent check service and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Services ${0} service_1 check_command otel_check + Ctn Set Services Passive 0 service_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check + ... /tmp/var/lib/centreon-engine/check.pl --id 456 + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + #service_1 check fail CRITICAL + Ctn Set Command Status 456 ${2} + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Ctn Get Round Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for the otel server start + ${content} Create List unencrypted server listening on 0.0.0.0:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "unencrypted server listening on 0.0.0.0:4317" should be available. 
+ + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start_int} 2 Test check 456 + Should Be True ${result} services table not updated + + ${start} Ctn Get Round Current Date + #service_1 check ok + Ctn Set Command Status 456 ${0} + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start} 0 Test check 456 + Should Be True ${result} services table not updated + + +BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST + [Documentation] agent check host with reversed connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15, "reverse_connections":[{"host": "127.0.0.1","port": 4317}]}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... 
OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Reverse Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for engine to connect to agent + ${content} Create List init from [.\\s]*127.0.0.1:4317 + ${result} Ctn Find Regex In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "init from localhost:4317" not found in log + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp_2 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp_2 + ... /bin/echo "OK check2 - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + #update conf engine, it must be taken into account by agent + Log To Console modify engine conf and reload engine + Ctn Reload Engine + + #wait for new data from agent + ${start} Ctn Get Round Current Date + ${content} Create List description: \"OK check2 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 30 + Should Be True ${result} "description: "OK check2" should be available. + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 0 OK check2 - 127.0.0.1: rta 0,010ms, lost 0% + Should Be True ${result} hosts table not updated + + +BEOTEL_REVERSE_CENTREON_AGENT_CHECK_SERVICE + [Documentation] agent check service with reversed connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... 
{"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15, "reverse_connections":[{"host": "127.0.0.1","port": 4317}]}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Services ${0} service_1 check_command otel_check + Ctn Set Services Passive 0 service_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check + ... /tmp/var/lib/centreon-engine/check.pl --id 456 + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + #service_1 check fail CRITICAL + Ctn Set Command Status 456 ${2} + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Reverse Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Ctn Get Round Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for engine to connect to agent + ${content} Create List init from [.\\s]*127.0.0.1:4317 + ${result} Ctn Find Regex In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "init from 127.0.0.1:4317" not found in log + + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start_int} 2 Test check 456 + Should Be True ${result} services table not updated + + ${start} Ctn Get Round Current Date + #service_1 check ok + Ctn Set Command Status 456 ${0} + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start} 0 Test check 456 + Should Be True ${result} services table not updated + +BEOTEL_CENTREON_AGENT_CHECK_HOST_CRYPTED + [Documentation] agent check host with encrypted connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + 
Ctn Config Engine ${1} ${2} ${2} + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Ctn Add Otl ServerModule + ... 0 + ... {"otel_server":{"host": "0.0.0.0","port": 4317, "encryption": true, "public_cert": "/tmp/server_1234.crt", "private_key": "/tmp/server_1234.key", "ca_certificate": "/tmp/ca_1234.crt"},"max_length_grpc_log":0} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Centreon Agent ${None} ${None} /tmp/ca_1234.crt + + Ctn Config BBDO3 1 + Ctn Broker Config Log central sql trace + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for the otel server start + ${content} Create List encrypted server listening on 0.0.0.0:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "encrypted server listening on 0.0.0.0:4317" should be available. 
+ + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + + +BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST_CRYPTED + [Documentation] agent check host with encrypted reversed connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + + Ctn Add Otl ServerModule + ... 0 + ... {"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15, "reverse_connections":[{"host": "localhost","port": 4317, "encryption": true, "ca_certificate": "/tmp/ca_1234.crt"}]}} + + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... 
OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Reverse Centreon Agent /tmp/server_1234.key /tmp/server_1234.crt /tmp/ca_1234.crt + Ctn Broker Config Log central sql trace + + Ctn Config BBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for engine to connect to agent + ${content} Create List init from localhost:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 15 + Should Be True ${result} "init from localhost:4317" not found in log + Sleep 1 + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + + + *** Keywords *** Ctn Create Otl Request [Documentation] create an otl request with nagios telegraf style diff --git a/tests/broker-engine/reload.robot b/tests/broker-engine/reload.robot new file mode 100644 index 00000000000..deead18f133 --- /dev/null +++ b/tests/broker-engine/reload.robot @@ -0,0 +1,50 @@ +*** Settings *** +Documentation Centreon Broker and Engine start/stop tests + +Resource ../resources/import.resource + +Suite Setup Ctn Clean Before Suite +Suite Teardown Ctn Clean After Suite +Test Setup Ctn Stop Processes +Test Teardown Ctn Save Logs If Failed + + +*** Test Cases *** +BENCV + [Documentation] Engine is configured with hosts/services. The first host has no customvariable. + ... Then we add a customvariable to the first host and we reload engine. + ... Then the host should have this new customvariable defined and centengine should not crash. 
+ [Tags] broker engine MON-147499 + Ctn Config Engine ${1} + Ctn Create Template File ${0} host _CV ["test1","test2","test3"] + Ctn Engine Config Set Value ${0} log_level_config debug + Ctn Engine Config Set Value In Hosts ${0} host_1 use host_template_1 + Ctn Config Engine Add Cfg File ${0} hostTemplates.cfg + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config BBDO3 ${1} + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + ${content} Create List new custom variable 'CV' with value 'test1' on host 1 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} The host should have a new customvariable named _CV + + Ctn Engine Config Replace Value In Hosts ${0} host_1 use host_template_2,host_template_1 + ${start} Get Current Date + Ctn Reload Engine + ${content} Create List new custom variable 'CV' with value 'test2' on host 1 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} The host should have a new customvariable named _CV + + Ctn Engine Config Set Value In Hosts ${0} host_1 _CV cv_value + ${start} Get Current Date + Ctn Reload Engine + ${content} Create List new custom variable 'CV' with value 'cv_value' on host 1 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} The host should have a new customvariable named _CV + + Ctn Kindly Stop Broker + Ctn Stop Engine diff --git a/tests/broker-engine/rrd-from-db.robot b/tests/broker-engine/rrd-from-db.robot index 0637cab583e..a63a786e82e 100644 --- a/tests/broker-engine/rrd-from-db.robot +++ b/tests/broker-engine/rrd-from-db.robot @@ -115,8 +115,19 @@ BRRDRBDB1 ${result} Ctn Check Connections Should Be True ${result} Engine and Broker not connected - # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + # We need 3 indexes to rebuild + FOR ${idx} IN 
RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs From Db ${index} Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} @@ -139,7 +150,7 @@ BRRDRBDB1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. END BRRDRBUDB1 @@ -163,12 +174,24 @@ BRRDRBUDB1 ${result} Ctn Check Connections Should Be True ${result} Engine and Broker not connected - # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + # We need 3 indexes to rebuild + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs From Db ${index} Ctn Reload Broker Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} + Log To Console Metrics to rebuild: ${metrics} ${content1} Create List RRD: Starting to rebuild metrics ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content1} 30 @@ -186,7 +209,7 @@ BRRDRBUDB1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... 
Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. END BRRDUPLICATE @@ -209,12 +232,23 @@ BRRDUPLICATE ${result} Ctn Check Connections Should Be True ${result} Engine and Broker not connected - # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 2 - ${duplicates} Ctn Add Duplicate Metrics + # We need 3 indexes to rebuild + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END + ${metrics} Ctn Get Metrics Matching Indexes ${index} + ${duplicates} Ctn Add Duplicate Metrics ${metrics} Ctn Rebuild Rrd Graphs From Db ${index} Log To Console Indexes to rebuild: ${index} - ${metrics} Ctn Get Metrics Matching Indexes ${index} Log To Console Metrics to rebuild: ${metrics} Ctn Reload Broker diff --git a/tests/broker-engine/rrd.robot b/tests/broker-engine/rrd.robot index b242a3ada0a..0c009bf138a 100644 --- a/tests/broker-engine/rrd.robot +++ b/tests/broker-engine/rrd.robot @@ -299,7 +299,18 @@ BRRDRM1 Should Be True ${result} Engine and Broker not connected # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs 51001 ${index} 1 Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} @@ -324,7 +335,7 @@ BRRDRM1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... 
Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. END FOR ${index_id} IN @{index} @@ -358,7 +369,18 @@ BRRDRMU1 Should Be True ${result} Engine and Broker not connected # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs 51001 ${index} 1 Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} @@ -383,7 +405,7 @@ BRRDRMU1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. # 48 = 60(octal) ${result} Ctn Has File Permissions ${VarRoot}/lib/centreon/metrics/${m}.rrd 48 Should Be True ${result} ${VarRoot}/lib/centreon/metrics/${m}.rrd has not RW group permission @@ -432,9 +454,99 @@ RRD1 ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content1} 45 Should Not Be True ${result} Database did not receive command to rebuild metrics +BRRDSTATUS + [Documentation] We are working with BBDO3. This test checks status are correctly handled independently from their value. 
+ [Tags] rrd status bbdo3 MON-141934 + Ctn Config Engine ${1} + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module + Ctn Config BBDO3 ${1} + Ctn Broker Config Log central sql info + Ctn Broker Config Log rrd rrd debug + Ctn Broker Config Log rrd core error + Ctn Broker Config Flush Log central 0 + Ctn Broker Config Flush Log rrd 0 + Ctn Set Services Passive ${0} service_1 + + ${start} Get Current Date + Ctn Start Broker + Ctn Start engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + Ctn Process Service Result Hard host_1 service_1 2 output critical for service_1 + ${index} Ctn Get Service Index 1 1 + log to console Service 1:1 has index ${index} + ${content} Create List RRD: new pb status data for index ${index} (state 2) + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 60 + Should Be True ${result} host_1:service_1 is not CRITICAL as expected + + ${start} Ctn Get Round Current Date + Ctn Process Service Result Hard host_1 service_1 1 output warning for service_1 + ${content} Create List RRD: new pb status data for index ${index} (state 1) + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 60 + Should Be True ${result} host_1:service_1 is not WARNING as expected + + ${start} Ctn Get Round Current Date + Ctn Process Service Result Hard host_1 service_1 0 output ok for service_1 + ${content} Create List RRD: new pb status data for index ${index} (state 0) + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 60 + Should Be True ${result} host_1:service_1 is not OK as expected + + ${start} Ctn Get Round Current Date + Ctn Process Service Result Hard host_1 service_1 3 output UNKNOWN for service_1 + ${content} Create List RRD: new pb status data for index ${index} (state 3) + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 60 + Should Be True ${result} host_1:service_1 is not UNKNOWN as expected + + ${content} Create List RRD: ignored update non-float value 
'' in file '${VarRoot}/lib/centreon/status/82884.rrd' + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 1 + Should Be Equal ${result} ${False} We shouldn't have any error about empty value in RRD + + +BRRDSTATUSRETENTION + [Documentation] We are working with BBDO3. This test checks status are not sent twice after Engine reload. + [Tags] rrd status bbdo3 MON-139747 + Ctn Config Engine ${1} + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module + Ctn Config BBDO3 ${1} + Ctn Broker Config Log central sql info + Ctn Broker Config Log rrd rrd debug + Ctn Broker Config Log rrd core error + Ctn Broker Config Flush Log central 0 + Ctn Broker Config Flush Log rrd 0 + + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + Ctn Schedule Forced Svc Check host_1 service_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd + Log To Console Engine works during 20s + Sleep 20s + + Log To Console We modify the check_interval of the service service_1 + Ctn Engine Config Replace Value In Services 0 service_1 check_interval 1 + + ${start} Ctn Get Round Current Date + Log To Console Reloading Engine and waiting for 20s again + Ctn Reload Engine + Sleep 20s + + Log To Console Find in logs if there is an error in rrd. + ${index} Ctn Get Service Index 1 1 + ${content} Create List RRD: ignored update error in file '${VarRoot}/lib/centreon/status/${index}.rrd': ${VarRoot}/lib/centreon/status/${index}.rrd: illegal attempt to update using time + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 1 + Should Be Equal + ... ${result} ${False} + ... 
No message about an illegal attempt to update the rrd files should appear + Log To Console Test finished + *** Keywords *** Ctn Test Clean - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker Ctn Save Logs If Failed diff --git a/tests/broker-engine/rrdcached-from-db.robot b/tests/broker-engine/rrdcached-from-db.robot index db1732abdcd..fd110abccfc 100644 --- a/tests/broker-engine/rrdcached-from-db.robot +++ b/tests/broker-engine/rrdcached-from-db.robot @@ -119,7 +119,18 @@ BRRDCDRBDB1 Should Be True ${result} Engine and Broker not connected # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs From Db ${index} Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} @@ -142,7 +153,7 @@ BRRDCDRBDB1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. END BRRDCDRBUDB1 @@ -168,7 +179,18 @@ BRRDCDRBUDB1 Should Be True ${result} Engine and Broker not connected # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. 
+ Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs From Db ${index} Ctn Reload Broker Log To Console Indexes to rebuild: ${index} @@ -190,5 +212,5 @@ BRRDCDRBUDB1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. END diff --git a/tests/broker-engine/rrdcached.robot b/tests/broker-engine/rrdcached.robot index c130d06576f..a930179adc2 100644 --- a/tests/broker-engine/rrdcached.robot +++ b/tests/broker-engine/rrdcached.robot @@ -247,7 +247,18 @@ BRRDCDRB1 Should Be True ${result} Engine and Broker not connected # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs 51001 ${index} 1 Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} @@ -272,7 +283,7 @@ BRRDCDRB1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. 
END FOR ${index_id} IN @{index} @@ -307,7 +318,18 @@ BRRDCDRBU1 Should Be True ${result} Engine and Broker not connected # We get 3 indexes to rebuild - ${index} Ctn Get Indexes To Rebuild 3 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 3 + IF len(${index}) == 3 + BREAK + ELSE + # If not available, we force checks to have them. + Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + Ctn Schedule Forced Svc Check host_1 service_3 + END + Sleep 1s + END Ctn Rebuild Rrd Graphs 51001 ${index} 1 Log To Console Indexes to rebuild: ${index} ${metrics} Ctn Get Metrics Matching Indexes ${index} @@ -332,7 +354,7 @@ BRRDCDRBU1 ${result} Ctn Compare Rrd Average Value ${m} ${value} Should Be True ... ${result} - ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild for metric ${m} contained alternatively the metric ID and 0. The expected average is metric_id / 2 = ${value}. 
END FOR ${index_id} IN @{index} diff --git a/tests/broker-engine/servicegroups.robot b/tests/broker-engine/servicegroups.robot index 51f0e511542..88c3261a2c3 100644 --- a/tests/broker-engine/servicegroups.robot +++ b/tests/broker-engine/servicegroups.robot @@ -113,8 +113,9 @@ EBNSGU2 EBNSGU3_${test_label} [Documentation] New service group with several pollers and connections to DB with broker and rename this servicegroup - [Tags] broker engine servicegroup unified_sql + [Tags] broker engine servicegroup Ctn Config Engine ${3} + Ctn Engine Config Set Value ${0} log_level_config debug Ctn Config Broker rrd Ctn Config Broker central Ctn Config Broker module ${3} @@ -132,12 +133,12 @@ EBNSGU3_${test_label} IF ${Use_BBDO3} Ctn Config BBDO3 ${3} - Ctn Config BBDO3 ${3} - ${start} Get Current Date Ctn Start Broker Ctn Start engine - Sleep 3s + + Ctn Wait For Engine To Be Ready ${start} ${1} + Ctn Add Service Group ${0} ${1} ["host_1","service_1", "host_1","service_2","host_1", "service_3"] Ctn Add Service Group ${1} ${1} ["host_18","service_341", "host_19","service_362","host_19", "service_363"] Ctn Add Service Group ${2} ${1} ["host_35","service_681", "host_35","service_682","host_36", "service_706"] @@ -145,6 +146,7 @@ EBNSGU3_${test_label} Ctn Config Engine Add Cfg File ${1} servicegroups.cfg Ctn Config Engine Add Cfg File ${2} servicegroups.cfg + ${start} Ctn Get Round Current Date Ctn Reload Broker Ctn Reload Engine @@ -183,18 +185,22 @@ EBNSGU3_${test_label} Ctn Reload Engine Ctn Reload Broker - Log To Console \nremove servicegroup + Log To Console \nRemove servicegroup 1 ${result} Ctn Check Number Of Relations Between Servicegroup And Services 1 0 30 Should Be True ${result} still a relation between the servicegroup 1 and services. 
- # clear lua file - # this part of test is disable because group erasure is desactivated in macrocache.cc - # it will be reactivated when global cache will be implemented - # Create File /tmp/lua-engine.log - # Sleep 2s - # ${grep_result} Grep File /tmp/lua-engine.log no service_group_name 1 - # Should Be True len("""${grep_result}""") < 10 servicegroup 1 still exist + # Waiting to observe no service group. + FOR ${index} IN RANGE 60 + Create File /tmp/lua-engine.log + Sleep 1s + ${grep_result} Grep File /tmp/lua-engine.log no service_group_name + IF len("""${grep_result}""") > 0 BREAK + END + Sleep 10s + # Do we still have no service group? + ${grep_result} Grep File /tmp/lua-engine.log service_group_name: + Should Be True len("""${grep_result}""") == 0 The servicegroup 1 still exists Examples: Use_BBDO3 test_label -- ... True BBDO3 diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot index 895c6187de0..d64909f265f 100644 --- a/tests/broker-engine/services-increased.robot +++ b/tests/broker-engine/services-increased.robot @@ -84,7 +84,17 @@ Service_increased_huge_check_interval ${content} Create List new pb data for metric ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 60 - ${index} Ctn Get Indexes To Rebuild 2 + FOR ${idx} IN RANGE 60 + ${index} Ctn Get Indexes To Rebuild 2 + IF len(${index}) == 2 + BREAK + ELSE + # If not available, we force checks to have them. 
+ Ctn Schedule Forced Svc Check host_1 service_1 + Ctn Schedule Forced Svc Check host_1 service_2 + END + Sleep 1s + END ${metrics} Ctn Get Metrics Matching Indexes ${index} Log To Console Metrics: ${metrics} diff --git a/tests/broker/log.robot b/tests/broker/log.robot index da17dd6f797..b533b5bf212 100644 --- a/tests/broker/log.robot +++ b/tests/broker/log.robot @@ -67,3 +67,142 @@ BLEC3 Ctn Start Broker ${result} Ctn Set Broker Log Level 51001 foo trace Should Be Equal ${result} The 'foo' logger does not exist + +BLBD + [Documentation] Start Broker with loggers levels by default + [Tags] broker log-v2 MON-143565 + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Broker Config Remove Item central log:loggers + ${start} Get Current Date + Ctn Start Broker + ${result} Ctn Get Broker Log Info 51001 ALL + log to console ${result} + ${LOG_RES} Catenate SEPARATOR=${\n} @{LOG_RESULT} + Should Be Equal ${result} ${LOG_RES} Default loggers levels are wrong + + +*** Variables *** +@{LOG_RESULT} log_name: "cbd" +... log_file: "/tmp/var/log/centreon-broker//central-broker-master.log" +... level { +... ${SPACE}${SPACE}key: "victoria_metrics" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "tls" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "tcp" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "stats" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "sql" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "runtime" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "rrd" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "process" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "processing" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "perfdata" +... 
${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "otl" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "notifications" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "neb" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "macros" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "lua" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "influxdb" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "grpc" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "graphite" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "functions" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "external_command" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "events" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "eventbroker" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "downtimes" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "core" +... ${SPACE}${SPACE}value: "info" +... } +... level { +... ${SPACE}${SPACE}key: "config" +... ${SPACE}${SPACE}value: "info" +... } +... level { +... ${SPACE}${SPACE}key: "comments" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "commands" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "checks" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "bbdo" +... ${SPACE}${SPACE}value: "error" +... } +... level { +... ${SPACE}${SPACE}key: "bam" +... ${SPACE}${SPACE}value: "error" +... } +... 
diff --git a/tests/engine/forced_checks.robot b/tests/engine/forced_checks.robot index d7b3b81b0ce..372341419ac 100644 --- a/tests/engine/forced_checks.robot +++ b/tests/engine/forced_checks.robot @@ -327,15 +327,10 @@ E_HOST_DOWN_DISABLE_SERVICE_CHECKS Ctn Clear Retention ${start} Get Current Date - ${start} Get Current Date Ctn Start Engine Ctn Start Broker only_central=${True} - ${content} Create List INITIAL HOST STATE: host_1; - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True - ... ${result} - ... An Initial host state on host_1 should be raised before we can start our external commands. + Ctn Wait For Engine To Be Ready ${start} ${1} FOR ${i} IN RANGE ${4} Ctn Process Host Check Result host_1 1 host_1 DOWN @@ -344,17 +339,17 @@ E_HOST_DOWN_DISABLE_SERVICE_CHECKS ${result} Ctn Check Host Status host_1 1 1 False 30 Should Be True ${result} host_1 should be down/hard - #after some time services should be in hard state + # After some time services should be in hard state FOR ${index} IN RANGE ${19} ${result} Ctn Check Service Status With Timeout host_1 service_${index+1} 3 30 HARD Should Be True ${result} service_${index+1} should be UNKNOWN hard END - #host_1 check returns UP + # host_1 check returns UP Ctn Set Command Status checkh1 0 Ctn Process Host Check Result host_1 0 host_1 UP - #after some time services should be in ok hard state + # After some time services should be in ok hard state FOR ${index} IN RANGE ${19} Ctn Process Service Check Result host_1 service_${index+1} 0 output END diff --git a/tests/engine/reload-and-logs.robot b/tests/engine/reload-and-logs.robot new file mode 100644 index 00000000000..a9ae964c372 --- /dev/null +++ b/tests/engine/reload-and-logs.robot @@ -0,0 +1,39 @@ +*** Settings *** +Documentation Centreon Engine forced checks tests + +Resource ../resources/import.resource + +Suite Setup Ctn Clean Before Suite +Suite Teardown Ctn Clean After Suite +Test Setup Ctn Stop Processes + + +*** 
Test Cases *** +ERL + [Documentation] Engine is started and writes logs in centengine.log. + ... Then we remove the log file. The file disappears but Engine is still writing into it. + ... Engine is reloaded and the centengine.log should appear again. + [Tags] engine log-v2 MON-146656 + Ctn Config Engine ${1} + Ctn Engine Config Set Value ${0} log_legacy_enabled ${0} + Ctn Engine Config Set Value ${0} log_v2_enabled ${1} + Ctn Engine Config Set Value ${0} log_level_events info + Ctn Engine Config Set Value ${0} log_flush_period 0 + + Ctn Clear Retention + Ctn Clear Db hosts + ${start} Ctn Get Round Current Date + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + File Should Exist ${VarRoot}/log/centreon-engine/config0/centengine.log + + Remove File ${VarRoot}/log/centreon-engine/config0/centengine.log + + Sleep 5s + + File Should Not Exist ${VarRoot}/log/centreon-engine/config0/centengine.log + Ctn Reload Engine + + Wait Until Created ${VarRoot}/log/centreon-engine/config0/centengine.log timeout=30s + Ctn Stop Engine diff --git a/tests/init-sql-docker.sh b/tests/init-sql-docker.sh index acbc4965601..70efc5b97ee 100755 --- a/tests/init-sql-docker.sh +++ b/tests/init-sql-docker.sh @@ -16,8 +16,8 @@ apt update && apt install -y mysql-client #create users if [ $database_type == 'mysql' ]; then echo "create users mysql" - mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'centreon'@'%' IDENTIFIED WITH mysql_native_password BY 'centreon'" - mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'root_centreon'@'%' IDENTIFIED WITH mysql_native_password BY 'centreon'" + mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'centreon'@'%' IDENTIFIED BY 'centreon'" + mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'root_centreon'@'%' IDENTIFIED BY 'centreon'" else #mariadb case ss 
-plant | grep -w 3306 diff --git a/tests/resources/Agent.py b/tests/resources/Agent.py new file mode 100644 index 00000000000..4497a4453f3 --- /dev/null +++ b/tests/resources/Agent.py @@ -0,0 +1,71 @@ +#!/usr/bin/python3 +# +# Copyright 2023-2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# For more information : contact@centreon.com +# + +from os import makedirs +from robot.libraries.BuiltIn import BuiltIn + +ETC_ROOT = BuiltIn().get_variable_value("${EtcRoot}") +CONF_DIR = ETC_ROOT + "/centreon-engine" + + +agent_config=""" +{ + "log_level":"trace", + "endpoint":"localhost:4317", + "host":"host_1", + "log_type":"file", + "log_file":"/tmp/var/log/centreon-engine/centreon-agent.log" """ + + +def ctn_config_centreon_agent(key_path:str = None, cert_path:str = None, ca_path:str = None): + """ctn_config_centreon_agent + Creates a default centreon agent config without encryption nor reverse connection + """ + makedirs(CONF_DIR, mode=0o777, exist_ok=True) + with open(f"{CONF_DIR}/centagent.json", "w") as ff: + ff.write(agent_config) + if key_path is not None or cert_path is not None or ca_path is not None: + ff.write(",\n \"encryption\":true") + if key_path is not None: + ff.write(f",\n \"private_key\":\"{key_path}\"") + if cert_path is not None: + ff.write(f",\n \"public_cert\":\"{cert_path}\"") + if ca_path is not None: + ff.write(f",\n \"ca_certificate\":\"{ca_path}\"") + ff.write("\n}\n") + + + +def ctn_config_reverse_centreon_agent(key_path:str = None, 
cert_path:str = None, ca_path:str = None): + """ctn_config_reverse_centreon_agent + Creates a default reversed centreon agent config, optionally with encryption, listening on 0.0.0.0:4317 + """ + makedirs(CONF_DIR, mode=0o777, exist_ok=True) + with open(f"{CONF_DIR}/centagent.json", "w") as ff: + ff.write(agent_config) + ff.write(",\n \"reverse_connection\":true") + if key_path is not None or cert_path is not None or ca_path is not None: + ff.write(",\n \"encryption\":true") + if key_path is not None: + ff.write(f",\n \"private_key\":\"{key_path}\"") + if cert_path is not None: + ff.write(f",\n \"public_cert\":\"{cert_path}\"") + if ca_path is not None: + ff.write(f",\n \"ca_certificate\":\"{ca_path}\"") + ff.write("\n}\n") diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index fe65675d4eb..e5cb6e829ca 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -21,6 +21,7 @@ from os import setsid from os import makedirs from os.path import exists +import datetime import pymysql.cursors import time import re @@ -1095,7 +1096,8 @@ def ctn_broker_config_remove_item(name, key): Args: name: The broker instance name among central, rrd and module%d - key: The key to remove. It must be defined at the first level of the configuration. + key: The key to remove. It must be defined from the "centreonBroker" level. + We can define several levels by splitting them with a colon.
*Example:* @@ -1111,7 +1113,14 @@ def ctn_broker_config_remove_item(name, key): with open(f"{ETC_ROOT}/centreon-broker/(unknown)", "r") as f: buf = f.read() conf = json.loads(buf) - conf["centreonBroker"].pop(key) + cc = conf["centreonBroker"] + if ":" in key: + steps = key.split(':') + for s in steps[:-1]: + cc = cc[s] + key = steps[-1] + + cc.pop(key) with open(f"{ETC_ROOT}/centreon-broker/(unknown)", "w") as f: f.write(json.dumps(conf, indent=2)) @@ -1653,6 +1662,41 @@ def ctn_check_rrd_info(metric_id: int, key: str, value, timeout: int = 60): return False +def ctn_get_service_index(host_id: int, service_id: int, timeout: int = 60): + """ + Try to get the index data of a service. + + Args: + host_id (int): The ID of the host. + service_id (int): The ID of the service. + + Returns: + An integer representing the index data, or None if not found within the timeout. + """ + select_request = f"SELECT id FROM index_data WHERE host_id={host_id} AND service_id={service_id}" + limit = time.time() + timeout + while time.time() < limit: + # Connect to the database + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + with connection: + with connection.cursor() as cursor: + cursor.execute(select_request) + result = cursor.fetchall() + my_id = [r['id'] for r in result] + if len(my_id) > 0: + logger.console( + f"Index data {my_id[0]} found for service {host_id}:{service_id}") + return my_id[0] + time.sleep(2) + logger.console(f"no index data found for service {host_id}:{service_id}") + return None + + def ctn_get_metrics_for_service(service_id: int, metric_name: str = "%", timeout: int = 60): """ Try to get the metric IDs of a service. @@ -1963,7 +2007,7 @@ def ctn_get_indexes_to_rebuild(count: int, nb_day=180): A list of indexes. 
""" files = [os.path.basename(x) for x in glob.glob( - VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] + VAR_ROOT + "/lib/centreon/status/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database @@ -1973,47 +2017,66 @@ def ctn_get_indexes_to_rebuild(count: int, nb_day=180): database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) - retval = [] + retval = set() with connection: with connection.cursor() as cursor: # Read a single record - sql = "SELECT `metric_id`,`index_id` FROM `metrics`" + sql = "SELECT `metric_id`,`index_id` FROM `metrics` ORDER BY index_id" cursor.execute(sql) result = cursor.fetchall() + last_index = 0 for r in result: - if int(r['metric_id']) in ids: - index_id = int(r['index_id']) - logger.console( - "building data for metric {} index_id {}".format(r['metric_id'], index_id)) - # We go back to 180 days with steps of 5 mn - start = int(time.time() / 86400) * 86400 - \ - 24 * 60 * 60 * nb_day - value = int(r['metric_id']) // 2 - status_value = index_id % 3 - cursor.execute("DELETE FROM data_bin WHERE id_metric={} AND ctime >= {}".format( - r['metric_id'], start)) - # We set the value to a constant on 180 days - for i in range(0, 24 * 60 * 60 * nb_day, 60 * 5): - cursor.execute( - "INSERT INTO data_bin (id_metric, ctime, value, status) VALUES ({},{},{},'{}')".format( - r['metric_id'], start + i, value, status_value)) - connection.commit() - retval.append(index_id) + index_id = int(r['index_id']) + + if index_id not in ids: + continue - if len(retval) == count: + # We must rebuild all the metrics of a given index. 
+ if last_index != index_id and len(retval) == count: return retval + logger.console( + f"building data for metric {r['metric_id']} index_id {index_id}") + # We go back to 180 days with steps of 5 mn + now = datetime.datetime.now() + dt = now.replace(hour=0, minute=0, second=0, microsecond=0) + start = dt - datetime.timedelta(days=nb_day) + start = int(start.timestamp()) + logger.console(f">>>>>>>>>> start = {datetime.datetime.fromtimestamp(start)}") + value = int(r['metric_id']) // 2 + status_value = index_id % 3 + cursor.execute("DELETE FROM data_bin WHERE id_metric={} AND ctime >= {}".format( + r['metric_id'], start)) + # We set the value to a constant on 180 days + now = int(now.timestamp()) + logger.console(f">>>>>>>>>> end = {datetime.datetime.fromtimestamp(now)}") + for i in range(start, now, 60 * 5): + if i == start: + logger.console( + "INSERT INTO data_bin (id_metric, ctime, value, status) VALUES ({},{},{},'{}')".format( + r['metric_id'], i, value, status_value)) + cursor.execute( + "INSERT INTO data_bin (id_metric, ctime, value, status) VALUES ({},{},{},'{}')".format( + r['metric_id'], i, value, status_value)) + connection.commit() + retval.add(index_id) + + last_index = index_id + # if the loop is already and retval length is not sufficiently long, we # still return what we get. return retval -def ctn_add_duplicate_metrics(): +def ctn_add_duplicate_metrics(metric_ids): """ - Add a value at the middle of the first day of each metric + Add a value at the middle of the last day of each metric in the provided list. + + Args: + metric_ids: A list of metric IDs. Returns: - A list of indexes of pair