From 3a4fa4c53a825b2a0284bb254f0a2f90d2adb2dd Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Fri, 22 Sep 2023 08:40:36 +0000 Subject: [PATCH 1/3] Add a custom playbook to fix OVN chassis priorities Sometimes, typically after restarting OVN services, the priorities of entries in the ha_chassis and gateway_chassis tables in the OVN northbound database can become misaligned. This results in broken routing for external (bare metal/SR-IOV) ports. This playbook can be used to fix the issue by realigning the priorities of the table entries. It does so by assigning the highest priority to the "first" (sorted alphabetically) OVN NB DB host. This results in all gateways being scheduled to a single host, but is less complicated than trying to balance them (and it's also not clear to me how to map between individual ha_chassis and gateway_chassis entries). The playbook can be run as follows: kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/ovn-fix-chassis-priorities.yml If the 'controllers' group does not align with the group used to deploy the OVN NB DB, this can be overridden by passing the following: '-e ovn_nb_db_group=some_other_group' --- .../ansible/ovn-fix-chassis-priorities.yml | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 etc/kayobe/ansible/ovn-fix-chassis-priorities.yml diff --git a/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml new file mode 100644 index 000000000..20542df88 --- /dev/null +++ b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml @@ -0,0 +1,69 @@ +--- +# Sometimes, typically after restarting OVN services, the priorities of entries +# in the ha_chassis and gateway_chassis tables in the OVN northbound database +# can become misaligned. This results in broken routing for external (bare +# metal/SR-IOV) ports. + +# This playbook can be used to fix the issue by realigning the priorities of +# the table entries. 
It does so by assigning the highest priority to the +# "first" (sorted alphabetically) OVN NB DB host. This results in all gateways +# being scheduled to a single host, but is less complicated than trying to +# balance them (and it's also not clear to me how to map between individual +# ha_chassis and gateway_chassis entries). + +# The playbook can be run as follows: +# kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/ovn-fix-chassis-priorities.yml + +# If the 'controllers' group does not align with the group used to deploy the +# OVN NB DB, this can be overridden by passing the following: +# '-e ovn_nb_db_group=some_other_group' + +- name: Find OVN NB DB leader +  hosts: "{{ ovn_nb_db_group | default('controllers') }}" +  tasks: +    - name: Find the OVN NB DB leader +      command: docker exec -it ovn_nb_db ovn-nbctl get-connection +      changed_when: false +      failed_when: false +      register: ovn_check_result +      check_mode: no + +    - name: Group hosts by leader/follower role +      group_by: +        key: "ovn_nb_{{ 'leader' if ovn_check_result.rc == 0 else 'follower' }}" +      changed_when: false + +    - name: Assert one leader exists +      assert: +        that: +          - groups['ovn_nb_leader'] | default([]) | length == 1 + +- name: Fix OVN chassis priorities +  hosts: ovn_nb_leader +  vars: +    ovn_nb_db_group: controllers +    ovn_nb_db_hosts_sorted: "{{ query('inventory_hostnames', ovn_nb_db_group) | sort | list }}" +    ha_chassis_max_priority: 32767 +    gateway_chassis_max_priority: "{{ ovn_nb_db_hosts_sorted | length }}" +  tasks: +    - name: Fix ha_chassis priorities +      command: >- +        docker exec -it ovn_nb_db +        bash -c ' +        ovn-nbctl find ha_chassis chassis_name={{ item }} | +        awk '\''$1 == "_uuid" { print $3 }'\'' | +        while read uuid; do ovn-nbctl set ha_chassis $uuid priority={{ priority }}; done' +      loop: "{{ ovn_nb_db_hosts_sorted }}" +      vars: +        priority: "{{ ha_chassis_max_priority | int - ovn_nb_db_hosts_sorted.index(item) }}" + +    - name: Fix gateway_chassis priorities +      command: >- +        docker exec -it ovn_nb_db +        bash -c 
' + ovn-nbctl find gateway_chassis chassis_name={{ item }} | + awk '\''$1 == "_uuid" { print $3 }'\'' | + while read uuid; do ovn-nbctl set gateway_chassis $uuid priority={{ priority }}; done' + loop: "{{ ovn_nb_db_hosts_sorted }}" + vars: + priority: "{{ gateway_chassis_max_priority | int - ovn_nb_db_hosts_sorted.index(item) }}" From e57542ccf2b1d868a35964928211f90078103f54 Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Thu, 28 Sep 2023 21:02:24 +0200 Subject: [PATCH 2/3] Document stackhpc_pulp_images_kolla_filter variable --- doc/source/configuration/release-train.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/configuration/release-train.rst b/doc/source/configuration/release-train.rst index d4757f54f..1b0a48a00 100644 --- a/doc/source/configuration/release-train.rst +++ b/doc/source/configuration/release-train.rst @@ -170,6 +170,16 @@ promoted to production: kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-repo-promote-production.yml +Synchronising all Kolla container images can take a long time. A limited list +of images can be synchronised using the ``stackhpc_pulp_images_kolla_filter`` +variable, which accepts a whitespace-separated list of regular expressions +matching Kolla image names. Usage is similar to ``kolla-build`` CLI arguments. +For example: + +.. 
code-block:: console + + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-container-sync.yml -e stackhpc_pulp_images_kolla_filter='"^glance nova-compute$"' + Initial seed deployment ----------------------- From 1722ffa2e7b411f3a0ecaae9c992ffe26a721848 Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Wed, 27 Sep 2023 16:17:11 +0100 Subject: [PATCH 3/3] docs: fix wazuh headings --- doc/source/configuration/wazuh.rst | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/doc/source/configuration/wazuh.rst b/doc/source/configuration/wazuh.rst index 8020d154a..1ba1574b2 100644 --- a/doc/source/configuration/wazuh.rst +++ b/doc/source/configuration/wazuh.rst @@ -2,8 +2,8 @@ Wazuh ===== -Wazuh Manager -============= +Wazuh Manager Host +================== Provision using infra-vms ------------------------- @@ -288,7 +288,7 @@ Encrypt the keys (and remember to commit to git): ``ansible-vault encrypt --vault-password-file ~/vault.pass $KAYOBE_CONFIG_PATH/ansible/wazuh/certificates/certs/*.key`` Verification -============== +------------ The Wazuh portal should be accessible on port 443 of the Wazuh manager’s IPs (using HTTPS, with the root CA cert in ``etc/kayobe/ansible/wazuh/certificates/wazuh-certificates/root-ca.pem``). @@ -300,11 +300,9 @@ Troubleshooting Logs are in ``/var/log/wazuh-indexer/wazuh.log``. There are also logs in the journal. -============ Wazuh agents ============ - Wazuh agent playbook is located in ``etc/kayobe/ansible/wazuh-agent.yml``. Wazuh agent variables file is located in ``etc/kayobe/inventory/group_vars/wazuh-agent/wazuh-agent``. @@ -318,13 +316,13 @@ Deploy the Wazuh agents: ``kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/wazuh-agent.yml`` Verification -============= +------------ The Wazuh agents should register with the Wazuh manager. This can be verified via the agents page in Wazuh Portal. Check CIS benchmark output in agent section. 
-Additional resources: -===================== +Additional resources +-------------------- For times when you need to upgrade wazuh with elasticsearch to version with opensearch or you just need to deinstall all wazuh components: Wazuh purge script: https://github.com/stackhpc/wazuh-server-purge