diff --git a/doc/source/configuration/release-train.rst b/doc/source/configuration/release-train.rst
index bcb83d545..7bceaf157 100644
--- a/doc/source/configuration/release-train.rst
+++ b/doc/source/configuration/release-train.rst
@@ -192,6 +192,16 @@ promoted to production:
 
    kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-repo-promote-production.yml
 
+Synchronising all Kolla container images can take a long time. A limited list
+of images can be synchronised using the ``stackhpc_pulp_images_kolla_filter``
+variable, which accepts a whitespace-separated list of regular expressions
+matching Kolla image names. Usage is similar to ``kolla-build`` CLI arguments.
+For example:
+
+.. code-block:: console
+
+   kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-container-sync.yml -e stackhpc_pulp_images_kolla_filter='"^glance nova-compute$"'
+
 Initial seed deployment
 -----------------------
 
diff --git a/doc/source/configuration/wazuh.rst b/doc/source/configuration/wazuh.rst
index ee8999339..ef6216580 100644
--- a/doc/source/configuration/wazuh.rst
+++ b/doc/source/configuration/wazuh.rst
@@ -17,8 +17,8 @@ The short version
 #. Deploy the Wazuh agents:
    ``kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/wazuh-agent.yml``
 
-Wazuh Manager
-=============
+Wazuh Manager Host
+==================
 
 Provision using infra-vms
 -------------------------
@@ -303,7 +303,7 @@ Encrypt the keys (and remember to commit to git):
 ``ansible-vault encrypt --vault-password-file ~/vault.pass $KAYOBE_CONFIG_PATH/ansible/wazuh/certificates/certs/*.key``
 
 Verification
-==============
+------------
 
 The Wazuh portal should be accessible on port 443 of the Wazuh manager’s IPs (using HTTPS, with the root CA cert in ``etc/kayobe/ansible/wazuh/certificates/wazuh-certificates/root-ca.pem``).
@@ -315,11 +315,9 @@ Troubleshooting
 Logs are in ``/var/log/wazuh-indexer/wazuh.log``. There are also logs in the journal.
 
-============
 Wazuh agents
 ============
-
 Wazuh agent playbook is located in ``etc/kayobe/ansible/wazuh-agent.yml``.
 Wazuh agent variables file is located in ``etc/kayobe/inventory/group_vars/wazuh-agent/wazuh-agent``.
@@ -333,13 +331,13 @@ Deploy the Wazuh agents:
 ``kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/wazuh-agent.yml``
 
 Verification
-=============
+------------
 The Wazuh agents should register with the Wazuh manager. This can be verified via the agents page in Wazuh Portal. Check CIS benchmark output in agent section.
 
-Additional resources:
-=====================
+Additional resources
+--------------------
 For times when you need to upgrade wazuh with elasticsearch to version with opensearch or you just need to deinstall all wazuh components:
 Wazuh purge script: https://github.com/stackhpc/wazuh-server-purge
diff --git a/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml
new file mode 100644
index 000000000..20542df88
--- /dev/null
+++ b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml
@@ -0,0 +1,69 @@
+---
+# Sometimes, typically after restarting OVN services, the priorities of entries
+# in the ha_chassis and gateway_chassis tables in the OVN northbound database
+# can become misaligned. This results in broken routing for external (bare
+# metal/SR-IOV) ports.
+
+# This playbook can be used to fix the issue by realigning the priorities of
+# the table entries. It does so by assigning the highest priority to the
+# "first" (sorted alphabetically) OVN NB DB host. This results in all gateways
+# being scheduled to a single host, but is less complicated than trying to
+# balance them (and it's also not clear to me how to map between individual
+# ha_chassis and gateway_chassis entries).
+
+# The playbook can be run as follows:
+# kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/ovn-fix-chassis-priorities.yml
+
+# If the 'controllers' group does not align with the group used to deploy the
+# OVN NB DB, this can be overridden by passing the following:
+# '-e ovn_nb_db_group=some_other_group'
+
+- name: Find OVN NB DB Leader
+  hosts: "{{ ovn_nb_db_group | default('controllers') }}"
+  tasks:
+    - name: Find the OVN NB DB leader
+      command: docker exec -it ovn_nb_db ovn-nbctl get-connection
+      changed_when: false
+      failed_when: false
+      register: ovn_check_result
+      check_mode: no
+
+    - name: Group hosts by leader/follower role
+      group_by:
+        key: "ovn_nb_{{ 'leader' if ovn_check_result.rc == 0 else 'follower' }}"
+      changed_when: false
+
+    - name: Assert one leader exists
+      assert:
+        that:
+          - groups['ovn_nb_leader'] | default([]) | length == 1
+
+- name: Fix OVN chassis priorities
+  hosts: ovn_nb_leader
+  vars:
+    ovn_nb_db_group: controllers
+    ovn_nb_db_hosts_sorted: "{{ query('inventory_hostnames', ovn_nb_db_group) | sort | list }}"
+    ha_chassis_max_priority: 32767
+    gateway_chassis_max_priority: "{{ ovn_nb_db_hosts_sorted | length }}"
+  tasks:
+    - name: Fix ha_chassis priorities
+      command: >-
+        docker exec -it ovn_nb_db
+        bash -c '
+        ovn-nbctl find ha_chassis chassis_name={{ item }} |
+        awk '\''$1 == "_uuid" { print $3 }'\'' |
+        while read uuid; do ovn-nbctl set ha_chassis $uuid priority={{ priority }}; done'
+      loop: "{{ ovn_nb_db_hosts_sorted }}"
+      vars:
+        priority: "{{ ha_chassis_max_priority | int - ovn_nb_db_hosts_sorted.index(item) }}"
+
+    - name: Fix gateway_chassis priorities
+      command: >-
+        docker exec -it ovn_nb_db
+        bash -c '
+        ovn-nbctl find gateway_chassis chassis_name={{ item }} |
+        awk '\''$1 == "_uuid" { print $3 }'\'' |
+        while read uuid; do ovn-nbctl set gateway_chassis $uuid priority={{ priority }}; done'
+      loop: "{{ ovn_nb_db_hosts_sorted }}"
+      vars:
+        priority: "{{ gateway_chassis_max_priority | int - ovn_nb_db_hosts_sorted.index(item) }}"
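
After the playbook has run, the resulting priorities can be inspected on the OVN NB DB leader. This is a sketch only, assuming the ``ovn_nb_db`` container name used by the playbook and the standard ``--columns`` option of ``ovn-nbctl``:

   # List chassis names and priorities in both tables on the leader.
   docker exec ovn_nb_db ovn-nbctl --columns=chassis_name,priority list ha_chassis
   docker exec ovn_nb_db ovn-nbctl --columns=chassis_name,priority list gateway_chassis

Each host should appear once per table, with the alphabetically first OVN NB DB host holding the highest priority.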