Skip to content

Commit

Permalink
CI: Use the debug stdout callback instead of manual debug
Browse files Browse the repository at this point in the history
This displays the result of most tasks in a human-readable way, and
should be far more readable than what we have now, which is frequently a
bunch of unreadable json.

+ some small fixes (using delegate_to instead of a when
  <control_plane_host> condition)
  • Loading branch information
VannTen committed Dec 13, 2024
1 parent 12ed1fc commit a669a07
Show file tree
Hide file tree
Showing 6 changed files with 68 additions and 151 deletions.
1 change: 1 addition & 0 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ variables:
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"
ANSIBLE_STDOUT_CALLBACK: "debug"
MAGIC: "ci check this"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
Expand Down
3 changes: 0 additions & 3 deletions tests/testcases/010_check-apiserver.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,6 @@
delay: 5
until: apiserver_response is success

- debug: # noqa name[missing]
msg: "{{ apiserver_response.json }}"

- name: Check API servers version
assert:
that:
Expand Down
3 changes: 0 additions & 3 deletions tests/testcases/015_check-nodes-ready.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,6 @@
changed_when: false
register: get_nodes

- debug: # noqa name[missing]
msg: "{{ get_nodes.stdout.split('\n') }}"

- name: Check that all nodes are running and ready
command: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
changed_when: false
Expand Down
10 changes: 0 additions & 10 deletions tests/testcases/020_check-pods-running.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,6 @@
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
register: get_pods

- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"

- name: Check that all pods are running and ready
command: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
Expand All @@ -35,13 +31,7 @@
- '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.containerStatuses") | map("map", attribute = "ready") | map("min") | min'
retries: 30
delay: 10
failed_when: false

- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
register: get_pods

- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success
66 changes: 20 additions & 46 deletions tests/testcases/030_check-network.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,6 @@
register: csr_json
changed_when: false

- debug: # noqa name[missing]
var: csrs

- name: Check there are csrs
assert:
that: csrs | length > 0
Expand Down Expand Up @@ -67,10 +64,6 @@
when: get_csr.stdout_lines | length > 0
changed_when: certificate_approve.stdout

- debug: # noqa name[missing]
msg: "{{ certificate_approve.stdout.split('\n') }}"


- name: Create test namespace
command: "{{ bin_dir }}/kubectl create namespace test"
changed_when: false
Expand Down Expand Up @@ -107,52 +100,33 @@
type: RuntimeDefault
changed_when: false

- import_role: # noqa name[missing]
name: cluster-dump

- name: Check that all pods are running and ready
block:
- name: Check Deployment is ready
command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180"
command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180s"
changed_when: false
rescue:
- name: Get pod names
command: "{{ bin_dir }}/kubectl get pods -n test -o json"
changed_when: false
register: pods

- name: Get running pods
command: "{{ bin_dir }}/kubectl get pods -n test -o
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
changed_when: false
register: running_pods

- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
register: get_pods

- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
- name: Check pods IP are in correct network
assert:
that: (pods.stdout | from_json)['items']
| selectattr('status.phase', '==', 'Running') |
| selectattr('status.podIP', 'ansible.utils.in_network', kube_pods_subnet)
| length == 2

- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- curl {{ item[1] }}:8080"
with_nested:
- "{{ pod_names }}"
- "{{ pod_ips }}"
rescue:
- name: List pods cluster-wide
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false

- name: Set networking facts
set_fact:
kube_pods_subnet: 10.233.64.0/18
pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"
pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute='status.podIP') | list }}"
pods_running: |
{% set list = running_pods.stdout.split(" ") %}
{{ list }}
- name: Check pods IP are in correct network
assert:
that: item | ansible.utils.ipaddr(kube_pods_subnet)
when:
- item in pods_running
with_items: "{{ pod_ips }}"

- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- curl {{ item[1] }}:8080"
with_nested:
- "{{ pod_names }}"
- "{{ pod_ips }}"
- import_role: # noqa name[missing]
name: cluster-dump
- fail: # noqa name[missing]
136 changes: 47 additions & 89 deletions tests/testcases/040_check-network-adv.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,97 +54,53 @@
- netchecker-agent-hostnet
when: not pods_json is success

- debug: # noqa name[missing]
var: nca_pod.stdout_lines
when: inventory_hostname == groups['kube_control_plane'][0]

- name: Get netchecker agents
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
return_content: true
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
register: agents
retries: 18
delay: "{{ agent_report_interval }}"
until: agents.content | length > 0 and
agents.content[0] == '{' and
agents.content | from_json | length >= groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2
failed_when: false

- name: Check netchecker status
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
status_code: 200
return_content: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
register: connectivity_check
retries: 3
delay: "{{ agent_report_interval }}"
until: connectivity_check.content | length > 0 and
connectivity_check.content[0] == '{'
failed_when: false
when:
- agents.content != '{}'

- debug: # noqa name[missing]
var: pods_json
run_once: true

- name: Get kube-proxy logs
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not connectivity_check is success

- name: Get logs from other apps
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- not connectivity_check is success
with_items:
- kube-router
- flannel
- canal-node
- calico-node
- cilium

- name: Parse agents list
set_fact:
agents_check_result: "{{ agents.content | from_json }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when:
- agents is success
- agents.content is defined
- agents.content[0] == '{'

- debug: # noqa name[missing]
var: agents_check_result
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when:
- agents_check_result is defined

- name: Parse connectivity check
set_fact:
connectivity_check_result: "{{ connectivity_check.content | from_json }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Perform netchecker tests
run_once: true
when:
- connectivity_check is success
- connectivity_check.content is defined
- connectivity_check.content[0] == '{'

- debug: # noqa name[missing]
var: connectivity_check_result
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when:
- connectivity_check_result is defined
block:
- name: Get netchecker agents
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
return_content: true
register: agents
retries: 18
delay: "{{ agent_report_interval }}"
until: agents.content | length > 0 and
agents.content[0] == '{' and
agents.content | from_json | length >= groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2

- name: Check netchecker status
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
status_code: 200
return_content: true
register: connectivity_check
retries: 3
delay: "{{ agent_report_interval }}"
until: connectivity_check.content | length > 0 and
connectivity_check.content[0] == '{'

rescue:
- name: Get kube-proxy logs
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"

- name: Get logs from other apps
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
with_items:
- kube-router
- flannel
- canal-node
- calico-node
- cilium

- name: Netchecker tests failed
fail:
msg: "netchecker tests failed"

- name: Check connectivity with all netchecker agents
vars:
connectivity_check_result: "{{ connectivity_check.content | from_json }}"
agents_check_result: "{{ agents_check.content | from_json }}"
assert:
that:
- agents_check_result is defined
Expand Down Expand Up @@ -193,8 +149,9 @@
- name: samplepod
command: ["/bin/bash", "-c", "sleep 2000000000000"]
image: dougbtv/centos-network
delegate_to: groups['kube_control_plane'][0]
run_once: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
- kube_network_plugin_multus | default(false) | bool

- name: Check secondary macvlan interface
Expand All @@ -203,6 +160,7 @@
until: output.rc == 0
retries: 90
changed_when: false
delegate_to: groups['kube_control_plane'][0]
run_once: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
- kube_network_plugin_multus | default(false) | bool

0 comments on commit a669a07

Please sign in to comment.