# helm-validation.yml
name: Helm Validation

on:
  push:
    branches:
      - '**'
  workflow_dispatch:
    inputs:
      debug_enabled:
        type: boolean
        description: 'Run the build with tmate debugging enabled'
        required: false
        default: false
      debug_delay_duration_minutes:
        type: number
        description: 'Duration to delay job completion in minutes'
        required: false
        default: 5
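
# Every branch push runs the full suite; a manual dispatch can additionally open
# a tmate debug session and delay job completion via the two inputs above.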
jobs:
  e2e-tests-with-helm:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Login to Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: "${{ github.repository_owner }}"
          password: "${{ github.token }}"
      - name: Restore Cached Docker Images
        id: cache_docker_images
        uses: actions/cache@v4
        with:
          path: /tmp/.docker_cache
          key: docker-${{ runner.os }}-${{ hashFiles('.github/s3_and_iam_deployment/.env') }}
          restore-keys: |
            docker-${{ runner.os }}-
      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.16.3
      - name: Create Kind Cluster
        uses: helm/kind-action@v1
        with:
          version: v0.21.0
          wait: 90s
          cluster_name: helm-test-cluster
      - name: Verify KIND cluster is running
        run: |
          kubectl cluster-info
          kubectl get nodes
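      # The next step runs two independent preparations concurrently: installing
      # the upstream COSI controller/CRDs and building + side-loading the driver
      # image into kind, while any cached S3/IAM images are loaded into Docker.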
      - name: Setup COSI, S3 and IAM environments
        run: |
          set -e -o pipefail
          (
            echo "=== Setup COSI Controller, CRDs and Driver ==="
            kubectl create -k github.com/kubernetes-sigs/container-object-storage-interface
            make container
            kind load docker-image ghcr.io/scality/cosi-driver:latest --name helm-test-cluster
          ) &
          COSI_PID=$!
          (
            echo "=== Loading cached S3 and IAM Docker images ==="
            if [ -d /tmp/.docker_cache ] && [ "$(ls -A /tmp/.docker_cache 2>/dev/null)" ]; then
              for image in /tmp/.docker_cache/*.tar; do
                docker load -i "$image" || true # a bad archive should not fail the job
              done
            else
              echo "No cached images found. Skipping load."
            fi
          ) &
          IMAGES_PID=$!
          # Wait on each PID explicitly: a bare `wait` always returns 0, so a
          # failing subshell would otherwise slip past `set -e`.
          wait "$COSI_PID"
          wait "$IMAGES_PID"
- name: "Debug: SSH to runner"
uses: scality/actions/action-ssh-to-runner@v1
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
detached: true
if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
timeout-minutes: 10
continue-on-error: true
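      # Bring up the IAM (Vault) and S3 (CloudServer) containers and block until
      # their local ports answer; 8600 and 8000 are presumably the IAM and S3
      # endpoints defined in the compose file, and the second argument to
      # wait_for_local_port.bash is presumably a timeout/retry budget.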
      - name: Setup IAM and S3 Services
        run: |-
          set -e -o pipefail
          mkdir -p logs/s3 logs/iam logs/cosi_driver data/vaultdb
          chown -R runner:docker logs data
          chmod -R ugo+rwx logs data
          docker compose --profile iam_s3 up -d --quiet-pull
          bash ../scripts/wait_for_local_port.bash 8600 30
          bash ../scripts/wait_for_local_port.bash 8000 30
        working-directory: .github/s3_and_iam_deployment
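      # On a cache miss, export the images that the compose deployment pulled so
      # the restore step above finds them on the next run.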
      - name: Save Images to Cache if not present
        if: steps.cache_docker_images.outputs.cache-hit != 'true'
        run: |
          source .github/s3_and_iam_deployment/.env
          echo "Vault Image: $VAULT_IMAGE"
          echo "CloudServer Image: $CLOUDSERVER_IMAGE"
          mkdir -p /tmp/.docker_cache
          docker save "$VAULT_IMAGE" -o /tmp/.docker_cache/vault_image.tar
          docker save "$CLOUDSERVER_IMAGE" -o /tmp/.docker_cache/cloudserver_image.tar
        shell: bash
      - name: Install Scality COSI Driver using Helm Chart
        run: |
          helm install scality-cosi-driver ./helm/scality-cosi-driver \
            --namespace container-object-storage-system \
            --create-namespace \
            --set image.tag=latest \
            --set traces.otel_stdout=true
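      # image.tag=latest resolves to the driver image side-loaded into kind
      # earlier. `helm install` is safe here because every run starts from a
      # fresh cluster; on a persistent cluster, `helm upgrade --install` would
      # be the idempotent alternative.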
      - name: Print all resources in container-object-storage-system namespace
        run: |
          kubectl get all -n container-object-storage-system
      - name: Verify Helm Installation
        run: |
          .github/scripts/verify_helm_install.sh
      - name: E2E tests for greenfield use case using kustomize
        run: |
          .github/scripts/e2e_tests_greenfield_use_case.sh
      - name: E2E tests for brownfield use case using kustomize
        run: |
          .github/scripts/e2e_tests_brownfield_use_case.sh
      # The script takes the expected request count for each API as positional
      # arguments, in this order: CREATE_BUCKET, DELETE_BUCKET, GET_INFO,
      # GRANT_ACCESS, REVOKE_ACCESS. The invocation below asserts the totals
      # accumulated by the e2e steps above:
      #   - 2 CREATE_BUCKET
      #   - 1 DELETE_BUCKET
      #   - 1 GET_INFO
      #   - 2 GRANT_ACCESS
      #   - 2 REVOKE_ACCESS
      - name: Verify metrics for healthcheck route
        run: |
          .github/scripts/e2e_tests_metrics.sh 2 1 1 2 2
- name: "Delay completion"
if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
uses: scality/actions/[email protected]
with:
completion_delay_m: ${{ inputs.debug_delay_duration_minutes }}
continue-on-error: true
      - name: Cleanup IAM and S3 Services
        run: docker compose --profile iam_s3 down
        working-directory: .github/s3_and_iam_deployment
      - name: Move S3 and IAM logs and data to artifacts directory
        if: always()
        run: |-
          set -e -o pipefail
          mkdir -p .github/e2e_tests/artifacts/logs .github/e2e_tests/artifacts/data
          cp -r .github/s3_and_iam_deployment/logs/* .github/e2e_tests/artifacts/logs/
          cp -r .github/s3_and_iam_deployment/data/* .github/e2e_tests/artifacts/data/
      - name: Capture Kubernetes Logs in artifacts directory
        if: always()
        run: |
          .github/scripts/capture_k8s_logs.sh
      - name: Cleanup Helm Release and Namespace
        if: always()
        run: |
          helm uninstall scality-cosi-driver -n container-object-storage-system
          kubectl delete namespace container-object-storage-system
      - name: Upload logs and data to Scality artifacts
        if: always()
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: .github/e2e_tests/artifacts