
Commit bc0e586 (parent b10fc10)

ci(e2e): clean values and debug; simplify SDS wiring; minimal helper scripts; HTTPS-only ingress; param cleanup wait

- remove report scripts and the local tests task
- drop unused values (storageProfiles, infra/virtualDisks/security, extra feature flags)
- add build_parent_kubeconfig.sh + inject_registry_cfg.sh and use them in the workflow
- call task nested:storage:sds directly from the workflow; remove the wrapper task
- trim logs (no lsblk, no set -x); keep concise status output
- ingress: drop the port-80 service and wildcard-http ingress; keep HTTPS passthrough only
- cleanup: parameterized namespace prefix + wait for namespace deletion

Signed-off-by: Anton Yachmenev <[email protected]>

File tree: 12 files changed, +110 / -1007 lines


.github/workflows/e2e-matrix.yml (31 additions & 68 deletions)
@@ -166,28 +166,10 @@ jobs:
         shell: bash
         run: |
           set -euo pipefail
-          mkdir -p "$HOME/.kube"
-          cat > "$HOME/.kube/config" <<EOF
-          apiVersion: v1
-          kind: Config
-          clusters:
-          - cluster:
-              server: ${E2E_K8S_URL}
-              insecure-skip-tls-verify: true
-            name: parent
-          contexts:
-          - context:
-              cluster: parent
-              user: sa
-            name: parent
-          current-context: parent
-          users:
-          - name: sa
-            user:
-              token: "${{ secrets.E2E_NESTED_SA_SECRET }}"
-          EOF
-          chmod 600 "$HOME/.kube/config"
-          echo "KUBECONFIG=$HOME/.kube/config" >> "$GITHUB_ENV"
+          chmod +x ci/dvp-e2e/scripts/build_parent_kubeconfig.sh
+          KCFG="$HOME/.kube/config"
+          ci/dvp-e2e/scripts/build_parent_kubeconfig.sh -o "$KCFG" -a "${E2E_K8S_URL}" -t "${{ secrets.E2E_NESTED_SA_SECRET }}"
+          echo "KUBECONFIG=$KCFG" >> "$GITHUB_ENV"

       - name: Prepare run values.yaml
         working-directory: ci/dvp-e2e
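The new helper is one of the 12 files in this commit, but its diff is not shown in this excerpt. A minimal sketch, assuming only the -o/-a/-t contract visible in the workflow and the kubeconfig shape the deleted inline heredoc produced:

#!/usr/bin/env bash
# Sketch of ci/dvp-e2e/scripts/build_parent_kubeconfig.sh (actual file not shown here).
# -o: output kubeconfig path, -a: API server URL, -t: bearer token.
set -euo pipefail

out="" api="" token=""
while getopts "o:a:t:" opt; do
  case "$opt" in
    o) out="$OPTARG" ;;
    a) api="$OPTARG" ;;
    t) token="$OPTARG" ;;
    *) echo "usage: $0 -o <kubeconfig> -a <api-url> -t <token>" >&2; exit 2 ;;
  esac
done
[ -n "$out" ] && [ -n "$api" ] && [ -n "$token" ] || { echo "missing -o/-a/-t" >&2; exit 2; }

mkdir -p "$(dirname "$out")"
# Same kubeconfig the old inline step wrote, just parameterized.
cat > "$out" <<EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: ${api}
    insecure-skip-tls-verify: true
  name: parent
contexts:
- context:
    cluster: parent
    user: sa
  name: parent
current-context: parent
users:
- name: sa
  user:
    token: "${token}"
EOF
chmod 600 "$out"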
@@ -222,7 +204,9 @@ jobs:
       - name: Inject REGISTRY_DOCKER_CFG into values.yaml
         working-directory: ci/dvp-e2e
         run: |
-          yq eval --inplace '.deckhouse.registryDockerCfg = strenv(REGISTRY_DOCKER_CFG)' "${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/values.yaml"
+          chmod +x scripts/inject_registry_cfg.sh
+          VALS="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/values.yaml"
+          REGISTRY_DOCKER_CFG="${REGISTRY_DOCKER_CFG}" scripts/inject_registry_cfg.sh -f "$VALS" -v "$REGISTRY_DOCKER_CFG"

       - name: Docker login to Deckhouse registry
         uses: docker/login-action@v3
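Likewise, inject_registry_cfg.sh is added by this commit but not displayed here. A sketch that wraps the replaced yq invocation behind the -f/-v flags seen above (the option names are the only contract visible in this view):

#!/usr/bin/env bash
# Sketch of ci/dvp-e2e/scripts/inject_registry_cfg.sh (actual file not shown here).
# -f: values.yaml path, -v: registry dockercfg value.
set -euo pipefail

file="" value=""
while getopts "f:v:" opt; do
  case "$opt" in
    f) file="$OPTARG" ;;
    v) value="$OPTARG" ;;
    *) echo "usage: $0 -f <values.yaml> -v <dockercfg>" >&2; exit 2 ;;
  esac
done
[ -n "$file" ] && [ -n "$value" ] || { echo "missing -f/-v" >&2; exit 2; }

# strenv() keeps the value out of the yq expression itself, so quoting or
# special characters in the base64 dockercfg cannot break the edit.
REGISTRY_DOCKER_CFG="$value" \
  yq eval --inplace '.deckhouse.registryDockerCfg = strenv(REGISTRY_DOCKER_CFG)' "$file"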
@@ -323,29 +307,14 @@ jobs:
             NESTED_KUBECONFIG="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/nested/kubeconfig" \
             PARENT_KUBECONFIG="${KUBECONFIG}"

-      - name: Configure storage classes
+      - name: Configure SDS in nested cluster
         working-directory: ci/dvp-e2e
         run: |
-          echo "💾 Configuring storage classes for profile: sds-replicated-volume -> sds"
-          task nested:storage:configure \
-            STORAGE_PROFILE="sds" \
-            TARGET_STORAGE_CLASS="${{ steps.profile-config.outputs.storage_class }}" \
+          echo "💾 Configuring SDS storage (sds-node-configurator + sds-replicated-volume)"
+          task nested:storage:sds \
             TMP_DIR="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}" \
-            VALUES_FILE="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/values.yaml" \
-            GENERATED_VALUES_FILE="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/generated-values.yaml" \
-            SSH_DIR="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/ssh" \
-            SSH_FILE_NAME="id_ed" \
-            PASSWORD_FILE="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/password.txt" \
-            PASSWORD_HASH_FILE="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/password-hash.txt" \
-            NAMESPACE="${{ env.RUN_ID }}" \
-            DOMAIN="" \
-            DEFAULT_USER="ubuntu" \
-            NESTED_DIR="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/nested" \
-            NESTED_KUBECONFIG="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/nested/kubeconfig"
-
-          # Ingress smoke disabled: not required for storage config
-
-          # Ceph CSI smoke check removed per request
+            NESTED_KUBECONFIG="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/nested/kubeconfig" \
+            SDS_SC_NAME="${{ steps.profile-config.outputs.storage_class }}"

       - name: Upload run context
         if: always()
@@ -378,35 +347,29 @@ jobs:
         shell: bash
         run: |
           set -euo pipefail
-          mkdir -p "$HOME/.kube"
-          cat > "$HOME/.kube/config" <<EOF
-          apiVersion: v1
-          kind: Config
-          clusters:
-          - cluster:
-              server: ${E2E_K8S_URL}
-              insecure-skip-tls-verify: true
-            name: parent
-          contexts:
-          - context:
-              cluster: parent
-              user: sa
-            name: parent
-          current-context: parent
-          users:
-          - name: sa
-            user:
-              token: "${{ secrets.E2E_NESTED_SA_SECRET }}"
-          EOF
-          chmod 600 "$HOME/.kube/config"
-          echo "KUBECONFIG=$HOME/.kube/config" >> "$GITHUB_ENV"
+          chmod +x ci/dvp-e2e/scripts/build_parent_kubeconfig.sh
+          KCFG="$HOME/.kube/config"
+          ci/dvp-e2e/scripts/build_parent_kubeconfig.sh -o "$KCFG" -a "${E2E_K8S_URL}" -t "${{ secrets.E2E_NESTED_SA_SECRET }}"
+          echo "KUBECONFIG=$KCFG" >> "$GITHUB_ENV"

       - name: Cleanup test namespaces
+        env:
+          CLEANUP_PREFIX: ${{ vars.CLEANUP_PREFIX || 'nightly-nested-e2e-' }}
         run: |
           set -euo pipefail
-          echo "🧹 Cleaning up namespaces matching 'nightly-nested-e2e-*'"
-          kubectl get ns -o name | grep "namespace/nightly-nested-e2e-" | cut -d/ -f2 | \
-            xargs -r kubectl delete ns --wait=false || echo "[INFO] No namespaces to delete"
+          echo "🧹 Cleaning namespaces with prefix '${CLEANUP_PREFIX}'"
+          ns_list=$(kubectl get ns -o json | jq -r --arg p "$CLEANUP_PREFIX" '.items[].metadata.name | select(startswith($p))')
+          if [ -z "$ns_list" ]; then
+            echo "[INFO] No namespaces to delete"; exit 0
+          fi
+          for ns in $ns_list; do
+            echo "[CLEANUP] Deleting namespace $ns ..."
+            kubectl delete ns "$ns" --wait=false || true
+          done
+          echo "[CLEANUP] Waiting for namespaces to be deleted..."
+          for ns in $ns_list; do
+            kubectl wait --for=delete ns/"$ns" --timeout=600s || echo "[WARN] Namespace $ns was not fully deleted within timeout"
+          done

       - name: Report cleanup results
         if: always()
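The prefix selection can be sanity-checked by hand against the parent cluster before relying on the nightly job; this is the same jq filter the step runs, with the default prefix inlined:

kubectl get ns -o json \
  | jq -r --arg p 'nightly-nested-e2e-' '.items[].metadata.name | select(startswith($p))'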

ci/dvp-e2e/Taskfile.yaml (4 additions & 79 deletions)
@@ -232,8 +232,6 @@ tasks:
     cmds:
       - |
         set -euo pipefail
-        # Enable shell tracing when DEBUG_HOTPLUG is set
-        [ -n "${DEBUG_HOTPLUG:-}" ] && set -x || true
         echo "[INFRA] Attaching {{ .DISK_COUNT }} storage disks to worker VMs using hotplug in namespace {{ .NAMESPACE }}"

         # Wait for worker VMs
@@ -368,45 +366,18 @@
             fi
             sleep 5

-            # Minimal periodic debug snapshot approximately every 60 seconds
-            if [ $((i % 12)) -eq 0 ]; then
-              echo "[DEBUG] VMBDA $vd summary:"
-              kubectl -n {{ .NAMESPACE }} get virtualmachineblockdeviceattachment "$vd" -o json \
-                | jq -r '{phase: .status.phase, conditions: (.status.conditions // []) | map({type, status, reason, message})}' || true
-              echo "[DEBUG] VM $vm block devices (summary):"
-              kubectl -n {{ .NAMESPACE }} get vm "$vm" -o json \
-                | jq -r '{phase: .status.phase, blockDeviceRefs: (.status.blockDeviceRefs // []) | map({name, virtualMachineBlockDeviceAttachmentName, attached, hotplugged})}' || true
-            fi
-          done
+          done

           if [ "$phase" != "Attached" ] && [ "${success_by_vm:-0}" -ne 1 ]; then
             echo "[ERROR] Disk $vd failed to attach to VM $vm within timeout" >&2
-            echo "[DEBUG] Final VMBDA summary:"
-            kubectl -n {{ .NAMESPACE }} get virtualmachineblockdeviceattachment "$vd" -o json \
-              | jq -r '{phase: .status.phase, conditions: (.status.conditions // []) | map({type, status, reason, message})}' || true
-            echo "[DEBUG] VM $vm block devices (summary):"
-            kubectl -n {{ .NAMESPACE }} get vm "$vm" -o json \
-              | jq -r '{phase: .status.phase, blockDeviceRefs: (.status.blockDeviceRefs // []) | map({name, virtualMachineBlockDeviceAttachmentName, attached, hotplugged})}' || true
+            # final debug snapshots removed
             exit 1
           fi
         done

         echo "[INFRA] VM $vm configured with hotplug disks"

-        # Optional on-node lsblk debug snapshot (requires d8 and SSH key). Always sudo for block devices visibility.
-        if command -v d8 >/dev/null 2>&1; then
-          echo "[DEBUG] Collecting lsblk from VM $vm..."
-          if ! d8 v ssh --username='{{ .DEFAULT_USER }}' \
-            --identity-file='{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}' \
-            --local-ssh=true \
-            --local-ssh-opts='-o StrictHostKeyChecking=no' \
-            --local-ssh-opts='-o UserKnownHostsFile=/dev/null' \
-            "${vm}.{{ .NAMESPACE }}" -c "sudo lsblk -o NAME,KNAME,TYPE,SIZE,MODEL,TRAN,FSTYPE,MOUNTPOINT -p"; then
-            echo "[WARN] lsblk collection failed for $vm (SSH)" >&2
-          fi
-        else
-          echo "[WARN] 'd8' CLI not found, skipping lsblk collection for $vm" >&2
-        fi
+
       done

       echo "[INFRA] All worker VMs configured with storage disks via hotplug"
@@ -711,26 +682,7 @@
         task dhctl-bootstrap VALUES_FILE='{{ .VALUES_FILE }}' TMP_DIR='{{ .TMP_DIR }}' SSH_FILE_NAME='id_ed'
       } 2>&1 | tee '{{ .LOG_FILE }}'

-  local:tests:
-    desc: Local flow — prepare nested kubeconfig and run E2E (logs saved)
-    vars:
-      RUN_ID: '{{ .RUN_ID | default (printf "local-%s" (now | date "20060102-150405")) }}'
-      RUN_NAMESPACE: '{{ .RUN_NAMESPACE | default (printf "dvp-e2e-local-%s" .RUN_ID) }}'
-      TMP_DIR: '{{ .TMP_DIR | default (printf "%s/runs/%s" .TMP_ROOT .RUN_ID) }}'
-      LOG_FILE: '{{ .LOG_FILE | default (printf "%s/%s" .TMP_DIR "tests.log") }}'
-      E2E_DIR: '{{ .E2E_DIR | default (env "E2E_DIR") | default "../../tests/e2e" }}'
-      NESTED_SC: '{{ .NESTED_SC | default "ceph-pool-r2-csi-rbd-immediate" }}'
-    cmds:
-      - mkdir -p {{ .TMP_DIR }}
-      - |
-        set -euo pipefail
-        {
-          task nested:kubeconfig NAMESPACE='{{ .RUN_NAMESPACE }}' TMP_DIR='{{ .TMP_DIR }}'
-          task nested:storage:sds NESTED_KUBECONFIG='{{ .TMP_DIR }}/nested-{{ .RUN_NAMESPACE }}/kubeconfig' SDS_SC_NAME='{{ .NESTED_SC }}'
-          task nested:ensure-sc NAMESPACE='{{ .RUN_NAMESPACE }}' TMP_DIR='{{ .TMP_DIR }}' SC_NAME='{{ .NESTED_SC }}'
-          task nested:ensure-vmclass-default NESTED_KUBECONFIG='{{ .TMP_DIR }}/nested-{{ .RUN_NAMESPACE }}/kubeconfig'
-          task nested:e2e NAMESPACE='{{ .RUN_NAMESPACE }}' TMP_DIR='{{ .TMP_DIR }}' E2E_DIR='{{ .E2E_DIR }}'
-        } 2>&1 | tee '{{ .LOG_FILE }}'
+

 # ------------------------------------------------------------
 # Nested cluster helpers (SC + kubeconfig)
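The removed local:tests flow can still be reproduced by chaining the remaining tasks by hand. A sketch based on the deleted task body; the TMP_DIR root is illustrative and the storage class is the old default:

RUN_ID="local-$(date +%Y%m%d-%H%M%S)"
RUN_NAMESPACE="dvp-e2e-local-${RUN_ID}"
TMP_DIR="/tmp/dvp-e2e/runs/${RUN_ID}"          # illustrative TMP_ROOT
NESTED_SC="ceph-pool-r2-csi-rbd-immediate"     # old NESTED_SC default
mkdir -p "$TMP_DIR"
{
  task nested:kubeconfig NAMESPACE="$RUN_NAMESPACE" TMP_DIR="$TMP_DIR"
  task nested:storage:sds NESTED_KUBECONFIG="$TMP_DIR/nested-$RUN_NAMESPACE/kubeconfig" SDS_SC_NAME="$NESTED_SC"
  task nested:ensure-sc NAMESPACE="$RUN_NAMESPACE" TMP_DIR="$TMP_DIR" SC_NAME="$NESTED_SC"
  task nested:ensure-vmclass-default NESTED_KUBECONFIG="$TMP_DIR/nested-$RUN_NAMESPACE/kubeconfig"
  task nested:e2e NAMESPACE="$RUN_NAMESPACE" TMP_DIR="$TMP_DIR" E2E_DIR="../../tests/e2e"
} 2>&1 | tee "$TMP_DIR/tests.log"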
@@ -851,33 +803,6 @@
         sleep 10
       done

-  nested:storage:configure:
-    desc: Configure SDS storage profile inside nested cluster
-    vars:
-      STORAGE_PROFILE: '{{ .STORAGE_PROFILE | default "sds" }}'
-      NESTED_KUBECONFIG: "{{ .NESTED_KUBECONFIG }}"
-      TARGET_STORAGE_CLASS: "{{ .TARGET_STORAGE_CLASS }}"
-      STORAGE_PROFILE_NORMALIZED:
-        sh: |
-          case '{{ .STORAGE_PROFILE }}' in
-            sds|sds-local|sds_local|sds-replicated|sds_replicated) echo sds ;;
-            *) echo '{{ .STORAGE_PROFILE }}' ;;
-          esac
-    cmds:
-      - cmd: 'echo "[STORAGE] normalized profile = {{ .STORAGE_PROFILE_NORMALIZED }}"'
-      - |
-        set -euo pipefail
-        if [ '{{ .STORAGE_PROFILE_NORMALIZED }}' != "sds" ]; then
-          echo "[ERR] Only SDS storage profile is supported. Got: {{ .STORAGE_PROFILE_NORMALIZED }}" >&2
-          exit 1
-        fi
-      - |
-        echo "[SDS] Configuring SDS storage..."
-      - |
-        task nested:storage:sds \
-          NESTED_KUBECONFIG='{{ .NESTED_KUBECONFIG }}' \
-          SDS_SC_NAME='{{ .TARGET_STORAGE_CLASS }}'

   nested:storage:sds:
     desc: Configure SDS storage profile in nested cluster
     vars:
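Callers of the removed wrapper now invoke nested:storage:sds directly, as the workflow step above does; the minimal form (kubeconfig path and storage class name are placeholders):

task nested:storage:sds \
  NESTED_KUBECONFIG="$TMP_DIR/nested/kubeconfig" \
  SDS_SC_NAME="my-storage-class"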

ci/dvp-e2e/charts/cluster-config/templates/virtualization.yaml (1 addition & 3 deletions)
@@ -1,6 +1,5 @@
 ---
-{{- if hasKey .Values "features" }}
-{{- if .Values.features.virtualization }}
+{{- if and (hasKey .Values "features") (.Values.features.virtualization) }}
 apiVersion: deckhouse.io/v1alpha1
 kind: ModuleConfig
 metadata:
@@ -25,4 +24,3 @@ spec:
   imageTag: {{ .Values.virtualization.tag }}
   scanInterval: 15s
 {{- end }}
-{{- end }}
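A quick render check of the collapsed guard, assuming the chart renders with its default values (helm template and --set are standard Helm 3):

# With the flag forced off, the virtualization ModuleConfig should be
# absent from the rendered output; with chart defaults it is present.
helm template ci/dvp-e2e/charts/cluster-config --set features.virtualization=false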

ci/dvp-e2e/charts/cluster-config/values.yaml (1 addition & 25 deletions)
@@ -46,32 +46,8 @@ storageClasses:
   workers:
     root: ceph-pool-r2-csi-rbd-immediate

-# Infrastructure components
-infra:
-  nfs:
-    storageClass: nfs-4-1-wffc
-  dvcr:
-    storageClass: ceph-pool-r2-csi-rbd-immediate
-
-# Virtual disks configuration
-virtualDisks:
-  os:
-    storageClass: ceph-pool-r2-csi-rbd-immediate
-  data:
-    storageClass: nfs-4-1-wffc
-
-# Security settings
-security:
-  admissionPolicyEngine:
-    enabled: true
-  networkPolicies:
-    enabled: true
-
-# Feature flags
+# Feature flags (only those used by templates)
 features:
   virtualization: true
-  monitoring: true
-  logging: true
-  ingress: true
 nfs:
   enabled: false
Ingress/service template (file path not preserved in this view) (0 additions & 37 deletions)
@@ -1,18 +1,3 @@
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: dvp-over-dvp-80
-  namespace: {{ .Values.namespace }}
-spec:
-  ports:
-    - port: 80
-      targetPort: 80
-      protocol: TCP
-      name: http
-  selector:
-    dvp.deckhouse.io/node-group: master
----
 apiVersion: v1
 kind: Service
 metadata:
@@ -50,25 +35,3 @@ spec:
             port:
               number: 443
 {{- end }}
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: wildcard-http
-  namespace: {{ .Values.namespace }}
-  annotations:
-    nginx.ingress.kubernetes.io/ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/rewrite-target: /
-spec:
-  ingressClassName: nginx
-  rules:
-    - host: "*.{{ .Values.namespace }}.{{ .Values.domain }}"
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: dvp-over-dvp-80
-                port:
-                  number: 80
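With only the 443 passthrough path left, a hypothetical smoke check could confirm the exposure change; the hostnames below are placeholders, not taken from the diff:

# The port-80 service and wildcard-http ingress are gone: plain HTTP should fail.
curl -s -o /dev/null -w '%{http_code}\n' "http://test.<namespace>.<domain>/" || echo "no HTTP route (expected)"
# HTTPS passthrough should still answer (self-signed cert, hence -k).
curl -ks -o /dev/null -w '%{http_code}\n' "https://test.<namespace>.<domain>/"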
