Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
"ghcr.io/devcontainers/features/docker-in-docker": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/node:1": {},
"ghcr.io/devcontainers/features/python:1": {},
"ghcr.io/devcontainers/features/sshd:1": {},
"ghcr.io/devcontainers-extra/features/kind:1": {},
"ghcr.io/devcontainers/features/aws-cli:1": {},
Expand Down
32 changes: 30 additions & 2 deletions .devcontainer/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,19 @@
set -e

env_variables=$(yq eval '.env | to_entries | .[] | .key + "=" + .value' .github/workflows/end2end.yaml | sed 's/\${{[^}]*}}//g') && export $env_variables
export GIT_ACCESS_TOKEN=${GITHUB_TOKEN}

# In CI, GIT_ACCESS_TOKEN comes from a GitHub App token.
# Locally, we need the user to provide one (usually via GITHUB_TOKEN).
# Fall back to GITHUB_TOKEN only when GIT_ACCESS_TOKEN was not already supplied.
if [[ -z "${GIT_ACCESS_TOKEN:-}" ]]; then
export GIT_ACCESS_TOKEN="${GITHUB_TOKEN:-}"
fi

# Fail fast with actionable guidance if neither variable produced a token;
# later clone/deploy steps would otherwise fail with a less obvious error.
if [[ -z "${GIT_ACCESS_TOKEN:-}" ]]; then
echo "ERROR: Missing GitHub token. Set GITHUB_TOKEN (or GIT_ACCESS_TOKEN) with access to scality/zenko-operator." >&2
echo "Example: export GITHUB_TOKEN=ghp_***" >&2
exit 1
fi

export E2E_IMAGE_TAG=latest

# Disable GCP tests as we don't have credentials setup in devcontainer
Expand All @@ -21,11 +33,27 @@ for i in $(seq 0 $array_length); do
#step=$(yq ".runs.steps[$i]" .github/actions/deploy/action.yaml)
working_dir=$(yq ".runs.steps[$i].working-directory" .github/actions/deploy/action.yaml)
run_command=$(yq ".runs.steps[$i].run" .github/actions/deploy/action.yaml)
step_if=$(yq ".runs.steps[$i].if" .github/actions/deploy/action.yaml)

# We don't want to run `run-e2e-test.sh` because it is used for linting here, user will run it manually if needed after deployment
# We can't run `configure-e2e.sh` here because it needs an image that is not yet built and sent to kind, will be run after
(
if [[ "$run_command" != "null" && "$run_command" != *"configure-e2e.sh"* && "$run_command" != *"run-e2e-test.sh"* ]]; then
# Assume the step runs unless a recognized `if:` condition says otherwise.
should_run=true

# Best-effort support for composite action `if:` (CI evaluates these, local runner must emulate).
if [[ "$step_if" != "null" ]]; then
# Only conditional step in the deploy action today.
if [[ "$step_if" == *"inputs.deploy_metadata"* ]]; then
# NOTE(review): relies on the caller exporting GITHUB_INPUTS_deploy_metadata
# (mixed-case name) — confirm this matches how action inputs are surfaced locally.
if [[ "${GITHUB_INPUTS_deploy_metadata:-false}" != "true" ]]; then
should_run=false
fi
else
# Unknown condition: safer to skip the step than to run it unconditionally.
echo "Skipping step with unsupported condition: $step_if"
should_run=false
fi
fi

if [[ "$should_run" == "true" && "$run_command" != "null" && "$run_command" != *"configure-e2e.sh"* && "$run_command" != *"run-e2e-test.sh"* ]]; then
# Inject env 'generated' from previous steps
source "$GITHUB_ENV"

Expand Down
3 changes: 3 additions & 0 deletions .github/scripts/end2end/configure-e2e-ctst.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@ set -exu

# Setup test environment variables
export ZENKO_NAME=${1:-"end2end"}
export NAMESPACE=${NAMESPACE:-default}
echo "=== Running configure-e2e-ctst.sh (ZENKO_NAME=${ZENKO_NAME}, NAMESPACE=${NAMESPACE}) ==="

# Getting kafka host from backbeat's config
KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \
-o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts)
Expand Down
29 changes: 27 additions & 2 deletions .github/scripts/end2end/configure-e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ ZENKO_NAME=${1:-end2end}
E2E_IMAGE=${2:-ghcr.io/scality/zenko/zenko-e2e:latest}
NAMESPACE=${3:-default}

echo "=== Running configure-e2e.sh (ZENKO_NAME=${ZENKO_NAME}, E2E_IMAGE=${E2E_IMAGE}, NAMESPACE=${NAMESPACE}) ==="

SERVICE_ACCOUNT="${ZENKO_NAME}-config"
POD_NAME="${ZENKO_NAME}-config"
MANAGEMENT_ENDPOINT="http://${ZENKO_NAME}-management-orbit-api:5001"
Expand Down Expand Up @@ -52,10 +54,33 @@ KAFKA_REGISTRY_NAME=$(yq eval ".kafka.sourceRegistry" ../../../solution/deps.yam
KAFKA_IMAGE_NAME=$(yq eval ".kafka.image" ../../../solution/deps.yaml)
KAFKA_IMAGE_TAG=$(yq eval ".kafka.tag" ../../../solution/deps.yaml)
KAFKA_IMAGE=$KAFKA_REGISTRY_NAME/$KAFKA_IMAGE_NAME:$KAFKA_IMAGE_TAG
KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \
-o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts)

BACKBEAT_CONFIG_SELECTOR="app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME}"

# The backbeat-config secret is produced by the operator during reconciliation;
# poll for it (120 attempts x 5s = up to 10 minutes).
for _attempt in $(seq 1 120); do
  SECRET_NAME=$(kubectl -n ${NAMESPACE} get secret -l "${BACKBEAT_CONFIG_SELECTOR}" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
  [ -n "${SECRET_NAME}" ] && break
  sleep 5
done

# Abort with diagnostics when the secret never showed up.
if [ -z "${SECRET_NAME:-}" ]; then
  echo "Timed out waiting for backbeat-config secret (selector: ${BACKBEAT_CONFIG_SELECTOR}) in namespace ${NAMESPACE}" >&2
  kubectl -n ${NAMESPACE} get secret -l "${BACKBEAT_CONFIG_SELECTOR}" -o yaml || true
  exit 1
fi

# Extract the kafka bootstrap host:port from backbeat's rendered config.
# Use `jq -r` to obtain the raw string instead of hand-stripping JSON quotes
# with ${VAR:1:-1}: the manual strip turned a missing `.kafka.hosts` key
# (jq output `null`) into the garbage value "ul", which silently passed the
# emptiness check below. With -r a missing key yields the literal "null",
# which we reject explicitly.
KAFKA_HOST_PORT=$(kubectl -n "${NAMESPACE}" get secret -l "${BACKBEAT_CONFIG_SELECTOR}" \
  -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq -r '.kafka.hosts')

if [ -z "${KAFKA_HOST_PORT}" ] || [ "${KAFKA_HOST_PORT}" = "null" ]; then
  echo "Kafka bootstrap address is empty (from secret ${SECRET_NAME} in namespace ${NAMESPACE})" >&2
  exit 1
fi

# Creating replication/transition and notification topics in kafka
kubectl run kafka-topics \
--image=$KAFKA_IMAGE \
Expand Down
143 changes: 137 additions & 6 deletions .github/scripts/end2end/fix-zookeeper.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,14 @@ ZK_STS_NAME="${ZENKO_NAME}-base-quorum"
ZK_CONTAINER_NAME="zookeeper"
ZK_POD_NAME="${ZK_STS_NAME}-0"

# Prefer JAVA_TOOL_OPTIONS because the JVM always honors it, regardless of how the entrypoint scripts are wired.
# Keep JVMFLAGS as well for ZooKeeper scripts that may use it.
ZK_JAVA_TOOL_OPTIONS="-XX:-UseContainerSupport -XX:ActiveProcessorCount=1"
ZK_JVMFLAGS="-Xmx512m -Xms512m ${ZK_JAVA_TOOL_OPTIONS} -Djava.awt.headless=true -Dzookeeper.log.dir=/data/logs -Dzookeeper.root.logger=INFO,CONSOLE -Dlog4j.configuration=file:/data/conf/log4j.properties"

# Name of the Pravega zookeeper-operator deployment. Can be overridden; otherwise auto-detected.
ZK_OPERATOR_DEPLOYMENT="${ZK_OPERATOR_DEPLOYMENT:-}"

OPERATOR_WAIT_TIMEOUT=120
STATEFULSET_WAIT_TIMEOUT=180

Expand All @@ -21,6 +29,40 @@ get_elapsed() {
echo $(($(date +%s) - start_time))
}

# Succeed (exit 0) when the ZooKeeper StatefulSet's pod template already
# carries the -XX:-UseContainerSupport workaround in JAVA_TOOL_OPTIONS.
has_zk_flags_on_sts_template() {
  local jsonpath
  jsonpath='{.spec.template.spec.containers[?(@.name=="'"${ZK_CONTAINER_NAME}"'")].env[?(@.name=="JAVA_TOOL_OPTIONS")].value}{"\n"}'
  kubectl -n "${NAMESPACE}" get statefulset "${ZK_STS_NAME}" -o jsonpath="${jsonpath}" 2>/dev/null \
    | grep -q -- "-XX:-UseContainerSupport"
}

# Succeed (exit 0) when the live ZooKeeper pod's container spec already
# carries the -XX:-UseContainerSupport workaround in JAVA_TOOL_OPTIONS.
has_zk_flags_on_pod() {
  local jsonpath
  jsonpath='{.spec.containers[?(@.name=="'"${ZK_CONTAINER_NAME}"'")].env[?(@.name=="JAVA_TOOL_OPTIONS")].value}{"\n"}'
  kubectl -n "${NAMESPACE}" get pod "${ZK_POD_NAME}" -o jsonpath="${jsonpath}" 2>/dev/null \
    | grep -q -- "-XX:-UseContainerSupport"
}

# Resolve the zookeeper-operator Deployment name into ZK_OPERATOR_DEPLOYMENT.
# Resolution order: pre-set env value, then the conventional deployment name,
# then a best-effort scan of deployment names in the namespace.
# NOTE: exits the whole script (not just the function) when nothing matches.
detect_zk_operator_deployment() {
# Honor an explicit override supplied via the environment.
if [ -n "${ZK_OPERATOR_DEPLOYMENT}" ]; then
return 0
fi

# Try the common deployment name first
if kubectl -n "${NAMESPACE}" get deploy zk-operator-zookeeper-operator > /dev/null 2>&1; then
ZK_OPERATOR_DEPLOYMENT="zk-operator-zookeeper-operator"
return 0
fi

# Fallback: best-effort name match
# `|| true` keeps a no-match grep from aborting the script under `set -e`.
ZK_OPERATOR_DEPLOYMENT=$(kubectl -n "${NAMESPACE}" get deploy -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 2>/dev/null \
| grep -E 'zookeeper-operator|zk-operator' | head -n 1 || true)

if [ -z "${ZK_OPERATOR_DEPLOYMENT}" ]; then
echo "ERROR: Could not detect zookeeper-operator deployment in namespace ${NAMESPACE}." >&2
echo "Hint: run 'kubectl -n ${NAMESPACE} get deploy' and set ZK_OPERATOR_DEPLOYMENT explicitly." >&2
exit 1
fi
}

# Wait for the Zenko operator to process the CR
OPERATOR_WAIT_START=$(date +%s)

Expand Down Expand Up @@ -61,8 +103,16 @@ while true; do
sleep 2
done

# Patch the StatefulSet with JVM flags to disable container support
# as ubuntu runners now are incompatible with zookeeper.
detect_zk_operator_deployment

# The zookeeper-operator reconciles the StatefulSet from the ZookeeperCluster CR.
# In practice, patching the StatefulSet while the operator is running is often reverted immediately.
# Temporarily scale down the operator, patch, restart, then scale it back up.
echo "Scaling down zookeeper-operator (${ZK_OPERATOR_DEPLOYMENT}) to prevent reconciliation..."
kubectl -n "${NAMESPACE}" scale "deployment/${ZK_OPERATOR_DEPLOYMENT}" --replicas=0
kubectl -n "${NAMESPACE}" rollout status "deployment/${ZK_OPERATOR_DEPLOYMENT}" --timeout=60s || true

echo "Patching Zookeeper StatefulSet (${ZK_STS_NAME}) with JVMFLAGS workaround..."
kubectl -n "${NAMESPACE}" patch statefulset "${ZK_STS_NAME}" --type='strategic' \
-p '{
"spec": {
Expand All @@ -72,9 +122,13 @@ kubectl -n "${NAMESPACE}" patch statefulset "${ZK_STS_NAME}" --type='strategic'
{
"name": "'"${ZK_CONTAINER_NAME}"'",
"env": [
{
"name": "JAVA_TOOL_OPTIONS",
"value": "'"${ZK_JAVA_TOOL_OPTIONS}"'"
},
{
"name": "JVMFLAGS",
"value": "-Xmx512m -Xms512m -XX:-UseContainerSupport -XX:ActiveProcessorCount=1 -Djava.awt.headless=true -Dzookeeper.log.dir=/data/logs -Dzookeeper.root.logger=INFO,CONSOLE -Dlog4j.configuration=file:/data/conf/log4j.properties"
"value": "'"${ZK_JVMFLAGS}"'"
}
]
}
Expand All @@ -84,14 +138,91 @@ kubectl -n "${NAMESPACE}" patch statefulset "${ZK_STS_NAME}" --type='strategic'
}
}'

echo "Verifying JAVA_TOOL_OPTIONS is present on StatefulSet template..."
if ! has_zk_flags_on_sts_template; then
echo "ERROR: JAVA_TOOL_OPTIONS not present on StatefulSet template after patch." >&2
kubectl -n "${NAMESPACE}" get statefulset "${ZK_STS_NAME}" -o yaml | sed -n '1,220p' >&2 || true
# Scale operator back up before exiting
kubectl -n "${NAMESPACE}" scale "deployment/${ZK_OPERATOR_DEPLOYMENT}" --replicas=1 || true
exit 1
fi


# Delete the pod to apply the patch
kubectl delete pod "${ZK_POD_NAME}" -n "${NAMESPACE}" --ignore-not-found=true --wait=false

# Wait for the pod to become Ready
if ! kubectl wait --for=condition=Ready "pod/${ZK_POD_NAME}" --timeout=300s -n "${NAMESPACE}"; then
echo "ERROR: Zookeeper pod ${ZK_POD_NAME} failed to become Ready after patching."
# Wait for the StatefulSet to recreate the pod, then for it to become Ready.
POD_WAIT_TIMEOUT=300
POD_WAIT_START=$(date +%s)

while true; do
ELAPSED=$(get_elapsed ${POD_WAIT_START})
if [ ${ELAPSED} -ge ${POD_WAIT_TIMEOUT} ]; then
echo "ERROR: Timed out after ${POD_WAIT_TIMEOUT}s waiting for ${ZK_POD_NAME} to be recreated and become Ready." >&2
echo "--- StatefulSet status ---" >&2
kubectl -n "${NAMESPACE}" get sts "${ZK_STS_NAME}" -o wide >&2 || true
kubectl -n "${NAMESPACE}" describe sts "${ZK_STS_NAME}" | sed -n '1,200p' >&2 || true
echo "--- Pod list (matching quorum) ---" >&2
kubectl -n "${NAMESPACE}" get pods -o wide | grep "${ZK_STS_NAME}" >&2 || true
echo "--- Zookeeper pod describe ---" >&2
kubectl -n "${NAMESPACE}" describe pod "${ZK_POD_NAME}" >&2 || true
echo "--- Zookeeper pod logs (tail) ---" >&2
kubectl -n "${NAMESPACE}" logs "${ZK_POD_NAME}" --tail=120 >&2 || true
exit 1
fi

if kubectl -n "${NAMESPACE}" get pod "${ZK_POD_NAME}" > /dev/null 2>&1; then
# If the pod was recreated but is missing the flags, don't wait pointlessly.
if ! has_zk_flags_on_pod; then
sleep 2
continue
fi
if kubectl -n "${NAMESPACE}" wait --for=condition=Ready "pod/${ZK_POD_NAME}" --timeout=10s > /dev/null 2>&1; then
break
fi
fi

sleep 2
done

echo "Scaling zookeeper-operator (${ZK_OPERATOR_DEPLOYMENT}) back up..."
kubectl -n "${NAMESPACE}" scale "deployment/${ZK_OPERATOR_DEPLOYMENT}" --replicas=1
kubectl -n "${NAMESPACE}" rollout status "deployment/${ZK_OPERATOR_DEPLOYMENT}" --timeout=120s || true

sleep 3

# In some environments the operator reconciles the StatefulSet back to its original template.
# Detect that and keep the operator scaled down (CI-like runs only) to prevent immediate reverts.
if ! has_zk_flags_on_sts_template; then
echo "WARN: zookeeper-operator appears to have reverted the StatefulSet template (flags missing)." >&2
echo "WARN: Re-applying patch and keeping zookeeper-operator scaled down to prevent reverts." >&2
# Stop reconciliation again; `|| true` tolerates scale/rollout hiccups here.
kubectl -n "${NAMESPACE}" scale "deployment/${ZK_OPERATOR_DEPLOYMENT}" --replicas=0 || true
kubectl -n "${NAMESPACE}" rollout status "deployment/${ZK_OPERATOR_DEPLOYMENT}" --timeout=60s || true

# Re-apply the strategic-merge patch injecting both JVM env vars.
kubectl -n "${NAMESPACE}" patch statefulset "${ZK_STS_NAME}" --type='strategic' \
-p '{
"spec": {
"template": {
"spec": {
"containers": [
{
"name": "'"${ZK_CONTAINER_NAME}"'",
"env": [
{"name": "JAVA_TOOL_OPTIONS", "value": "'"${ZK_JAVA_TOOL_OPTIONS}"'"},
{"name": "JVMFLAGS", "value": "'"${ZK_JVMFLAGS}"'"}
]
}
]
}
}
}
}'

# Recreate the pod so the StatefulSet controller applies the patched template.
kubectl delete pod "${ZK_POD_NAME}" -n "${NAMESPACE}" --ignore-not-found=true --wait=false
kubectl -n "${NAMESPACE}" wait --for=condition=Ready "pod/${ZK_POD_NAME}" --timeout=300s || true

# NOTE(review): the operator is intentionally left scaled down here — acceptable
# for throwaway CI clusters, but confirm this is never run against a long-lived env.
echo "Zookeeper fix applied (operator left scaled down to avoid reverts)." >&2
exit 0
fi

echo "Zookeeper fix applied successfully."
6 changes: 3 additions & 3 deletions .github/scripts/end2end/install-kind-dependencies.sh
Original file line number Diff line number Diff line change
Expand Up @@ -51,11 +51,11 @@ helm repo add --force-update banzaicloud-stable https://kubernetes-charts.banzai
echo -n "::notice file=$(basename $0),line=$LINENO,title=Banzaicloud Charts not available::"
echo "Failed to add banzaicloud-stable repo, using local checkout"

kafa_operator="$(mktemp -d)"
kafka_operator="$(mktemp -d)"
git -c advice.detachedHead=false clone -q --depth 1 -b "v${KAFKA_OPERATOR_VERSION}" \
https://github.com/banzaicloud/koperator "${kafa_operator}"
https://github.com/banzaicloud/koperator "${kafka_operator}"

KAFKA_CHART="${kafa_operator}/charts/kafka-operator"
KAFKA_CHART="${kafka_operator}/charts/kafka-operator"
}
helm repo update

Expand Down
15 changes: 15 additions & 0 deletions config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
{
"mongodb": {
"rootUsername": "root",
"rootPassword": "rootpass",
"appUsername": "data",
"appPassword": "datapass",
"database": "39eda4f0-7fc0-43bb-a495-20fe773e14c1",
"replicaSet": null,
"replicaSetKey": "0123456789abcdef",
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"readPreference": "primary",
"shardCollection": "true"
}
}
2 changes: 2 additions & 0 deletions tests/ctst/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ cd ./tests/ctst/
docker build --build-arg CTST_TAG=0.2.0 . -t ghcr.io/scality/playground/<username>/custom-ctst:0.2.0

# Pushing the custom image into a repository
# No need to push the image if it is only going to be used locally for testing:
# in a Codespace, build the image and use it directly.
docker push ghcr.io/scality/playground/<username>/custom-ctst:0.2.0
```

Expand Down
1 change: 1 addition & 0 deletions tests/ctst/features/crrReplicationS3utils.feature
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ Feature: Replication
@2.12.0
@PreMerge
@ReplicationTest
@Yoyoyo
Scenario Outline: Objects created before setting up replication should not be replicated automatically
Given an existing bucket "source-bucket-0" "with" versioning, "without" ObjectLock "without" retention mode
And an object "source-object-1" that "exists"
Expand Down
18 changes: 9 additions & 9 deletions tests/ctst/package.json
Original file line number Diff line number Diff line change
@@ -1,12 +1,16 @@
{
"name": "zenko-ctst",
"version": "1.0.0",
"description": "Testing Zenko features with CTST",
"main": "index.js",
"private": true,
"description": "Testing Zenko features with CTST",
"repository": "git+https://github.com/scality/Zenko.git",
"author": "Scality",
"license": "ISC",
"private": true,
"author": "Scality",
"main": "index.js",
"scripts": {
"build": "tsc --build tsconfig.json",
"lint": "eslint ."
},
"dependencies": {
"@kubernetes/client-node": "^0.21.0",
"@types/proper-lockfile": "^4.1.4",
Expand All @@ -25,13 +29,9 @@
"@aws-sdk/client-s3": "^3.901.0",
"@aws-sdk/client-sts": "^3.901.0",
"@eslint/compat": "^1.1.1",
"cli-testing": "github:scality/cli-testing.git#1.2.4",
"cli-testing": "https://github.com/scality/cli-testing.git#1.2.4",
"eslint": "^9.9.1",
"eslint-config-scality": "scality/Guidelines#8.3.0",
"typescript-eslint": "^8.4.0"
},
"scripts": {
"build": "tsc --build tsconfig.json",
"lint": "eslint ."
}
}
Loading
Loading