feat: Add deployment scripts and hard node affinity for nginx-load-balancer example
examples/nginx-load-balancer/clean-deploy.sh (new executable file, 202 additions)
@@ -0,0 +1,202 @@
#!/bin/bash

# Clean deploy script for nginx-load-balancer
# Removes existing resources and deploys with fixes applied

set -e

echo "🧹 Clean Deploy nginx-load-balancer"
echo "==================================="
echo ""

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo "🗑️ Step 1: Cleaning up existing resources..."
echo ""

# Delete existing resources if they exist
echo "Removing existing nginx-load-balancer resources..."

if kubectl get deployment nginx-load-balancer &> /dev/null; then
    echo "• Deleting deployment..."
    kubectl delete deployment nginx-load-balancer --ignore-not-found=true
fi

if kubectl get service nginx-load-balancer-service &> /dev/null; then
    echo "• Deleting service..."
    kubectl delete service nginx-load-balancer-service --ignore-not-found=true
fi

if kubectl get configmap nginx-load-balancer-content &> /dev/null; then
    echo "• Deleting configmaps..."
    kubectl delete configmap nginx-load-balancer-content --ignore-not-found=true
fi

if kubectl get configmap nginx-load-balancer-nginx-config &> /dev/null; then
    kubectl delete configmap nginx-load-balancer-nginx-config --ignore-not-found=true
fi

echo -e "${GREEN}✅ Cleanup complete${NC}"
echo ""

# Wait a moment for resources to be fully cleaned up
echo "⏳ Waiting for cleanup to complete (5 seconds)..."
sleep 5
echo ""

echo "🚀 Step 2: Deploying with fixes applied..."
echo ""

# Deploy ConfigMaps
echo "📦 Deploying ConfigMaps..."
if kubectl apply -f nginx-load-balancer-configmaps.yaml; then
    echo -e "${GREEN}✅ ConfigMaps deployed successfully${NC}"
else
    echo -e "${RED}❌ Failed to deploy ConfigMaps${NC}"
    exit 1
fi
echo ""

# Deploy Deployment (with hard node affinity)
echo "📦 Deploying nginx application (3 replicas, worker-only)..."
if kubectl apply -f nginx-load-balancer-deployment.yaml; then
    echo -e "${GREEN}✅ Deployment created successfully${NC}"
    echo " • Hard node affinity: Worker nodes only"
    echo " • Replicas: 3"
    echo " • Image: nginx:alpine"
else
    echo -e "${RED}❌ Failed to create deployment${NC}"
    exit 1
fi
echo ""

# Deploy Service (without externalTrafficPolicy: Local)
echo "📦 Creating LoadBalancer service..."
if kubectl apply -f nginx-load-balancer-service.yaml; then
    echo -e "${GREEN}✅ LoadBalancer service created successfully${NC}"
    echo " • Type: LoadBalancer"
    echo " • externalTrafficPolicy: not set (defaults to Cluster; optimized for Mycelium)"
    echo " • Dual-stack: IPv4 + IPv6"
else
    echo -e "${RED}❌ Failed to create service${NC}"
    exit 1
fi
echo ""

echo "⏳ Step 3: Waiting for deployment to be ready..."
echo " This may take up to 60 seconds..."
echo ""

# Wait for deployment to be ready
if kubectl wait --for=condition=available deployment/nginx-load-balancer --timeout=120s; then
    echo -e "${GREEN}✅ Deployment is ready${NC}"
else
    echo -e "${YELLOW}⚠️ Deployment not fully ready yet (pods may still be starting)${NC}"
fi
echo ""

# Check pod status
echo "🔍 Step 4: Checking pod distribution..."
PODS=$(kubectl get pods -l app=nginx-load-balancer -o wide)

if echo "$PODS" | grep -q "master"; then
    echo -e "${RED}❌ ISSUE: Pods found on master nodes (this shouldn't happen with hard affinity)${NC}"
    echo "$PODS"
else
    echo -e "${GREEN}✅ EXCELLENT: No pods on master nodes (hard affinity working)${NC}"
fi

echo "$PODS"
echo ""

# Count pods (drop the header and any empty lines so an empty listing counts as 0)
TOTAL_PODS=$(echo "$PODS" | grep -v "NAME" | sed '/^$/d' | wc -l)
echo "Total pods running: $TOTAL_PODS"

if [ "$TOTAL_PODS" -eq 3 ]; then
    echo -e "${GREEN}✅ Perfect: 3/3 pods running${NC}"
elif [ "$TOTAL_PODS" -lt 3 ]; then
    echo -e "${YELLOW}⚠️ Partial: $TOTAL_PODS/3 pods running (pods may still be starting)${NC}"
else
    echo -e "${YELLOW}⚠️ Unexpected: $TOTAL_PODS pods running (expected 3)${NC}"
fi
echo ""

# Check LoadBalancer service
echo "🌐 Step 5: Checking LoadBalancer service..."
SERVICE=$(kubectl get svc nginx-load-balancer-service)
echo "$SERVICE"
echo ""

SERVICE_IP=$(kubectl get svc nginx-load-balancer-service -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
SERVICE_PORT=$(kubectl get svc nginx-load-balancer-service -o jsonpath='{.spec.ports[0].port}')

if [ -n "$SERVICE_IP" ]; then
    echo -e "${GREEN}✅ LoadBalancer IP assigned: $SERVICE_IP${NC}"
    echo " Service URL: http://$SERVICE_IP:$SERVICE_PORT"

    # Test LoadBalancer access
    echo ""
    echo "🧪 Testing LoadBalancer IP access..."
    if timeout 10 curl -s -f "http://$SERVICE_IP:$SERVICE_PORT" > /dev/null 2>&1; then
        echo -e "${GREEN}✅ LoadBalancer IP is accessible!${NC}"
        echo " Test command: curl -6 'http://$SERVICE_IP:$SERVICE_PORT'"
    else
        echo -e "${YELLOW}⚠️ LoadBalancer IP not yet accessible (may take time to propagate)${NC}"
        echo " Try: curl -6 'http://$SERVICE_IP:$SERVICE_PORT'"
    fi
else
    echo -e "${YELLOW}⚠️ LoadBalancer IP not yet assigned (may take time)${NC}"
fi
echo ""

# Test direct node access
echo "🔗 Step 6: Testing direct node access..."
NODE_IPS=$(kubectl get nodes -o jsonpath='{range .items[*]}{.status.addresses[?(@.type=="InternalIP")].address}{"\n"}{end}' | head -3)

for node_ip in $NODE_IPS; do
    echo "Testing node: $node_ip"
    if timeout 5 curl -s -f "http://$node_ip:8080" > /dev/null 2>&1; then
        echo -e "${GREEN} ✅ Direct node access works${NC}"
        break
    else
        echo -e "${YELLOW} ⚠️ Direct node access failed (no pod on this node)${NC}"
    fi
done
echo ""

# Final summary
echo "==================================="
echo "🎉 CLEAN DEPLOY COMPLETE!"
echo "==================================="
echo ""

echo -e "${BLUE}📊 Deployment Summary:${NC}"
echo "• ✅ Clean deployment with fixes applied"
echo "• ✅ Hard node affinity (worker nodes only)"
echo "• ✅ LoadBalancer service (externalTrafficPolicy left at default)"
echo "• ✅ 3 nginx replicas"
echo ""

if [ -n "$SERVICE_IP" ]; then
    echo -e "${BLUE}🌐 Access Information:${NC}"
    echo "• LoadBalancer URL: http://$SERVICE_IP:$SERVICE_PORT"
    echo " (requests should be load balanced across all pods)"
    echo ""
    echo "• Direct node access: Get node IPs with 'kubectl get nodes -o wide'"
    echo " (this bypasses the load balancer and goes directly to the pods)"
    echo ""
    echo -e "${BLUE}🧪 Test Commands:${NC}"
    echo "• LoadBalancer (load balancing): curl -6 'http://$SERVICE_IP:$SERVICE_PORT'"
    echo "• Node access: Get a node IP and run curl -6 'http://[node-ip]:8080'"
    echo "• Monitor pods: kubectl get pods -l app=nginx-load-balancer -o wide"
    echo "• Check service: kubectl get svc nginx-load-balancer-service"
fi

echo ""
echo -e "${GREEN}✅ Clean deploy with fixes completed successfully!${NC}"