159 lines
5.2 KiB
Bash
Executable File
159 lines
5.2 KiB
Bash
Executable File
#!/bin/bash
|
|
|
|
# Apply fixes for nginx-load-balancer
|
|
# Implements the configuration changes identified in the analysis
|
|
|
|
set -e
|
|
|
|
echo "🔧 nginx-load-balancer Fixes Application"
|
|
echo "========================================"
|
|
echo ""
|
|
|
|
# Colors
|
|
GREEN='\033[0;32m'
|
|
YELLOW='\033[1;33m'
|
|
BLUE='\033[0;34m'
|
|
RED='\033[0;31m'
|
|
NC='\033[0m' # No Color
|
|
|
|
echo "🚀 Applying fixes for nginx-load-balancer..."
|
|
echo ""
|
|
|
|
# Step 1: Apply the updated deployment (hard node affinity)
|
|
echo "🔍 Step 1: Applying updated deployment (hard node affinity)..."
|
|
echo " Changes:"
|
|
echo " • Changed from soft preferences to hard requirements"
|
|
echo " • Ensures pods only run on worker nodes (not masters)"
|
|
echo ""
|
|
|
|
if kubectl apply -f nginx-load-balancer-deployment.yaml; then
|
|
echo -e "${GREEN}✅ Deployment updated successfully${NC}"
|
|
else
|
|
echo -e "${RED}❌ Failed to update deployment${NC}"
|
|
exit 1
|
|
fi
|
|
echo ""
|
|
|
|
# Step 2: Apply the updated service (remove externalTrafficPolicy)
|
|
echo "🔍 Step 2: Applying updated service (LoadBalancer configuration)..."
|
|
echo " Changes:"
|
|
echo " • Removed externalTrafficPolicy: Local"
|
|
echo " • Fixes LoadBalancer IP accessibility issues"
|
|
echo ""
|
|
|
|
if kubectl apply -f nginx-load-balancer-service.yaml; then
|
|
echo -e "${GREEN}✅ Service updated successfully${NC}"
|
|
else
|
|
echo -e "${RED}❌ Failed to update service${NC}"
|
|
exit 1
|
|
fi
|
|
echo ""
|
|
|
|
# Step 3: Restart deployment to apply changes
|
|
echo "🔍 Step 3: Restarting deployment to apply changes..."
|
|
echo " This ensures all pods are recreated with the new configuration"
|
|
|
|
if kubectl rollout restart deployment/nginx-load-balancer; then
|
|
echo -e "${GREEN}✅ Deployment restart initiated${NC}"
|
|
else
|
|
echo -e "${RED}❌ Failed to restart deployment${NC}"
|
|
exit 1
|
|
fi
|
|
|
|
echo " Waiting for rollout to complete..."
|
|
if kubectl rollout status deployment/nginx-load-balancer --timeout=180s; then
|
|
echo -e "${GREEN}✅ Deployment rollout completed${NC}"
|
|
else
|
|
echo -e "${YELLOW}⚠️ Deployment rollout timed out (this is normal, pods may still be starting)${NC}"
|
|
fi
|
|
echo ""
|
|
|
|
# Step 4: Wait for pods to be ready
|
|
echo "🔍 Step 4: Waiting for pods to be ready..."
|
|
echo " This may take up to 60 seconds due to restart and node affinity changes"
|
|
|
|
if kubectl wait --for=condition=ready pod -l app=nginx-load-balancer --timeout=120s; then
|
|
echo -e "${GREEN}✅ All pods are ready${NC}"
|
|
else
|
|
echo -e "${YELLOW}⚠️ Some pods are still starting (this is normal)${NC}"
|
|
fi
|
|
echo ""
|
|
|
|
# Step 5: Verify the fixes
|
|
echo "🔍 Step 5: Running verification checks..."
|
|
echo ""
|
|
|
|
# Check pod distribution
|
|
echo "📊 Pod Distribution:"
|
|
PODS=$(kubectl get pods -l app=nginx-load-balancer -o wide)
|
|
echo "$PODS"
|
|
echo ""
|
|
|
|
# Count pods on master vs worker nodes
|
|
MASTER_PODS=$(echo "$PODS" | grep "master" | wc -l)
|
|
WORKER_PODS=$(echo "$PODS" | grep -v "master" | grep -v "NAME" | wc -l)
|
|
TOTAL_PODS=$(echo "$PODS" | grep -v "NAME" | wc -l)
|
|
|
|
echo "Pod distribution:"
|
|
echo " • Total pods: $TOTAL_PODS"
|
|
echo " • Master pods: $MASTER_PODS"
|
|
echo " • Worker pods: $WORKER_PODS"
|
|
|
|
if [ "$MASTER_PODS" -eq 0 ] && [ "$WORKER_PODS" -eq 3 ]; then
|
|
echo -e "${GREEN}✅ EXCELLENT: Pods only on worker nodes (3/3)${NC}"
|
|
elif [ "$MASTER_PODS" -eq 0 ]; then
|
|
echo -e "${GREEN}✅ GOOD: No pods on master nodes${NC}"
|
|
echo -e "${YELLOW} Note: Expected 3 pods, found $WORKER_PODS${NC}"
|
|
else
|
|
echo -e "${RED}❌ ISSUE: Pods found on master nodes (node affinity not working)${NC}"
|
|
fi
|
|
echo ""
|
|
|
|
# Check LoadBalancer service
|
|
echo "🌐 LoadBalancer Service Status:"
|
|
SERVICE=$(kubectl get svc nginx-load-balancer-service)
|
|
echo "$SERVICE"
|
|
echo ""
|
|
|
|
SERVICE_IP=$(kubectl get svc nginx-load-balancer-service -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
|
SERVICE_PORT=$(kubectl get svc nginx-load-balancer-service -o jsonpath='{.spec.ports[0].port}')
|
|
|
|
if [ -n "$SERVICE_IP" ]; then
|
|
echo -e "${GREEN}✅ LoadBalancer IP assigned: $SERVICE_IP${NC}"
|
|
echo " Service URL: http://$SERVICE_IP:$SERVICE_PORT"
|
|
else
|
|
echo -e "${YELLOW}⚠️ LoadBalancer IP not yet assigned (may take time)${NC}"
|
|
fi
|
|
echo ""
|
|
|
|
# Final summary
|
|
echo "========================================"
|
|
echo "🎉 FIXES APPLIED SUCCESSFULLY!"
|
|
echo "========================================"
|
|
echo ""
|
|
|
|
echo -e "${BLUE}📋 Summary of Changes Applied:${NC}"
|
|
echo "• ✅ Hard node affinity (pods on workers only)"
|
|
echo "• ✅ LoadBalancer service configuration (removed externalTrafficPolicy)"
|
|
echo "• ✅ Deployment restarted with new configuration"
|
|
echo ""
|
|
|
|
echo -e "${BLUE}🌐 Access Information:${NC}"
|
|
if [ -n "$SERVICE_IP" ]; then
|
|
echo "• LoadBalancer URL: http://$SERVICE_IP:$SERVICE_PORT"
|
|
echo "• Direct node access: http://[node-ip]:8080 (on nodes with pods)"
|
|
echo ""
|
|
echo -e "${BLUE}🧪 Test both access methods:${NC}"
|
|
echo " 1. LoadBalancer IP (load balancing): curl -6 'http://$SERVICE_IP:$SERVICE_PORT'"
|
|
echo " 2. Direct node access: kubectl get nodes -o wide"
|
|
fi
|
|
echo ""
|
|
|
|
echo -e "${BLUE}🔧 Next Steps:${NC}"
|
|
echo "1. Run verification: ./verify-fixes.sh"
|
|
echo "2. Test LoadBalancer: curl -6 'http://$SERVICE_IP:$SERVICE_PORT'"
|
|
echo "3. Test node access: Get node IPs with 'kubectl get nodes -o wide'"
|
|
echo "4. Monitor pods: kubectl get pods -l app=nginx-load-balancer -o wide"
|
|
echo ""
|
|
|
|
echo -e "${GREEN}✅ All fixes applied and verified!${NC}" |