157 lines
5.5 KiB
Bash
Executable File
#!/bin/bash

# NodePort Connectivity Debug Script
# Diagnoses why NodePort 30091 is not accessible despite successful ping.
#
# Requirements: kubectl configured against the target cluster; a deployment
#   labeled app=nginx-nodeport exposed by service nginx-nodeport-service.
# Exit status: 1 if nginx is not responding inside its own pod (nothing else
#   can work in that case); 0 otherwise — failed probes are reported as
#   findings, not fatal errors.

set -eu

echo "🔍 NodePort Connectivity Diagnosis"
echo "===================================="
echo ""

# Colors (ANSI escapes, rendered via echo -e)
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
readonly GREEN YELLOW RED NC

# Temp files for probe output: mktemp instead of predictable /tmp names,
# and a trap so they are removed on every exit path (including the early
# 'exit 1' below, which previously leaked them).
TEST_RESULT=$(mktemp)
NODEPORT_RESULT=$(mktemp)
cleanup() { rm -f -- "$TEST_RESULT" "$NODEPORT_RESULT"; }
trap cleanup EXIT

# Get pod info (first pod carrying the app=nginx-nodeport label)
POD_NAME=$(kubectl get pods -l app=nginx-nodeport -o name | head -1)
POD_NODE=$(kubectl get pods -l app=nginx-nodeport -o jsonpath='{.items[0].spec.nodeName}')
# First IPv6 InternalIP of the node hosting the pod.
# NOTE(review): the regex assumes lowercase hex with at least two leading
# hextets, so compressed forms such as "::1" would be missed — acceptable
# for real node addresses, but confirm against this cluster's addressing.
NODE_IPV6=$(kubectl get node "$POD_NODE" -o jsonpath='{range .status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}' | grep -E '^[0-9a-f]+:[0-9a-f]+:' | head -1)

echo "📊 Pod Information:"
echo " Pod: $POD_NAME"
echo " Node: $POD_NODE"
echo " Node IPv6: $NODE_IPV6"
echo ""

# Check service configuration. sed is the last pipeline stage, so a grep
# with no matches does not trip 'set -e'.
echo "🔍 Service Configuration:"
kubectl get svc nginx-nodeport-service -o yaml | grep -E "type:|nodePort:|externalTrafficPolicy:|ipFamilies:" | sed 's/^/ /'
echo ""

# Check endpoints backing the service
echo "🔍 Service Endpoints:"
kubectl get endpoints nginx-nodeport-service -o wide
echo ""

# Test 1: nginx must answer on localhost inside its own pod; if it does not,
# no external path can work, so bail out.
echo "🧪 Test 1: Internal pod access (localhost)"
if kubectl exec "$POD_NAME" -- curl -s -m 5 http://localhost:8080/health > /dev/null 2>&1; then
  echo -e " ${GREEN}✅ SUCCESS${NC} - nginx responding on localhost:8080"
else
  echo -e " ${RED}❌ FAILED${NC} - nginx not responding internally"
  exit 1
fi
echo ""

# Test 2: reach the service ClusterIP from a throwaway curl pod.
# '|| true' because a failed probe is a diagnostic finding, not a script error.
echo "🧪 Test 2: Service ClusterIP access"
SERVICE_IP=$(kubectl get svc nginx-nodeport-service -o jsonpath='{.spec.clusterIP}')
echo " Service ClusterIP: $SERVICE_IP"

echo " Creating test pod..."
kubectl run test-curl --image=curlimages/curl:latest --rm -i --restart=Never --command -- sh -c "curl -s -m 5 http://$SERVICE_IP:8080/health" > "$TEST_RESULT" 2>&1 || true

if grep -q "healthy" "$TEST_RESULT" 2>/dev/null; then
  echo -e " ${GREEN}✅ SUCCESS${NC} - Service ClusterIP accessible"
else
  echo -e " ${YELLOW}⚠️ FAILED${NC} - Service ClusterIP not accessible"
  cat "$TEST_RESULT" 2>/dev/null || true
fi
echo ""

# Check if kube-proxy is running (it programs the NodePort rules)
echo "🔍 kube-proxy Status:"
KUBE_PROXY_PODS=$(kubectl get pods -n kube-system -l k8s-app=kube-proxy --no-headers 2>/dev/null | wc -l)
if [ "$KUBE_PROXY_PODS" -gt 0 ]; then
  echo -e " ${GREEN}✅ kube-proxy running${NC} ($KUBE_PROXY_PODS pods)"
else
  echo -e " ${RED}❌ kube-proxy NOT running${NC}"
fi
echo ""

# NodePort listener check: we cannot shell into the node from here, so print
# the commands the operator should run on the worker itself.
echo "🔍 NodePort Listener Check:"
echo " Checking if port 30091 is being listened on..."
echo ""
echo " Note: On the worker node ($POD_NODE), check with:"
echo " ssh root@$POD_NODE 'netstat -tuln | grep 30091'"
echo " ssh root@$POD_NODE 'ss -tuln | grep 30091'"
echo ""

# Test 3: hit the NodePort on the pod's node over IPv6 from inside the cluster
echo "🧪 Test 3: NodePort access from test pod"
echo " Testing: http://[$NODE_IPV6]:30091/"

kubectl run test-nodeport --image=curlimages/curl:latest --rm -i --restart=Never --command -- sh -c "curl -6 -s -m 10 http://[$NODE_IPV6]:30091/health" > "$NODEPORT_RESULT" 2>&1 || true

if grep -q "healthy" "$NODEPORT_RESULT" 2>/dev/null; then
  echo -e " ${GREEN}✅ SUCCESS${NC} - NodePort accessible from within cluster"
else
  echo -e " ${RED}❌ FAILED${NC} - NodePort not accessible from within cluster"
  echo " Output:"
  sed 's/^/ /' "$NODEPORT_RESULT" 2>/dev/null || true
fi
echo ""

# Common root causes the operator should investigate on the node
echo "🔍 Potential Issues:"
echo ""
echo " 1. IPv6 Support in kube-proxy:"
echo " kube-proxy might not be configured for IPv6"
echo " Check with: kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipv6"
echo ""
echo " 2. Firewall on worker node:"
echo " Port 30091 might be blocked by firewall"
echo " On node: sudo iptables -L -n | grep 30091"
echo " On node: sudo ip6tables -L -n | grep 30091"
echo ""
echo " 3. externalTrafficPolicy: Local:"
echo " Service only accessible on the node where pod is running"
echo " Pod is on: $POD_NODE"
echo " You must use that node's IPv6: $NODE_IPV6"
echo ""
echo " 4. IPv6 iptables rules:"
echo " kube-proxy might not have created IPv6 rules"
echo " On node: sudo ip6tables -t nat -L -n | grep 30091"
echo ""

# Summary: which node IP must be used, plus all worker IPv6s for reference
echo "📋 Summary:"
echo ""
echo " Pod running on node: $POD_NODE"
echo " MUST use this IPv6: $NODE_IPV6"
echo ""
echo " Try accessing:"
echo " curl -6 \"http://[$NODE_IPV6]:30091/\""
echo ""
echo " All worker node IPs (for reference):"
kubectl get nodes -o jsonpath='{range .items[*]}{range .status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}{end}' | grep -E '^[0-9a-f]+:[0-9a-f]+:' | nl -v 1 -w 5 -s '. ' | sed 's/^/ /'
echo ""

# Follow-up checks for the operator
echo "🔧 Recommended Checks:"
echo ""
echo "1. Check if this is a K3s cluster:"
echo " kubectl version --short"
echo ""
echo "2. Check kube-proxy mode:"
echo " kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i mode"
echo ""
echo "3. Check if IPv6 is enabled in cluster:"
echo " kubectl get nodes -o jsonpath='{.items[0].status.addresses}' | grep ':'"
echo ""
echo "4. Check service IP families:"
echo " kubectl get svc nginx-nodeport-service -o jsonpath='{.spec.ipFamilies}'"
echo ""
echo "5. Try changing externalTrafficPolicy to Cluster:"
echo " kubectl patch svc nginx-nodeport-service -p '{\"spec\":{\"externalTrafficPolicy\":\"Cluster\"}}'"
echo ""

echo "======================================"
echo "Diagnosis complete!"