Kubernetes #2 ConfigMap, PVC, Liveness/Readiness, Blue/Green #
#2025-09-09
1. kubectl 명령어 실습 #
#1 배포된 컨테이너를 쿠버네티스에서 확인하기
# 배포 상태 확인
$ kubectl get pod -n skala-practice | grep sk019
sk019-myfirst-api-server-57fddcd6c8-l4jms 1/1 Running 0 108m
# 서비스 확인
$ kubectl get svc -n skala-practice | grep sk019
sk019-myfirst-api-server ClusterIP 10.100.83.86 <none> 8080/TCP,8081/TCP 18h
#
#2 로컬 <-> Pod 간 파일/디렉토리 복사
# 수행 위치
$ pwd
/Users/yshmbid/Documents/home/github/Cloud/workspace/kubernetes/02.deploy
# Pod 이름 확인
$ kubectl get pod -n skala-practice | grep sk019
sk019-myfirst-api-server-57fddcd6c8-l4jms 1/1 Running 0 120m
# 로컬의 data 디렉토리를 Pod 내부 /app/data 로 복사
$ kubectl cp $(pwd)/data skala-practice/sk019-myfirst-api-server-57fddcd6c8-l4jms:/app/data
# Pod /app/data → 로컬 ./download 디렉토리로 복사
# sk019-myfirst-api-server Pod 내 /app/data 를 로컬 ./download 에 복사
$ kubectl cp skala-practice/sk019-myfirst-api-server-57fddcd6c8-l4jms:/app/data $(pwd)/download
tar: Removing leading '/' from member names
# 복사된 파일 확인
$ ls -al ./download/
total 16
drwxr-xr-x 4 yshmbid staff 128 Sep 9 11:30 .
drwxr-xr-x 8 yshmbid staff 256 Sep 9 11:30 ..
drwxr-xr-x 3 yshmbid staff 96 Sep 9 11:30 data
-rw-r--r-- 1 yshmbid staff 7173 Sep 9 11:30 data.yaml
#
2. digest 개념 & 레지스트리에서 Docker 이미지 내려받기 #
# Harbor 레지스트리에서 이미지 Pull
$ sudo docker pull --platform=linux/amd64 amdp-registry.skala-ai.com/skala25a/skala-k8s.base@sha256:24834d6a4a35ed1f26a4abb63398b8f4a1a343ae13685c2567581fb57d4d1dcd
2025/09/09 11:39:19 must use ASL logging (which requires CGO) if running as root
amdp-registry.skala-ai.com/skala25a/skala-k8s.base@sha256:24834d6a4a35ed1f26a4abb63398b8f4a1a343ae13685c2567581fb57d4d1dcd: Pulling from skala25a/skala-k8s.base
...
# 로컬 이미지 목록 확인
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
amdp-registry.skala-ai.com/skala25a/sk019-myfirst-api-server 1.0.0 aa813ed844f7 2 hours ago 471MB
sk019-myfirst-api-server 1.0.0 aa813ed844f7 2 hours ago 471MB
...
#
3. ConfigMap #
- 실습내용
- ConfigMap 생성 → 연결 → Pod/Service 확인 → Endpoint 확인
#
#1 ConfigMap 생성
# 수행 위치
$ pwd
/Users/yshmbid/Documents/home/github/Cloud/workspace/kubernetes/04.configmap
# application-prod.yaml 파일을 기반으로 ConfigMap 생성
$ kubectl create configmap sk019-myfirst-configmap \
--from-file=application-prod.yaml \
--namespace=skala-practice
configmap/sk019-myfirst-configmap created
#
#2 ConfigMap 연결, 배포
# 배포(deploy)와 서비스(service) 설정 적용
$ kubectl apply -f deploy.yaml
deployment.apps/sk019-myfirst-api-server configured
$ kubectl apply -f service.yaml
service/sk019-myfirst-api-server unchanged
# 배포 상태 확인
$ kubectl get deploy -n skala-practice | grep sk019
sk019-myfirst-api-server 1/1 1 1 20h
- 20h -> 이 Deployment가 생성된 지 20시간 됨
- deployment.apps/sk019-myfirst-api-server configured
- 기존에 동일한 Deployment가 있었고 내용이 갱신되었다. 새로 생성된 게 아니고 업데이트(rolling update)가 일어났음.
#
#3 서비스 확인 & Port-forward
# 서비스 목록 확인
$ kubectl get svc -n skala-practice | grep sk019
sk019-myfirst-api-server ClusterIP 10.100.83.86 <none> 8080/TCP,8081/TCP 20h
# 로컬 <-> Pod 포트포워딩
$ kubectl port-forward svc/sk019-myfirst-api-server -n skala-practice 8080:8080
Forwarding from 127.0.0.1:8080 -> 8080
Forwarding from [::1]:8080 -> 8080
http://localhost:8080/api/developer-info에서 확인하면?
제대로 나온다.
#
#4 Pod 내부 확인
# Pod 목록 확인
$ kubectl get pod -n skala-practice | grep sk019
sk019-myfirst-api-server-77f8d4955c-grxlk 1/1 Running 0 6m33s
# 특정 Pod의 상세 yaml 확인
$ kubectl get pod sk019-myfirst-api-server-77f8d4955c-grxlk -n skala-practice -o yaml
# Endpoint 확인
$ kubectl get service -n skala-practice -o wide
$ kubectl get endpoints -n skala-practice -o yaml
#
4. PVC로 로컬 yaml 파일을 Pod에 연결 #
# 수행 위치
$ pwd
/Users/yshmbid/Documents/home/github/Cloud/workspace/kubernetes/05.pvc
# 1. PVC 생성하기
# pvc.yaml 적용
$ kubectl apply -f pvc.yaml
persistentvolumeclaim/sk019-efs-sc-myfirst-api-server-pvc created
# PVC 상태 확인
$ kubectl get pvc sk019-efs-sc-myfirst-api-server-pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
sk019-efs-sc-myfirst-api-server-pvc Bound pvc-b89fd661-f70a-4c93-8ec9-c6b5ac85db48 10Mi RWX efs-sc-shared <unset> 31s
# 2. PVC 핸들링 Pod 생성
# Pod 생성
$ kubectl apply -f handle-pvc-pod.yaml
pod/sk019-pvc-handler-pod created
# 생성 확인
$ kubectl get pod | grep sk019-pvc-handler-pod
sk019-pvc-handler-pod 1/1 Running 0 18s
# 3. PVC에 application-prod.yaml 복사
# PVC 핸들러 Pod에 파일 복사
$ kubectl cp application-prod.yaml sk019-pvc-handler-pod:/config/
# Pod 접속해서 확인
$ kubectl exec -it sk019-pvc-handler-pod -- /bin/bash
root@sk019-pvc-handler-pod:/# cd /config
root@sk019-pvc-handler-pod:/config# ls -al
total 8
drwxrwxrwx. 2 root root 6144 Sep 9 04:50 .
drwxr-xr-x. 1 root root 75 Sep 9 04:49 ..
-rw-r--r--. 1 501 staff 1016 Sep 9 04:50 application-prod.yaml
root@sk019-pvc-handler-pod:/config# cat application-prod.yaml
spring:
datasource:
url: jdbc:h2:mem:testdb
driverClassName: org.h2.Driver
username: admin
...
root@sk019-pvc-handler-pod:/config# exit
exit
# 4. myfirst-api-server Deployment 재배포
# 기존 deploy.yaml 수정본을 적용 (PVC 마운트 설정 포함)
$ kubectl apply -f deploy.yaml
deployment.apps/sk019-myfirst-api-server configured
# Pod 상태 확인
$ kubectl get pod | grep sk019
sk019-myfirst-api-server-6c558c98d5-bjz7n 1/1 Running 0 13s
sk019-pvc-handler-pod 1/1 Running 0 3m28s
- Postman으로 https://backend.skala25a.project.skala-ai.com/sk019/api/developer-info에서 적용 확인해보기.
- 결과해석
- "role": "pvc-operator"
- "level": "pvc"
- "position": "pvc-project"
- "detail": "pvc"
- 아까 PVC 안 /app/config/application-prod.yaml 파일에 넣었던 값과 동일함. 즉, Spring Boot 애플리케이션이 이제 ConfigMap 값이 아니라 PVC에서 마운트된 application-prod.yaml 을 읽고 있다.
#
5. Pod lifecycle #
#1 liveness/readiness 설정
# 수행 위치
$ pwd
/Users/yshmbid/Documents/home/github/Cloud/workspace/kubernetes/06.probe
# 1. 배포하기
$ kubectl apply -f deploy.yaml
deployment.apps/sk019-myfirst-api-server configured
$ kubectl get pod -n skala-practice | grep sk019
sk019-myfirst-api-server-7549dfbdb8-k87wp 1/1 Running 0 35m
sk019-myfirst-api-server-7b884558cb-5rsfv 0/1 Running 0 12s
sk019-pvc-handler-pod 1/1 Running 0 47m
- pod는 running 상태.
# 2. 포트포워딩
$ kubectl get svc -n skala-practice | grep sk019
sk019-myfirst-api-server ClusterIP 10.100.83.86 <none> 8080/TCP,8081/TCP 21h
$ kubectl port-forward svc/sk019-myfirst-api-server -n skala-practice 8080:8080
Forwarding from 127.0.0.1:8080 -> 8080
Forwarding from [::1]:8080 -> 8080
- http://localhost:8080/actuator/health 확인.
#
#2 테스트 - 수신 차단, 복구
- Postman에서 readiness 상태를 강제로 REFUSING_TRAFFIC 으로 바꿔보고 그때 Kubernetes가 pod을 엔드포인트에서 제거하는지 확인한다.
# 1. 수신 차단
# POST
https://backend.skala25a.project.skala-ai.com/sk019/api/probe
# Body (JSON):
{
"liveness": "CORRECT",
"readiness": "REFUSING_TRAFFIC"
}
# 2. 수신 복구
# 터미널에서
$ kubectl port-forward svc/sk019-myfirst-api-server 8080:8080 -n skala-practice
# POST
http://localhost:8080/api/probe
# Body (JSON):
{
"liveness": "CORRECT",
"readiness": "ACCEPTING_TRAFFIC"
}
# POST
https://backend.skala25a.project.skala-ai.com/sk019/api/probe
# Body (JSON):
{
"liveness": "BROKEN",
"readiness": "ACCEPTING_TRAFFIC"
}
$ kubectl get pod -n skala-practice
sk019-myfirst-api-server-cf9c78b74-s2dln 1/1 Running 3 (10m ago) 26m
- RESTARTS=3: liveness probe 실패(BROKEN)로 인해 kubelet이 컨테이너를 3회 재시작함
#
6. Blue/Green 배포 #
$ pwd
/Users/yshmbid/Documents/home/github/Cloud/workspace/kubernetes/00.container/02.python-v2.0
$ ./docker-build.sh
$ ./docker-push.sh
# 수행 위치
$ pwd
/Users/yshmbid/Documents/home/github/Cloud/workspace/kubernetes/07.blue-green
# Blue
$ kubectl apply -f ingress-blue.yaml
ingress.networking.k8s.io/sk019-myfirst-ingress created
# Blue - secret, ingress 확인
$ kubectl get secret | grep sk019
sk019-ingress-project-tls-cert kubernetes.io/tls 2 4m46s
$ kubectl get ingress | grep sk019
sk019-myfirst-ingress public-nginx sk019-ingress.skala25a.project.skala-ai.com a55bf7a790b8e4b26b67a129d3263385-1094816387.ap-northeast-2.elb.amazonaws.com 80, 443 109s
# Green
$ kubectl apply -f deploy-green.yaml
deployment.apps/sk019-myfirst-api-server-new created
$ kubectl apply -f ingress-green.yaml
ingress.networking.k8s.io/sk019-myfirst-ingress configured
$ kubectl apply -f service-green.yaml
service/sk019-myfirst-api-server-new created
# Green - service 확인
$ kubectl get service | grep sk019
sk019-myfirst-api-server ClusterIP 10.100.83.86 <none> 8080/TCP,8081/TCP 23h
sk019-myfirst-api-server-new ClusterIP 10.100.128.115 <none> 8080/TCP,8081/TCP 19m
- https://sk019-ingress.skala25a.project.skala-ai.com/api/users 이랑 https://sk019-ingress.skala25a.project.skala-ai.com/python/prometheus 에 접속해보면?
- 제대로 나온다.