Resource Constraints for a Namespace
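A LimitRange object attached to a namespace gives every container created there default CPU and memory requests and limits, so workloads that declare none still arrive at the scheduler with sensible bounds. The walkthrough below creates a namespace, applies a LimitRange, and verifies the effect on a deployed pod.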
# Create a namespace.
ps0107@k8smaster1:~$ kubectl create namespace limit-test
namespace/limit-test created
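# For reference, the same namespace could also be created declaratively with a
# small manifest and "kubectl create -f" (equivalent to the command above):
apiVersion: v1
kind: Namespace
metadata:
  name: limit-test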
# Verify the namespace was created.
ps0107@k8smaster1:~$ kubectl get namespaces
NAME              STATUS   AGE
default           Active   25h
kube-node-lease   Active   25h
kube-public       Active   25h
kube-system       Active   25h
limit-test        Active   8s
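# Optional: to avoid passing --namespace on every command, newer kubectl
# versions can switch the current context's default namespace:
#   kubectl config set-context --current --namespace=limit-test
# The rest of this walkthrough keeps the explicit flags instead.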
# Create a yaml file to apply a LimitRange to the namespace.
# The kind is LimitRange.
# - limits > default : default limit, applied to containers that set none
# - limits > defaultRequest : default request, applied to containers that set none
ps0107@k8smaster1:~$ vi resource-range.yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: resource-range
spec:
  limits:
  - default:
      cpu: 1
      memory: 500Mi
    defaultRequest:
      cpu: 0.5
      memory: 100Mi
    type: Container
# Create the LimitRange object in the new namespace.
ps0107@k8smaster1:~$ kubectl --namespace=limit-test create -f resource-range.yaml
limitrange/resource-range created
# Nothing shows up because the current context is the default namespace; the namespace must be specified.
ps0107@k8smaster1:~$ kubectl get LimitRange
No resources found.
# Check again, this time across all namespaces.
ps0107@k8smaster1:~$ kubectl get LimitRange --all-namespaces
NAMESPACE    NAME             CREATED AT
limit-test   resource-range   2020-01-29T09:52:24Z
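# To inspect the defaults the LimitRange will apply, describe it (prints a
# table with Type, Resource, Min, Max, Default Request and Default Limit):
#   kubectl -n limit-test describe limitrange resource-range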
# Deploy a stress app into the new namespace.
ps0107@k8smaster1:~$ kubectl -n limit-test create deployment limited-stress --image vish/stress
deployment.apps/limited-stress created
# Verify the deployment was created.
ps0107@k8smaster1:~$ kubectl get deployments --all-namespaces
NAMESPACE     NAME             READY   UP-TO-DATE   AVAILABLE   AGE
default       stress           1/1     1            1           14h
kube-system   calico-typha     0/0     0            0           25h
kube-system   coredns          2/2     2            2           25h
limit-test    limited-stress   1/1     1            1           13s
# A single pod has been created inside the new namespace.
ps0107@k8smaster1:~$ kubectl -n limit-test get pods
NAME                              READY   STATUS    RESTARTS   AGE
limited-stress-7db5f4dbd9-zfzkc   1/1     Running   0          38s
# View the pod's definition in yaml form.
# The resources section shows that requests and limits were assigned automatically.
# The LimitRange on the namespace was applied to the pod at creation time by
# the LimitRanger admission plugin (see the annotation below).
ps0107@k8smaster1:~$ kubectl -n limit-test get pod limited-stress-7db5f4dbd9-zfzkc -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    cni.projectcalico.org/podIP: 192.168.1.10/32
    kubernetes.io/limit-ranger: 'LimitRanger plugin set: cpu, memory request for container
      stress; cpu, memory limit for container stress'
  creationTimestamp: "2020-01-29T09:54:22Z"
  generateName: limited-stress-7db5f4dbd9-
  labels:
    app: limited-stress
    pod-template-hash: 7db5f4dbd9
  name: limited-stress-7db5f4dbd9-zfzkc
  namespace: limit-test
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: limited-stress-7db5f4dbd9
    uid: 40b486a3-264a-4c40-8818-2961fe1dc19c
  resourceVersion: "123051"
  selfLink: /api/v1/namespaces/limit-test/pods/limited-stress-7db5f4dbd9-zfzkc
  uid: 6f051ba8-21ac-4603-9153-6c0ac0167912
spec:
  containers:
  - image: vish/stress
    imagePullPolicy: Always
    name: stress
    resources:
      limits:
        cpu: "1"
        memory: 500Mi
      requests:
        cpu: 500m
        memory: 100Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-mq9xm
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8sworker1
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-mq9xm
    secret:
      defaultMode: 420
      secretName: default-token-mq9xm
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2020-01-29T09:54:22Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2020-01-29T09:54:26Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2020-01-29T09:54:26Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2020-01-29T09:54:22Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://466cc651ca0cdc06f61c4240c77f5e5ee0e669a5396d6a3a155fc5d66d643f91
    image: vish/stress:latest
    imageID: docker-pullable://vish/stress@sha256:b6456a3df6db5e063e1783153627947484a3db387be99e49708c70a9a15e7177
    lastState: {}
    name: stress
    ready: true
    restartCount: 0
    state:
      running:
        startedAt: "2020-01-29T09:54:25Z"
  hostIP: 10.146.0.4
  phase: Running
  podIP: 192.168.1.10
  qosClass: Burstable
  startTime: "2020-01-29T09:54:22Z"
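# To pull out just the injected resources stanza instead of the full object,
# a jsonpath query works as well (a minimal example; the pod name is the one
# listed above):
#   kubectl -n limit-test get pod limited-stress-7db5f4dbd9-zfzkc \
#     -o jsonpath='{.spec.containers[0].resources}'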
# To create a new deployment in the limit-test namespace, copy the existing yaml file and edit it.
ps0107@k8smaster1:~$ cp stress.yaml stress2.yaml
# Edit the yaml file to specify the namespace.
ps0107@k8smaster1:~$ vi stress2.yaml
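# The edited file is not shown in the transcript. A minimal sketch of what
# stress2.yaml might contain, assuming stress.yaml from the earlier exercise
# defines a single vish/stress container with explicit resource settings
# (the resource values and args below are illustrative, not the lab's actual file):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stress
  namespace: limit-test        # <- the line added for this exercise
spec:
  replicas: 1
  selector:
    matchLabels:
      app: stress
  template:
    metadata:
      labels:
        app: stress
    spec:
      containers:
      - name: stress
        image: vish/stress
        # Explicit per-container values like these take precedence over the
        # namespace LimitRange defaults.
        resources:
          limits:
            cpu: "1"
            memory: "2Gi"
          requests:
            cpu: "0.5"
            memory: "500Mi"
        args:
        - -cpus
        - "2"
        - -mem-total
        - "950Mi"
        - -mem-alloc-size
        - "100Mi"
        - -mem-alloc-sleep
        - "1s"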
# Run the top command in another terminal to monitor the nodes.
# When the new deployment is created, it will most likely be scheduled onto a node not already under stress.
ps0107@k8smaster1:~$ kubectl create -f stress2.yaml
deployment.extensions/stress created
# Check the deployments in every namespace.
ps0107@k8smaster1:~$ kubectl get deployment --all-namespaces
NAMESPACE     NAME             READY   UP-TO-DATE   AVAILABLE   AGE
default       stress           1/1     1            1           14h
kube-system   calico-typha     0/0     0            0           26h
kube-system   coredns          2/2     2            2           26h
limit-test    limited-stress   1/1     1            1           49m
limit-test    stress           1/1     1            1           28s
# The top output shows that, once memory is fully allocated, both stress deployments consume roughly the same amount of resources.
# Per-deployment settings override the namespace-wide defaults.
# Each node shows about one full processor and roughly 12% of memory in use on these 8G systems.
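# If metrics-server is installed in the cluster, per-pod usage can also be
# checked from kubectl instead of node-level top (optional):
#   kubectl top pods -n limit-test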
# Delete the stress deployments to reclaim system resources.
ps0107@k8smaster1:~$ kubectl -n limit-test delete deployment stress
deployment.extensions "stress" deleted
ps0107@k8smaster1:~$ kubectl delete deployment stress
deployment.extensions "stress" deleted
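# Optional cleanup, not part of the original steps: deleting the namespace
# also removes the LimitRange and anything else left inside it.
#   kubectl delete namespace limit-test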