
Kubernetes Notes (Part 4)

I. Pod Scheduling Strategies

1. Node-Based Scheduling

spec->nodeName

[root@master ~]# vim myhttp.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: myhttp
spec:
  nodeName: node-0001     # schedule onto a node by name
  containers:
  - name: apache
    image: myos:httpd

[root@master ~]# kubectl apply -f myhttp.yaml  

[root@master ~]# kubectl get pods -o wide
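
Because nodeName bypasses the scheduler and binds the Pod directly to the named node, the placement can also be confirmed from the Pod spec itself. A minimal check (the jsonpath expression is just one way to read the field):

# Print the node the Pod was bound to
[root@master ~]# kubectl get pod myhttp -o jsonpath='{.spec.nodeName}{"\n"}'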

2. Label Management

1) Viewing Labels

--show-labels

[root@master ~]# kubectl get pods --show-labels 


[root@master ~]# kubectl get namespaces --show-labels 


[root@master ~]# kubectl get nodes --show-labels 

2) Filtering by Label

Append -l <label> to a query command to filter by label.

# Filter resource objects by label
[root@master ~]# kubectl get nodes -l kubernetes.io/hostname=master
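
Label selectors are not limited to a single equality match; equality- and set-based expressions can be combined. A few illustrative queries (the label keys and values shown are only examples):

# Require several labels at once (all must match)
[root@master ~]# kubectl get pods -l app=apache,version=v1
# Set-based selector
[root@master ~]# kubectl get pods -l 'app in (apache,nginx)'
# Negative match
[root@master ~]# kubectl get pods -l 'app!=apache'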

3) Adding Labels

kubectl label <resource> <name> <key>=<value>

[root@master ~]# kubectl label pod myhttp app=apache

[root@master ~]# kubectl get pods --show-labels 

4) Removing Labels

kubectl label <resource> <name> <key>-   (the trailing hyphen removes the label)

[root@master ~]# kubectl label pod myhttp app-

[root@master ~]# kubectl get pods --show-labels 
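
Changing the value of a label that already exists requires the --overwrite flag; a quick sketch (app=nginx is only an example value):

[root@master ~]# kubectl label pod myhttp app=nginx --overwrite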

5) Labels in Resource Files

metadata->labels-> key-value pairs

[root@master ~]# vim myhttp.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: myhttp
  labels:               # declare labels
    app: apache         # label key-value pair
spec:
  containers:
  - name: apache
    image: myos:httpd

[root@master ~]# kubectl delete pods myhttp

[root@master ~]# kubectl apply -f myhttp.yaml  

[root@master ~]# kubectl get pods --show-labels 

3. Label-Based Scheduling

[root@master ~]# kubectl get nodes node-0002 --show-labels 


[root@master ~]# vim myhttp.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: myhttp
  labels:
    app: apache
spec:
  nodeSelector:                        # schedule based on node labels
    kubernetes.io/hostname: node-0002  # node label
  containers:
  - name: apache
    image: myos:httpd

[root@master ~]# kubectl delete pods myhttp 

[root@master ~]# kubectl apply -f myhttp.yaml  

[root@master ~]# kubectl get pods -l app=apache -o wide
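
Unlike nodeName, nodeSelector still goes through the scheduler: if no node carries the requested label, the Pod stays Pending. The scheduler's decision (or the failure reason) shows up in the Pod's events:

# Check the Events section at the end of the output
[root@master ~]# kubectl describe pod myhttp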

4. Container Scheduling

[root@master ~]# kubectl label nodes node-0002 node-0003 disktype=ssd
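
The -L flag prints a label as an extra column, which is a quick way to confirm which nodes now carry disktype=ssd:

[root@master ~]# kubectl get nodes -L disktype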

[root@master ~]# vim myhttp.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: myhttp
  labels:
    app: apache
spec:
  nodeSelector:
    disktype: ssd
  containers:
  - name: apache
    image: myos:httpd

[root@master ~]# sed "s,myhttp,web1," myhttp.yaml |kubectl apply -f -
[root@master ~]# sed "s,myhttp,web2," myhttp.yaml |kubectl apply -f -
[root@master ~]# sed "s,myhttp,web3," myhttp.yaml |kubectl apply -f -
[root@master ~]# sed "s,myhttp,web4," myhttp.yaml |kubectl apply -f -
[root@master ~]# sed "s,myhttp,web5," myhttp.yaml |kubectl apply -f -

[root@master ~]# kubectl get pods -o wide

1) Clean up the lab configuration (delete the Pods by label, then remove the node label)

[root@master ~]# kubectl delete pod -l app=apache


[root@master ~]# kubectl label nodes node-0002 node-0003 disktype-

II. Pod Resource Requests

1. Resource Object File

[root@master ~]# vim minpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: minpod
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]

2. Memory Requests

spec->containers->resources->requests->memory

[root@master ~]# vim minpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: minpod
spec:
  terminationGracePeriodSeconds: 0
  nodeSelector:                        # pin the Pod to a node
    kubernetes.io/hostname: node-0003  # create on node-0003
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:               # resource policy
      requests:              # requests (minimum guaranteed resources)
        memory: 1100Mi       # memory request

# Verify the request policy
[root@master ~]# for i in app{1..5};do sed "s,minpod,${i}," minpod.yaml;done |kubectl apply -f -
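
With a 1100Mi request per Pod, node-0003 can only accept as many Pods as its allocatable memory allows; the rest typically stay Pending. The node's allocatable resources and current reservations can be inspected with describe (the grep is only a convenience):

[root@master ~]# kubectl get pods -o wide
[root@master ~]# kubectl describe node node-0003 | grep -A 6 "Allocated resources"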

# Clean up the lab configuration
[root@master ~]# kubectl delete pod --all

3. CPU Requests

spec->containers->resources->requests->cpu

[root@master ~]# vim minpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: minpod
spec:
  terminationGracePeriodSeconds: 0
  nodeSelector:
    kubernetes.io/hostname: node-0003
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      requests:
        cpu: 800m          # CPU request

# Verify the request policy
[root@master ~]# for i in app{1..5};do sed "s,minpod,${i}," minpod.yaml;done |kubectl apply -f -

[root@master ~]# kubectl get pods
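
Requests only affect scheduling, not runtime usage: Pods whose 800m CPU request cannot be satisfied on node-0003 remain Pending. They can be listed directly with a field selector:

[root@master ~]# kubectl get pods --field-selector=status.phase=Pending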


# Clean up the lab configuration
[root@master ~]# kubectl delete pod --all

4. Combined Resource Requests

spec->containers->resources->requests->xxx

[root@master ~]# vim minpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: minpod
spec:
  terminationGracePeriodSeconds: 0
  nodeSelector:
    kubernetes.io/hostname: node-0003
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      requests:
        cpu: 800m          # CPU request
        memory: 1100Mi     # memory request
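
This manifest is applied and verified the same way as the previous ones; a minimal sketch:

[root@master ~]# kubectl apply -f minpod.yaml
[root@master ~]# kubectl describe pod minpod | grep -A 3 Requests
# Clean up before moving on
[root@master ~]# kubectl delete pod minpod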

III. Pod Resource Limits

1. Limiting Memory and CPU

spec->containers->resources->limits->cpu/memory, etc.

# Create the resource limit object file
[root@master ~]# vim maxpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: maxpod
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      limits:
        cpu: 800m
        memory: 2000Mi

[root@master ~]# kubectl apply -f maxpod.yaml 

2. Verifying the Memory Limit

[root@master ~]# kubectl cp memtest.py maxpod:/usr/bin/
[root@master ~]# kubectl exec -it maxpod -- /bin/bash
[root@maxpod /]# memtest.py 2500     # 2500 exceeds the 2000Mi limit, so the allocation should be killed

[root@maxpod /]# memtest.py 1500     # 1500 is within the limit and should succeed

3. Verifying the CPU Limit

[root@master ~]# kubectl exec -it maxpod -- ps aux

[root@master ~]# kubectl top pods
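
Because the container's main process is a busy loop, kubectl top should report CPU usage capped near the 800m limit rather than a full core. Starting a second busy loop inside the container should not push the reported usage past the limit, since the whole container cgroup is throttled (sketch; assumes the myos:8.5 image provides coreutils' timeout):

[root@master ~]# kubectl exec maxpod -- timeout 30 awk 'BEGIN{while(1){}}' &
[root@master ~]# kubectl top pods maxpod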


# Clean up the lab Pod
[root@master ~]# kubectl delete pod maxpod

IV. Global Resource Management

1.LimitRange

1) Default Request/Limit Policy

spec->limits->default

spec->limits->defaultRequest

# Create a namespace
[root@master ~]# kubectl create namespace work
namespace/work created
# Set the default requests and limits
[root@master ~]# vim limit.yaml
---
apiVersion: v1
kind: LimitRange
metadata:
  name: mylimit 
  namespace: work   
spec:
  limits:               
  - type: Container     
    default:            
      cpu: 300m 
      memory: 500Mi     
    defaultRequest:
      cpu: 8m  
      memory: 8Mi 

[root@master ~]# kubectl -n work apply -f limit.yaml
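
The effective defaults can be reviewed at any time with describe:

[root@master ~]# kubectl -n work describe limitrange mylimit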

Verify the default policy

[root@master ~]# vim maxpod.yaml
---
kind: Pod
apiVersion: v1
metadata:
  name: maxpod
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]

[root@master ~]# kubectl -n work apply -f maxpod.yaml

[root@master ~]# kubectl -n work describe pod maxpod
... ...
    Limits:
      cpu:     300m
      memory:  500Mi
    Requests:
      cpu:     10m
      memory:  8Mi
... ...

[root@master ~]# kubectl -n work top pods

2) Custom Resource Values

spec->containers->resources->requests->xx

spec->containers->resources->limits->xx

[root@master ~]# vim maxpod.yaml
---
kind: Pod
apiVersion: v1
metadata:
  name: maxpod
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      requests:
        cpu: 10m
        memory: 10Mi
      limits:
        cpu: 1100m
        memory: 2000Mi

[root@master ~]# kubectl -n work delete -f maxpod.yaml 

[root@master ~]# kubectl -n work apply -f maxpod.yaml

[root@master ~]# kubectl -n work describe pod maxpod
... ...
    Limits:
      cpu:     1100m
      memory:  2000Mi
    Requests:
      cpu:     10m
      memory:  10Mi
... ...
[root@master ~]# kubectl -n work top pods maxpod

3) Request/Limit Ranges (max/min)

spec->limits->max->cpu/memory, etc.

spec->limits->min->cpu/memory, etc.

[root@master ~]# vim limit.yaml 
---
apiVersion: v1
kind: LimitRange
metadata:
  name: mylimit
  namespace: work
spec:
  limits:               
  - type: Container     
    default:            
      cpu: 300m 
      memory: 500Mi     
    defaultRequest:
      cpu: 8m  
      memory: 8Mi 
    max:
      cpu: 800m
      memory: 1000Mi
    min:
      cpu: 2m
      memory: 8Mi

[root@master ~]# kubectl -n work apply -f limit.yaml 

[root@master ~]# kubectl -n work delete -f maxpod.yaml 

# the current maxpod.yaml sets limits of 1100m CPU / 2000Mi memory, exceeding the max above, so this apply should be rejected
[root@master ~]# kubectl -n work apply -f maxpod.yaml

4) Multi-Container Resource Limits

spec->containers->resources->requests->xxx

spec->containers->resources->limits->xxx

[root@master ~]# vim maxpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: maxpod
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      requests:
        cpu: 10m
        memory: 10Mi
      limits:
        cpu: 800m
        memory: 1000Mi
  - name: linux1
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      requests:
        cpu: 10m
        memory: 10Mi
      limits:
        cpu: 800m
        memory: 1000Mi

[root@master ~]# kubectl -n work apply -f maxpod.yaml 

[root@master ~]# kubectl -n work get pods

[root@master ~]# kubectl -n work top pods maxpod

5) Pod-Level Resource Limits

[root@master ~]# vim limit.yaml 
---
apiVersion: v1
kind: LimitRange
metadata:
  name: mylimit
  namespace: work
spec:
  limits:               
  - type: Container     
    default:            
      cpu: 300m 
      memory: 500Mi     
    defaultRequest:
      cpu: 8m  
      memory: 8Mi 
    max:
      cpu: 800m
      memory: 1000Mi
    min:
      cpu: 2m
      memory: 8Mi
  - type: Pod
    max:
      cpu: 1200m
      memory: 1200Mi
    min:
      cpu: 2m
      memory: 8Mi

[root@master ~]# kubectl -n work apply -f limit.yaml

[root@master ~]# kubectl -n work delete -f maxpod.yaml 

# the two containers total 1600m CPU / 2000Mi memory in limits, exceeding the Pod max of 1200m/1200Mi, so this apply should be rejected
[root@master ~]# kubectl -n work apply -f maxpod.yaml

Multiple Pods consuming node resources

[root@master ~]# vim maxpod.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: maxpod
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: linux
    image: myos:8.5
    command: ["awk", "BEGIN{while(1){}}"]
    resources:
      requests:
        cpu: 10m
        memory: 10Mi
      limits:
        cpu: 800m
        memory: 1000Mi

# Creating too many Pods can also exhaust node resources
[root@master ~]# for i in app{1..9};do sed "s,maxpod,${i}," maxpod.yaml ;done |kubectl -n work apply -f -

# After the Pods are created, check node resource usage
[root@master ~]# kubectl top nodes

# Clean up the lab configuration
[root@master ~]# kubectl -n work delete pods --all

2.ResourceQuota

1) Namespace-Wide Quota Policy

spec->hard->requests.cpu

spec->hard->requests.memory

spec->hard->limits.cpu

spec->hard->limits.memory

spec->hard->pods-><count>

[root@master ~]# vim quota.yaml
---
apiVersion: v1
kind: ResourceQuota
metadata:
  name: myquota
  namespace: work
spec:
  hard:
    requests.cpu: 1000m
    requests.memory: 2000Mi
    limits.cpu: 5000m
    limits.memory: 8Gi
    pods: 3

[root@master ~]# kubectl -n work apply -f quota.yaml 
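
The quota's hard limits and current usage can be inspected with describe:

[root@master ~]# kubectl -n work describe resourcequota myquota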

Verify the quota

[root@master ~]# for i in app{1..5};do sed "s,maxpod,${i}," maxpod.yaml ;done |kubectl -n work apply -f -
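
With pods: 3 in the quota, only three of the five manifests should be admitted; the remaining applies are rejected with a quota-exceeded error. Usage against the quota can be rechecked afterwards:

[root@master ~]# kubectl -n work get pods
[root@master ~]# kubectl -n work describe resourcequota myquota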


# Delete the lab Pods and the limit/quota rules
[root@master ~]# kubectl -n work delete pods --all

[root@master ~]# kubectl -n work delete -f limit.yaml -f quota.yaml

[root@master ~]# kubectl delete namespace work

Original source: https://blog.csdn.net/qq_42750608/article/details/142686289
