# check-resources.yaml
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: check-resources
  annotations:
    policies.kyverno.io/title: Check Resources
    policies.kyverno.io/category: Resource Optimization
    policies.kyverno.io/severity: medium
    policies.kyverno.io/description: >-
      This policy checks if the resources requested by a pod controller
      (Deployment, StatefulSet, or DaemonSet) are within the bounds
      recommended by the Vertical Pod Autoscaler (VPA).
spec:
  validationFailureAction: Audit
  background: true
  admission: false
  rules:
  - name: memory
    match: &matchDef
      any:
      - resources:
          kinds:
          - Deployment
          - StatefulSet
          - DaemonSet
    exclude:
      any:
      - resources:
          selector:
            matchLabels:
              nirmata.io/resource-optimizer: "false"
      - resources:
          namespaces:
          - kube-system
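    # To opt a workload out of these checks, label it with
    # nirmata.io/resource-optimizer=false, for example (placeholder name):
    #   kubectl label deployment my-app nirmata.io/resource-optimizer=false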
    context: &ruleContextDef
    - name: vpa
      # Fetching a VPA by name returns an error when it does not exist,
      # so we list all VPAs and filter by the expected name instead.
      # See: https://github.com/kyverno/kyverno/issues/9723
      # Note that this logic assumes the VPA has the same name as the
      # pod controller, with the suffix "-kyverno".
      apiCall:
        urlPath: "/apis/autoscaling.k8s.io/v1/namespaces/{{request.namespace}}/verticalpodautoscalers/"
        jmesPath: "items[?metadata.name == '{{request.object.metadata.name}}-kyverno'] | @[0] || `{}`"
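    # Illustrative shape of the "vpa" context value for a Deployment named
    # "my-app" (placeholder names and values, not from this repo):
    #   metadata: {name: my-app-kyverno, creationTimestamp: "..."}
    #   status:
    #     recommendation:
    #       containerRecommendations:
    #       - containerName: my-app
    #         lowerBound: {cpu: 25m, memory: 64Mi}
    #         target: {cpu: 50m, memory: 128Mi}
    #         upperBound: {cpu: 200m, memory: 256Mi}
    # With no matching VPA it resolves to `{}`, which the preconditions
    # below filter out.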
    preconditions: &pre
      all:
      - key: "{{ vpa.status || `{}` }}"
        operator: NotEquals
        value: {}
      - key: "{{ time_since('', '{{vpa.metadata.creationTimestamp}}', '') }}"
        operator: GreaterThan
        value: 0m # Set to >24h for production
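    # time_since('', t, '') returns the elapsed duration from timestamp t to
    # now (empty layout and end arguments default to RFC 3339 and the current
    # time), so the second precondition gates on the age of the VPA, giving
    # its recommendations time to stabilize before they are reported.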
    validate:
      foreach:
      - list: vpa.status.recommendation.containerRecommendations
        context:
        - name: ctnr
          variable: &ctnrVariable
            value: "{{ request.object.spec.template.spec.containers[?name == '{{element.containerName}}'] | @[0] }}"
        - name: memRequest
          variable:
            value: "{{ ctnr.resources.requests.memory || `0` }}"
        deny:
          conditions:
            any:
            - key: "{{ multiply(memRequest, `0.80`) }}"
              operator: GreaterThan
              value: "{{ element.upperBound.memory }}"
              message: "overprovisioned resources: reduce memory.request from {{memRequest}} to {{ divide(element.target.memory, `1048576`) }}Mi"
            - key: "{{ multiply(memRequest, `1.20`) }}"
              operator: LessThan
              value: "{{ element.lowerBound.memory }}"
              message: "underprovisioned resources: increase memory.request from {{memRequest}} to {{ divide(element.target.memory, `1048576`) }}Mi"
  # Using multiple rules to report each violation separately.
  # Otherwise, processing stops at the first violation.
  # See: https://github.com/kyverno/kyverno/issues/8792
  - name: cpu
    match: *matchDef
    context: *ruleContextDef
    preconditions: *pre
    validate:
      foreach:
      - list: vpa.status.recommendation.containerRecommendations
        context:
        - name: ctnr
          variable: *ctnrVariable
        - name: cpuRequest
          variable:
            value: "{{ ctnr.resources.requests.cpu || `0` }}"
        deny:
          conditions:
            any:
            - key: "{{ multiply(cpuRequest, `0.80`) }}"
              operator: GreaterThan
              value: "{{ element.upperBound.cpu }}"
              message: "overprovisioned resources: reduce cpu.request from {{cpuRequest}} to {{element.target.cpu}}"
            - key: "{{ multiply(cpuRequest, `1.20`) }}"
              operator: LessThan
              value: "{{ element.lowerBound.cpu }}"
              message: "underprovisioned resources: increase cpu.request from {{cpuRequest}} to over {{element.target.cpu}}"