apiVersion: v1
kind: ConfigMap
metadata:
  name: vpcnet-configuration
  namespace: kube-system
data:
  config.toml: |
    # Indicates whether we should add a k8s-vpcnet/no-free-ips:NoSchedule
    # taint to the node when the IP address allocation pool is empty.
    taint_when_no_ips = true
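    # Illustrative example of what this looks like on an affected node (the
    # exact taint value, if any, is up to the controller):
    #   $ kubectl describe node <node-name> | grep Taints
    #   Taints: k8s-vpcnet/no-free-ips:NoSchedule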
    # Makes the allocator delete the pod from the Kubernetes API when a pod IP
    # allocation request fails (running pods remain untouched). This can help
    # recover from the situation, but assumes that the user is OK with this
    # happening (and has a mechanism in place for the pod to be re-created).
    delete_pod_when_no_ips = true
    [network]
    # The range that pods will run in.
    cluster_cidr = "10.0.0.0/18"
    # The service range for the network. This is needed because we have to
    # explicitly route it via the ENI, and there doesn't seem to be a way to
    # easily infer or discover it.
    service_cidr = "100.64.0.0/10"
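    # Note: this should match the service ClusterIP range the cluster was set
    # up with (e.g. the kube-apiserver's --service-cluster-ip-range flag).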
    # Whether we should masquerade non-cluster traffic from pods out of the
    # instance's main interface. Generally this is desired, unless you have a
    # NAT Gateway set up on the subnet the pods reside in.
    pod_ip_masq = true
    # If we're masquerading traffic, additional subnets that should be routed
    # via the VPC subnet's gateway rather than masqueraded from the host.
    # Generally these will be networks you peer with, etc.
    vpc_routed = [ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16" ]
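    # Rough example of the resulting behaviour, assuming the defaults above:
    # a pod talking to an internet address (say 8.8.8.8) is masqueraded to the
    # instance's primary IP, while a pod talking to a peered range such as
    # 172.16.0.0/12 keeps its pod IP and is routed via the VPC subnet's
    # gateway.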
    # Where we should redirect traffic destined for the instance metadata
    # endpoint. Calls to it will be rewritten to the machine's main IP on the
    # specified port. Intended for use with https://github.com/jtblin/kube2iam .
    # If this is not set or empty, all traffic from pods to the instance
    # metadata API will be dropped - it represents a security risk in most
    # cases.
    instance_metadata_redirect_port = 8181
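    # Sketch of the intended flow, assuming kube2iam listens on the node at
    # port 8181 (e.g. started with --app-port=8181): a pod request to
    # http://169.254.169.254/... is rewritten to <node primary IP>:8181, so
    # kube2iam can answer it with the pod's role credentials.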
    [logging]
    # The verbosity level for the CNI plugin. Set higher to begin with, to
    # make sure we have diagnostic info.
    cni_v_level = 2
---
apiVersion: extensions/v1beta1
# Run as a DaemonSet - this avoids the race where networking isn't ready,
# which would otherwise block this pod from coming up to configure
# networking.
kind: DaemonSet
metadata:
  name: eni-controller
  namespace: kube-system
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: eni-controller
spec:
  template:
    metadata:
      labels:
        app: eni-controller
    spec:
      serviceAccountName: eni-controller
      hostNetwork: true
      containers:
      - name: eni-controller
        image: lstoll/eni-controller:{{.VersionTag}}
        imagePullPolicy: IfNotPresent
        args:
        - --v=2
        resources:
          # TODO - assess these limits
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 300Mi
        volumeMounts:
        - name: config-volume
          mountPath: /etc/vpcnet
      volumes:
      - name: config-volume
        configMap:
          name: vpcnet-configuration
      # Schedule on the master. Because this is critical for networking, we
      # can't bring up any pods until it is running. To avoid every node
      # needing IAM privileges to manage interfaces, we run it in a
      # privileged context on the master, ignoring network setup restrictions.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: "node-role.kubernetes.io/master"
        effect: NoSchedule
      - key: "k8s-vpcnet/no-interface-configured"
        effect: NoSchedule
      - key: "k8s-vpcnet/no-free-ips"
        effect: NoSchedule
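# A quick, purely illustrative way to confirm the controller only landed on
# master nodes:
#   kubectl -n kube-system get pods -l app=eni-controller -o wide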
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: eni-controller
  name: eni-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: eni-controller
  namespace: kube-system
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: eni-controller
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - watch
  - list
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: eni-controller
  namespace: kube-system
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: eni-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: eni-controller
subjects:
- kind: ServiceAccount
  name: eni-controller
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: vpcnet-daemon
  namespace: kube-system
  labels:
    app: vpcnet-daemon
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: vpcnet-daemon
    spec:
      hostNetwork: true
      serviceAccountName: vpcnet-daemon
      containers:
      - name: vpcnet-daemon
        image: lstoll/vpcnet-daemon:{{.VersionTag}}
        imagePullPolicy: IfNotPresent
        args:
        - -node-name=$(CURRENT_NODE_NAME)
        - --v=4
        env:
        - name: CURRENT_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          # Privileged so we can manage the node's network interfaces.
          privileged: true
        volumeMounts:
        - mountPath: /var/lib/cni
          name: cni-working
        - mountPath: /etc/cni/net.d
          name: cni-config
        - mountPath: /opt/cni/bin
          name: cni-bin
        - mountPath: /etc/vpcnet
          name: config-volume
        - mountPath: /etc/iproute2
          name: etc-iproute2
      volumes:
      - name: cni-working
        hostPath:
          path: /var/lib/cni
      - name: cni-config
        hostPath:
          path: /etc/cni/net.d
      - name: cni-bin
        hostPath:
          path: /opt/cni/bin
      - name: etc-iproute2
        hostPath:
          path: /etc/iproute2
      - name: config-volume
        configMap:
          name: vpcnet-configuration
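      # These host paths are the standard kubelet CNI locations; the daemon is
      # expected to install its CNI plugin binary under /opt/cni/bin and write
      # its network config under /etc/cni/net.d (exact file names are up to
      # the daemon).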
      tolerations:
      # We can work without a configured network interface.
      - key: "k8s-vpcnet/no-interface-configured"
        effect: NoSchedule
      # We should run on masters too.
      - key: "node-role.kubernetes.io/master"
        effect: NoSchedule
      # It's fine if there are no free IPs.
      - key: "k8s-vpcnet/no-free-ips"
        effect: NoSchedule
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: vpcnet-daemon
  name: vpcnet-daemon
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: vpcnet-daemon
  namespace: kube-system
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: vpcnet-daemon
rules:
# The daemon needs to be able to watch and manipulate nodes to read
# interface configuration and to interact with taints.
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - watch
  - list
  - get
  - update
# The daemon needs to be able to interact with pods to delete oversubscribed
# pods (and to enforce policy in the future).
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - watch
  - list
  - get
  - update
  - delete
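# Once the ClusterRoleBinding below is applied, an illustrative spot-check of
# the grant could look like:
#   kubectl auth can-i delete pods \
#     --as=system:serviceaccount:kube-system:vpcnet-daemon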
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: vpcnet-daemon
  namespace: kube-system
  labels:
    # k8s-addon: cluster-autoscaler.addons.k8s.io
    app: vpcnet-daemon
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: vpcnet-daemon
subjects:
- kind: ServiceAccount
  name: vpcnet-daemon
  namespace: kube-system
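# Usage sketch: the {{.VersionTag}} Go-template placeholders above are meant
# to be rendered by the repo's own tooling; the tag below is only an example.
#   sed 's/{{.VersionTag}}/v0.1.0/g' manifest.yaml | kubectl apply -f -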