# Factory/metallb-chart/charts/frr-k8s/templates/controller.yaml

# FRR expects these files to be owned by frr:frr on startup.
# Shipping them in a ConfigMap lets us adjust FRR's behavior, for example by enabling more daemons on startup.
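# (A hypothetical illustration: flipping `ospfd=no` to `ospfd=yes` in the daemons
# file below would make FRR start ospfd alongside bgpd and bfdd, without touching the image.)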
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "frrk8s.fullname" . }}-frr-startup
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "frrk8s.labels" . | nindent 4 }}
app.kubernetes.io/component: frr-k8s
data:
daemons: |
# This file tells the frr package which daemons to start.
#
# Sample configurations for these daemons can be found in
# /usr/share/doc/frr/examples/.
#
# ATTENTION:
#
# When activating a daemon for the first time, a config file, even if it is
# empty, has to be present *and* be owned by the user and group "frr", else
# the daemon will not be started by /etc/init.d/frr. The permissions should
# be u=rw,g=r,o=.
# When using "vtysh" such a config file is also needed. It should be owned by
# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too.
#
# The watchfrr and zebra daemons are always started.
#
bgpd=yes
ospfd=no
ospf6d=no
ripd=no
ripngd=no
isisd=no
pimd=no
ldpd=no
nhrpd=no
eigrpd=no
babeld=no
sharpd=no
pbrd=no
bfdd=yes
fabricd=no
vrrpd=no
#
# If this option is set the /etc/init.d/frr script automatically loads
# the config via "vtysh -b" when the servers are started.
# Check /etc/pam.d/frr if you intend to use "vtysh"!
#
vtysh_enable=yes
zebra_options=" -A 127.0.0.1 -s 90000000"
bgpd_options=" -A 127.0.0.1"
ospfd_options=" -A 127.0.0.1"
ospf6d_options=" -A ::1"
ripd_options=" -A 127.0.0.1"
ripngd_options=" -A ::1"
isisd_options=" -A 127.0.0.1"
pimd_options=" -A 127.0.0.1"
ldpd_options=" -A 127.0.0.1"
nhrpd_options=" -A 127.0.0.1"
eigrpd_options=" -A 127.0.0.1"
babeld_options=" -A 127.0.0.1"
sharpd_options=" -A 127.0.0.1"
pbrd_options=" -A 127.0.0.1"
staticd_options="-A 127.0.0.1"
bfdd_options=" -A 127.0.0.1"
fabricd_options="-A 127.0.0.1"
vrrpd_options=" -A 127.0.0.1"
# configuration profile
#
#frr_profile="traditional"
#frr_profile="datacenter"
#
# This is the maximum number of FD's that will be available.
# Upon startup this is read by the control files and ulimit
# is called. Uncomment and use a reasonable value for your
# setup if you are expecting a large number of peers in
# say BGP.
#MAX_FDS=1024
# The list of daemons to watch is automatically generated by the init script.
#watchfrr_options=""
# for debugging purposes, you can specify a "wrap" command to start instead
# of starting the daemon directly, e.g. to use valgrind on ospfd:
# ospfd_wrap="/usr/bin/valgrind"
# or you can use "all_wrap" for all daemons, e.g. to use perf record:
# all_wrap="/usr/bin/perf record --call-graph -"
# the normal daemon command is added to this at the end.
vtysh.conf: |+
service integrated-vtysh-config
frr.conf: |+
! This file gets overridden the first time the controller renders a config,
! so anything configured here is only temporary.
frr version 8.0
frr defaults traditional
hostname Router
line vty
log file /etc/frr/frr.log informational
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "frrk8s.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "frrk8s.labels" . | nindent 4 }}
app.kubernetes.io/component: frr-k8s
{{- range $key, $value := .Values.frrk8s.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.frrk8s.updateStrategy }}
updateStrategy: {{- toYaml .Values.frrk8s.updateStrategy | nindent 4 }}
{{- end }}
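# A sketch of a values.yaml override for the optional updateStrategy above; the
# fields below are the standard DaemonSetUpdateStrategy fields, and the numbers
# are purely illustrative, not chart defaults:
#
#   frrk8s:
#     updateStrategy:
#       type: RollingUpdate
#       rollingUpdate:
#         maxUnavailable: 1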
selector:
matchLabels:
{{- include "frrk8s.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: frr-k8s
template:
metadata:
labels:
{{- include "frrk8s.selectorLabels" . | nindent 8 }}
app.kubernetes.io/component: frr-k8s
{{- range $key, $value := .Values.frrk8s.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.frrk8s.runtimeClassName }}
runtimeClassName: {{ .Values.frrk8s.runtimeClassName }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "frrk8s.serviceAccountName" . }}
terminationGracePeriodSeconds: 0
hostNetwork: true
volumes:
- name: frr-sockets
emptyDir: {}
- name: frr-startup
configMap:
name: {{ template "frrk8s.fullname" . }}-frr-startup
- name: frr-conf
emptyDir: {}
- name: reloader
emptyDir: {}
- name: metrics
emptyDir: {}
{{- if .Values.prometheus.metricsTLSSecret }}
- name: metrics-certs
secret:
secretName: {{ .Values.prometheus.metricsTLSSecret }}
{{- end }}
initContainers:
# Copies the initial config files with the right permissions to the shared volume.
- name: cp-frr-files
image: {{ .Values.frrk8s.frr.image.repository }}:{{ .Values.frrk8s.frr.image.tag | default .Chart.AppVersion }}
securityContext:
runAsUser: 100
runAsGroup: 101
command: ["/bin/sh", "-c", "cp -rLf /tmp/frr/* /etc/frr/"]
volumeMounts:
- name: frr-startup
mountPath: /tmp/frr
- name: frr-conf
mountPath: /etc/frr
# Copies the reloader to the shared volume between the speaker and reloader.
- name: cp-reloader
image: {{ .Values.frrk8s.image.repository }}:{{ .Values.frrk8s.image.tag | default .Chart.AppVersion }}
command: ["/bin/sh", "-c", "cp -f /frr-reloader.sh /etc/frr_reloader/"]
volumeMounts:
- name: reloader
mountPath: /etc/frr_reloader
# Copies the metrics exporter
- name: cp-metrics
image: {{ .Values.frrk8s.image.repository }}:{{ .Values.frrk8s.image.tag | default .Chart.AppVersion }}
command: ["/bin/sh", "-c", "cp -f /frr-metrics /etc/frr_metrics/"]
volumeMounts:
- name: metrics
mountPath: /etc/frr_metrics
shareProcessNamespace: true
containers:
- name: controller
image: {{ .Values.frrk8s.image.repository }}:{{ .Values.frrk8s.image.tag | default .Chart.AppVersion }}
{{- if .Values.frrk8s.image.pullPolicy }}
imagePullPolicy: {{ .Values.frrk8s.image.pullPolicy }}
{{- end }}
command:
- /frr-k8s
args:
- "--node-name=$(NODE_NAME)"
- "--namespace=$(NAMESPACE)"
- "--metrics-bind-address={{.Values.prometheus.metricsBindAddress}}:{{ .Values.prometheus.metricsPort }}"
{{- with .Values.frrk8s.logLevel }}
- --log-level={{ . }}
{{- end }}
- --health-probe-bind-address={{.Values.prometheus.metricsBindAddress}}:{{ .Values.frrk8s.healthPort }}
{{- if .Values.frrk8s.alwaysBlock }}
- --always-block={{ .Values.frrk8s.alwaysBlock }}
{{- end }}
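# The optional always-block flag above is passed straight from
# .Values.frrk8s.alwaysBlock and is meant to hold prefixes that must never be
# accepted. Hypothetical value (verify the exact expected format against the
# frr-k8s documentation):
#
#   frrk8s:
#     alwaysBlock: "192.168.1.0/24,fc00:f853:ccd:e793::/64"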
env:
- name: FRR_CONFIG_FILE
value: /etc/frr_reloader/frr.conf
- name: FRR_RELOADER_PID_FILE
value: /etc/frr_reloader/reloader.pid
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: {{ .Values.prometheus.metricsPort }}
name: monitoring
{{- if .Values.frrk8s.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /healthz
port: {{ .Values.frrk8s.healthPort }}
host: {{ .Values.prometheus.metricsBindAddress }}
initialDelaySeconds: {{ .Values.frrk8s.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frrk8s.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frrk8s.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frrk8s.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.frrk8s.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frrk8s.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /healthz
port: {{ .Values.frrk8s.healthPort }}
host: {{ .Values.prometheus.metricsBindAddress }}
initialDelaySeconds: {{ .Values.frrk8s.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frrk8s.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frrk8s.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frrk8s.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.frrk8s.readinessProbe.failureThreshold }}
{{- end }}
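# Both probes above are driven entirely by values.yaml. A sketch of the knobs
# this template consumes (numbers are illustrative, not the chart defaults):
#
#   frrk8s:
#     livenessProbe:
#       enabled: true
#       initialDelaySeconds: 10
#       periodSeconds: 10
#       timeoutSeconds: 1
#       successThreshold: 1
#       failureThreshold: 3
#     readinessProbe:
#       enabled: true
#       # same fields as livenessProbe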
{{- with .Values.frrk8s.resources }}
resources:
{{- toYaml . | nindent 10 }}
{{- end }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
add:
- NET_RAW
volumeMounts:
- name: reloader
mountPath: /etc/frr_reloader
- name: frr
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
- NET_BIND_SERVICE
image: {{ .Values.frrk8s.frr.image.repository }}:{{ .Values.frrk8s.frr.image.tag | default .Chart.AppVersion }}
{{- if .Values.frrk8s.frr.image.pullPolicy }}
imagePullPolicy: {{ .Values.frrk8s.frr.image.pullPolicy }}
{{- end }}
env:
- name: TINI_SUBREAPER
value: "true"
volumeMounts:
- name: frr-sockets
mountPath: /var/run/frr
- name: frr-conf
mountPath: /etc/frr
# The command runs FRR's default entrypoint, then waits for the log file to appear and tails it.
# If the log file isn't created within 60 seconds, the tail fails and the container is restarted.
# This workaround is needed so the FRR logs show up in kubectl logs -c frr <controller_pod_name>.
command:
- /bin/sh
- -c
- |
/sbin/tini -- /usr/lib/frr/docker-start &
attempts=0
until [[ -f /etc/frr/frr.log || $attempts -eq 60 ]]; do
sleep 1
attempts=$(( $attempts + 1 ))
done
tail -f /etc/frr/frr.log
{{- with .Values.frrk8s.frr.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.frrk8s.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /livez
port: {{ .Values.frrk8s.frr.metricsPort }}
host: {{ .Values.frrk8s.frr.metricsBindAddress }}
periodSeconds: {{ .Values.frrk8s.livenessProbe.periodSeconds }}
failureThreshold: {{ .Values.frrk8s.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frrk8s.startupProbe.enabled }}
startupProbe:
httpGet:
path: /livez
port: {{ .Values.frrk8s.frr.metricsPort }}
host: {{ .Values.frrk8s.frr.metricsBindAddress }}
failureThreshold: {{ .Values.frrk8s.startupProbe.failureThreshold }}
periodSeconds: {{ .Values.frrk8s.startupProbe.periodSeconds }}
{{- end }}
- name: reloader
image: {{ .Values.frrk8s.frr.image.repository }}:{{ .Values.frrk8s.frr.image.tag | default .Chart.AppVersion }}
{{- if .Values.frrk8s.frr.image.pullPolicy }}
imagePullPolicy: {{ .Values.frrk8s.frr.image.pullPolicy }}
{{- end }}
command: ["/etc/frr_reloader/frr-reloader.sh"]
volumeMounts:
- name: frr-sockets
mountPath: /var/run/frr
- name: frr-conf
mountPath: /etc/frr
- name: reloader
mountPath: /etc/frr_reloader
{{- with .Values.frrk8s.reloader.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
- name: frr-metrics
image: {{ .Values.frrk8s.frr.image.repository }}:{{ .Values.frrk8s.frr.image.tag | default .Chart.AppVersion }}
command: ["/etc/frr_metrics/frr-metrics"]
args:
- --metrics-port={{ .Values.frrk8s.frr.metricsPort }}
- --metrics-bind-address={{ .Values.frrk8s.frr.metricsBindAddress }}
ports:
- containerPort: {{ .Values.frrk8s.frr.metricsPort }}
name: monitoring
volumeMounts:
- name: frr-sockets
mountPath: /var/run/frr
- name: frr-conf
mountPath: /etc/frr
- name: metrics
mountPath: /etc/frr_metrics
{{- with .Values.frrk8s.frrMetrics.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
- name: kube-rbac-proxy
image: {{ .Values.prometheus.rbacProxy.repository }}:{{ .Values.prometheus.rbacProxy.tag }}
imagePullPolicy: {{ .Values.prometheus.rbacProxy.pullPolicy }}
args:
- --logtostderr
- --secure-listen-address=:{{ .Values.prometheus.secureMetricsPort }}
- --upstream=http://{{.Values.prometheus.metricsBindAddress}}:{{ .Values.prometheus.metricsPort }}/
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
{{- if .Values.prometheus.metricsTLSSecret }}
- --tls-private-key-file=/etc/metrics/tls.key
- --tls-cert-file=/etc/metrics/tls.crt
{{- end }}
ports:
- containerPort: {{ .Values.prometheus.secureMetricsPort }}
name: metricshttps
resources:
requests:
cpu: 10m
memory: 20Mi
terminationMessagePolicy: FallbackToLogsOnError
{{- if .Values.prometheus.metricsTLSSecret }}
volumeMounts:
- name: metrics-certs
mountPath: /etc/metrics
readOnly: true
{{- end }}
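# When .Values.prometheus.metricsTLSSecret is set, the mount above expects a
# standard kubernetes.io/tls secret providing the tls.crt / tls.key files
# referenced by the flags. Hypothetical setup (secret name is an example only):
#
#   kubectl create secret tls metrics-certs --cert=tls.crt --key=tls.key \
#     -n <release-namespace>
#
#   prometheus:
#     metricsTLSSecret: metrics-certs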
- name: kube-rbac-proxy-frr
image: {{ .Values.prometheus.rbacProxy.repository }}:{{ .Values.prometheus.rbacProxy.tag }}
imagePullPolicy: {{ .Values.prometheus.rbacProxy.pullPolicy }}
args:
- --logtostderr
- --secure-listen-address=:{{ .Values.frrk8s.frr.secureMetricsPort }}
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- --upstream=http://{{ .Values.frrk8s.frr.metricsBindAddress }}:{{ .Values.frrk8s.frr.metricsPort }}/
{{- if .Values.prometheus.metricsTLSSecret }}
- --tls-private-key-file=/etc/metrics/tls.key
- --tls-cert-file=/etc/metrics/tls.crt
{{- end }}
ports:
- containerPort: {{ .Values.frrk8s.frr.secureMetricsPort }}
name: metricshttps
resources:
requests:
cpu: 10m
memory: 20Mi
terminationMessagePolicy: FallbackToLogsOnError
{{- if .Values.prometheus.metricsTLSSecret }}
volumeMounts:
- name: metrics-certs
mountPath: /etc/metrics
readOnly: true
{{- end }}
nodeSelector:
"kubernetes.io/os": linux
{{- with .Values.frrk8s.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.frrk8s.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.frrk8s.tolerateMaster .Values.frrk8s.tolerations }}
tolerations:
{{- if .Values.frrk8s.tolerateMaster }}
- key: node-role.kubernetes.io/master
effect: NoSchedule
operator: Exists
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
operator: Exists
{{- end }}
{{- with .Values.frrk8s.tolerations }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
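# The scheduling fields above combine the fixed linux nodeSelector, the optional
# master/control-plane tolerations, and anything supplied via values.yaml.
# Illustrative override (labels and keys are examples, not defaults):
#
#   frrk8s:
#     nodeSelector:
#       node-role.kubernetes.io/worker: ""
#     tolerations:
#       - key: "dedicated"
#         operator: "Equal"
#         value: "frr"
#         effect: "NoSchedule"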
{{- with .Values.frrk8s.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}